blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
97a0f2744a464e8b985f8af31d6623715d42c189 | 1ebbe94ff3c5475b01be75bdac07b6d0dea3d96c | /data_types_variables/ascii_table_print.py | 689cd9b7e84f225a660d254af1860c600a2758bf | [] | no_license | konsatanasoff/python-fundameltals-2020 | fae71bac5254d5102019d6493af90f44b3083ccd | 922d4b15394654bd474f56c69ef4e2f855aff0a1 | refs/heads/master | 2023-01-30T12:20:37.837293 | 2020-12-13T20:53:40 | 2020-12-13T20:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | start = int(input())
end = int(input())
sum = ""
for i in range(start, end + 1):
sum += chr(i) + " "
print(sum, end=' ')
| [
"[email protected]"
] | |
0d42a1d1d1761dbd3245d8a6f400581fb6f1dcba | 711504efdf973fecea6071b2c74e1ce7edeeab9d | /venv/Scripts/easy_install-3.6-script.py | a49bdf015e009572cd5d86750183a7507fb78a02 | [] | no_license | lost222/pycharm-git | 182addf8d8f4ac72081f823cee0e41cb7b6c914c | 14c588cdd984c43431fc524ecb76ffa38c31c470 | refs/heads/master | 2021-04-27T00:18:43.327974 | 2018-03-04T13:37:01 | 2018-03-04T13:37:01 | 123,791,253 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 466 | py | #!C:\Users\ÑîÆð\PycharmProjects\untitled4\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
adeabef3f494bc9c41687167b509aa615f902b2a | 03ad802be7a8658be0fe301f1bde9675dd7735e0 | /clionly.py | a1ab05bfc324aead4d1f0b1ab75aadf35d9dae06 | [] | no_license | Sonnenlicht/BasicPython | 466544d8c4e8bf063ee4ae509c871645288c7e71 | bf36d60b101c1f2c11d09045bf125160f68cb149 | refs/heads/master | 2021-01-16T21:40:03.450477 | 2016-07-14T23:46:30 | 2016-07-14T23:46:30 | 63,377,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | #! python3
# Command line only
import sys
if __name__ == "__main__":
print('Hello world!')
else:
#print('This module can only be run from the command-line')
raise ImportError("This module can only be run from the command-line")
| [
"[email protected]"
] | |
98a0fa51dd4d6202ca092845161834059bd7de61 | cea0fe3ca380e9d81fc1e9d3830504f79ae2cf13 | /pyThesaurus-1.1-dev-r371/pyThesaurus/Thesaurus.py | 9e87cf674bd0d18dbcf4ce045b8d5a7bd0f47cf0 | [] | no_license | Uniltaron/Projektseminar | 93551bb424db091e183de1ad90165300ded9bcec | 8436e5b64f84ba6f715c8e062473b22bcf836d23 | refs/heads/master | 2021-01-19T20:29:46.944008 | 2013-04-01T01:43:12 | 2013-04-01T01:43:12 | 8,251,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,878 | py | # begin: Platecom header
# -*- coding: latin-1 -*-
# vim: set ts=4 sts=4 sw=4 :
#
# $Id: Thesaurus.py 361 2008-07-31 22:07:50Z flarumbe $
#
# end: Platecom header
from config import *
from Concept import Concept
import sys  # used by safe_regular_expression below, which formats sys.exc_value into its error message
def _join_(self, s, l=[]):
"""
_join_(object, str, list<str>) -> list<str>
Get a list of terms@lang in supported languages
"""
return [ "%s@%s" % (s, i) for i in l if l in self._lang ]
def _split_(self, s):
"""
_split_(object, str) -> str, str
Separe the term and the language from the string
"""
return s.split("@")
class Thesaurus:
def __init__(self, lang=[], TDict=dict, TList=list):
"""
__init__(object) -> Thesurus
Init the Thesaurus.
"""
self._dict = TDict
self._list = TList
self._concept = self._dict()
self._term = self._dict()
self._context = self._dict()
self._last_concept_id = -1
self._lang = self._list()
def get_prefered(self, t, lang=[], contexts=None):
"""
get_prefered(object, str, list<str>) -> list<str>
Return prefered terms of term t in all languages in lang. If lang is [] return all terms.
"""
cs = self(t, contexts)
ts = []
for c in cs:
ts += self._concept[c].get_prefered(lang)
return ts
def set_prefered(self, t, cid):
"""
set_prefered(object, str, cid) -> None
Set the prefered term t in some language in concept cid
"""
self._concept[cid].set_prefered(t)
def get_equivalent(self, t, lang=[], contexts=None, exclude = False):
"""
get_equivalent(object, str, list<str>) -> list<str>
Return equivalent terms of term t in all languages in lang. If lang is [] return all terms.
"""
cid = self(t, contexts)
ts = []
for c in cid:
ts += self[c]['=']
ts = dict([ ("%s@%s" % (_t,_l), None) for _t,_l in [ t_.split('@') for t_ in ts ] if _l in lang ]).keys()
if exclude and t in ts:
ts.remove(t)
return ts
def get_similars(self, t, lang=[], contexts=None):
"""
get_similars(object, str, list<str>) -> list<str>
Return similar terms of term t in all languages in lang. If lang is [] return all terms.
"""
cid = self(t, contexts)
ts = []
for c in cid:
ts += self[c]['~']
ts = dict([ ("%s@%s" % (t,l), None) for t,l in [ t.split('@') for t in ts ] if l in lang ]).keys()
return ts
def get_broader(self, t, lang=[], contexts=None):
"""
get_broader(object, str, list<str>) -> list<str>
Return broaders terms of term t in all languages in lang. If lang is [] return all terms.
"""
cid = self(t, contexts)
ts = []
for c in cid:
ts += self[c]['<']
ts = dict([ ("%s@%s" % (t,l), None) for t,l in [ t.split('@') for t in ts ] if l in lang ]).keys()
return ts
def get_narrower(self, t, lang=[], contexts=None):
"""
get_narrower(object, str, list<str>) -> list<str>
Return narrower terms of term t in all languages in lang. If lang is [] return all terms.
"""
cid = self(t, contexts)
ts = []
for c in cid:
ts += self[c]['>']
ts = dict([ ("%s@%s" % (t,l), None) for t,l in [ t.split('@') for t in ts ] if l in lang ]).keys()
return ts
def get_related(self, t, lang=[], exclude=True, contexts=None):
"""
get_related(object, str, list<str>) -> list<str>
Return related terms of term t in all languages in lang. If lang is [] return all terms.
"""
cid = self(t, contexts)
ts = []
hidden = False
for c in cid:
ts += self[c]['-']
hidden = hidden or t in self[c]['#']
ts = dict([ ("%s@%s" % (_t,_l), None) for _t,_l in [ t_.split('@') for t_ in ts ] if _l in lang ]).keys()
if exclude or hidden:
return ts
else:
return ts + [ t ]
def get_concepts(self, ts, contexts=None):
"""
get_concepts(object, list<str>) -> int
Return the concepts id associated to all terms in ts.
"""
cid = []
for t in ts:
cid += self(t, contexts)
return dict([ (c, None) for c in cid ]).keys()
def get_publicNotes(self, c):
"""
get_publicNotes(object, int) -> dict
Return the dict of public notes of a concept.
get_publicNotes(c).keys() = [ 'definition', 'scopeNote', 'example', 'historyNote', 'class' ]
get_publicNotes(c)[x].keys() = [ 'value', 'date', 'creator' ]
"""
return self[c]._pubn
def get_privateNotes(self, c):
"""
get_privateNotes(object, int) -> dict
Return the dict of private notes of a concept.
get_privateNotes(c).keys() = [ 'editorialNote', 'changeNote' ]
get_privateNotes(c)[x].keys() = [ 'value', 'date', 'creator' ]
"""
return self[c]._privn
def terms(self, contexts=None):
"""
terms(object) -> list<str>
Return the list of terms of the thesauro
"""
return self._term.keys()
def concepts(self, contexts=None):
"""
terms(object) -> list[int]
Return the list of concepts id of the thesaurus
"""
return self._concept.keys()
def concepts_objects(self, contexts=None):
"""
terms(object) -> list[object]
Return the list of concepts of the thesaurus
"""
return self._concept.values()
def contexts(self):
"""
contexts(object) -> list<str>
Return a list of classes.
"""
return self._context.keys()
def search_term(self, str, contexts=None):
"""
search_term(str) -> list<str>
Return a list of similar terms to the str
"""
import re
cre = re.compile(str)
return [ k for k in self._term.keys() if cre.search(k) ]
def __getitem__(self, idx):
"""
__getitem__(object, int) -> dict
Return the concept dict of idx.
__getitem__(idx).keys() = [ '=', '#', '<', '>', '-', '~', concept_id... ]
if c not defined(object, raise IndexError, 'not defined'
"""
if idx in self._concept:
return self._concept[idx]
else:
raise IndexError, 'not defined'
def __call__(self, t, contexts=None):
"""
__call__(object, str, list<string>) -> list<int>
Return the concept id of the term.
if t not defined(object, raise IndexError, 'not defined'
"""
if t in self._term:
return [ cid for cid in self._term[t] if self.in_contexts(cid, contexts) ]
else:
raise IndexError, 'not defined'
def in_contexts(self, cid, contexts):
return contexts==None or contexts==[] or sum([ xc==xi for xc in self._concept[cid]._contexts for xi in contexts ])
def exist_term(self, t, contexts=None):
"""
exist_term(object, str, list<string>) -> list<int>
Return true if t is defined in contexts.
"""
return t in self._term and len(self(t, contexts)) > 0
def append_term(self, t, et=[], ht=[], net=[], bt=[], nt=[], rt=[], st=[], contexts=None, automatic=False):
"""
append_term(object, str, int, et=list<str>, ht=list<str>, net=list<str>, bt=list<str>,
nt=list<str>, rt=list<str>, st=list<str>, automatic=bool,
contexts=list<str>) -> None
Add a new term t of class c with the following relations.
"""
append_concept_id = Concept(et=[t] + et, ht=ht, net=net, bt=bt, nt=nt, st=st, rt=rt, contexts=contexts)
old_concept_at = self._previous_concept_to_join(append_concept_id)
if old_concept_at is None:
self.append_concept(append_concept_id)
elif automatic:
self.replace_concept_at(self[old_concept_at].join_to(append_concept_id), old_concept_at)
else:
raise ConceptsConflict, t
def _previous_concept_to_join(self, append_concept, M=matrix_comparation, S=minimum_score):
"""
_previous_concept_to_join(object, Concept, M=list<list<int>>, S=int) -> int
Return the most equal concept in the thesaurus.
"""
for idx in self._concept:
if append_concept.could_be_joined_to(self[idx], M, S): return idx
return None
def append_concept(self, c):
"""
append_concept(object, Concept) -> Id
Add a new concept to the Thesaurus, and return the concept id.
"""
self._last_concept_id += 1
self._concept[self._last_concept_id] = c
for context in c.contexts():
if not context in self._context: self._context[context] = self._list()
self._context[context].append(self._last_concept_id)
self._terms_belong_to_concept(c['='] + c['#'], self._last_concept_id)
return self._last_concept_id
def replace_concept_at(self, concept, idx):
"""
replace_concept_at(object, Concept, int) -> None
Replaces the concept with idx id with the new concept concept.
"""
self._concept[idx] = concept
self._terms_belong_to_concept(concept.et, idx)
def delete_concept(self, cid):
"""
delete_concept(object, int) -> Id
Remove a concept from the Thesaurus and its relationships.
"""
del self._concept[cid]
self.remove_concept_from(cid, self._term)
self.remove_concept_from(cid, self._context)
def remove_concept_from(self, cid, dic):
for (t, cs) in dic.items():
if cid in cs:
cs.remove(cid)
if len(cs) == 0:
del dic[cid]
def _terms_belong_to_concept(self, terms, concept_idx):
"""
_terms_belong_to_concept(list<str>, int) -> None
Associate terms belongs to concept concept_idx.
"""
for t in terms:
if t in self._term:
if concept_idx not in self._term[t]:
self._term[t].append(concept_idx)
else:
self._term[t] = [concept_idx]
def get_terms_of_context(self, context):
"""
get_terms_of_context(object, context) -> list<str>
Return the terms of a context.
"""
result = []
for concept_id in self._context[context]:
concept = self._concept[concept_id]
result.append(concept['='])
return result
def term_concepts(self, term, context=None):
return self.concepts_from_ids(self.term_concepts_ids(term, context))
def term_concepts_ids(self, term, context=None):
return self.get_concepts([term], context)
def concepts_search(self, search_expression, context=None):
reg = self.safe_regular_expression(search_expression)
return [ (cid, self[cid]) for cid in self.concepts() if self[cid].match(reg) ]
def concepts_search_ids(self, search_expression, context=None):
return [ cid for (cid, concept) in self.concepts_search(search_expression, context) ]
def concepts_search_objects(self, search_expression, context=None):
return [ concept for (cid, concept) in self.concepts_search(search_expression, context) ]
def concepts_from_ids(self, cids):
return [ self[cid] for cid in cids ]
def correct_index(self, first_result, concepts_count):
if first_result < 0 or concepts_count == 0:
first_result = 0
elif first_result > concepts_count - 1:
first_result = concepts_count - 1
return first_result
def query( self, search_expression,
narrowerthan = None, broaderthan = None, contexts = [], languages = [],
inbranch = None, hidden = None, max_results = 5, first_result = 0 ):
"""
Return terms in regexp, between 'broaderthan' and 'narrowerthan' terms, in defined contexts. If inbrach is a list of terms, then returned terms are in the same branch of these. Dont return terms in except. The precedence is (TODO):
1) Concepts whose prefered term starts with search_expression.
2) Concepts whose prefered term has a word starting with search_expression.
3) Concepts whose prefered term has search_expression inside.
4) Concepts whose equivalent terms has search_expression inside.
context == None: accept terms in all contexts.
narrowerthan == None: no limits on top of thesaurus.
broaderthan == None: no limits on bottom of thesaurus.
inbranch == None: all terms.
except == None: all terms.
@regexp: Regular expresion.
@narrowerthan: List of terms.
@broaderthan: List of terms.
@contexts: List of contexts.
@inbranch: List of terms.
@except: List of terms.
@return: List of terms.
>>> T.query('B.*', narrowerthan=['Tierra'], broaderterms=None, contexts=['geographic']
inbranch=['America'], except=None)
[ ... 'Bogota', 'Buenos Aires', 'Bolivia', ... ]
>>> T.query('B.*', narrowerthan=['Escritor', 'America'],
broaderterms=None, contexts=['geographic', 'literatura']
inbranch=['Argentina'], except=['Borges']
[ ... 'Cortaza', 'Sabato' ... ]
"""
conceptsResults = self.concepts_search(search_expression, contexts)
conceptsResults = self.refine_search(conceptsResults, narrowerthan, broaderthan, contexts, languages, inbranch)
conceptsResults = self.sort_by_priority(conceptsResults, search_expression, languages)
first_result = self.correct_index(first_result, len(conceptsResults))
return { 'concepts': [ (conceptsResults[index][1].get_prefered(languages)[0], conceptsResults[index][0]) for index in range(first_result, min(len(conceptsResults), first_result+max_results)) ],
'concepts_count': len(conceptsResults) }
def refine_search(self, conceptsResults, narrowerthan, broaderthan, contexts, languages, inbranch):
return [ (cid, concept) for (cid, concept) in conceptsResults if self.is_in_all_concepts_transitive_relation('<', concept, narrowerthan, contexts) and self.is_in_all_concepts_transitive_relation('>', concept, broaderthan, contexts) and self.in_contexts(cid, contexts) ]
def is_in_all_concepts_transitive_relation(self, relation, concept, limits, contexts):
if limits != None:
for term in limits:
if not self.is_in_concepts_transitive_relation(relation, concept, term, contexts):
return False
return True
def is_in_concepts_transitive_relation(self, relation, concept, term, contexts):
for t in concept[relation]:
if t == term:
return True
elif self.exist_term(t, contexts):
for cid in self(t, contexts):
if self.is_in_concepts_transitive_relation(relation, self[cid], term, contexts):
return True
return False
def is_broader_than_all(self, concept, term, broaderthan):
# TODO: REPLACE
return True
def sort_by_priority(self, conceptsResults, search_expression, languages):
reg = self.safe_regular_expression(search_expression)
startWithResults = self.concepts_prefered_terms_start_with(conceptsResults, reg, languages)
preferedIncludesResults = self.concepts_prefered_terms_match(conceptsResults, reg, languages)
return self.join_without_duplicates([startWithResults, preferedIncludesResults, conceptsResults])
def join_without_duplicates(self, css):
concepts_added = {}
concepts_list = []
for cs in css:
for tup in cs:
if not concepts_added.has_key(tup[0]):
concepts_added[tup[0]] = 1
concepts_list.append(tup)
return concepts_list
def concepts_prefered_terms_start_with(self, conceptsResults, reg, languages):
return [ (cid, concept) for (cid, concept) in conceptsResults if concept.prefered_terms_start_with(reg, languages) ]
def concepts_prefered_terms_match(self, conceptsResults, reg, languages):
return [ (cid, concept) for (cid, concept) in conceptsResults if concept.prefered_terms_match(reg, languages) ]
def safe_regular_expression(self, search_expression):
import re
try:
reg = re.compile(search_expression, re.IGNORECASE)
return reg
except:
raise KeyError, "Invalid search expression: %s\n%s" % (search_expression, sys.exc_value)
class ConceptsConflict(Exception):
def __init__(self, value):
self.value = value
def __call__(self):
return str(self.value)
| [
"[email protected]"
] | |
62db8ed5f3c02a9e5e943c507938b3956708f48b | 832793d1146296ad0740b2c1ba0abc3870db04dd | /thunder_backup/liff/views.py | be06e4686ce641fbdff01ee0ea02f942c3b4ad58 | [] | no_license | mayo233/todolist | 161d7b1b2fc46f347e16fc6786bdf10200995cc7 | 85fa4fe03ee114b245a4a9ec0bc68709a2abf483 | refs/heads/main | 2023-06-03T08:45:46.779620 | 2021-06-14T17:06:43 | 2021-06-14T17:06:43 | 376,892,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from django.http.response import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'liff/index.html') | [
"[email protected]"
] | |
e4226e9a6f0e6afb491866424386c3e226400951 | c68b2f6eb401e9fbe3ecf7fc07257817f9c2e18d | /src/model/edsr.py | dd686890e2337914d66d519bee0fe203716ab92c | [] | no_license | Shualite/PCAN | d0ee4f756913065d73c79120af313e476017cfa1 | 7f306a8816b699b4b6504f703330c3cfe612595a | refs/heads/main | 2023-09-01T22:10:53.875021 | 2021-11-01T03:09:14 | 2021-11-01T03:09:14 | 423,322,160 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | import torch
import torch.nn as nn
import math
from IPython import embed
class MeanShift(nn.Conv2d):
def __init__(self, rgb_mean, sign):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.bias.data = float(sign) * torch.Tensor(rgb_mean)
# Freeze the MeanShift layer
for params in self.parameters():
params.requires_grad = False
class _Residual_Block(nn.Module):
def __init__(self):
super(_Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
identity_data = x
output = self.relu(self.conv1(x))
output = self.conv2(output)
output *= 0.1
output = torch.add(output,identity_data)
return output
class EDSR(nn.Module):
def __init__(self, scale_factor=2):
super(EDSR, self).__init__()
rgb_mean = (0.4488, 0.4371, 0.4040)
self.sub_mean = MeanShift(rgb_mean, -1)
self.conv_input = nn.Conv2d(in_channels=3, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)
self.residual = self.make_layer(_Residual_Block, 32)
self.conv_mid = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)
upscale_block = []
for i in range(int(math.log2(scale_factor))):
upscale_block.append(
nn.Conv2d(in_channels=256, out_channels=256*4, kernel_size=3, stride=1, padding=1, bias=False),
)
upscale_block.append(nn.PixelShuffle(2))
self.upscale = nn.Sequential(
*upscale_block
)
self.conv_output = nn.Conv2d(in_channels=256, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)
self.add_mean = MeanShift(rgb_mean, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def make_layer(self, block, num_of_layer):
layers = []
for _ in range(num_of_layer):
layers.append(block())
return nn.Sequential(*layers)
def forward(self, x):
out = self.sub_mean(x)
out = self.conv_input(out)
residual = out
out = self.conv_mid(self.residual(out))
out = torch.add(out,residual)
out = self.upscale(out)
out = self.conv_output(out)
out = self.add_mean(out)
return out
if __name__=='__main__':
embed() | [
"[email protected]"
] | |
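A quick sanity check for the EDSR model above is to push a dummy batch through it and confirm the ×2 upscaling. The sketch below is illustrative only: it assumes PyTorch is available and that it runs in (or imports from) the module above; the 32×32 input size is an arbitrary choice, not a value from the original repository.

```python
import torch

# from edsr import EDSR  # assuming the module above is importable as `edsr`

model = EDSR(scale_factor=2)
model.eval()
lr = torch.randn(1, 3, 32, 32)         # fake low-resolution RGB batch
with torch.no_grad():
    sr = model(lr)
# One PixelShuffle stage per factor of 2, so the output is twice the input size.
print(sr.shape)                        # torch.Size([1, 3, 64, 64])
```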
3b59957514af2caceea80eae22b19dd7ea646767 | 43c7490d67ac0f122249e53d9540cf1772ecec45 | /parking/shared/rest_models.py | 89e560f8f27884289a25bd24b221dc002c079553 | [] | no_license | manfredlift/parking | 4882bac59bf570408452d48f5634b249f78bd782 | 65acd71f4df16a1f511ff74369a332e61b995d70 | refs/heads/master | 2020-03-17T14:55:34.538109 | 2018-05-24T09:19:43 | 2018-05-24T09:19:43 | 133,691,530 | 0 | 0 | null | 2018-05-16T16:03:22 | 2018-05-16T16:03:22 | null | UTF-8 | Python | false | false | 1,079 | py | import attr
from parking.shared.location import Location
from parking.shared.util import ensure, validate_non_neg, validate_pos
@attr.s
class ParkingLot:
capacity: int = attr.ib(validator=[attr.validators.instance_of(int), validate_pos])
name: str = attr.ib(validator=attr.validators.instance_of(str))
price: float = attr.ib(validator=[attr.validators.instance_of(float), validate_non_neg])
location: Location = attr.ib(converter=ensure(Location), validator=attr.validators.instance_of(Location))
id: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
@attr.s
class ParkingLotCreationResponse:
id: int = attr.ib(validator=[attr.validators.instance_of(int), validate_non_neg], default=0)
errors: list = attr.ib(validator=attr.validators.instance_of(list), factory=list)
@attr.s
class SpaceAvailableMessage:
available: int = attr.ib(validator=[attr.validators.instance_of(int), validate_non_neg])
@attr.s
class SpacePriceMessage:
price: float = attr.ib(validator=[attr.validators.instance_of(float), validate_non_neg])
| [
"[email protected]"
] | |
61b784153c320622f4cee73ed3dc4ca481c8393a | 019ac6dfab10f38d93ad6d983123a0d03e8b2488 | /tonal_test.py | 6aa88fdcd1f2e219aca6abfb86c7fb553a6988ad | [] | no_license | ivanstaryu/machine_music | 3d351982f14225621bd8956b63905cf2b03a96e2 | 93c66903c23286212129ccc6c3700bbbf032bc72 | refs/heads/master | 2023-03-17T00:26:22.968999 | 2018-05-12T21:52:27 | 2018-05-12T21:52:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import unittest
import tonal
import mingus.core.notes
from scales import SCALE_NAMES, TonalScale
import mingus.core.scales as scales
notes = mingus.core.notes
to = tonal.Tonal()
ts = TonalScale()
class TestTonal(unittest.TestCase):
def test_scale_octave(self):
self.assertEqual(12, to.add_octave(0, 1))
def test_to_int(self):
self.assertEqual(0, to.note_to_int("C"))
self.assertEqual(11, notes.note_to_int("Cb"))
with self.assertRaises(TypeError):
to.note_to_int(0)
def test_pick_scale(self):
self.assertIsNotNone(to.pick_scale())
self.assertIn(to.pick_scale(), SCALE_NAMES)
def test_pick_base_note(self):
self.assertIn(notes.note_to_int(to.pick_base_note()), range(0, 13))
class TestScales(unittest.TestCase):
def test_select_scale(self):
self.assertIsInstance(
ts.select_scale("HarmonicMajor", "C"),
scales.HarmonicMajor
)
| [
"[email protected]"
] | |
8ddbfe8ab9df2f58b47bb9ef5f93de40257fa062 | 351d1501f9ac83206c95c2750bf51513b6c50585 | /Mutation.py | 267d72c2e0e48672963538725a3198166544fa7a | [] | no_license | profharimohanpandey/GA_Implementation | 809123daf2a2abb4cfa3477057955353a5243938 | b2c09db526e3c94bf404e507fd7ac6aa0b639781 | refs/heads/master | 2021-05-10T18:41:56.004784 | 2018-01-19T13:59:51 | 2018-01-19T13:59:51 | 118,131,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | def mutate(self, network):
"""Randomly mutate one part of the network.
Args:
network (dict): The network parameters to mutate
"""
# Choose a random key.
mutation = random.choice(list(self.nn_param_choices.keys()))
# Mutate one of the params.
network.network[mutation] = random.choice(self.nn_param_choices[mutation])
return network
| [
"[email protected]"
] | |
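The `mutate` function above is an excerpt of a method: it assumes `random` is imported and that `self.nn_param_choices` comes from the surrounding optimizer class. A self-contained sketch of the same idea, with a made-up hyperparameter space:

```python
import random

# Hypothetical search space; the real nn_param_choices lives on the optimizer class.
nn_param_choices = {
    'nb_neurons': [64, 128, 256, 512],
    'nb_layers': [1, 2, 3, 4],
    'activation': ['relu', 'elu', 'tanh'],
    'optimizer': ['adam', 'sgd'],
}

def mutate(network_params):
    """Randomly re-draw one hyperparameter from its list of legal values."""
    key = random.choice(list(nn_param_choices.keys()))
    network_params[key] = random.choice(nn_param_choices[key])
    return network_params

network = {'nb_neurons': 128, 'nb_layers': 2, 'activation': 'relu', 'optimizer': 'adam'}
print(mutate(dict(network)))           # one field re-drawn at random
```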
53221261103f62df267243163280b0d29520d6f0 | d0dc833a66e273f4620f0ed8cd6952e0d36c692e | /main.py | 77c535df48b6b1c237979aecf0df07cbbc991acc | [
"MIT"
] | permissive | huhumt/pi-weather | b999ef703ed8ed5e7f2c847c4b00b46cad20dee7 | 6b76ece39c6fb3cc66ef84ce20d186a8ac2df90c | refs/heads/master | 2021-08-19T20:05:14.622968 | 2017-11-27T09:51:30 | 2017-11-27T09:51:30 | 109,334,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from get_weather import weather_info
from text_to_speech import tts_engine_voicerss
def main():
"""
this is the main entry of the project
"""
forcast = weather_info("./config.json", "./city_list.json")
weather_response = forcast.request_server()
weather_parse = forcast.parse_weather(weather_response)
weather_forcast = forcast.generate_weather_string(weather_parse)
tts_engine_voicerss("./config.json", weather_forcast)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b0155ad0ed13c971cdf30436d585cb8a9d9318d9 | 77b16dcd465b497c22cf3c096fa5c7d887d9b0c2 | /Horan_Colby/Assignments/great_number_game/gng.py | 936d2ca5c0a84843305b1bdbb8dfd1c764cfbaab | [
"MIT"
] | permissive | curest0x1021/Python-Django-Web | a7cf8a45e0b924ce23791c18f6a6fb3732c36322 | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | refs/heads/master | 2020-04-26T17:14:20.277967 | 2016-10-18T21:54:39 | 2016-10-18T21:54:39 | 173,706,702 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import session
import random
app = Flask(__name__)
app.secret_key = 'SEEEEEEEECRET'
@app.route('/')
def index():
if not 'bingo' in session:
session['bingo'] = random.randint(0,100)
session['hidetoggle'] = 'hide'
return render_template('gng.html')
@app.route('/guess', methods=['post'])
def guess():
guess = int(request.form['guess'])
if guess < session['bingo']:
session['try'] = 'Too Fucking Low!'
session['class'] = 'red'
elif guess > session['bingo']:
session['try'] = "Too Fucking High!"
session['class'] = 'red'
else:
session['try'] = 'YOU FUCKING GOT IT!'
session['class'] = 'green'
session['hidetoggle'] = 'show'
return redirect('/')
@app.route('/reset', methods= ['post'])
def reset():
session.clear()
return redirect('/')
app.run(debug=True)
| [
"[email protected]"
] | |
fc6181d4d4c8cf151123dee3de63d63ce68e97ec | ed0f9eb0c1cb4858d91ef7e2d435db307f23a5a5 | /dist/manage/django/conf/locale/tr/formats.py | c231c93977e377b76ab7aec4fbcc111ccb8f0abf | [] | no_license | hjlhehehe123/ATC_Data | 81b4622e7279aa9cc2013db8cc5a71d33561e768 | ad35e61afb8e87d8bab2d2b3aeea08e9409d56c0 | refs/heads/master | 2023-07-13T16:23:45.951584 | 2021-08-20T12:37:34 | 2021-08-20T12:37:34 | 256,994,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'd F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i'
FIRST_DAY_OF_WEEK = 1 # Pazartesi
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%y-%m-%d', # '06-10-25'
# '%d %B %Y', '%d %b. %Y', # '25 Ekim 2006', '25 Eki. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| [
"[email protected]"
] | |
baaa7bd14df062ec3185125c7a9d62977def5c7a | ccbe6552deddbad3e8456d4c5fab4ad5db63cabe | /snapshot_v0.py | 0d15ab8b6a4df5c347bba6340f9aa98009247a82 | [
"MIT"
] | permissive | hyzzd/network-analysis | aad75abb6cfd5ca813aaf9718dabf53ba39ff47a | c655867ad1762bb9052aafbbeb41cb6951dd2863 | refs/heads/master | 2020-03-19T16:44:47.424799 | 2018-06-09T14:04:21 | 2018-06-09T14:04:21 | 136,728,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | src_path = "dataset/"
main_file = "2018.csv" #"First Set of Data - Transactions.csv"
import csv
import collections
import pprint
with open(main_file, "rb") as fp:
root = csv.reader(fp, delimiter=',')
result = collections.defaultdict(list)
# ignore the header
#next(root, None)
for row in root:
year = row[6].split("-")[1]
result[year].append(row)
print "Result:-"
#pprint.pprint(result)
for i,j in result.items():
file_path = "%s2018%s.csv"%(src_path, i)
with open(file_path, 'wb') as fp:
writer = csv.writer(fp, delimiter=',')
writer.writerows(j) | [
"[email protected]"
] | |
6e48bdf04c597e0bc5a74e637d938b26abd117c5 | a17a15211ac8b47d01c616ae87fccb0e95d8298d | /return_to_origin.py | 5f53e244b0ce3f93b2a85a68ee48fdf862d24807 | [] | no_license | prosis369/Data-Structures | 68a12a95db530adc153ca6b38b4310f5ab321f5a | cbfdb37f3145066285bf463360f9486bb64fe580 | refs/heads/master | 2021-06-25T12:10:52.894381 | 2020-11-11T08:54:17 | 2020-11-11T08:54:17 | 149,286,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | '''
657. Robot Return to Origin
There is a robot starting at position (0, 0), the origin, on a 2D plane. Given a sequence of its moves, judge if this robot ends up at (0, 0) after it completes its moves.
The move sequence is represented by a string, and the character moves[i] represents its ith move. Valid moves are R (right), L (left), U (up), and D (down). If the robot returns to the origin after it finishes all of its moves, return true. Otherwise, return false.
Note: The way that the robot is "facing" is irrelevant. "R" will always make the robot move to the right once, "L" will always make it move left, etc. Also, assume that the magnitude of the robot's movement is the same for each move.
Example 1:
Input: "UD"
Output: true
Explanation: The robot moves up once, and then down once. All moves have the same magnitude, so it ended up at the origin where it started. Therefore, we return true.
'''
class Solution:
def judgeCircle(self, moves: str) -> bool:
start = [0,0]
for i in moves:
if i == 'L':
start[0] = start[0]-1
elif i =='R':
start[0] = start[0]+1
elif i == 'U':
start[1] = start[1]+1
else:
start[1] = start[1]-1
if start == [0,0]:
return True
else:
return False | [
"[email protected]"
] | |
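An equivalent way to express the same check is to count the moves and require the opposite directions to cancel; a short alternative sketch (not the solution above):

```python
from collections import Counter

def judge_circle(moves: str) -> bool:
    # The robot is back at the origin iff lefts cancel rights and ups cancel downs.
    counts = Counter(moves)
    return counts['L'] == counts['R'] and counts['U'] == counts['D']

assert judge_circle("UD") is True
assert judge_circle("LL") is False
```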
b5919d81dd0c5e583bc7ef6b72ce82c72879c2d1 | 48d52acfdc03388338967b1020894926cf63be07 | /tutorial/quickstart/views.py | 3a8a9250d9a93d301ed8f4eb9492123542f97a73 | [] | no_license | KimochiiStyx/Come_on_Django | f84df689265620fbacc8e9dcc3a7c5e4f5826f64 | 9397bfe800c2c7e1188cc1752dcd27d94c43a0ee | refs/heads/master | 2021-05-11T15:27:39.704349 | 2018-02-02T12:32:47 | 2018-02-02T12:32:47 | 117,451,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from django.contrib.auth.models import User, Group
from models import Company, Person
from rest_framework import viewsets
from tutorial.quickstart.serializers import UserSerializer, GroupSerializer, CompanySerializer, PersonSerializer
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
class CompanyViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows companies to be viewed or edited.
"""
queryset = Company.objects.all()
serializer_class = CompanySerializer
class PersonViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows persons to be viewed or edited.
"""
queryset = Person.objects.all()
serializer_class = PersonSerializer | [
"[email protected]"
] | |
2c82b1ab8790a344e35364649acf9e32e55e9933 | 0a38890df3e399fbae4cde9c4af5ac3270e8cd29 | /Notes/Examples of HMMs/hmm_from_scratch.py | 75a88b8bf69c39a581d58bb049c21fef7a89cfc1 | [] | no_license | likitha-9/Gene-Prediction-Using-HMM-Stochastic | a38e301a01ccff642319da0277a1a6dc92135939 | 93c0ca3e0c946ba6fdb7243ec869fe7b770dad54 | refs/heads/master | 2022-07-27T08:58:04.965277 | 2020-05-16T23:38:05 | 2020-05-16T23:38:05 | 255,749,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,601 | py | import numpy as np
import pandas as pd
class ProbabilityVector:
def __init__(self, probabilities: dict):
states = probabilities.keys()
probs = probabilities.values()
assert len(states) == len(probs),"The probabilities must match the states."
assert len(states) == len(set(states)),"The states must be unique."
assert abs(sum(probs) - 1.0) < 1e-12, "Probabilities must sum up to 1."
assert len(list(filter(lambda x: 0 <= x <= 1, probs))) == len(probs), "Probabilities must be numbers from [0, 1] interval."
self.states = sorted(probabilities)
self.values = np.array(list(map(lambda x:
probabilities[x], self.states))).reshape(1, -1)
@classmethod
def initialize(cls, states: list):
size = len(states)
rand = np.random.rand(size) / (size**2) + 1 / size
rand /= rand.sum(axis=0)
return cls(dict(zip(states, rand)))
@classmethod
def from_numpy(cls, array: np.ndarray, state: list):
return cls(dict(zip(states, list(array))))
@property
def dict(self):
return {k:v for k, v in zip(self.states, list(self.values.flatten()))}
@property
def df(self):
return pd.DataFrame(self.values, columns=self.states, index=['probability'])
def __repr__(self):
return "P({}) = {}.".format(self.states, self.values)
def __eq__(self, other):
if not isinstance(other, ProbabilityVector):
raise NotImplementedError
if (self.states == other.states) and (self.values == other.values).all():
return True
return False
def __getitem__(self, state: str) -> float:
if state not in self.states:
raise ValueError("Requesting unknown probability state from vector.")
index = self.states.index(state)
return float(self.values[0, index])
def __mul__(self, other) -> np.ndarray:
if isinstance(other, ProbabilityVector):
return self.values * other.values
elif isinstance(other, (int, float)):
return self.values * other
else:
NotImplementedError
def __rmul__(self, other) -> np.ndarray:
return self.__mul__(other)
def __matmul__(self, other) -> np.ndarray:
if isinstance(other, ProbabilityMatrix):
return self.values @ other.values
def __truediv__(self, number) -> np.ndarray:
if not isinstance(number, (int, float)):
raise NotImplementedError
x = self.values
return x / number if number != 0 else x / (number + 1e-12)
def argmax(self):
index = self.values.argmax()
return self.states[index]
class ProbabilityMatrix:
def __init__(self, prob_vec_dict: dict):
assert len(prob_vec_dict) > 1, \
"The numebr of input probability vector must be greater than one."
assert len(set([str(x.states) for x in prob_vec_dict.values()])) == 1, \
"All internal states of all the vectors must be indentical."
assert len(prob_vec_dict.keys()) == len(set(prob_vec_dict.keys())), \
"All observables must be unique."
self.states = sorted(prob_vec_dict)
self.observables = prob_vec_dict[self.states[0]].states
self.values = np.stack([prob_vec_dict[x].values for x in self.states]).squeeze()
@classmethod
def initialize(cls, states: list, observables: list):
size = len(states)
rand = np.random.rand(size, len(observables)) / (size**2) + 1 / size
rand /= rand.sum(axis=1).reshape(-1, 1)
aggr = [dict(zip(observables, rand[i, :])) for i in range(len(states))]
pvec = [ProbabilityVector(x) for x in aggr]
return cls(dict(zip(states, pvec)))
@classmethod
def from_numpy(cls, array: np.ndarray, states: list, observables: list):
p_vecs = [ProbabilityVector(dict(zip(observables, x))) for x in array]
return cls(dict(zip(states, p_vecs)))
@property
def dict(self):
return self.df.to_dict()
@property
def df(self):
return pd.DataFrame(self.values, columns=self.observables, index=self.states)
def __repr__(self):
return "PM {} states: {} -> obs: {}.".format(
self.values.shape, self.states, self.observables)
def __getitem__(self, observable: str) -> np.ndarray:
if observable not in self.observables:
raise ValueError("Requesting unknown probability observable from the matrix.")
index = self.observables.index(observable)
return self.values[:, index].reshape(-1, 1)
from itertools import product
from functools import reduce
class HiddenMarkovChain:
def __init__(self, T, E, pi):
self.T = T # transmission matrix A
self.E = E # emission matrix B
self.pi = pi
self.states = pi.states
self.observables = E.observables
def __repr__(self):
return "HML states: {} -> observables: {}.".format(
len(self.states), len(self.observables))
@classmethod
def initialize(cls, states: list, observables: list):
T = ProbabilityMatrix.initialize(states, states)
E = ProbabilityMatrix.initialize(states, observables)
pi = ProbabilityVector.initialize(states)
return cls(T, E, pi)
def _create_all_chains(self, chain_length):
return list(product(*(self.states,) * chain_length))
def score(self, observations: list) -> float:
def mul(x, y): return x * y
score = 0
all_chains = self._create_all_chains(len(observations))
for idx, chain in enumerate(all_chains):
expanded_chain = list(zip(chain, [self.T.states[0]] + list(chain)))
expanded_obser = list(zip(observations, chain))
p_observations = list(map(lambda x: self.E.df.loc[x[1], x[0]], expanded_obser))
p_hidden_state = list(map(lambda x: self.T.df.loc[x[1], x[0]], expanded_chain))
p_hidden_state[0] = self.pi[chain[0]]
score += reduce(mul, p_observations) * reduce(mul, p_hidden_state)
return score
class HiddenMarkovChain_FP(HiddenMarkovChain):
def _alphas(self, observations: list) -> np.ndarray:
alphas = np.zeros((len(observations), len(self.states)))
alphas[0, :] = self.pi.values * self.E[observations[0]].T
for t in range(1, len(observations)):
alphas[t, :] = (alphas[t - 1, :].reshape(1, -1)
@ self.T.values) * self.E[observations[t]].T
return alphas
def score(self, observations: list) -> float:
alphas = self._alphas(observations)
return float(alphas[-1].sum())
class HiddenMarkovChain_Simulation(HiddenMarkovChain):
def run(self, length: int) -> (list, list):
assert length >= 0, "The chain needs to be a non-negative number."
s_history = [0] * (length + 1)
o_history = [0] * (length + 1)
prb = self.pi.values
obs = prb @ self.E.values
s_history[0] = np.random.choice(self.states, p=prb.flatten())
o_history[0] = np.random.choice(self.observables, p=obs.flatten())
for t in range(1, length + 1):
prb = prb @ self.T.values
obs = prb @ self.E.values
s_history[t] = np.random.choice(self.states, p=prb.flatten())
o_history[t] = np.random.choice(self.observables, p=obs.flatten())
return o_history, s_history
| [
"[email protected]"
] | |
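The module above only defines the classes. A minimal scoring example, assuming it is appended to (or imports from) that module — the state names, observation names and probabilities below are made up for illustration:

```python
# Two hidden states, three observables; all numbers are illustrative.
a1 = ProbabilityVector({'rainy': 0.7, 'sunny': 0.3})
a2 = ProbabilityVector({'rainy': 0.4, 'sunny': 0.6})
A = ProbabilityMatrix({'rainy': a1, 'sunny': a2})            # transition matrix

b1 = ProbabilityVector({'walk': 0.1, 'shop': 0.4, 'clean': 0.5})
b2 = ProbabilityVector({'walk': 0.6, 'shop': 0.3, 'clean': 0.1})
B = ProbabilityMatrix({'rainy': b1, 'sunny': b2})            # emission matrix

pi = ProbabilityVector({'rainy': 0.5, 'sunny': 0.5})         # initial distribution

observations = ['walk', 'shop', 'clean', 'walk']

brute = HiddenMarkovChain(A, B, pi).score(observations)       # sums over all state chains
forward = HiddenMarkovChain_FP(A, B, pi).score(observations)  # forward algorithm
print(brute, forward)  # the two likelihoods agree (up to floating-point error)
```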
4a2785205967c6c824b45557d5974388b35d9303 | f6afa468d9ab1d8eaa4b3ae032dbac1009c298f6 | /list1.py | 7ac94b575fc16ae99bacc4b4ca01482a3659083a | [] | no_license | RGD-Consulting/Py4E | 4b5064ad01009de938551a117145b6d11dde3de6 | b264f34871aca58401ddd8f0529c2f8524cbda17 | refs/heads/master | 2020-09-09T04:19:06.594672 | 2019-11-13T01:42:50 | 2019-11-13T01:42:50 | 221,346,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | li = [1, 2, 3, 4, 5]
li2 = ['a', 'b', 'c']
li3 = ['Notebooks', 'Sunglasses']
print(li3[1])
li4 = li3[0:10]
print(li3)
| [
"[email protected]"
] | |
f0dc516e03770d35c6f59f1e277face9cd05a17c | a2b3987eb8a50bee311f869e39d1c76e738ba2b8 | /authlib/oidc/core/__init__.py | 212ebc031322a8079d2801b905c424145e5fbd5b | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lepture/authlib | abb3e14b8ccacef9ade90b28efed827ab65aadce | 1846d6ac66e89bdb3268fffe15b7e49289966366 | refs/heads/master | 2023-09-04T04:27:56.650738 | 2023-09-02T07:42:47 | 2023-09-02T07:42:47 | 108,510,280 | 4,091 | 481 | BSD-3-Clause | 2023-09-13T13:04:38 | 2017-10-27T06:52:26 | Python | UTF-8 | Python | false | false | 650 | py | """
authlib.oidc.core
~~~~~~~~~~~~~~~~~
OpenID Connect Core 1.0 Implementation.
http://openid.net/specs/openid-connect-core-1_0.html
"""
from .models import AuthorizationCodeMixin
from .claims import (
IDToken, CodeIDToken, ImplicitIDToken, HybridIDToken,
UserInfo, get_claim_cls_by_response_type,
)
from .grants import OpenIDToken, OpenIDCode, OpenIDHybridGrant, OpenIDImplicitGrant
__all__ = [
'AuthorizationCodeMixin',
'IDToken', 'CodeIDToken', 'ImplicitIDToken', 'HybridIDToken',
'UserInfo', 'get_claim_cls_by_response_type',
'OpenIDToken', 'OpenIDCode', 'OpenIDHybridGrant', 'OpenIDImplicitGrant',
]
| [
"[email protected]"
] | |
23a3f87aa0a881b55e2bb861584609f639b08b47 | 3810eb933366169ee226e2db0358475b47e28f72 | /PYTHON_Stack/POO/Tienda/store.py | 4b32b0d767fc46073429a1f1ceb5ba12d0a0efd7 | [] | no_license | jsilva16/Fullstack_Python | 53b823f7cfc5e03f03bf90587c72230ce00e7e06 | fe31679dea5439fb68900984fd1d2b01a7105045 | refs/heads/master | 2023-08-13T12:59:12.509492 | 2021-10-11T12:17:49 | 2021-10-11T12:17:49 | 365,086,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | import product as prod
class Store:
def __init__(self, name, products=[]):
self.name = name
self.product = products
def add_product(self, new_prod):
self.product.append(new_prod)
print(f"Se ha agregado el producto '{new_prod}'")
return self
def delete_prod(self, id):
print("Imprimiendo al información del producto a eliminar")
self.product[id].print_info()
print("El producto ha sido eliminado")
del self.product[id]
return self
def inflation(self,porcentaje):
for prod in self.product:
prod.update_price(porcentaje)
return self
def update_category(self,category, percent):
for prod in self.product:
if prod.category == category:
prod.update_price(percent, False)
return self
store1=Store("tienda")
store1.add_product("Agua", 500, "bebidas")
store1.add_product("pan", 900, "panaderia")
print(store1.name)
print(store1.product)
dir(store1)
| [
"[email protected]"
] | |
5157c2d0dc477639cd2f76410375bd04ba1d25a4 | 59c6728976844b8003d4fc61cc9942b086df10d4 | /236/p236.py | 2b5c1160f13ed388d9e16dc731bdce9efb8a8726 | [] | no_license | hacatu/project-euler | dcafa013c3f9d630fe007357716f1408a31876ba | af43ffc5c1cd0f59fb1eea8215d4a5d48bdfc808 | refs/heads/master | 2022-06-19T21:43:52.693907 | 2022-03-08T09:29:30 | 2022-03-08T09:29:30 | 23,130,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | """
Product Supplier
A
1 Cavier 5248 = 2^7.41 640 = 2^7.5
2 Cake 1312 = 2^5.41 1888 = 2^5.59
3 Joint 2624 = 2^6.41 3776 = 2^6.59
4 Wine 5760 = 2^7.3^2.5 3776 = 2^6.59
5 Truffles 3936 = 2^5.3.41 5664 = 2^5.3.59
Sum 18880 = 2^6.5.59 15744 = 2^7.3.41
b_i*A_i/(B_i*a_i) = p/q = m = sum(a_i)*sum(B_i)/(sum(b_i)*sum(A_i))
b_i = m*a_i*B_i/A_i
sum(a_i)*sum(B_i)/(sum(A_i)*sum(m*a_i*B_i/A_i)) = m
sum(a_i)*sum(B_i)/(sum(A_i)*sum(a_i*B_i/A_i)) = m^2
sum(A_i)/sum(B_i)*sum(B_i/A_i*a_i)/sum(a_i) = m^2
5*59/(2*3*41)*(5/41*a_1 + 59/41*a_2 + 59/41*a_3 + 59/90*a_4 + 59/41*a_5)/sum(a_i) = 1/m^2
(5/41*a_1 + 59/41*a_2 + 59/41*a_3 + 59/90*a_4 + 59/41*a_5)/sum(a_i) = 2*3*41/(5*59*m^2)
59/41 + ((5/41 - 59/41)*a_1 + (59/90 - 59/41)*a_4)/sum(a_i) = 2*3*41/(5*59*m^2)
59/41 - (54/41*a_1 + (49*59/(41*90))*a_4)/sum(a_i) = 2*3*41/(5*59*m^2)
90*59 - 90*54 < 90*59 - (90*54*a_1 + 49*59*a_4)/sum(a_i) = 6*90*41^2/(5*59*m^2)
90*5 < 6*90*41^2/(5*59*m^2)
5^2*59*m^2 < 6*41^2
m < 41/5*sqrt(6/59)
Call this upper bound M
Also, q | gcd(B_i*a_i) and A_i*q | p*a_i*B_i.
Let d = gcd(a_i) and a_i = l_i*d.
This gives us
41q | 5p*d*l_1
41q | 59p*d*l_2
41q | 59p*d*l_3
90q | 59p*d*l_4
41q | 59p*d*l_5
So in particular q | 5*59*p*d, and if we assume p/q is reduced we have
q | 5*59*d
This gets another bound, since d <= A_i so q <= 5*59*d <= 5*59*1312,
so 1 <= q <= 5*59*1312 and p/q is reduced with 1 < p/q < 41/5*sqrt(6/59).
By taking the continued fraction expansion of M, we get m <= 133909/51209,
so 1476/1475 <= p/q <= 133909/51209 with p/q reduced and q <= 387040
"""
from fractions import Fraction as Q
def cfrac(cs):
f = cs[-1]
n = Q(1,1)
for c in cs[-2::-1]:
f = c + n/f
return f
cs = [2,1,1,1,1,2,13,4,14,4,1,83]
class SternBrocotTreeProvider:
def __init__(self, a, b, c, d, denom):
self.a = a
self.b = b
self.c = c
self.d = d
self.denom = denom
def traversePreorder(self, rationalVisitor):
p = self.a + self.c
q = self.b + self.d
if q > self.denom:
return
go_left, go_right = rationalVisitor(p, q)
if go_left:
SternBrocotTreeProvider(self.a, self.b, p, q, self.denom).traversePreorder(rationalVisitor)
if go_right:
SternBrocotTreeProvider(p, q, self.c, self.d, self.denom).traversePreorder(rationalVisitor)
def logRational(a, b):
left_ord = 1475*a - 1476*b
go_left = left_ord > 0
right_ord = 133909*b - 51209*a
go_right = right_ord > 0
if left_ord >= 0 and right_ord >= 0:
print(a, b)
return go_left, go_right
SternBrocotTreeProvider(1, 1, 1, 0, 387040).traversePreorder(logRational)
| [
"[email protected]"
] | |
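The `SternBrocotTreeProvider` above walks the tree with the problem-specific bounds 1476/1475 ≤ p/q ≤ 133909/51209 and q ≤ 387040. The following self-contained sketch shows the same traversal idea on a small interval whose answer can be brute-forced; the interval and denominator bound are made-up illustration values, not the problem's.

```python
from fractions import Fraction

def stern_brocot(lo, hi, max_q, a=1, b=1, c=1, d=0):
    """Yield every reduced fraction in (lo, hi) with denominator <= max_q.

    (a/b, c/d) are the current left/right ancestors in the Stern-Brocot tree;
    each mediant (a+c)/(b+d) is automatically in lowest terms, and any fraction
    strictly between the ancestors has denominator >= b+d, which justifies the cutoff.
    """
    p, q = a + c, b + d
    if q > max_q:
        return
    f = Fraction(p, q)
    if f > lo:                      # left subtree (a/b, f) may still intersect (lo, hi)
        yield from stern_brocot(lo, hi, max_q, a, b, p, q)
    if lo < f < hi:
        yield f
    if f < hi:                      # right subtree (f, c/d) may still intersect (lo, hi)
        yield from stern_brocot(lo, hi, max_q, p, q, c, d)

lo, hi, max_q = Fraction(1, 1), Fraction(3, 2), 10
found = list(stern_brocot(lo, hi, max_q))             # in-order traversal, so already sorted
brute = sorted({Fraction(p, q) for q in range(1, max_q + 1)
                for p in range(q + 1, 2 * q) if lo < Fraction(p, q) < hi})
assert found == brute
print(found)
```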
6030376f26f6c9dd2c2c056562fb2d8ac34ce972 | 1444fbd2f50814edef7924d7a470ce6adb5e0e3e | /ribbit/urls.py | 1d80fc78eb3d9a0fef58f08ba4f507be9dbffb8d | [] | no_license | elpepov1/ArquitecturaTarea | 7db9739b38e64e1fc1049307d219bd1922dae25c | 02b408613a41077ad7d911868b333fc91caa4954 | refs/heads/master | 2022-10-15T11:19:29.253665 | 2018-11-05T04:18:42 | 2018-11-05T04:18:42 | 155,798,751 | 1 | 2 | null | 2022-10-02T12:20:18 | 2018-11-02T01:42:11 | Python | UTF-8 | Python | false | false | 814 | py | """ribbit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('ribbit_app.urls'))
]
| [
"[email protected]"
] | |
d79abed3ffcac17237d816221c670d8599e5f161 | 608110dbe16505e4266fe8eb740acb0ab9fd00b5 | /examples/3d/reconstructSurface.py | e8095edeff28901e3d9d35e9204e315913cd4d55 | [] | no_license | jiawu/tegm | 02c5a07ac863691493d6b0f313160047cefd6cf1 | 8208dddb87aecfc0b66f5b17e027cd661de95b39 | refs/heads/master | 2020-04-06T11:47:46.339002 | 2018-11-27T18:44:22 | 2018-11-27T18:44:22 | 157,430,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | #!/usr/bin/env python
# This example shows how to construct a surface from a point cloud.
# First we generate a volume using the
# vtkSurfaceReconstructionFilter. The volume values are a distance
# field. Once this is generated, the volume is countoured at a
# distance value of 0.0.
import os
import string
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read some points. Use a programmable filter to read them.
pointSource = vtk.vtkProgrammableSource()
def readPoints():
output = pointSource.GetPolyDataOutput()
points = vtk.vtkPoints()
output.SetPoints(points)
file = open("cactus.3337.pts.txt")
line = file.readline()
while line:
data = line.split()
if data and data[0] == 'p':
x, y, z = float(data[1]), float(data[2]), float(data[3])
points.InsertNextPoint(x, y, z)
line = file.readline()
pointSource.SetExecuteMethod(readPoints)
# Construct the surface and create isosurface.
surf = vtk.vtkSurfaceReconstructionFilter()
surf.SetInputConnection(pointSource.GetOutputPort())
cf = vtk.vtkContourFilter()
cf.SetInputConnection(surf.GetOutputPort())
cf.SetValue(0, 0.0)
# Sometimes the contouring algorithm can create a volume whose gradient
# vector and ordering of polygon (using the right hand rule) are
# inconsistent. vtkReverseSense cures this problem.
reverse = vtk.vtkReverseSense()
reverse.SetInputConnection(cf.GetOutputPort())
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
map = vtk.vtkPolyDataMapper()
map.SetInputConnection(reverse.GetOutputPort())
map.ScalarVisibilityOff()
surfaceActor = vtk.vtkActor()
surfaceActor.SetMapper(map)
surfaceActor.GetProperty().SetDiffuseColor(1.0000, 0.3882, 0.2784)
surfaceActor.GetProperty().SetSpecularColor(1, 1, 1)
surfaceActor.GetProperty().SetSpecular(.4)
surfaceActor.GetProperty().SetSpecularPower(50)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(surfaceActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
ren.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren.GetActiveCamera().SetPosition(1, 0, 0)
ren.GetActiveCamera().SetViewUp(0, 0, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(20)
ren.GetActiveCamera().Elevation(30)
ren.GetActiveCamera().Dolly(1.2)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
| [
"[email protected]"
] | |
decec552e407b3d47f8a23848bfeaa27f005a136 | dcfdc9822308c21824b27c051f221a971864e637 | /desafio3aula4.py | 828b1a0b5d44cc32942839f0e0098b411e90ea1c | [] | no_license | renato29/python3-mundo-1 | 04d5f23bcf8e0f5558cd6db129896ce09e14cc6f | a7d92d5b620d56efbcaeced8e00ff4a5fb80ae23 | refs/heads/master | 2020-04-23T05:10:21.013030 | 2019-02-15T21:54:29 | 2019-02-15T21:54:29 | 170,931,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | n1=input("qual o numero 1?\n")
n2=input("qual o numero 2?\n")
soma=n1+n2
print("soma",int(n1)+int(n2))
| [
"[email protected]"
] | |
15ee603bb46c7437c126e4b32fb4f106c68bb73a | f4229b9711cc42b9b386fd446438eaf6ab746a86 | /test.py | ed2b56c357556c0270dde69976e54f4b8c4fd0f9 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Spectrum-CETB/competitive-programming | b71947a0539856831f4aef834f1933e83c36b0b7 | b0827db89916ccda94c6ed38e136fcd7776f5c22 | refs/heads/master | 2022-12-25T04:12:02.477797 | 2020-10-04T08:10:41 | 2020-10-04T08:10:41 | 283,162,958 | 2 | 7 | WTFPL | 2020-10-04T08:10:42 | 2020-07-28T09:21:00 | C++ | UTF-8 | Python | false | false | 19,069 | py | #!/usr/bin/env python3
import collections
import os
import os.path
import shutil
import subprocess
import sys
import re
languages = [
'arr', 'c', 'cc', 'cr', 'd', 'hx', 'factor', 'go', 'jl', 'js', 'lid',
'lisp', 'ml', 'moon', 'nim', 'ooc', 'pi', 'pony', 'py', 'rb', 'rkt', 'rs',
'sage', 'sml', 'stanza', 'sh', 'wren', 'zig'
]
class ansicolors:
OK = '\033[92m'
FAIL = '\033[91m'
WARN = '\033[93m'
ENDC = '\033[0m'
def print_fail(message=''):
print(ansicolors.FAIL + '[FAIL] ' + ansicolors.ENDC + message)
def print_ok(message=''):
print(ansicolors.OK + '[OK] ' + ansicolors.ENDC + message)
def print_warn(message=''):
print(ansicolors.WARN + '[WARN] ' + ansicolors.ENDC + message)
class solution(object):
def __init__(self, code, tests):
self.code = code
self.tests = tests
def __repr__(self):
return '{}: {}'.format(self.code, ', '.join(self.tests))
def _target(self):
return os.path.splitext(self.code)[0]
def build(self):
raise NotImplementedError
def run_command(self, test):
raise NotImplementedError
def clean(self):
raise NotImplementedError
def run(self, generate=False):
ok = True
for test in self.tests:
try:
if generate:
print('Generating answer for {} with {}...'
.format(test, self.code), end='')
else:
print('Checking {} for {}... '.format(self.code, test),
end='')
cmd = self.run_command(test)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True).decode('utf-8')
ans = os.path.splitext(test)[0] + '.ans'
if generate:
with open(ans, 'w') as f:
f.write(output)
print_ok()
else:
with open(ans) as f:
output2 = f.read()
if output == output2:
print_ok()
else:
print_fail()
ok = False
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
ok = False
except Exception as e:
print_fail(str(e))
ok = False
if not ok:
raise
class c(solution):
@property
def target(self):
return self._target() + '-c'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'cc {} -o {} -O2 -std=c11'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class arr(solution):
# FIXME: Pyret solutions are not checked
def run(self, generate=False):
print_warn('Pyret solutions are disabled')
def build(self):
pass
def run_command(self, test):
pass
def clean(self):
pass
class cc(solution):
@property
def target(self):
return self._target() + '-cc'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'c++ {} -o {} -O2 -std=c++14'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class cr(solution):
# FIXME: Crystal solutions are not checked
def run(self, generate=False):
print_warn('Crystal solutions are disabled')
return True
@property
def target(self):
return self._target() + '-cr'
def build(self):
return
try:
print('Building {}... '.format(self.target), end='')
cmd = 'crystal build {} -o {}'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
return True
os.remove(self.target)
class d(solution):
@property
def target(self):
return self._target() + '-d'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'dmd {} -of={} -O'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class hx(solution):
@property
def target(self):
return self._target() + '-hx'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
target = os.path.basename(self.target)
klass = os.path.basename(self._target())
dir, code = os.path.split(self.code)
cmd = 'cd {} && haxe -python {} -main {} {}'\
.format(dir, target, klass, code)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return 'pypy3 {} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class factor(solution):
def build(self):
pass
def run_command(self, test):
return './{} < {}'.format(self.code, test)
def clean(self):
pass
class go(solution):
def build(self):
pass
def run_command(self, test):
return 'go run {} < {}'.format(self.code, test)
def clean(self):
pass
class jl(solution):
def build(self):
pass
def run_command(self, test):
return './{} < {}'.format(self.code, test)
def clean(self):
pass
class js(solution):
def build(self):
pass
def run_command(self, test):
return './{} < {}'.format(self.code, test)
def clean(self):
pass
class lid(solution):
"""OpenDylan solutions"""
def build(self):
try:
print('Building {}...'.format(self._target() + '-dylan'), end='')
cmd = 'dylan-compiler -build {}'.format(self.code)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
root = os.getenv('OPEN_DYLAN_USER_ROOT', '_build')
target = os.path.basename(self._target())
return '{} < {}'.format(os.path.join(root, 'bin', target), test)
def clean(self):
root = os.getenv('OPEN_DYLAN_USER_ROOT',
os.path.join(os.path.dirname(self.code), '_build'))
shutil.rmtree(root, True)
class lisp(solution):
def build(self):
pass
def run_command(self, test):
return 'sbcl --script {} < {}'.format(self.code, test)
def clean(self):
pass
class ml(solution):
@property
def target(self):
return self._target() + '.native'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'corebuild {}'.format(self.target[2:])
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return './{} < {}'.format(os.path.basename(self.target), test)
def clean(self):
os.remove(os.path.basename(self.target))
shutil.rmtree('_build', True)
class moon(solution):
def build(self):
pass
def run_command(self, test):
return 'moon {} < {}'.format(self.code, test)
def clean(self):
pass
class nim(solution):
@property
def target(self):
return self._target() + '-nim'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'nim c -o:{} -d:release {}'.format(self.target, self.code)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
dirname = os.path.dirname(self.code)
shutil.rmtree(os.path.join(dirname, 'nimcache'), True)
class ooc(solution):
@property
def target(self):
return self._target() + '-ooc'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
target = os.path.basename(self.target)
dir, code = os.path.split(self.code)
cmd = 'cd {} && rock {} -O3 -o={}'\
.format(dir, code, target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
dirname = os.path.dirname(self.code)
shutil.rmtree(os.path.join(dirname, 'rock_tmp'), True)
shutil.rmtree(os.path.join(dirname, '.libs'), True)
class pi(solution):
def build(self):
pass
def run_command(self, test):
return 'picat {} < {}'.format(self.code, test)
def clean(self):
pass
class pony(solution):
@property
def target(self):
return self._target()
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'cd {} && ponyc'.format(os.path.dirname(self.code))
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class py(solution):
def build(self):
pass
def run_command(self, test):
return './{} < {}'.format(self.code, test)
def clean(self):
pass
class rb(solution):
def build(self):
pass
def run_command(self, test):
return 'ruby {} < {}'.format(self.code, test)
def clean(self):
pass
class rkt(solution):
def build(self):
pass
def run_command(self, test):
return 'racket {} < {}'.format(self.code, test)
def clean(self):
pass
class rs(solution):
@property
def target(self):
return self._target() + '-rs'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'rustc {} -o {} -O'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class sage(solution):
def build(self):
pass
def run_command(self, test):
return 'python {}.py < {}'.format(self.code, test)
def clean(self):
pass
class sml(solution):
@property
def target(self):
return self._target() + '-sml'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'mlton -output {} {}'.format(self.target, self.code)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class stanza(solution):
@property
def target(self):
return self._target() + '-stanza'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'stanza {} -o {} -optimize'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {}'.format(self.target, test)
def clean(self):
os.remove(self.target)
class sh(solution):
def build(self):
pass
def run_command(self, test):
return './{} < {}'.format(self.code, test)
def clean(self):
pass
class wren(solution):
def build(self):
pass
def run_command(self, test):
return 'cat {}|./{}'.format(test, self.code)
def clean(self):
pass
class zig(solution):
@property
def target(self):
return self._target() + '-zig'
def build(self):
try:
print('Building {}... '.format(self.target), end='')
cmd = 'zig build-exe {} --output {}'.format(self.code, self.target)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print_ok()
except subprocess.CalledProcessError as e:
print_fail(e.output.decode('utf-8'))
raise e
except Exception as e:
print_fail(str(e))
raise e
def run_command(self, test):
return '{} < {} 2>&1'.format(self.target, test)
def clean(self):
os.remove(self.target)
dirname = os.path.dirname(self.code)
shutil.rmtree(os.path.join(dirname, 'zig-cache'), True)
def check_code(solutions):
filtered = []
ok = True
for code, tests in solutions.items():
names = []
for lang in languages:
name = code + '.' + lang
if os.path.exists(name):
names.append((lang, name))
if len(names) == 0:
fmt = 'No solution file exists for the following test files: {}.'
print_fail(fmt.format(', '.join(tests)))
ok = False
else:
for lang, name in names:
filtered.append(globals()[lang](name, tests))
return filtered, ok
def build_solutions(solutions):
filtered = []
ok = True
for solution in solutions:
try:
solution.build()
filtered.append(solution)
except Exception:
ok = False
return solutions, ok
def run_solutions(solutions, generate):
ok = True
for solution in solutions:
try:
solution.run(generate)
except Exception:
ok = False
return ok
def clean_solutions(solutions):
print('Cleaning solutions...')
ok = True
for solution in solutions:
try:
solution.clean()
except Exception:
ok = False
return ok
def check_solutions(solutions, generate):
"""If generate is true, generate the answers instead of cheking them"""
ok = True
solutions, ok2 = check_code(solutions)
ok = ok and ok2
solutions, ok2 = build_solutions(solutions)
ok = ok and ok2
ok2 = run_solutions(solutions, generate)
ok = ok and ok2
ok2 = clean_solutions(solutions)
return ok and ok2, len(solutions)
def get_solutions(root='.'):
"""Get the expected solutions and their related input files"""
solutions = {}
for root, _, files in os.walk(root):
for f in files:
            m = re.search(r'(.*)-\d\.in$', f)
if m is None:
continue
code = os.path.join(root, m.group(1))
test = os.path.join(root, f)
if code in solutions:
solutions[code].append(test)
else:
solutions[code] = [test]
s = [(k, solutions[k]) for k in sorted(solutions.keys())]
return collections.OrderedDict(s)
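# A sketch of the directory layout get_solutions() expects (file names are
# hypothetical): given fizzbuzz.py, fizzbuzz-1.in and fizzbuzz-2.in somewhere
# under the current directory, the returned dict maps './fizzbuzz' to
# ['./fizzbuzz-1.in', './fizzbuzz-2.in'], and check_code() then looks for
# './fizzbuzz.<ext>' for every extension listed in `languages`.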
def main(generate):
"""Test that all the solutions build and provide proper outputs"""
solutions = get_solutions()
ok, s = check_solutions(solutions, generate)
if generate:
print('Generated {} solutions for {} problems...'
.format(s, len(solutions)))
else:
print('Checked {} solutions for {} problems...'
.format(s, len(solutions)))
return 0 if ok else 1
if __name__ == '__main__':
sys.exit(main(len(sys.argv) > 1))
| [
"[email protected]"
] | |
7425a766feedb1ae2a6e8685f175afea1555fcc6 | 273f9563deff1808e4e31dc41057d6f84003352b | /devise/views.py | 6ba9a64539be3b1f9f4cae20f7b5cd3d14170f75 | [] | no_license | AliBouss/Projet-Convertisseur | ac68dd5cd402e25e2f0fd5744e4cdafbe64e43b8 | c475393abaf267ed2faa19ce9f84c2775fbe8298 | refs/heads/master | 2023-05-08T14:43:56.165323 | 2021-06-05T19:41:09 | 2021-06-05T19:41:09 | 359,482,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from django.http import HttpResponse
from django.shortcuts import render
def dashboard(request):
return render(request, 'devise/index.html')
| [
"[email protected]"
] | |
c8e9442da4ebb733f6e9bb872e2ccdb0a579b212 | d83da199e48e4ce8c6f1f7cb47ea536b9fdf982f | /recognition.py | 077860b949f3d596fb63a1d9635b6732c02d73ca | [] | no_license | q-viper/final-devanagari-word-char-detector | 2554eb4fb9ae897603e83061c90f320fd0e76b24 | a7c797dc79e60d0b488c4c00b2cfb3a4520325df | refs/heads/master | 2022-12-21T05:11:25.836142 | 2022-12-10T09:51:28 | 2022-12-10T09:51:28 | 218,929,020 | 11 | 7 | null | 2022-12-10T09:51:29 | 2019-11-01T06:46:40 | Jupyter Notebook | UTF-8 | Python | false | false | 1,597 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 13:50:41 2019
@author: Quassarian Viper
"""
from preprocess import preprocess, detect_text, localize
from predictor import prediction
import numpy as np
import matplotlib.pyplot as plt
import cv2
def recognition(gray_image, show):
segments, template, th_img, text_color = preprocess(gray_image)
labels = []
accuracy = []
show_img = gray_image[:]
#print(len(segments))
for segment in segments:
#plt.imshow(segment)
#plt.show()
recimg, bimg = detect_text(show_img, th_img, segment, text_color)
#print('Process: Recognition....\n')
label, sure = prediction(bimg)
if(sure > 80):
#print(segment)
labels.append(str(label))
accuracy.append(sure)
show_img = localize(show_img, th_img, segment, text_color, show)
char = labels
accuracy = np.average(accuracy)
char = ''.join(char)
if accuracy < 80:
recimg, bimg = detect_text(show_img, th_img, template, text_color)
show_img = localize(show_img, th_img, template, text_color, show)
char, accuracy = prediction(bimg)
if (show == 'show'):
plt.imshow(show_img)
plt.title('Detecting')
plt.xticks([])
plt.yticks([])
plt.show()
else:
cv2.imshow('Detecting..', cv2.cvtColor(show_img, cv2.COLOR_GRAY2BGR))
print('The prediction accuracy for ', char,' is ',"%.2f" % round(accuracy,2), '%')
#plt.imshow(cv2.cvtColor(show_img, cv2.COLOR_GRAY2RGB))
#plt.show() | [
"[email protected]"
] | |
0aae9de573edfa7e08995672402e04f0342010e6 | 6cb50457e2eca73d103745e2397b034693844892 | /venv/Lib/site-packages/selenium/webdriver/phantomjs/webdriver.py | 37c4b1b4359c5116756b5d38373ce338956f0b0c | [] | no_license | alecloz/SberTask | 64b3047c4500df1e70f982cc8060e56f66e79449 | 0ba42cedfde4cbab6d471edc51e5d8d2d55826c8 | refs/heads/master | 2023-06-10T02:10:43.115179 | 2019-11-07T13:13:35 | 2019-11-07T13:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
class WebDriver(RemoteWebDriver):
"""
Wrapper to communicate with PhantomJS through Ghostdriver.
You will need to follow all the directions here:
https://github.com/detro/ghostdriver
"""
def __init__(self, executable_path="phantomjs",
port=0, desired_capabilities=DesiredCapabilities.PHANTOMJS,
service_args=None, service_log_path=None):
"""
Creates a new instance of the PhantomJS / Ghostdriver.
        Starts the service and then creates a new instance of the driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- service_args : A List of command line arguments to pass to PhantomJS
- service_log_path: Path for phantomjs service to log to.
"""
warnings.warn('Selenium support for PhantomJS has been deprecated, please use headless '
'versions of Chrome or Firefox instead')
self.service = Service(
executable_path,
port=port,
service_args=service_args,
log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(
self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities)
except Exception:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the PhantomJS executable
that is started when starting the PhantomJS
"""
try:
RemoteWebDriver.quit(self)
except Exception:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
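# A minimal usage sketch (assumes a `phantomjs` binary is available on PATH;
# note that PhantomJS support is deprecated in Selenium and this emits a warning):
#
#     from selenium import webdriver
#     driver = webdriver.PhantomJS()
#     driver.get("https://example.com")
#     print(driver.title)
#     driver.quit()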
| [
"[email protected]"
] | |
43beae783785a193a2d0257f6ce6022528215360 | 936d82330f6cabea8ffea6ed35cfd6784b3a7885 | /Baselibs/src/cmor/setup.py | 6ed3f068422483f55b52448f54c4ec59c9902040 | [] | no_license | aborle1/GEOS2 | cc4386e19c76fb0c11a98348ffde5f1b55db9337 | 47d25a0d92a20efd7fd552a934ae0dd7422320ba | refs/heads/AJ | 2021-07-20T22:40:42.121925 | 2017-10-17T18:46:08 | 2017-10-17T18:46:08 | 104,491,177 | 0 | 1 | null | 2017-10-17T18:46:09 | 2017-09-22T15:39:23 | Fortran | UTF-8 | Python | false | false | 3,959 | py | import numpy
from numpy.distutils.core import setup, Extension
#from numpy.distutils.ccompiler import CCompiler
import os,sys,string
include_dirs = [numpy.lib.utils.get_include(),"include","include/cdTime"]
library_dirs = [ os.path.join("/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux","lib") ,'.']
include_dirs.append(os.path.join("/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux","include"))
libraries = []
for st in ["-L/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/lib -lnetcdf -ljpeg -lmfhdf -ldf -lhdf5_hl -lhdf5 -ldl -lm -lmfhdf -ldf -lsz -ljpeg -lcurl -lssl -lcrypto -lssl -lcrypto -lz -lm -ldl -lm", "-I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include -DgFortran -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/ -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/zlib -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/szlib -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/jpeg -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/hdf5 -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/hdf -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/uuid -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/netcdf -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/udunits2", " -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/udunits2", " -L/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/lib -Wl,-rpath=/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/lib -ludunits2 -lexpat", " -I/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/include/uuid", " -L/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/lib -Wl,-rpath=/home/aborle1/Baselibs/powerpc64le-unknown-linux-gnu/gfortran_5.4.0-openmpi_1.10.2/Linux/lib -luuid"]:
sp = st.strip().split()
for s in sp:
if s[:2]=='-L':
library_dirs.append(s[2:])
if s[:2]=='-l':
libraries.append(s[2:])
if s[:2]=='-I':
include_dirs.append(s[2:])
srcfiles = "Src/cmor.c Src/cmor_variables.c Src/cmor_axes.c Src/cmor_tables.c Src/cmor_grids.c Src/cdTime/cdTimeConv.c Src/cdTime/cdUtil.c Src/cdTime/timeConv.c Src/cdTime/timeArith.c Src/cmor_cfortran_interface.c Src/cmor_md5.c".split()
srcfiles.insert(0,os.path.join("Src","_cmormodule.c"))
macros=[]
for m in " -DCOLOREDOUTPUT".split():
macros.append((m[2:],None))
ld =[]
for p in library_dirs:
if os.path.exists(p):
ld.append(p)
library_dirs=ld
ld =[]
for p in include_dirs:
if os.path.exists(p):
ld.append(p)
include_dirs=ld
print 'Setting up python module with:'
print 'libraries:',libraries
print 'libdir:',library_dirs
print 'incdir',include_dirs
print 'src:',srcfiles
print 'macros:',macros
setup (name = "CMOR",
version='2.0',
author='Charles Doutriaux, PCMDI',
description = "Python Interface to CMOR output library",
url = "http://www-pcmdi.llnl.gov/cmor",
packages = ['cmor'],
package_dir = {'cmor': 'Lib'},
ext_modules = [
Extension('cmor._cmor',
srcfiles,
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = libraries,
define_macros = macros,
extra_compile_args = [ "-g", ]
),
]
)
| [
"[email protected]"
] | |
3b92ebb7a15d77a2d75fce08ac0def58af58f685 | a23a41dd766b05cd3f762e179f01986e7203ce73 | /第一次实验/sy1_t1.py | 2fe2dfcb64a03aaffb7c88cb08172019c108bf45 | [] | no_license | QYQYQYQYQYQ/Python_bjtu_courses | 041b4c3d2e810dab6b6b482d1996939308308f5d | 446d6b3cd30e3c1338e74026c95fc8748c97ffc4 | refs/heads/master | 2022-09-11T22:17:01.603294 | 2020-05-30T17:46:24 | 2020-05-30T17:46:24 | 266,515,072 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #authored by 邱烨卿-19271222
f1 = open("text1.txt",'r')
t1 = f1.read()
print(t1)
f1.seek(0)
t2 = f1.readline()
while t2 != "":
print(t2,end="")
t2 = f1.readline()
f1.seek(0)
t3 = f1.readline()
print("\n"+t3.replace("Python","java"),end="")
while t3 != "":
t3 = f1.readline()
print(t3.replace("Python","java"),end="") | [
"[email protected]"
] | |
50e4da80ec9967a19f4a89b12ea87445776ca627 | 75191657d2226dab4dbaa4baf0dda64f9cc67691 | /python/BMatzip/mat_data.py | 34c148cfcc63a2db8bed1e469a297a81d0f62c6e | [] | no_license | dkaylee/ClassProject | d1c02b65b9a4f45b51a43af2a139f5f19b10214a | 0e6488fb598899621642b9a809796299632c74f8 | refs/heads/master | 2023-03-15T17:09:42.482162 | 2021-03-16T12:11:02 | 2021-03-16T12:11:02 | 299,173,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | import logging
import logging.config
import csv
##############################
#logging.config.fileConfig('logging.conf')
logger = logging.getLogger('main')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(process)d %(message)s')
data_log_handler = logging.FileHandler('mat_data.log')
data_log_handler.setFormatter(formatter)
logger.addHandler(data_log_handler)
logger.setLevel(logging.DEBUG)
##############################
line_counter = 0    # total number of rows read from the csv file
data_header = []    # header row of the csv
sample_list = []    # data rows kept from the csv
insert_count = 0    # how many new rows were kept
insert_list = []
# log an info entry when the program starts its work
logger.info('Open file {}'.format('07_24_05_P_2.csv'))
# 07_24_05_P_2.csv -> stored into sample_list
try:
with open('07_24_05_P_2.csv', encoding='cp949') as file:
while True:
data = file.readline().strip('\n')
            # last line -> readline() returns an empty string when there is no more data -> exit the loop
if not data:
break
            if line_counter == 0 :  # save the header row
data_header = data.split(',')
print(data_header)
else:
                # row to be added to the new list
insert_list = data.split(',')
#insert_list[6].strip()
#print(insert_list)
                # status field: '1' -> open, '3' -> permanently closed (keep only open shops)
                if insert_list[1] == '1':
sample_list.append(insert_list)
insert_count += 1
line_counter += 1
logger.info('data read complete')
except FileNotFoundError as e:
    # log the exception when the input file does not exist
logger.error('File Not Found {}'.format(e))
print('total-Count', line_counter)
print('insert-count', insert_count)
for i in range(20):
print('data-',i,':', sample_list[i])
with open('new_salad_store_data.csv', 'w', encoding='utf8') as new_file:
writer = csv.writer(new_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_ALL)
    # write the header row
writer.writerow(data_header)
for row in sample_list:
writer.writerow(row)
    print('Write complete')
logger.info('writer ok!')
# from the rows kept above, keep only the shops whose name contains '샐러드' (salad)
for shop in sample_list:
if shop[3].find(u'샐러드') != -1:
insert_list.append(shop)
with open('new_salad_store_data.csv', 'w', encoding='utf-8') as star_file:
for star_shop in insert_list:
star_file.write(','.join(star_shop).strip('\n')+'\n')
print('Salad shop list file created')
logger.info('writer ok!')
print('File writing is complete.')
logger.info('program exit')
for s_shop in insert_list:
print(s_shop) | [
"[email protected]"
] | |
abc399c1b0a5eb06f408279dc075c4c7b5750846 | 7c7d2cb72b6c2ac57605250bcde2143988137c9c | /0x01-python-if_else_loops_functions/8-uppercase.py | d9f862c5829adaabd09b000787637d0067b9432d | [] | no_license | garthus23/holbertonschool-higher_level_programming | 915f2cc501ffc1f9b9efd7f9d7dfcae5b50e9edf | 9ab8ade0b615b669ee7807caf560b411e09634af | refs/heads/main | 2023-04-24T08:59:33.043402 | 2021-05-12T19:57:30 | 2021-05-12T19:57:30 | 319,256,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | #!/usr/bin/python3
def uppercase(str):
for i in str:
if ord(i) >= 97 and ord(i) <= 122:
i = chr(ord(i) - 32)
print("{}".format(i), end="")
print('')
| [
"[email protected]"
] | |
ee2d594ca917245a4670b0f459d13c3cb3900661 | 0d0d9c4a1ecd7bc067eca561afa9da559270a92b | /QuestionList/nonOverlappingIntervals.py | 23db98c51357802b7532a60567c45c65f3365979 | [] | no_license | aashrit-luthra/LCSolutions | af95323b02d810b59a3018bdd3c5661f0d98a95f | a4df47ee5d5daa4d54bda54e6e8f1cc980844c93 | refs/heads/main | 2023-07-30T09:22:59.563904 | 2021-09-10T17:18:30 | 2021-09-10T17:18:30 | 389,480,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
if not intervals:
            return 0
numRemovals = 0
intervals.sort(key=lambda x: x[0])
runningInterval = intervals[0]
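        # Greedy sketch (toy input): after sorting by start, whenever two
        # intervals overlap the loop below keeps the one that ends earlier by
        # shrinking the running end to the smaller value.  For
        # [[1,2],[2,3],[3,4],[1,3]] exactly one interval ([1,3]) is dropped,
        # so the method returns 1.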
for i in range(1, len(intervals)):
currInterval = intervals[i]
if currInterval[0] >= runningInterval[1]:
runningInterval = currInterval
else:
runningInterval[1] = min(runningInterval[1], currInterval[1])
numRemovals += 1
return numRemovals | [
"[email protected]"
] | |
cf58a6dbd75371fe2af32b1f5b96ddbb9a5eeee0 | e7950d663e0655ae2a3808bc3a8da4e8c2e1d43f | /lms/migrations/0100_auto_20160329_1215.py | 8ace96eddf9599d7734b86cadc183763461ebc0e | [] | no_license | sanketlolge/gktcs | 6ec4d05812caa95824f35c9c7d13dfdff15678b6 | 0c3d300ff9d1fdb9eb4cd7ecb6471a9ffce35e92 | refs/heads/master | 2021-01-10T03:58:24.471078 | 2016-12-12T10:38:16 | 2016-12-12T10:38:16 | 51,284,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lms', '0099_userprofile'),
]
operations = [
migrations.DeleteModel(
name='temptest',
),
migrations.RemoveField(
model_name='userprofile',
name='user',
),
migrations.DeleteModel(
name='userprofile',
),
]
| [
"[email protected]"
] | |
54e5aceba5b879aeed3c9e293cd506c8860d9945 | c1d62a0506dac6234cedd08d6772ae668042bfad | /Django_beltExam/Django_beltExam/wsgi.py | 4c6bcd6b1dfdb51ed47a0d65e2c74d827075016d | [] | no_license | alish1331/Python-Django | 0263ab080910cb0c3bef93eecd64698ca7166a96 | 7030a469aaa5221af365f516e9645a0ffa924301 | refs/heads/master | 2020-11-26T01:47:45.711116 | 2020-01-13T23:15:40 | 2020-01-13T23:15:40 | 228,928,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WSGI config for Django_beltExam project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Django_beltExam.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
2c27aa0c30f0c2a44a61e8a498c6eb2a703ec614 | 50281a785c6fd4ed3ca47c5facc586546ffc6db7 | /scrapy_project/ganji/ganji/pipelines.py | 589a3058e91ae4544cbe78bc35cbd2ae499d9d9f | [] | no_license | zuohd/python-excise | 9c40a7ab0caca18fdcf10d52123a9f9c515c2527 | d5236c35761dbb8c45f79c54ab806556241f0ef0 | refs/heads/master | 2021-07-10T14:18:10.463614 | 2019-03-13T13:15:25 | 2019-03-13T13:15:25 | 148,882,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
class GanjiPipeline(object):
def open_spider(self, spider):
self.con = sqlite3.connect("xarenthouse.sqlite")
def process_item(self, item, spider):
print(spider.name)
# insert_sql="insert into zufang(title,money) values('{}','{}')".format(item['title'],item['money'])
# print("1111",insert_sql)
# self.con.cursor().execute(insert_sql)
# self.con.commit()
return item
def close_spider(self, spider):
self.con.close()
| [
"[email protected]"
] | |
f56286a025e615958ddb8ba01d7fb534450b623e | c139bfbec0ba4f80f4a339b45f183952057768e2 | /About_CharacterSet/PlugBoard/PlugBoard.py | 324d0e892c6eebad2a4f919c3bc215626a737d60 | [] | no_license | WryHarpy/Enigma-Machine | 53b29a386ddb0e0d582cf14999d2c3c5d5d62ddd | 3e3fabbec84f46d018dc3d85130cd45b14449b5b | refs/heads/master | 2020-07-23T21:03:00.228495 | 2019-12-09T14:35:03 | 2019-12-09T14:35:03 | 207,705,635 | 2 | 6 | null | 2019-11-25T15:20:35 | 2019-09-11T02:30:16 | Python | UTF-8 | Python | false | false | 1,559 | py | # PlugBoard.py
# Quang Hoang
# 24 Sept 2019
# PlugBoard for the Enigma Machine
import os
# Open the letter set file and get the letter sets
def getSet(charSet):
doc = open(charSet, 'r')
doc = doc.readlines()
set = []
for i in doc:
i = i.strip("\n")
set.append(i)
return set
# Get the message
def getText():
#text = input(str("Enter your message: "))
replaceTextItems = [' ', '\n', '—', '-']
#'''
with open ("GettysburgAddress.txt", 'r') as text:
text = text.read()
print(text)
#'''
for i in replaceTextItems:
text = text.replace(i, '')
return text
# Run the scrambling and put it to a file
def runPlugBoard(text):
letterSet = getSet("letterset.txt")
plugBoard = getSet("plugboard.txt")
    # Check if the file already exists
if os.path.exists("encrypt message.txt"):
os.remove("encrypt message.txt")
file = open ("encrypt message.txt", 'a')
else:
file = open ("encrypt message.txt", 'a')
# Append new message
for i in text:
index = letterSet.index(i)
file.write(plugBoard[index])
file.close()
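# Substitution sketch (the file contents are whatever the user supplies): if
# letterset.txt lists the alphabet one character per line and plugboard.txt
# lists the swapped character at the same line position, every character of
# the message is looked up in letterSet and written out as the character at
# the same index in plugBoard, e.g. 'A' on line 1 of letterset.txt and 'Q' on
# line 1 of plugboard.txt means each 'A' is encrypted as 'Q'.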
# Re-write the message to file and screen
def writeMessage():
file = open ("encrypt message.txt", 'r')
file = file.read()
message = open ("encrypt message.txt", 'w')
for i in range(0, len(file), 17):
message.write(str(file[i:i+17]) + " ")
print (file[i:i+17], end= ' ')
# ## M A I N ##
runPlugBoard(getText())
writeMessage()
| [
"[email protected]"
] | |
130714484ab7bd4391518ceba64a99c41e7365b0 | 6e6e1301d2aa55d1542640a76ad30b68314b4b56 | /radiobutton.py | 9c2465760ade1ce9fa9259d8835e84a7534a0f66 | [] | no_license | Akhil-64/tkinter-python- | e3906f16930b3a7bb544696808a6b7f7e5b310fb | dbe55e82194de7f1e826f4d236592fb4e86d34aa | refs/heads/master | 2020-09-06T02:44:11.918618 | 2019-11-11T11:20:39 | 2019-11-11T11:20:39 | 220,293,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from tkinter import *
root=Tk()
v=IntVar()
master=Radiobutton(root,text="GfG",variable=v,value=1).pack(anchor=W)#anchor for left alignment
master=Radiobutton(root,text="Pnj",variable=v,value=2).pack(anchor=W)
root.mainloop()
| [
"[email protected]"
] | |
b6edd35cd22a0b768255f1100a8fa6b9fdb1ef6e | 06a366f002e41334a07f2449581204e03d52449b | /datalogtest.py | c4f865ed91c4cd468bbb823baa898de238e78436 | [] | no_license | seanachaidh/experiments | dca2f466566fa217f79267abd23c4bfd2aef18b0 | 1ab86a1f6f390e4ea0f74218ef74f79414205e6c | refs/heads/master | 2020-09-16T10:00:29.011363 | 2016-09-23T18:35:45 | 2016-09-23T18:35:45 | 67,930,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | ##from pyDatalog import pyDatalog
##pyDatalog.create_terms('N,X0,X1,X2,X3,X4,X5,X6,X7')
##pyDatalog.create_terms('ok,queens,next_queen')
##
### the queen in the first column can be in any row
##queens(X0) <= (X0._in(range(8)))
##
### to find the queens in the first 2 columns, find the first one first, then find a second one
##queens(X0,X1) <= queens(X0) & next_queen(X0,X1)
##
### repeat for the following queens
##queens(X0,X1,X2) <= queens(X0,X1) & next_queen(X0,X1,X2)
##queens(X0,X1,X2,X3) <= queens(X0,X1,X2) & next_queen(X0,X1,X2,X3)
##queens(X0,X1,X2,X3,X4) <= queens(X0,X1,X2,X3) & next_queen(X0,X1,X2,X3,X4)
##queens(X0,X1,X2,X3,X4,X5) <= queens(X0,X1,X2,X3,X4) & next_queen(X0,X1,X2,X3,X4,X5)
##queens(X0,X1,X2,X3,X4,X5,X6) <= queens(X0,X1,X2,X3,X4,X5) & next_queen(X0,X1,X2,X3,X4,X5,X6)
##queens(X0,X1,X2,X3,X4,X5,X6,X7) <= queens(X0,X1,X2,X3,X4,X5,X6) & next_queen(X0,X1,X2,X3,X4,X5,X6,X7)
##
### the second queen can be in any row, provided it is compatible with the first one
##next_queen(X0,X1) <= queens(X1) & ok(X0,1,X1)
##
### to find the third queen, first find a queen compatible with the second one, then with the first
### re-use the previous clause for maximum speed, thanks to memoization
##next_queen(X0,X1,X2) <= next_queen(X1,X2) & ok(X0,2,X2)
##
### repeat for all queens
##next_queen(X0,X1,X2,X3) <= next_queen(X1,X2,X3) & ok(X0,3,X3)
##next_queen(X0,X1,X2,X3,X4) <= next_queen(X1,X2,X3,X4) & ok(X0,4,X4)
##next_queen(X0,X1,X2,X3,X4,X5) <= next_queen(X1,X2,X3,X4,X5) & ok(X0,5,X5)
##next_queen(X0,X1,X2,X3,X4,X5,X6) <= next_queen(X1,X2,X3,X4,X5,X6) & ok(X0,6,X6)
##next_queen(X0,X1,X2,X3,X4,X5,X6,X7) <= next_queen(X1,X2,X3,X4,X5,X6,X7) & ok(X0,7,X7)
##
### it's ok to have one queen in row X1 and another in row X2 if they are separated by N columns
##ok(X1, N, X2) <= (X1 != X2) & (X1 != X2+N) & (X1 != X2-N)
##print
from pyDatalog import pyDatalog
# Rainy  # undefined name left over from an experiment; pyDatalog terms must be declared first, e.g. pyDatalog.create_terms('Rainy')
| [
"pieter@Wodan"
] | pieter@Wodan |
131e309c0b50460e66d4e5a9a60fa88662524d57 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /For Irene/DFS/0104_Maximum_Depth_of_Binary_Tree.py | be6de331f2aee2e7984f41c5fcbadc9e59b7a7b9 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 541 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def maxDepth(self, root: Optional[TreeNode]) -> int:
def postorder(node):
if not node:
return 0
left = postorder(node.left)
right = postorder(node.right)
return max(left, right) + 1
return postorder(root)
| [
"[email protected]"
] | |
6519c3d8e645e3763e579ed202267054d59f3223 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_23/models/policy_member.py | be52a484c3c24eaafba329d0bce3cec995432afc | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,961 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.23
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_23 import models
class PolicyMember(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'destroyed': 'bool',
'enabled': 'bool',
'member': 'FixedReferenceWithType',
'policy': 'FixedReferenceWithType',
'time_remaining': 'int'
}
attribute_map = {
'destroyed': 'destroyed',
'enabled': 'enabled',
'member': 'member',
'policy': 'policy',
'time_remaining': 'time_remaining'
}
required_args = {
}
def __init__(
self,
destroyed=None, # type: bool
enabled=None, # type: bool
member=None, # type: models.FixedReferenceWithType
policy=None, # type: models.FixedReferenceWithType
time_remaining=None, # type: int
):
"""
Keyword args:
destroyed (bool): Returns a value of `true` if the member is destroyed.
enabled (bool): Returns a value of `true` if the policy is enabled.
member (FixedReferenceWithType): Reference to the resource that the policy is applied to.
policy (FixedReferenceWithType): Reference to the policy.
time_remaining (int): The amount of time left, in milliseconds, until the destroyed policy member is permanently eradicated.
"""
if destroyed is not None:
self.destroyed = destroyed
if enabled is not None:
self.enabled = enabled
if member is not None:
self.member = member
if policy is not None:
self.policy = policy
if time_remaining is not None:
self.time_remaining = time_remaining
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMember`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMember`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMember`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMember`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyMember, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyMember):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
88aa9c9e0dd1fe3fd0adc143f1631d6bb4bdbf22 | 099976fab0dc207b7e4116d6c9965102a307fdaf | /kNN/KNN.py | 8eec7af052246e2fb31b1cae6ff9d03b1c7ed8f5 | [] | no_license | enningxie/mlWin | 8d387dd8e6228377b916f68661e471a5d9d9fd57 | fce35bcc71f9ac4a8059d022c6b216a2c4cd2834 | refs/heads/master | 2021-01-22T23:57:56.313185 | 2018-03-29T13:42:59 | 2018-03-29T13:42:59 | 85,682,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | #coding=utf-8
from numpy import *
from os import listdir
import operator
def classify0(inX, dataSet, labels, k):
"""
    kNN classifier.
    Arguments: inX - input vector to classify / dataSet - training sample matrix / labels - label vector / k - number of nearest neighbours used in the vote
"""
dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize,1)) - dataSet #tile replicates inX into a (dataSetSize,1) tiling so it can be subtracted from every training sample
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis = 1)
distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort() #argsort() returns the indices that would sort the distances
    classCount = {} #dictionary used for the majority vote
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    sortedClassCount = sorted(classCount.iteritems(),key=operator.itemgetter(1),reverse=True) #sort the vote counts in descending order
    return sortedClassCount[0][0] #return the predicted class
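# A toy usage sketch for classify0 (hypothetical data; Python 2, like the rest
# of this module):
#     group = array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
#     labels = ['A', 'A', 'B', 'B']
#     classify0([0.9, 1.0], group, labels, 3)   # -> 'A'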
def img2vector(filename):
"""
    Convert a digit image stored as 32x32 lines of text into a 1x1024 row vector
"""
returnVect = zeros((1,1024))
fr = open(filename)
for i in range(32):
linStr = fr.readline()
for j in range(32):
returnVect[0,32*i+j] = int(linStr[j])
return returnVect
def handwritingClassTest():
"""
    Handwritten digit recognition test routine
"""
hwLabels = []
    trainingFileList = listdir('trainingDigits') #list the file names in the training directory
m = len(trainingFileList)
trainingMat = zeros((m,1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0]) #the label of each training instance is taken from its file name
hwLabels.append(classNumStr)
trainingMat[i,:] = img2vector('trainingDigits/%s'%fileNameStr)
    testFileList = listdir('testDigits') #list the file names in the test directory
errorCount = 0.0
mTest = len(testFileList)
for i in range(mTest):
fileNameStr = testFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
vectorUnderTest = img2vector('testDigits/%s'%fileNameStr)
classifierResult = classify0(vectorUnderTest,trainingMat,hwLabels,3)
print"The classifier came back with:%d,the real answer is :%d" % (classifierResult,classNumStr)
if(classifierResult != classNumStr):
errorCount += 1.0
print"\nthe total number of errors is %d"%errorCount
print"\nthe total error rate is %f" % (errorCount/float(mTest)) | [
"[email protected]"
] | |
a4763ae237c078246199f4f7ac24897c0b297b50 | 07dc7b55944fee252652f4c6b32d67163f297060 | /30.sum while true.py | 3ffd1b80ba86e4140023a7484a199b1e97a208a4 | [] | no_license | maysuircut007/python-basic | 356cc815e34cdfbd533c3a7cf52199999dc654c8 | 714eca954e8c9c7901223522791a9e8cd718030b | refs/heads/master | 2023-07-05T05:17:01.587965 | 2021-08-23T17:14:20 | 2021-08-23T17:14:20 | 399,177,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py |
sum = 0
while True:
number = int(input("ป้อนตัวเลข : "))
sum += number
if sum >= 100:
break
print("ผลรวม = ", sum)
| [
"[email protected]"
] | |
0d556bf100ee647928151bb47b989764f52027ff | ffcf6b4336c686e718038efd8b1c85889d72e2f1 | /newspaper_project/urls.py | aa23f4a0ff74c6f28a938312c2d9705198e4ff30 | [] | no_license | NanaAsabere/Newspaper | fd30c75b82a546d8e9355ba0d57bac35b11c9993 | fbfa5966b261fe75f6aa3bed4cbc7215e2ac0ad0 | refs/heads/main | 2023-06-05T05:06:48.214158 | 2021-06-28T20:20:49 | 2021-06-28T20:20:49 | 381,153,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | """newspaper_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('articles/', include('articles.urls')),
path('', include('pages.urls'))
]
| [
"[email protected]"
] | |
5d6d76fca7697a49e08aee67a0ae85c1c5adb328 | 695803cf1ae81f7a8ad63faa80545c3c913cee02 | /Part1/week2/chapter10/exercise/guest_10.3.py | b190270eef1664d80d2869917fa4a7cf697b3ce1 | [] | no_license | superstones/LearnPython | fa72a249a69323927da81887ce4b9f400552a1d0 | 5ea25f9b9922654d67c6b31475cdf02b9fe99c7e | refs/heads/master | 2023-06-24T10:40:18.639288 | 2021-07-22T08:34:11 | 2021-07-22T08:34:11 | 370,679,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | # 10.3访客
guest_name = input("Please enter your name: ")
file_name = 'guest.txt'
with open(file_name, 'a') as file_object:
file_object.write(guest_name + "\n")
| [
"[email protected]"
] | |
d3528e1d17c1e7b29841d77b4f103c4b61bc682d | 2c17d22b564c133871eb3f6bc21da88bcf8e0d2d | /CodeMethodeOptimisation.py | 968815e8c1fdfe0133a1fb6327daaa578cfb2a5b | [] | no_license | MiKiDe/Falconer2020 | ac6de5c7b9603f615108ceef779aa732cf8620c1 | d5611a8fb7746ba871c09ef3fc275697ab568cc7 | refs/heads/master | 2022-11-06T20:42:04.003182 | 2020-07-01T08:09:42 | 2020-07-01T08:09:42 | 249,779,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py | import numpy as np
import pylab as plt
from mpl_toolkits import mplot3d
import numpy.random as rd
from copy import deepcopy  # deepcopy is used in the thresholding section below
N=50
## Choice of the figures
# Geometric shapes: circle, square, triangle
g1=np.zeros((N,N))
for i in range(N):
for j in range(N):
if np.sqrt((i-N/2)**2+(j-N/2)**2)<=N/4:
g1[i,j]=1
g2=np.zeros((N,N))
for i in range(N):
for j in range(N):
if N/3<=i<=2*N/3 and N/3<=j<=2*N/3:
g2[i,j]=1
g3=np.zeros((N,N))
for i in range(N):
for j in range(N):
if i+N/4>=3*N/5 and j<=3*N/3-(i+N/4)/2 and j>=(i+N/4)/2:
g3[i,j]=1
# Digits: 1, 2 and 3
c1=np.zeros((N,N))
for i in range(N):
for j in range(N):
if j==int(N/2) and N/4<i<3*N/4:
c1[i,j]=1
if j==int(N/2)-1 and N/4<i<3*N/4:
c1[i,j]=1
c2=np.zeros((N,N))
for i in range(N):
for j in range(N):
if i==int(N/4) and N/3<j<2*N/3:
c2[i,j]=1
if j==int(2*N/3) and N/4<i<N/2:
c2[i,j]=1
if i==int(N/2) and N/3<j<2*N/3:
c2[i,j]=1
if j==int(N/3) and N/2<i<3*N/4:
c2[i,j]=1
if i==int(3*N/4) and N/3<j<2*N/3:
c2[i,j]=1
c3=np.zeros((N,N))
for i in range(N):
for j in range(N):
if i==int(N/4) and N/3<j<2*N/3:
c3[i,j]=1
if j==int(2*N/3) and N/4<i<N/2:
c3[i,j]=1
if i==int(N/2) and N/3<j<2*N/3:
c3[i,j]=1
if j==int(2*N/3) and N/2<i<3*N/4:
c3[i,j]=1
if i==int(3*N/4) and N/3<j<2*N/3:
c3[i,j]=1
# Fix the shapes used in the rest of the script:
f1 = c1
f2 = c2
f3 = c3
## Projection and plotting functions
def proj2(Z):
plt.subplot(1,3,1)
T1=np.zeros((N,N))
for i in range(N):
for j in range(N):
T1[i,j]=max(Z[i,j,:])
plt.imshow(T1,cmap="binary",interpolation="none")
plt.subplot(1,3,2)
T2=np.zeros((N,N))
for j in range(N):
for k in range(N):
T2[j,k]=max(Z[:,j,k])
plt.imshow(T2,cmap="binary",interpolation="none")
plt.subplot(1,3,3)
T3=np.zeros((N,N))
for i in range(N):
for k in range(N):
T3[i,k]=max(Z[i,:,k])
plt.imshow(T3,cmap="binary",interpolation="none")
#return (T1,T2,T3)
plt.show()
def make_ax(grid=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.grid(grid)
return ax
## Initial condition:
A=rd.random((N,N,N))
A*=0.15
## Choice of the cost function:
p=np.log(N)/np.log(1.1)#so that inf-norm <= p-norm <= 1.1*inf-norm
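# Why this choice of p (derivation): for a vector X of length N whose largest
# entry is m = max(x_i) (entries of A are non-negative), one has
# m**p <= sum(x_i**p) <= N*m**p, hence m <= ||X||_p <= N**(1/p) * m.
# With p = ln(N)/ln(1.1) the factor N**(1/p) = exp(ln(N)/p) = exp(ln(1.1)) = 1.1,
# which gives the bound quoted in the comment above.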
def f(X):
s=0
for x in X:
s+=x**p
return s**(1/p)
def F(M):
s=0
for i in range(N):
for j in range(N):
s+=(f1[i,j]-f(A[i,j,:]))**2
for j in range(N):
for k in range(N):
s+=(f2[j,k]-f(A[:,j,k]))**2
for i in range(N):
for k in range(N):
s+=(f3[i,k]-f(A[i,:,k]))**2
return s
def derf(k,X):
if f(X)==0:
return 0
else:
return (X[k]**(p-1))/(f(X)**(p-1))
def gradF(i,j,k,A):
return-2*derf(k,A[i,j,:])*(f1[i,j]-f(A[i,j,:]))-2*derf(i,A[:,j,k])*(f2[j,k]-f(A[:,j,k]))-2*derf(j,A[i,:,k])*(f3[i,k]-f(A[i,:,k]))
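# gradF is the partial derivative of F with respect to A[i,j,k], obtained by the
# chain rule: each of the three squared-error terms (f1-f(A[i,j,:]))**2,
# (f2-f(A[:,j,k]))**2 and (f3-f(A[i,:,k]))**2 contributes
# -2*(target - f(line)) * d f(line)/d A[i,j,k], where derf computes the inner
# derivative x_k**(p-1) / ||X||_p**(p-1) of the p-norm.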
## Main loop: gradient descent
s = 90 # threshold on the cost below which the loop stops
d = 10 # decrease of the cost between two iterations (used to adapt the step size t)
t = 1
q = F(A)
while q>s:
    # Use smaller descent steps as the cost gets closer to a minimum:
l = q
if d<0.5:
t=10**(-3)
if d<1:
t=10**(-2)
elif q<300:
t=10**(-1)
for k in range(N):
for i in range(N):
for j in range(N):
v=A[i,j,k]
m=gradF(i,j,k,A)
A[i,j,k]=max(0,min(v-t*m,1))
q=F(A)
d=l-q
print(q)
# Plots:
proj2(A)
ax = make_ax(True)
ax.voxels(A, facecolors='#1f77b430', edgecolors='gray', shade=True)
plt.show()
## Thresholding
B=deepcopy(A)
def dist(A,C):
x=0
for i in range(N):
for j in range(N):
x+=(A[i,j]-C[i,j,])**2
return x
def proj2(Z):
plt.subplot(1,3,1)
T1=np.zeros((N,N))
for i in range(N):
for j in range(N):
T1[i,j]=max([Z[i,j,k] for k in range(N)])
plt.imshow(T1,cmap="binary",interpolation="none")
plt.subplot(1,3,2)
T2=np.zeros((N,N))
for j in range(N):
for k in range(N):
T2[j,k]=max([Z[i,j,k] for i in range(N)])
plt.imshow(T2,cmap="binary",interpolation="none")
plt.subplot(1,3,3)
T3=np.zeros((N,N))
for i in range(N):
for k in range(N):
T3[i,k]=max([Z[i,j,k] for j in range(N)])
plt.imshow(T3,cmap="binary",interpolation="none")
return (T1,T2,T3)
plt.show()
M=1000
L=[]
G=[]
S=[k/(8*M) for k in range(M)]
for k in range(M):
if k%int(M/10)==0:
print(int(10-10*k/M))
s=S[k]
B=deepcopy(A)
for i in range(N):
for j in range(N):
for k in range(N):
if B[i,j,k]>=s:
B[i,j,k]=1
else:
B[i,j,k]=0
P1,P2,P3=proj2(B)
d=dist(P1,f1)+dist(P2,f2)+dist(P3,f3)
L.append(d)
G.append(B)
for k in range(M):
if L[k]==min(L):
X=deepcopy(G[k])
proj2(X)
def make_ax(grid=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.grid(grid)
return ax
ax = make_ax(True)
ax.voxels(X, facecolors='#1f77b430', edgecolors='gray', shade=False)
plt.show()
| [
"[email protected]"
] | |
ba200e3d13d1f1665df809c94d97f51cf50fcba0 | c5201677a094a2d85ca1917ac67b82ed049b87ce | /parseps.py | aa41550ee96b6c617a9a4bfda85b3402e671b0b0 | [] | no_license | anantpbhat/Python | 0b510982e187570363e2883f71787b333ebe9637 | b6675b31604a3c095a1c4bf0fe4204651834aca8 | refs/heads/master | 2022-10-16T04:11:01.407536 | 2022-09-26T00:53:21 | 2022-09-26T00:53:21 | 145,345,000 | 0 | 0 | null | 2022-09-26T00:51:24 | 2018-08-19T23:32:34 | Python | UTF-8 | Python | false | false | 3,934 | py | #!/usr/bin/env python3.6
########################################################################
# #
# Parse Unix PS output, takes input file as an argument or will prompt #
# for User input. #
# Author: Anant Bhat. #
# #
# Please capture all version changes below #
# Version 1.0 - Initial creation, Anant, 12/24/2018 #
########################################################################
import argparse, re
from datetime import datetime
class Argsnregx():
def __init__(self):
self.parser = argparse.ArgumentParser(description="Parse Unix PS output from a file.")
self.parser.add_argument("-i", "--infile", metavar="", help="Specify PS Data file path")
self.parser.add_argument("-p", "--pid", metavar="", help="Specify PID to monitor")
self.parser.add_argument("-u", "--user", metavar="", help="Specify Username to monitor")
self.args = self.parser.parse_args()
# First Header line output in CSV File
self.far = ["Date", "Time", "PID", "User", "%CPU", "%Mem", "Threads", "Resident Mem", "Virtual Mem", "Command"]
self.cur_yr = "2018" # Specify Current year here
self.qp = re.compile(r'^q$|^quit$', re.I) # Case insensitive search pattern for quit
self.dtp = re.compile(r'E[SD]T ' + self.cur_yr + r'$') # Search for ending with EST/EDT & current year
class GetInput(Argsnregx):
def quitout(self, qstr):
self.qstr = qstr
if self.qp.search(self.qstr):
print("Quiting at users request...")
exit(0)
def getdt(self): # Get Date & Time in correct format
self.dttm = datetime.now()
self.dt = "-".join([str(self.dttm.year), str(self.dttm.month), str(self.dttm.day)])
self.tm = ":".join([str(self.dttm.hour), str(self.dttm.minute), str(self.dttm.second)])
def chkin(self): # Get all the Inputs
if not self.args.infile:
self.infile = input("Enter Filename with PS output or (q|Q) to quit: ").strip()
self.quitout(self.infile)
else:
self.infile = self.args.infile
if not self.args.pid:
self.pid = input("Enter PID for the process to be monitored or (q}Q) to quit: ").strip()
self.quitout(self.pid)
else:
self.pid = self.args.pid
if not self.args.user:
self.user = input("Enter Username of the process or (q|Q) to quit: ").strip()
self.quitout(self.user)
else:
self.user = self.args.user
if __name__ == "__main__":
gtinp = GetInput()
gtinp.getdt()
gtinp.chkin()
pup = re.compile(r'\b' + gtinp.pid + " " + gtinp.user + r'\b') # Search word boundaries with variable in pattern
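    # Example of the pattern built above (hypothetical values): with pid "1234"
    # and user "root" the compiled regex is r'\b1234 root\b', so only ps lines
    # whose PID and USER fields appear side by side as "1234 root" are treated
    # as process lines below.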
wfile = "/home/abhat/psout_" + gtinp.dt + ".csv"
jo = ",\t"
fstline = jo.join(gtinp.far)
with open(gtinp.infile) as psrf, open(wfile, 'w') as pswf:
pswf.write(fstline + "\n")
for psline in psrf:
if gtinp.dtp.search(psline): # Date line ending with EST/EDT & current year
dtary = psline.split()
strng1 = dtary[1] + " " + dtary[2]
pswf.write(jo.join([strng1, dtary[3]]) + jo)
elif pup.search(psline): # Search for PID & Username in PS output
ary1 = psline.split()
if len(ary1) > 10:
del ary1[10:]
pswf.write(jo.join(ary1) + "\n")
else:
pass
| [
"[email protected]"
] | |
01afceb569c6f7189fe92a4880be05e98b268961 | 156e28f7e9d5d6f240cc8e03d462c3451a870ecd | /test_match_func_names.py | f8633113bc9cd6f74294ba23a61a649f7bda9c2b | [] | no_license | augustovictor/python-pytest-lab | c225b3103e6502f6e672f08e7ce4840bf81cfb28 | 371a31b278bc1ba8bc436cac2dec43a5daba2547 | refs/heads/master | 2022-12-17T04:34:10.171571 | 2018-06-29T02:36:22 | 2018-06-29T02:36:22 | 135,964,672 | 0 | 0 | null | 2021-06-01T22:18:46 | 2018-06-04T03:13:06 | Python | UTF-8 | Python | false | false | 190 | py | import pytest
def test_should_match_this_name():
assert 1 == 1
def test_shouldnt_match_this_name():
assert 1 == 1
# pytest test_match_func_names.py -v -k '_match and not shouldnt' | [
"[email protected]"
] | |
c85032b63cbdc557d2481ca0b0b2b8af2bf6d57f | dd46c2eed4291a2b6895401e6c18442bb59c70de | /学习/pygame库/贪吃蛇1.0.py | 9b9b88be69f892b02f79b49b898d856c4287616a | [] | no_license | linzhongxiazhi/student_python | 0894b3f7e23b516a5ffc6bc056d3a3d77589338d | 0bbd71ea2f0b9c886f4cbecc9a1e2a3d8fec6ffe | refs/heads/master | 2023-06-30T21:38:23.624449 | 2021-08-07T15:20:03 | 2021-08-07T15:20:03 | 393,718,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,683 | py | import pygame, sys, time, random
from pygame.locals import *
# 定义颜色变量
redColour = pygame.Color(255, 0, 0)
blackColour = pygame.Color(0, 0, 0)
whiteColour = pygame.Color(255, 255, 255)
greyColour = pygame.Color(150, 150, 150)
# 定义gameOver函数
def gameOver(playSurface):
gameOverFont = pygame.font.Font('arial.ttf', 72)
gameOverSurf = gameOverFont.render('Game Over', True, greyColour)
gameOverRect = gameOverSurf.get_rect()
gameOverRect.midtop = (320, 10)
playSurface.blit(gameOverSurf, gameOverRect)
pygame.display.flip()
time.sleep(5)
pygame.quit()
sys.exit()
# 定义main函数
def main():
# 初始化pygame
pygame.init()
fpsClock = pygame.time.Clock()
# 创建pygame显示层
playSurface = pygame.display.set_mode((640, 480))
pygame.display.set_caption('Raspberry Snake')
# 初始化变量
snakePosition = [100, 100]
snakeSegments = [[100, 100], [80, 100], [60, 100]]
raspberryPosition = [300, 300]
raspberrySpawned = 1
direction = 'right'
changeDirection = direction
while True:
# 检测例如按键等pygame事件
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
# 判断键盘事件
if event.key == K_RIGHT or event.key == ord('d'):
changeDirection = 'right'
if event.key == K_LEFT or event.key == ord('a'):
changeDirection = 'left'
if event.key == K_UP or event.key == ord('w'):
changeDirection = 'up'
if event.key == K_DOWN or event.key == ord('s'):
changeDirection = 'down'
if event.key == K_ESCAPE:
pygame.event.post(pygame.event.Event(QUIT))
# 判断是否输入了反方向
if changeDirection == 'right' and not direction == 'left':
direction = changeDirection
if changeDirection == 'left' and not direction == 'right':
direction = changeDirection
if changeDirection == 'up' and not direction == 'down':
direction = changeDirection
if changeDirection == 'down' and not direction == 'up':
direction = changeDirection
# 根据方向移动蛇头的坐标
if direction == 'right':
snakePosition[0] += 20
if direction == 'left':
snakePosition[0] -= 20
if direction == 'up':
snakePosition[1] -= 20
if direction == 'down':
snakePosition[1] += 20
# 增加蛇的长度
snakeSegments.insert(0, list(snakePosition))
# 判断是否吃掉了树莓
if snakePosition[0] == raspberryPosition[0] and snakePosition[1] == raspberryPosition[1]:
raspberrySpawned = 0
else:
snakeSegments.pop()
# 如果吃掉树莓,则重新生成树莓
if raspberrySpawned == 0:
x = random.randrange(1, 32)
y = random.randrange(1, 24)
raspberryPosition = [int(x * 20), int(y * 20)]
raspberrySpawned = 1
# 绘制pygame显示层
playSurface.fill(blackColour)
for position in snakeSegments:
pygame.draw.rect(playSurface, whiteColour, Rect(position[0], position[1], 20, 20))
pygame.draw.rect(playSurface, redColour, Rect(raspberryPosition[0], raspberryPosition[1], 20, 20))
# 刷新pygame显示层
pygame.display.flip()
# 判断是否死亡
if snakePosition[0] > 620 or snakePosition[0] < 0:
gameOver(playSurface)
if snakePosition[1] > 460 or snakePosition[1] < 0:
for snakeBody in snakeSegments[1:]:
if snakePosition[0] == snakeBody[0] and snakePosition[1] == snakeBody[1]:
gameOver(playSurface)
# 控制游戏速度
fpsClock.tick(5)
if __name__ == "__main__":
main() | [
"1134393383"
] | 1134393383 |
ae7734e049fcefae6e173f67ed11572958ee6c53 | 3dc189017a38d6230fde6b2cbdc309b3d7198af9 | /contacts.py | d48779ef417b02586cecf3b65bdbe93c073ac5ff | [] | no_license | codejoncode/build-a-blog-web2py | 6bf27505126f1cf3b0bbddeb74702f2e52717af5 | 53215799b5446cb690dd39a1a18cc7bf6a06b4a1 | refs/heads/master | 2020-04-15T02:56:29.131249 | 2019-01-12T02:54:11 | 2019-01-12T02:54:11 | 164,329,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | # -*- coding: utf-8 -*-
# try something like
def index(): return dict(message="hello from contacts.py")
def add():
form = SQLFORM(db.contacts).process()
return locals()
def data():
rows = db(db.contacts).select()
return locals()
def filter():
#get count
rows1_count = db(db.contacts.state_name == 'CA').count()
#get all records, sorted by name
rows2_all_sorted_by_name = db(db.contacts).select(orderby=~db.contacts.last_name | db.contacts.first_name)
#filter, show only those whose last_name starts with M
rows3_startswith = db(db.contacts.last_name.startswith('M')).select(orderby=db.contacts.state_name | db.contacts.last_name)
return locals()
def view():
if request.args(0) is None:
rows = db(db.contacts).select(orderby=db.contacts.last_name | db.contacts.first_name)
else:
letter = request.args(0)
rows = db(db.contacts.last_name.startswith(letter)).select(orderby=db.contacts.last_name | db.contacts.first_name)
return locals()
def update():
record = db.contacts(request.args(0)) or redirect(URL('view'))
form = SQLFORM(db.contacts, record)
if form.process().accepted:
response.flash = T('Record Updated')
else:
response.flash = T('Please complete the form.')
return locals() | [
"[email protected]"
] | |
2593c52a740421c29980a6d9c58cdd80d6b05e5a | 65ba0ce7b8f2952a70ba745bcdf1d3d5e4ea310f | /src/pygo_plugin/proto/grpc_stdio_pb2_grpc.py | a137e14779c1ff8acaa07f3a75b9fe370e60750f | [
"MPL-2.0",
"Apache-2.0"
] | permissive | justinfx/pygo-plugin | b86820a97e9bd22e5274b21581a939a47502d528 | e83ad5f0a91174936bf5c2868aba84f094fe33d5 | refs/heads/main | 2023-07-02T21:08:31.236426 | 2021-08-11T20:03:51 | 2021-08-11T20:03:51 | 352,862,766 | 4 | 2 | Apache-2.0 | 2021-05-01T08:39:24 | 2021-03-30T03:49:38 | Python | UTF-8 | Python | false | false | 3,204 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from pygo_plugin.proto import grpc_stdio_pb2 as pygo__plugin_dot_proto_dot_grpc__stdio__pb2
class GRPCStdioStub(object):
"""GRPCStdio is a service that is automatically run by the plugin process
to stream any stdout/err data so that it can be mirrored on the plugin
host side.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.StreamStdio = channel.unary_stream(
'/plugin.GRPCStdio/StreamStdio',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=pygo__plugin_dot_proto_dot_grpc__stdio__pb2.StdioData.FromString,
)
class GRPCStdioServicer(object):
"""GRPCStdio is a service that is automatically run by the plugin process
to stream any stdout/err data so that it can be mirrored on the plugin
host side.
"""
def StreamStdio(self, request, context):
"""StreamStdio returns a stream that contains all the stdout/stderr.
This RPC endpoint must only be called ONCE. Once stdio data is consumed
it is not sent again.
Callers should connect early to prevent blocking on the plugin process.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GRPCStdioServicer_to_server(servicer, server):
rpc_method_handlers = {
'StreamStdio': grpc.unary_stream_rpc_method_handler(
servicer.StreamStdio,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=pygo__plugin_dot_proto_dot_grpc__stdio__pb2.StdioData.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'plugin.GRPCStdio', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GRPCStdio(object):
"""GRPCStdio is a service that is automatically run by the plugin process
to stream any stdout/err data so that it can be mirrored on the plugin
host side.
"""
@staticmethod
def StreamStdio(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/plugin.GRPCStdio/StreamStdio',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
pygo__plugin_dot_proto_dot_grpc__stdio__pb2.StdioData.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"[email protected]"
] | |
ca0d1cf09ce84ee5763e630588db125be8b05173 | 2be237f8737c970f2dcb0667244638a011108ea4 | /webtest/models.py | f574a46662781030d70b042b22344257b72f67a1 | [] | no_license | yujm2018/autotest | 5f34e1dc5bce96cb31735b4a8d100e4898970986 | 50a6f39245f474abd8bdbd504b5e92f0f91152fb | refs/heads/master | 2020-04-26T17:14:32.252845 | 2019-03-19T07:00:53 | 2019-03-19T07:00:53 | 173,706,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | from django.db import models
class Webcase(models.Model):
Product = models.ForeignKey(
'product.Product',
on_delete=models.CASCADE,
        null=True)  # related product id
    webcasename = models.CharField('用例名称', max_length=200)  # test case name
    webtestresult = models.BooleanField('测试结果')  # test result
    webtester = models.CharField('测试负责人', max_length=16)  # tester in charge
    create_time = models.DateTimeField('创建时间', auto_now=True)  # creation time - set automatically to the current time
class Meta:
verbose_name = 'web测试用例'
verbose_name_plural = 'web测试用例'
def __str__(self):
return self.webcasename
class Webcasestep(models.Model):
    Webcase = models.ForeignKey(Webcase, on_delete=models.CASCADE)  # related test case id
    webcasename = models.CharField('测试用例标题', max_length=200)  # test case title
    webteststep = models.CharField('测试步聚', max_length=200)  # test step
    webtestobjname = models.CharField('测试对象名称描述', max_length=200)  # description of the object under test
    webfindmethod = models.CharField('定位方式', max_length=200)  # element locating strategy
    webevelement = models.CharField('控件元素', max_length=800)  # control element (locator value)
    weboptmethod = models.CharField('操作方法', max_length=200)  # operation method
    webtestdata = models.CharField('测试数据', max_length=200, null=True)  # test data
    webassertdata = models.CharField('验证数据', max_length=200)  # verification data
    webtestresult = models.BooleanField('测试结果')  # test result
    create_time = models.DateTimeField('创建时间', auto_now=True)  # creation time - set automatically to the current time
def __str__(self):
return self.webteststep
| [
"[email protected]"
] | |
8d5bfc287da53aea57a2730d52c305f29e8dbbe7 | 4852e5970a8a30ba0ae81de051fd8ddc0254ec2a | /v1sualizati0n.py | 0dcbb9023265f0b3bb06cd466af06442044a54b3 | [] | no_license | DianaNeumann/r5nd0m-m3m0r1e5 | c2f6e9b276ea0d384546104d5f6b2dc5ff1121a9 | e7d2855bdc34d1e251bd10eb6eed9074486d3d76 | refs/heads/main | 2023-06-14T05:12:09.344690 | 2021-07-05T18:08:15 | 2021-07-05T18:08:15 | 326,486,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | import sys
import os
import math
from PIL import Image
IMAGE_WIDTH = 256 # Width of the final photo, in pixels
def main():
if len(sys.argv) != 2:
print('[-] Usage: python v1sualizati0n.py your_file')
exit(1)
input_file_name = sys.argv[1]
input_file = open(input_file_name, 'rb')
input_data = bytearray(input_file.read())
if len(input_data) == 0:
print("[-] File is empty.")
return exit(1)
image_size = (IMAGE_WIDTH, int(math.ceil(len(input_data) / (IMAGE_WIDTH * 1.0))))
image = Image.new("RGB", image_size, "white")
fill_image(input_data, image, image_size)
image.convert("P").save(input_file_name + ".png", "PNG")
def fill_image(input_data, image, image_size):
x_range = range(IMAGE_WIDTH)
y_range = range(image_size[1])
d_range = len(input_data)
pixel = image.load()
index = 0
for y in y_range:
for x in x_range:
pixel[x,y] = convert_color(input_data[index])
index += 1
if index >= d_range:
break
def convert_color(byte):
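    # Bucket each byte into one of four colour values: high bytes (>= 0x80),
    # bytes in the printable range (> 0x20), low control bytes (>= 0x01) and
    # null bytes each get their own colour.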
if byte >= 0x80:
return 0x000000
elif byte > 0x20:
return 0x0000ff
elif byte >= 0x01:
return 0xffff00
else:
return 0xffffff
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
2e3cc91c6885fe6c3a6dd5ef1088324c81cd7616 | 13b113a5effbf3cb31e456c4b5ccb00795bada99 | /main.py | 0faab044e40830d3322dc5dd1d1708830b5f7c06 | [
"MIT"
] | permissive | LuizFelipeAG/clothing_detect | 75e23d90911b99b0acbe7e180ad8bdc19a95fc52 | 596a6b4c100dba2f0ecb7ac240749dfeb8b25216 | refs/heads/main | 2023-05-14T20:57:43.289453 | 2021-06-11T23:20:24 | 2021-06-11T23:20:24 | 371,721,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from fastapi import FastAPI
from utils.io_utils import load_config
from utils.model_utils import load_model, load_vectorizer, predict
config = load_config()
model = load_model(config["paths"]["model"])
vectorizer = load_vectorizer()
app = FastAPI()
@app.get("/")
def read_root():
return "Up and running"
@app.get("/predict/")
def predict_url(url):
    """Prediction endpoint.
    Args:
        url ([str]): URL of the image
    Returns:
        [dict]: prediction dictionary
    """
return predict(url, vectorizer, model)
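# Example request (hypothetical image URL):
#   GET /predict/?url=https://example.com/images/shirt.jpg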
| [
"[email protected]"
] | |
ce9109623245d5b42b72378d6a3b91c95cc43305 | 939ce3b263bcf2a349cd9baa87c7b14a20a01927 | /script.py | c1a430e9a7c3329cf313b5f740767ff7bffe6fd9 | [] | no_license | petehewage/PyTranslator | c9972b6f00a4306b69017436934a31da9c115ac9 | c81b0fe7abefdcffc838097b110ad2a50b247372 | refs/heads/master | 2022-12-15T15:13:18.726820 | 2020-08-29T15:52:38 | 2020-08-29T15:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,973 | py | from playsound import playsound
from gtts import gTTS
import sys
import os
import time
import goslate
import speech_recognition as sr
import pyaudio
from googletrans import Translator
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
in_use = True
srinp = ""
thelang = ''
thelang_keyVal = ''
convo_mode = False
oppoLang = ''
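# The two dictionaries below map a spoken language name to the code used for
# translation and text-to-speech (languageDict) and to the locale code passed
# to Google speech recognition when listening in that language (recieveLangDict).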
languageDict = {"french":"fr",
"chinese":"zh-cn",
"srilankan":"si",
"sri lankan":"si",
"tamil":"ta",
"russian":"ru",
"polish":"pl",
"romanian":"ro",
"dutch":"nl",
"latin":"la",
"japanese":"ja",
"hindi":"hi",
"spanish":"es",
"german":"de",
"greek":"el",
"italian":"it"}
recieveLangDict = {"french":'fr-FR',
"chinese":"zh-CN",
"srilankan":"si-LK",
"sri lankan":"si-LK",
"russian":"ru-RU",
"polish":"pl",
"romanian":"ro-RO",
"dutch":"dl-DL",
"latin":"la",
"japanese":"ja",
"hindi":"hi-IN",
"spanish":"es-ES",
"german":"de-DE",
"greek":"el-GR",
"italian":"it-IT"}
def initialise():
global languageDict
global thelang
is_init = False
global oppoLang
global convo_mode
speak("What language shall your voice be translated into", 'en-uk')
retrieve('en-GB')
#print(srinp)
for key in languageDict.keys():
if srinp.lower() == key:
is_init = True
thelang = languageDict[key]
thelang_keyVal = key
oppoLang = recieveLangDict[key]
print(thelang + " " + oppoLang)
if is_init == False:
speak("Please say a valid Language. Try Again", 'en-uk')
initialise()
else:
speak("Okay, one last step. Would you like to enter conversation mode?", 'en-uk')
retrieve('en-GB')
    if srinp.lower() == "yes" or srinp.lower() == "okay" or srinp.lower() == "sure":
convo_mode = True
else:
speak("Okay then, single translation it is!", 'en-uk')
convo_mode = False
speak("Okay, Setup complete, the translator is ready", 'en-uk')
print("")
print("")
print("--------------------------")
print("")
print("")
def retrieve(langRET):
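    # Listen on the default microphone, recognise the speech with Google's
    # recogniser in the given locale and store the text in the global srinp.
    # The spoken keywords "setup"/"set up" return to setup, while
    # "goodbye"/"exit"/"leave" end the session.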
global srinp
global in_use
r = sr.Recognizer()
with sr.Microphone() as source:
print(bcolors.OKBLUE + "MICROPHONE ON:" + bcolors.ENDC + "Please Speak")
playsound("beep.mp3")
audio = r.listen(source)
try:
#print(langRET)
text = r.recognize_google(audio, language=langRET)
#print("x " +r.recognize_google(audio, language='fr-FR') + " y")
print(bcolors.WARNING + "You said :" + bcolors.ENDC +" {}".format(text))
if text == "setup" or text == "set up":
print("-------SETUP-------")
speak("Returning to setup", "en")
main()
elif text == "goodbye" or text == "exit"or text == "leave":
print("-------EXIT-------")
speak("Goodbye", "en")
in_use = False
srinp = text
except Exception as e:
print("Could not make that out, please say it louder")
speak("Could not make that out, please say it louder", 'en')
print(e)
def translate_to(inp,lang,langIN):
global thetranslation
text = inp
translator = Translator()
translatedText = translator.translate(inp,dest=lang,src=langIN)
print(bcolors.HEADER + "----TRANSLATION----" + bcolors.ENDC)
print("TRANSLATION = " + translatedText.text)
speak(translatedText.text,lang)
def speak(temp, lang):
voice = gTTS(text=temp, lang=lang)
voice.save("voice.mp3")
print(bcolors.FAIL + "Speaking," + bcolors.ENDC + bcolors.OKGREEN + "DO NOT SPEAK" + bcolors.ENDC)
playsound("voice.mp3")
time.sleep(1)
os.remove("voice.mp3")
def prereq():
skip = False
speak("Hello, welcome to PyTranslator. Do you want to skip the tutorial?", "en")
retrieve('en-gb')
if srinp.lower() == "yes" or srinp.lower() == "okay" or srinp.lower() == "sure" or srinp.lower() == "please":
skip = True
initialise()
else:
speak("Okay, welcome to PyTranslator", "en")
speak("This is a tool for translation, you can translate in a conversation, between english and another language, or single translation", "en")
speak("To use this tool properly, remember, only speak after you hear a beep, that means your microphone is ready", "en")
speak("If you are in conversation mode, the microphone alternates between english and your other language", "en")
speak("Supported languages include:", "en")
        speak("French, Chinese, Sri Lankan, Tamil, Russian, Polish, Romanian, Dutch, Latin, Japanese, Hindi, Spanish, German, Greek, Italian", "en")
speak("If you would like to review this list, you can do so on GitHub", "en")
speak("At any time, if you would like to return to setup, to change language or mode, say. setup. when the microphone is receiving english", "en")
speak("To leave the translator, say leave, or exit in english", "en")
speak("Tutorial complete! Beginning setup...", "en")
initialise()
def main():
initialise()
print("--------------LANGUAGE1 enGB-----------------")
print("")
if convo_mode == True:
while in_use == True:
retrieve('en-GB')
if in_use == True:
print("")
translate_to(srinp,thelang,'en')
print("")
print("--------------LANGUAGE2-----------------")
print("")
if in_use == True:
retrieve(oppoLang)
if in_use == True:
translate_to(srinp,'en',thelang)
print("")
print("")
print("--------------LANGUAGE1 enGB-----------------")
print("")
elif convo_mode == False:
while in_use == True:
retrieve('en-GB')
if in_use == True:
print("")
translate_to(srinp,thelang,'en')
print("")
print("")
print("-------------------------------")
print("")
main()
#initialise()
#retrieve('hi-IN')
| [
"[email protected]"
] | |
770e71fd92eeb7374603e5616670abbe1c52790f | 9abd86eb8afadcc852292597862888a4afda8152 | /postman/urls.py | 05f81583bcddc101ab1e6555d8cdfd2890065d96 | [] | no_license | wanghailong50/cmfz2 | 7fb1be1d2dd9f7e615759d896cd1b4ecebdc1c6d | 78b8884293a086ffd2cc47f5a28fc2b86bd5beca | refs/heads/master | 2022-10-07T05:02:31.163882 | 2020-06-09T13:55:21 | 2020-06-09T13:55:21 | 271,017,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
from django.contrib import admin
from django.urls import path,include
from postman import views
app_name='postman'
urlpatterns = [
path('admin/', admin.site.urls),
path('first_page/',views.first_page,name='first_page'),
path('album_detail/',views.album_detail,name='album_detail'),
path('register/',views.register,name='register'),
path('change_user/',views.change_user,name='change_user'),
]
| [
"[email protected]"
] | |
9235a4e25c904a61ce16f1660c39cd3e8c9f63ff | 9a819fc91e17ef9a44e45cf68e76cf696381d06d | /snake_bnb_project/src/program_hosts.py | f95975366365a90441d74c0a94f79a315eac1054 | [] | no_license | Gautam3994/Dark-Knight | aef1d6383e0785130db75e80ed40f544a120579e | 327b2d58851a42da1b707addea73e40fac6a61cc | refs/heads/master | 2022-12-01T11:58:39.857379 | 2020-09-05T18:07:51 | 2020-09-05T18:07:55 | 203,866,327 | 0 | 1 | null | 2022-11-24T09:16:18 | 2019-08-22T20:14:43 | Python | UTF-8 | Python | false | false | 7,024 | py | from colorama import Fore
from infrastructure.switchlang import switch
import infrastructure.state as state
from services import data_service
import dateutil.parser, datetime
def run():
print(' ****************** Welcome host **************** ')
print()
show_commands()
while True:
action = get_action()
with switch(action) as s:
s.case('c', create_account)
s.case('a', log_into_account)
s.case('l', list_cages)
s.case('r', register_cage)
s.case('u', update_availability)
s.case('v', view_bookings)
s.case('m', lambda: 'change_mode')
s.case(['x', 'bye', 'exit', 'exit()'], exit_app)
s.case('?', show_commands)
s.case('', lambda: None)
s.default(unknown_command)
if action:
print()
if s.result == 'change_mode':
return
def show_commands():
print('What action would you like to take:')
print('[C]reate an account')
print('Login to your [a]ccount')
print('[L]ist your cages')
print('[R]egister a cage')
print('[U]pdate cage availability')
print('[V]iew your bookings')
print('Change [M]ode (guest or host)')
print('e[X]it app')
print('[?] Help (this info)')
print()
def create_account():
print(' ****************** REGISTER **************** ')
name = input("What is your name?")
email = input("What is your mail id?").strip().lower()
old_account = data_service.find_account_by_email(email)
if old_account:
error_msg(f"The account with the mail id {email} already exists")
return
state.active_account = data_service.create_account(name, email)
print("The account was successfully created")
def log_into_account():
print(' ****************** LOGIN **************** ')
email = input("Enter your mail id?").strip().lower()
login_mail = data_service.find_account_by_email(email)
if not login_mail:
error_msg("This email is not registered with any account.")
return
state.active_account = login_mail
print("You have logged in successfully")
def register_cage():
print(' ****************** REGISTER CAGE **************** ')
if not state.active_account:
        error_msg("You must have an account to register")
return
while True:
meters = input("Length of the cage required?")
if not meters:
error_msg("This is a required field")
else:
try:
length = float(meters)
except:
error_msg("You must enter numbers only")
else:
break
carpeted = input("Is it carpeted [y, n]?").lower().startswith('y')
toys = input("It has toys [y, n]?").lower().startswith('y')
dangerous_snake = input("Is it a venomous snake [y, n]?").lower().startswith('y')
client = input("Who is getting this cage?")
cage = data_service.register_cage_host(state.active_account, client, length, carpeted, toys, dangerous_snake)
state.reload_account()
    success_msg(f"Registered cage with id {cage.id}")
def list_cages(supress_header=False):
if not supress_header:
print(' ****************** Your cages **************** ')
if not state.active_account:
error_msg("You must login to find the list of cages")
return
your_cages = data_service.get_cages(state.active_account)
print(f"You have {len(your_cages)} cages registered")
for cage_no, cage in enumerate(your_cages):
print(f"{cage_no + 1}- The {cage.name} is {cage.square_meters} meters")
for booking in cage.bookings:
print(f"Booking: {booking.check_in_date}, {(booking.check_out_date - booking.check_in_date).days}, "
f"booked{'Yes' if booking.booked_date is not None else 'No'} ")
def update_availability():
print(' ****************** Add available date **************** ')
if not state.active_account:
error_msg("You must login to add the availability of your cages")
return
while True:
list_cages(supress_header=False)
cage_to_check = input("Enter the number of the cage which you wanna update")
if not cage_to_check:
            print("Please enter a valid cage number")
else:
try:
cage_number = int(cage_to_check)
except:
                print("Please enter a valid cage number")
else:
break
cages = data_service.get_cages(state.active_account)
selected_cage = cages[cage_number - 1]
success_msg(f"You have selected the cage {selected_cage.name}")
start_date = dateutil.parser.parse(input("Enter an available start date [yyyy-mm-dd]"))
no_of_days_required = int(input("Enter the number of days you want to make it available"))
data_service.add_availability(selected_cage, start_date, no_of_days_required)
success_msg(
f"The {selected_cage.name} has been marked available for {no_of_days_required} days starting from {start_date}")
def view_bookings():
print(' ****************** Your bookings **************** ')
if not state.active_account:
        error_msg("You must log in to view your bookings")
return
your_cages = data_service.get_cages(state.active_account)
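    # Collect every (cage, booking) pair whose booking has actually been made
    # (booked_date is set).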
bookings = [(cage, booking) for cage in your_cages for booking in cage.bookings if booking.booked_date is not None]
for cage, booking in bookings:
print(" * Cage : {}, booked date: {}, from {} for {} days".format(cage.name,
datetime.date(booking.booked_date.year,
booking.booked_date.month,
booking.booked_date.day),
datetime.date(booking.check_in_date.year,
booking.check_in_date.month,
booking.check_in_date.day),
(booking.check_out_date - booking.check_in_date).days
))
def exit_app():
print()
print('bye')
raise KeyboardInterrupt()
def get_action():
text = '> '
if state.active_account:
text = f'{state.active_account.name}> '
action = input(Fore.YELLOW + text + Fore.WHITE)
return action.strip().lower()
def unknown_command():
print("Sorry we didn't understand that command.")
def success_msg(text):
print(Fore.LIGHTGREEN_EX + text + Fore.WHITE)
def error_msg(text):
print(Fore.LIGHTRED_EX + text + Fore.WHITE)
| [
"[email protected]"
] | |
621bdc09aff1254bdeb0b5cea4249380e5b4b554 | b22203455a8131ffc72df358ae0add94430d505a | /Library/Locator.py | ab67b46850074fbdf1df460ac50ef25cc6de87ca | [] | no_license | MonishGit/RobotFramework | f4adfc303ecb8d214fc51476defeb3b6bd95eae7 | 7aac9d702a406686fc8f7749ced65c50cacd6878 | refs/heads/master | 2020-08-15T07:51:08.465689 | 2019-10-18T13:11:11 | 2019-10-18T13:11:11 | 215,304,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import json
import jsonpath
import os
def get_element_locator(locatorname):
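    # Load the element locator JSON file and return the first value matching
    # the given jsonpath expression.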
    with open("C:/Users/mopawar/PycharmProjects/Creating_NewFramework/Data/EelementLocator.json") as f:
        accessJson = json.loads(f.read())
    getLocators = jsonpath.jsonpath(accessJson, locatorname)
return getLocators[0] | [
"[email protected]"
] | |
df24293736dfe5dde88d52d02ccb88c1b609cf9c | 1875015b968db17337ff6e95ebbc30c3309ced2b | /google/cloud/bigquery_storage_v1beta1/proto/storage_pb2_grpc.py | 1e5bf4799926e5582e7215266fb1c8a74606bd1d | [
"Apache-2.0"
] | permissive | emkornfield/python-bigquery-storage | 230a0d76d434c3727a1b918e5823e66ee140dce5 | a6dffb892cbfd59ac25fcb20e4f34779377d785b | refs/heads/master | 2022-11-23T19:03:54.531481 | 2020-07-01T00:08:21 | 2020-07-01T00:08:21 | 278,273,270 | 0 | 0 | null | 2020-07-09T05:40:16 | 2020-07-09T05:40:16 | null | UTF-8 | Python | false | false | 9,253 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.bigquery_storage_v1beta1.proto import (
storage_pb2 as google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigQueryStorageStub(object):
"""BigQuery storage API.
The BigQuery storage API can be used to read data stored in BigQuery.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateReadSession = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession",
request_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.FromString,
)
self.ReadRows = channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows",
request_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.FromString,
)
self.BatchCreateReadSessionStreams = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams",
request_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.FromString,
)
self.FinalizeStream = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream",
request_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SplitReadStream = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream",
request_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.FromString,
)
class BigQueryStorageServicer(object):
"""BigQuery storage API.
The BigQuery storage API can be used to read data stored in BigQuery.
"""
def CreateReadSession(self, request, context):
"""Creates a new read session. A read session divides the contents of a
BigQuery table into one or more streams, which can then be used to read
data from the table. The read session also specifies properties of the
data to be read, such as a list of columns or a push-down filter describing
the rows to be returned.
A particular row can be read by at most one stream. When the caller has
reached the end of each stream in the session, then all the data in the
table has been read.
Read sessions automatically expire 24 hours after they are created and do
not require manual clean-up by the caller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ReadRows(self, request, context):
"""Reads rows from the table in the format prescribed by the read session.
Each response contains one or more table rows, up to a maximum of 10 MiB
per response; read requests which attempt to read individual rows larger
than this will fail.
Each request also returns a set of stream statistics reflecting the
estimated total number of rows in the read stream. This number is computed
based on the total table size and the number of active streams in the read
session, and may change as other streams continue to read data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BatchCreateReadSessionStreams(self, request, context):
"""Creates additional streams for a ReadSession. This API can be used to
dynamically adjust the parallelism of a batch processing task upwards by
adding additional workers.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def FinalizeStream(self, request, context):
"""Triggers the graceful termination of a single stream in a ReadSession. This
API can be used to dynamically adjust the parallelism of a batch processing
task downwards without losing data.
This API does not delete the stream -- it remains visible in the
ReadSession, and any data processed by the stream is not released to other
streams. However, no additional data will be assigned to the stream once
this call completes. Callers must continue reading data on the stream until
the end of the stream is reached so that data which has already been
assigned to the stream will be processed.
This method will return an error if there are no other live streams
in the Session, or if SplitReadStream() has been called on the given
Stream.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SplitReadStream(self, request, context):
"""Splits a given read stream into two Streams. These streams are referred to
as the primary and the residual of the split. The original stream can still
be read from in the same manner as before. Both of the returned streams can
also be read from, and the total rows return by both child streams will be
the same as the rows read from the original stream.
Moreover, the two child streams will be allocated back to back in the
original Stream. Concretely, it is guaranteed that for streams Original,
Primary, and Residual, that Original[0-j] = Primary[0-j] and
Original[j-n] = Residual[0-m] once the streams have been read to
completion.
This method is guaranteed to be idempotent.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_BigQueryStorageServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateReadSession": grpc.unary_unary_rpc_method_handler(
servicer.CreateReadSession,
request_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.SerializeToString,
),
"ReadRows": grpc.unary_stream_rpc_method_handler(
servicer.ReadRows,
request_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.SerializeToString,
),
"BatchCreateReadSessionStreams": grpc.unary_unary_rpc_method_handler(
servicer.BatchCreateReadSessionStreams,
request_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.SerializeToString,
),
"FinalizeStream": grpc.unary_unary_rpc_method_handler(
servicer.FinalizeStream,
request_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"SplitReadStream": grpc.unary_unary_rpc_method_handler(
servicer.SplitReadStream,
request_deserializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery__storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.bigquery.storage.v1beta1.BigQueryStorage", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| [
"[email protected]"
] | |
1c07941454a843cf848a97e1f4e55353ee9ed6a7 | d542c384a8a8fdea29f24010f10f62f59f5a1058 | /manage.py | 6560b6188f4b3ad52a49eec9b00a8977ad43f6b4 | [] | no_license | DanielKalinin/galamart_tracker_django | a10e5e84ef0e4dc2f5c3fdb3f1bf0501e61175fc | faaca711adf93d3a9656c93c2ceb1f6a7cfb3e70 | refs/heads/master | 2021-05-07T02:27:30.719152 | 2017-11-13T15:49:19 | 2017-11-13T15:49:19 | 110,566,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hel.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c9c7e2bfde0e9f30320765355d36f8fba7e065bb | bdfb6084a33e4b443ffc2a97673ecbfa736d947b | /.history/vision/tensorflow_object_detect/scripts/detect_lane_20210224073617.py | 4b9d7673c398293b97536af85ab936b7119f8430 | [
"MIT"
] | permissive | robcn/Autopilot-Demo | 5c830a0f721d3e8df864c0fcb26e9ea280bbe3fe | 0b7178ae3f417f529d7015373a1e51eb71df28ab | refs/heads/master | 2023-03-16T00:20:31.498672 | 2021-02-24T13:34:06 | 2021-02-24T13:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,020 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rospy
import numpy as np
import cv2
from cv_bridge import CvBridge
from std_msgs.msg import UInt8, Float64
from sensor_msgs.msg import Image, CompressedImage
import time
# from vision_msgs.msg import Center
class DetectLane():
def __init__(self):
self.sub_image_original = rospy.Subscriber('/camera/image', Image, self.cbFindLane, queue_size = 1)
self.pub_image_detect = rospy.Publisher('/detect/lane', Image, queue_size = 1)
self.pub_center_white_lane = rospy.Publisher('/control/white_lane', Float64, queue_size = 1)
self.pub_center_yellow_lane = rospy.Publisher('/control/yellow_lane', Float64, queue_size = 1)
self.pub_center = rospy.Publisher('/control/center', Float64, queue_size = 1)
self.cvBridge = CvBridge()
self.counter = 1
self.hue_white_l = 0
self.hue_white_h = 179
self.saturation_white_l = 0
self.saturation_white_h = 30
self.lightness_white_l = 221
self.lightness_white_h = 255
self.hue_yellow_l = 26
self.hue_yellow_h = 34
self.saturation_yellow_l = 43
self.saturation_yellow_h = 255
self.lightness_yellow_l = 46
self.lightness_yellow_h = 255
def cbFindLane(self, image_msg):
# Change the frame rate by yourself. Now, it is set to 1/3 (10fps).
# Unappropriate value of frame rate may cause huge delay on entire recognition process.
# This is up to your computer's operating power.
if self.counter % 3 != 0:
self.counter += 1
return
else:
self.counter = 1
cv_image = self.cvBridge.imgmsg_to_cv2(image_msg, "bgr8")
# find White and Yellow Lanes
self.maskLane(cv_image)
# yellow_fraction, cv_yellow_lane = self.maskYellowLane(cv_image)
def maskLane(self,image):
# convert image to hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # thresholds for the HSV inRange masks (white & yellow)
        Hue_white_l = self.hue_white_l
        Hue_white_h = self.hue_white_h
        Saturation_white_l = self.saturation_white_l
        Saturation_white_h = self.saturation_white_h
        Lightness_white_l = self.lightness_white_l
        Lightness_white_h = self.lightness_white_h
        Hue_yellow_l = self.hue_yellow_l
        Hue_yellow_h = self.hue_yellow_h
        Saturation_yellow_l = self.saturation_yellow_l
        Saturation_yellow_h = self.saturation_yellow_h
        Lightness_yellow_l = self.lightness_yellow_l
        Lightness_yellow_h = self.lightness_yellow_h
        # define the white and yellow colour ranges in HSV
        lower_white = np.array([Hue_white_l, Saturation_white_l, Lightness_white_l])
        upper_white = np.array([Hue_white_h, Saturation_white_h, Lightness_white_h])
        lower_yellow = np.array([Hue_yellow_l, Saturation_yellow_l, Lightness_yellow_l])
        upper_yellow = np.array([Hue_yellow_h, Saturation_yellow_h, Lightness_yellow_h])
# Threshold the HSV image to get only white colors
mask_white = cv2.inRange(hsv, lower_white, upper_white)
mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow)
kernel = np.ones((5,5))
erosion_white = cv2.erode(mask_white,kernel)
erosion_yellow = cv2.erode(mask_yellow,kernel)
Gaussian_white = cv2.GaussianBlur(erosion_white, (5,5),0)
Gaussian_yellow = cv2.GaussianBlur(erosion_yellow, (5,5),0)
# findContours of image
contours_white, hierarchy_white = cv2.findContours(Gaussian_white, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours_yellow, hierarchy_yellow = cv2.findContours(Gaussian_yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# Get the position of where value equals 255 to find contours position
pos_white = np.where(Gaussian_white == 255)
pos_yellow = np.where(Gaussian_yellow == 255)
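        # np.where returns (row indices, column indices), so pos_*[0] holds the
        # y coordinates and pos_*[1] the x coordinates of the masked pixels.
        # Only rows near the bottom of the frame (y > 230 for white, y > 210
        # for yellow) are averaged below.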
pos_white_sum = 0
pos_yellow_sum = 0
pos_white_len = 0
pos_yellow_len = 0
for i in range(len(pos_white[0])):
if pos_white[0][i] > 230:
pos_white_sum += pos_white[1][i]
pos_white_len += 1
for i in range(len(pos_yellow[0])):
if pos_yellow[0][i] > 210:
pos_yellow_sum += pos_yellow[1][i]
pos_yellow_len += 1
pos_white_x = pos_white_sum / pos_white_len
pos_yellow_x = pos_yellow_sum / pos_yellow_len
# print(pos_yellow[0])
# print(pos_yellow[1])
print(pos_white_x)
print(pos_yellow_x)
# # draw the contours in origin image
# cv2.drawContours(image, contours_white, -1, (139,104,0), 3)
# cv2.drawContours(image, contours_yellow, -1, (139,104,0), 3)
# # try:
# white_center = self.calculate_average(contours_white[0])
# # print("white: ",white_center)
# # except:
# # is_detect_white = 0
# # print("The Camera Can`t Catch The White Lane.")
# # try:
# yellow_center = self.calculate_average(contours_yellow[0])
# # print("yellow: ",yellow_center)
# # except:
# # is_detect_yellow = 0
# # print("The Camera Can`t Catch The Yellow Lane.")
# # Publish Image
# self.pub_image_detect.publish(self.cvBridge.cv2_to_imgmsg(image,'bgr8'))
# # Publish Center
# self.pub_center_white_lane.publish(white_center)
# self.pub_center_yellow_lane.publish(yellow_center)
# self.pub_center.publish((white_center+yellow_center)/2)
def calculate_average(self,input):
sum_x = 0
for i in input:
print(i[0][1])
if i[0][1] > 200:
sum_x += i[0][0]
return sum_x/len(input)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('detect_lane')
node = DetectLane()
node.main() | [
"[email protected]"
] | |
b4e3f251554702622795308b65c3662dca3efff2 | bdaff2ef9b86450da50cd6ab88bf5810d6d9cd1f | /explosion.py | 22a725730ee156cad13e1c301fb81b917f1c6546 | [] | no_license | s3704429/xBombs | 610be1803ef6274b8ee27cc127593f40b5e81ebf | 4a82c378f54fd236d62a8d341ba19c27db081f2d | refs/heads/master | 2021-04-12T03:31:44.033219 | 2018-05-19T11:10:27 | 2018-05-19T11:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | # class for explosion graphics
import pygame
class Fireball(object):
'''
classdocs
'''
def __init__(self, board=0, gridCoord=0, powerup=0):
self.gridCoord = gridCoord
self.powerup = powerup
self.board = board
'''
Constructor
'''
self.material = "explosion" # string
self.fuse = 14 # int
#self.droppedBy = player #
#self.gridPosition = gridPosition # list
self.frame = 0
self.animate = [pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast.png'),
pygame.image.load('images/explode/blast1.png'),
pygame.image.load('images/explode/blast1.png'),
pygame.image.load('images/explode/blast2.png'),
pygame.image.load('images/explode/blast2.png'),
#pygame.image.load('images/explode/blast2.png'),
pygame.image.load('images/explode/blast3.png'),
pygame.image.load('images/explode/blast3.png'),
#pygame.image.load('images/explode/blast3.png'),
#pygame.image.load('images/explode/blast1.png'),
#pygame.image.load('images/explode/blast1.png'),
#pygame.image.load('images/explode/blast1.png')]
]
self.image = self.animate[self.frame]
pass
def animateExplosion(self):
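        # Advance the explosion animation one step: the frame index wraps after
        # the last frame, the fuse counts down on every call and, once the fuse
        # reaches 0, the stored power-up is written back into the board cell
        # the explosion occupied.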
if self.fuse == 0 and self.gridCoord != 0:
self.board[self.gridCoord[0]][self.gridCoord[1]] = self.powerup
if self.frame == 13:
self.fuse -= 1
self.image = self.animate[self.frame]
self.frame = 0
else:
self.frame += 1
self.fuse -= 1
self.image = self.animate[self.frame]
| [
"[email protected]"
] | |
481f2671c9ce0db5b50bcaebd4377a044fd44018 | 32b9bcbed4641025dbe14196c22fb14e03329c65 | /node_modules/ccap/build/config.gypi | f6c3a3372bf2ac80e8a167f7964f674ea471d874 | [
"MIT"
] | permissive | 343829084/microblog | 0538d2376df08678ca3ac4308c3e4726bad2894a | cc67010860afa1193c124ebe5604e5c881a83954 | refs/heads/master | 2016-09-01T16:12:54.217514 | 2016-01-28T08:32:13 | 2016-01-28T08:32:13 | 50,162,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,806 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "/home/iojs/bin/python",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/gpr/.node-gyp/5.4.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/3.3.12 node/v5.4.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/gpr/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"progress": "true",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr/local",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/gpr/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "5.4.1",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/gpr/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"[email protected]"
] | |
602ddc0ee396f621056fbf937d332f0ded0b491c | 23f807d6105f6184238b992db8d3ea6bb8837867 | /openvim/openflow_thread.py | d4d33b0496628611c35bed7fba919fd17cd2145b | [
"Apache-2.0"
] | permissive | msgpo/openmano | a8bce3379fba055f44d9d6186c857770e153cd6e | 4f21dbac5b3c2e955382556453dee58dcce19bf2 | refs/heads/master | 2021-05-28T08:43:04.103872 | 2015-03-17T17:06:41 | 2015-03-17T17:07:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,774 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: [email protected]
##
'''
This thread interacts with an OpenFlow Floodlight controller to create dataplane connections
'''
__author__="Alfonso Tierno"
__date__ ="$28-oct-2014 12:07:15$"
import json
import utils.auxiliary_functions as af
import threading
import time
import Queue
import requests
import itertools
class openflow_thread(threading.Thread):
def __init__(self, of_url, of_dpid, db, db_lock, of_test, pmp_with_same_vlan):
threading.Thread.__init__(self)
self.dpid= str(of_dpid)
self.db = db
self.pmp_with_same_vlan = pmp_with_same_vlan
self.name = "openflow"
self.url = of_url
self.test = of_test
self.db_lock = db_lock
self.pp2ofi={} # Physical Port 2 OpenFlow Index
#self.curlPoster=pycurl.Curl()
#self.curlGetter=curl.Curl()
self.flowsctr=0 # flows counter to generate unique flow names
self.headers = {'content-type':'application/json', 'Accept':'application/json'}
self.queueLock = threading.Lock()
self.taskQueue = Queue.Queue(50)
def get_of_controller_info(self):
if self.test:
return 0, None
try:
of_response = requests.get(self.url+"/wm/core/controller/switches/json", headers=self.headers)
#print vim_response.status_code
if of_response.status_code != 200:
print self.name, ": get_of_controller_info:", self.url, of_response
raise requests.exceptions.RequestException("Openflow response " + str(of_response.status_code))
info = of_response.json()
index = -1
for i in range(0,len(info)):
if info[i]["dpid"] == self.dpid:
index = i
break
if index == -1:
text = "Error "+self.dpid+" not present in controller "+self.url
print self.name, ": get_of_controller_info ERROR", text
return -1, text
else:
for port in info[index]["ports"]:
self.pp2ofi[ str(port["name"]) ] = str(port["portNumber"] )
print self.name, ": get_of_controller_info ports:", self.pp2ofi
return 0, None
except requests.exceptions.RequestException, e:
print self.name, ": get_of_controller_info Exception:", str(e)
return -1, str(e)
except ValueError, e: # the case that JSON can not be decoded
print self.name, ": get_of_controller_info Exception:", str(e)
return -1, str(e)
def del_flow(self, flow_name):
if self.test:
print self.name, ": FAKE del_flow", flow_name
return 0, None
try:
of_response = requests.delete(self.url+"/wm/staticflowentrypusher/json",
headers=self.headers, data='{"switch": "'+self.dpid+'","name":"'+flow_name+'"}')
print self.name, ": del_flow", flow_name, of_response
#print vim_response.status_code
if of_response.status_code != 200:
raise requests.exceptions.RequestException("Openflow response " + str(of_response.status_code))
return 0, None
except requests.exceptions.RequestException, e:
print self.name, ": del_flow", flow_name, "Exception:", str(e)
return -1, str(e)
def new_flow(self, data):
if self.test:
print self.name, ": FAKE new_flow", data
return 0, None
try:
of_response = requests.post(self.url+"/wm/staticflowentrypusher/json",
headers=self.headers, data=json.dumps(data) )
print self.name, ": new_flow():", data, of_response
#print vim_response.status_code
if of_response.status_code != 200:
raise requests.exceptions.RequestException("Openflow response " + str(of_response.status_code))
return 0, None
except requests.exceptions.RequestException, e:
print self.name, ": new_flow Exception:", str(e)
return -1, str(e)
def insert_task(self, task, *aditional):
try:
self.queueLock.acquire()
task = self.taskQueue.put( (task,) + aditional, timeout=5)
self.queueLock.release()
return 1, None
except Queue.Full:
return -1, "timeout inserting a task over openflow thread " + self.name
def run(self):
while True:
self.queueLock.acquire()
if not self.taskQueue.empty():
task = self.taskQueue.get()
else:
task = None
self.queueLock.release()
if task is None:
time.sleep(1)
continue
print self.name, ": processing task", task[0]
if task[0] == 'update-net':
self.update_of_flows(task[1])
elif task[0] == 'clear-all':
self.clear_all_flows(task[1])
elif task[0] == 'exit':
self.terminate()
return 0
else:
print self.name, ": unknown task", task
def terminate(self):
print self.name, ": exit from openflow_thread"
def update_of_flows(self, net_id):
self.db_lock.acquire()
result, content = self.db.get_table(FROM='nets', SELECT=('type','admin_state_up', 'vlan'),WHERE={'uuid':net_id} )
self.db_lock.release()
if result < 0:
print self.name, ": update_of_flows() ERROR getting net", content
return -1, content
elif result==0:
#net has been deleted
ifaces_nb = 0
else:
net = content[0]
if net['admin_state_up'] == 'false':
ifaces_nb = 0
else:
self.db_lock.acquire()
ifaces_nb, ports = self.db.get_table(
FROM='ports',
SELECT=('switch_port','vlan','vlan_changed','uuid','mac','type'),
WHERE={'net_id':net_id, 'admin_state_up':'true', 'status':'ACTIVE'} )
self.db_lock.release()
if ifaces_nb < 0:
print self.name, ": update_of_flows() ERROR getting ports", ports
return -1, ports
#Get the name of flows that will be affected by this NET or net_id==NULL that means net deleted (At DB foreign key: On delete set null)
self.db_lock.acquire()
result, flows = self.db.get_table(FROM='of_flows', SELECT=('name','id'),WHERE={'net_id':net_id},WHERE_OR={'net_id':None} )
self.db_lock.release()
if result < 0:
print self.name, ": update_of_flows() ERROR getting flows", flows
return -1, flows
elif result > 0:
#delete flows
for flow in flows:
#print self.name, ": update_of_flows() Deleting", flow['name']
r,c= self.del_flow(flow['name'])
self.db_lock.acquire()
if r>=0:
self.db.delete_row_by_key('of_flows', 'id', flow['id'])
else:
#keep the flow, but put in actions the error
self.db.update_rows('of_flows', {'actions':c}, {'id':flow['id']})
self.db_lock.release()
tagged = None
if ifaces_nb < 2:
return 0, 'Success'
if net['type'] == 'ptp':
if ifaces_nb > 2:
print self.name, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+str(ifaces_nb)+' interfaces.'
return -1, 'Error, network '+str(net_id)+' has been defined as ptp but it has '+str(ifaces_nb)+' interfaces.'
elif net['type'] == 'data':
if ifaces_nb > 2 and self.pmp_with_same_vlan:
#Change vlan in host
#check all ports are VLAN (tagged) or none
tagged = ports[0]['vlan']
if ports[0]['type']=='external' and ports[0]['vlan'] != None and ports[0]['vlan']!=net['vlan']:
text='Error external port connected with different vlan net to a point to multipoint net'
print self.name, text
return -1, text
for port in ports[1:]:
if type(port['vlan']) != type(tagged):
text='Error Can not connect vlan with no vlan ports'
print self.name, text
return -1, text
if port['type']=='external':
if tagged != None and port['vlan']!=net['vlan']:
                        text='Error external port connected with different vlan net to a point to multipoint net'
print text
return -1, text
#change VLAN of ports to net vlan
if tagged != None :
for port in ports:
port_vlan = port['vlan_changed'] if port['vlan_changed']!=None else port['vlan']
if port_vlan != net['vlan']:
result, content = self.change_vlan(port['uuid'], net['vlan'])
if result < 0:
return result, content
port['vlan'] = net['vlan']
else:
return -1, 'Only ptp and data networks are supported for openflow'
        #ensure SRIOV ports are in the right VLAN; they can have a different VLAN if they were previously attached to a pmp VLAN
if tagged == None:
for port in ports:
if port['vlan_changed'] != None and port['vlan'] != port['vlan_changed']:
result, content = self.change_vlan(port['uuid'], port['vlan'])
if result < 0:
return result, content
#launch to openflow
db_of_inserts = self.install_netrules(net_id, ports)
for INSERT in db_of_inserts:
self.db_lock.acquire()
result, content = self.db.new_row('of_flows', INSERT)
self.db_lock.release()
if result < 0:
print self.name, ": ports=", ports
return -1, content
return 0, 'Success'
def clear_all_flows(self):
if self.test:
return 0, None
try:
of_response = requests.get(self.url+"/wm/staticflowentrypusher/clear/"+str(self.dpid)+"/json")
print self.name, ": clear_all_flows:", of_response
if of_response.status_code != 200:
raise requests.exceptions.RequestException("Openflow response " + str(of_response.status_code))
return 0, None
except requests.exceptions.RequestException, e:
print self.name, ": clear_all_flows Exception:", str(e)
return -1, str(e)
def change_vlan(self, uuid, net_vlan):
        '''Change vlan in server'''
        if self.test:
            return 0, None
self.db_lock.acquire()
result, content = self.db.get_table(
FROM='( resources_port as rp join resources_port as rp2 on rp.root_id=rp2.id join numas on rp.numa_id=numas.id) join hosts on numas.host_id=hosts.uuid',
SELECT=('hosts.ip_name', 'hosts.user', 'hosts.password', 'rp.source_name', 'rp2.source_name as parent_source_name', 'rp.id'),
WHERE={'rp.port_id':uuid}
)
self.db_lock.release()
if result < 0:
print self.name, ": ports", content
return -1, content
elif result==0: #
return -1, "change_vlan() Error, no port get from database"
row = content[0]
print self.name, ": change_vlan() ports", row
if row['source_name']!=row['parent_source_name']:
cmd = 'sudo ip link set '+row['parent_source_name']+' vf '+row['source_name']+' vlan '+str(net_vlan)
#print cmd
r,c = af.get_ssh_connection(row['ip_name'], row['user'], row['password'])
if not r:
return -1, c
r,c = af.run_in_remote_server(c,cmd)
if not r:
                print self.name, ": change_vlan() error executing command:", c
return -1, c
            #print self.name, ": change_vlan() after executing the command"
self.db_lock.acquire()
r,c = self.db.update_rows('ports', {'vlan_changed':net_vlan}, {'uuid':uuid})
self.db_lock.release()
if r<0:
                print self.name, ": change_vlan() Error updating DB", c
else:
            print self.name, ": change_vlan() vlan was not changed since it is a physical port"
            return 1, None #It is a physical port
return 0, None #vlan was changed
def install_netrules(self, net_id, ports):
db_list = []
nb_rules = len(ports)
#Insert rules so each point can reach other points using dest mac information
pairs = itertools.product(ports, repeat=2)
index = 0
for pair in pairs:
if pair[0]['switch_port'] == pair[1]['switch_port']:
continue
flow = {
'switch':self.dpid,
"name": net_id+'_'+str(index),
"priority":"1000",
"ingress-port": self.pp2ofi[str(pair[0]['switch_port'])] if not self.test else str(pair[0]['switch_port']),
"active":"true",
'actions':''
}
#allow that one port have no mac
if pair[1]['mac'] is None or nb_rules==2: #point to point or nets with 2 elements
flow['priority'] = "990" #less priority
else:
flow['dst-mac'] = str(pair[1]['mac'])
if pair[0]['vlan'] != None:
flow['vlan-id'] = str(pair[0]['vlan'])
if pair[1]['vlan'] == None:
if pair[0]['vlan'] != None:
flow['actions'] = 'strip-vlan,'
else:
flow['actions'] = 'set-vlan-id='+str(pair[1]['vlan'])+','
flow['actions'] += 'output='+ ( self.pp2ofi[str(pair[1]['switch_port'])] if not self.test else str(pair[1]['switch_port']) )
index += 1
self.new_flow(flow)
INSERT={'name':flow['name'], 'net_id':net_id, 'vlan_id':flow.get('vlan-id',None), 'ingress_port':str(pair[0]['switch_port']),
'priority':flow['priority'], 'actions':flow['actions'], 'dst_mac': flow.get('dst-mac', None)}
db_list.append(INSERT)
#BROADCAST:
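        # For nets with more than two ports, add one flow per ingress port that
        # matches the broadcast destination MAC and outputs to every other port,
        # setting or stripping the VLAN tag as each destination requires.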
if nb_rules > 2: #point to multipoint or nets with more than 2 elements
for p1 in ports:
flow = {
'switch':self.dpid,
"priority":"1000",
'dst-mac': 'ff:ff:ff:ff:ff:ff',
"active":"true",
}
actions=''
flow['ingress-port'] = self.pp2ofi[str(p1['switch_port'])] if not self.test else str(p1['switch_port'])
flow['name'] = net_id+'_'+str(index)
if p1['vlan'] != None:
flow['vlan-id'] = str(p1['vlan'])
last_vlan=0 #indicates that a packet contains a vlan, and the vlan
else:
last_vlan=None
for p2 in ports:
if p1 == p2: continue
if last_vlan != p2['vlan']:
if p2['vlan'] != None:
actions += 'set-vlan-id='+str(p2['vlan'])+','
last_vlan = p2['vlan']
else:
actions += 'strip-vlan,'
last_vlan = None
actions += 'output=' + (self.pp2ofi[str(p2['switch_port'])] if not self.test else str(p2['switch_port']) ) +','
index += 1
#remove last coma
actions = actions[:-1]
flow['actions'] = actions
self.new_flow(flow)
INSERT={'name':flow['name'], 'net_id':net_id, 'vlan_id':flow.get('vlan-id',None), 'ingress_port':str(p1['switch_port']),
'priority':flow['priority'], 'actions':flow['actions'], 'dst_mac': flow.get('dst-mac', None)}
db_list.append(INSERT)
return db_list
| [
"[email protected]"
] | |
92ae87ad1bd162dbf5f83068e00dcf951fcdce79 | 9a0451bb892db73eba31d1de1cb9a7bb21e1473b | /IP_Ch1_13_Connector.py | bfc3d6993d16a948c2bf88290cef7caacc9b7232 | [] | no_license | DJacobo/InteractivePython | 0d7e9df62e7a433f1d73477c349526272912f752 | 58b0cf349adbc9c0d9e7fa983f326e8e1ded2350 | refs/heads/master | 2020-03-29T15:42:17.469252 | 2018-10-20T09:32:05 | 2018-10-20T09:32:05 | 150,076,266 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,848 | py | # OOP design will use a connector between LogicGates
# Connector Has-A LogicGate
from IP_Ch1_13_LogicGate import *
class Connector:
def __init__(self, fGate, tGate):
self.fromGate = fGate
self.toGate = tGate
tGate.setNextPin(self)
def getFrom(self):
return self.fromGate
def getTo(self):
return self.toGate
# Given example
# g1 = AndGate("G1")
# g2 = AndGate("G2")
# g3 = OrGate("G3")
# g4 = NotGate("G4")
# c1 = Connector(g1,g3)
# c2 = Connector(g2,g3)
# c3 = Connector(g3,g4)
# print(g4.getOutput())
# # Self-Check - prove the following equality: NOT (( A and B) or (C and D)) is the same as NOT( A and B ) and NOT (C and D)
# # NOT (( A and B) or (C and D))
# g1 = AndGate('A and B')
# g2 = AndGate('C and D')
# g3 = OrGate('g1 or g2')
# g4 = NotGate('NOT g3')
# c1 = Connector(g1,g3)
# c2 = Connector(g2,g3)
# c3 = Connector(g3,g4)
# print(g4.getOutput())
# # NOT( A and B ) and NOT (C and D)
# g1 = AndGate('A and B')
# g2 = AndGate('C and D')
# g3 = NotGate('NOT g1')
# g4 = NotGate('NOT g2')
# g5 = AndGate('g3 AND g4')
# c1 = Connector(g1,g3)
# c2 = Connector(g2,g4)
# c3 = Connector(g3,g5)
# c3 = Connector(g4,g5)
# print(g5.getOutput())
# Create a half-adder, which only adds two bits together:
# 2 inputs, A and B will generate two outputs, sum and carry
# Sum = A XOR B, carry = A AND B
# A,B = S,C 0,0 = 0,0 0,1 = 1,0 1,0 = 1,0 1,1 = 0,1
# g1 = XorGate('A XOR B')
# g2 = AndGate('A AND B')
# a = int(input('Please enter input for A'))
# b = int(input('Please enter input for B'))
# g1.pinA = a
# g1.pinB = b
# g2.pinA = a
# g2.pinB = b
# print('SUM = %s' % g1.getOutput())
# print('CARRY = %s ' % g2.getOutput())
# Create a full-adder, which uses 3 inputs: A, B and Carry-in
# Results in a sum and carry out
# Create Circuit
x0 = XorGate('x0 - AB')
x1 = XorGate('x1 - Cinx0')
a0 = AndGate('a0 - Cinx0')
a1 = AndGate('a1 - AB')
o0 = OrGate('o0 - a0a1')
c0 = Connector(x0, x1)
c0 = Connector(x0, a0)
c0 = Connector(a0, o0)
c0 = Connector(a1, o0)
# Ask for inputs
a = int(input('Please enter input for A: '))
b = int(input('Please enter input for B: '))
cin = int(input('Please enter input for Cin: '))
#Set initial pins
x0.pinA = a
x0.pinB = b
x1.pinB = cin
a0.pinB = cin
a1.pinA = a
a1.pinB = b
#getOutput()
print('S = %s' % x1.getOutput())
print('Cout = %s' % o0.getOutput())
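# Added spot check (not part of the original exercise): the circuit above
# should reproduce the usual full-adder truth table, e.g.
#   A=0 B=0 Cin=0 -> S=0 Cout=0
#   A=1 B=0 Cin=1 -> S=0 Cout=1
#   A=1 B=1 Cin=1 -> S=1 Cout=1
# since S = A XOR B XOR Cin and Cout = (A AND B) OR (Cin AND (A XOR B)).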
# HELL NO I'M NOT MAKING AN 8-BIT ADDER
# binaryAdd1 = [1, 0, 1, 0, 1, 0, 1, 0]
# binaryAdd2 = [0, 0, 1, 1, 0, 0, 1, 1]
# x0 = XorGate('x0')
# a0 = AndGate('a0')
# x1 = XorGate('x1')
# a1 = AndGate('a1')
# x2 = XorGate('x2')
# a2 = AndGate('a2')
# x3 = XorGate('x3')
# a3 = AndGate('a3')
# x4 = XorGate('x4')
# a4 = AndGate('a4')
# x5 = XorGate('x5')
# a5 = AndGate('a5')
# x6 = XorGate('x6')
# a6 = AndGate('a6')
# x7 = XorGate('x7')
# a7 = AndGate('a7')
# c0 = Connector()
# c1
| [
"[email protected]"
] | |
505bb735533b3334b3929e04260b649d5d342389 | 046116f7dac7c15cc8af87761fad6d69125cb553 | /Django Web应用开发实战/chapter16/16.3/MyDjango_User/MyDjango_User/settings.py | 9a6e74f6b085b43d6bdefa9d55d70d2116b4b020 | [] | no_license | xingzuhui/Python-Web-Django | 36a0d5e70dd9d4660103c0deeff98abc2984349b | 1b4bd259dd7d6343bf24fa79dc631d6d6fb64a58 | refs/heads/master | 2022-10-11T03:00:07.049245 | 2020-06-14T13:22:49 | 2020-06-14T13:22:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,651 | py | """
Django settings for MyDjango_User project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd@0zpkxhc6yu^_^ywje4_0)gk0wfgrmqu_p+v(*+*ijt#yg#8l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
'user'
]
MIDDLEWARE = [
# 'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyDjango_User.urls'
WSGI_APPLICATION = 'MyDjango_User.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# STATIC_URL = '/static/'
| [
"[email protected]"
] | |
87b4e02b7e5842acb4020612476b7709c67338c0 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/code-reuse/k_permutation_module.py | 3c3a299dca423f00935250d56d37946d618d8687 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 313 | py | import sys
from mymath import fact
if len(sys.argv) != 3:
exit(f"Usage: {sys.argv[0]} n r")
'''
n!
P(n, r) = -----
(n-r)!
'''
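# Worked example (added for clarity): P(5, 2) = 5! / (5-2)! = 120 / 6 = 20,
# i.e. there are 20 ordered ways to pick 2 items out of 5.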
n = int(sys.argv[1])
r = int(sys.argv[2])
n_fact = fact(n)
#print(n_fact)
n_r_fact = fact(n-r)
#print(n_r_fact)
P = n_fact // n_r_fact
print(P)
| [
"[email protected]"
] | |
b98226a43a743737f9eb71ce6ed9630eba432a0a | f43d31672b333b0442eadd76c9ab2d4364d4bf49 | /src/strategies/BNFORB30Min.py | 32e5ccb2451e4a6924e6a7872b7f0508a17202c9 | [] | no_license | ambhautik11/sdoosa-algo-trade-python | 5e852cdc584ada43780a3e3a857c36c59a0f51c5 | 557c827a0f4cbe04ddec1e3d00bb340b0b5d1d59 | refs/heads/master | 2023-04-22T18:43:43.115787 | 2021-05-14T17:56:37 | 2021-05-14T17:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,300 | py | import logging
from datetime import datetime
from instruments.Instruments import Instruments
from models.Direction import Direction
from models.ProductType import ProductType
from strategies.BaseStrategy import BaseStrategy
from utils.Utils import Utils
from trademgmt.Trade import Trade
from trademgmt.TradeManager import TradeManager
# Each strategy has to be derived from BaseStrategy
class BNFORB30Min(BaseStrategy):
__instance = None
@staticmethod
def getInstance(): # singleton class
if BNFORB30Min.__instance == None:
BNFORB30Min()
return BNFORB30Min.__instance
def __init__(self):
if BNFORB30Min.__instance != None:
raise Exception("This class is a singleton!")
else:
BNFORB30Min.__instance = self
# Call Base class constructor
super().__init__("BNFORB30Min")
# Initialize all the properties specific to this strategy
self.productType = ProductType.MIS
self.symbols = []
self.slPercentage = 0
self.targetPerncetage = 0
self.startTimestamp = Utils.getTimeOfToDay(9, 45, 0) # When to start the strategy. Default is Market start time
self.stopTimestamp = Utils.getTimeOfToDay(14, 30, 0) # This is not square off timestamp. This is the timestamp after which no new trades will be placed under this strategy but existing trades continue to be active.
self.squareOffTimestamp = Utils.getTimeOfToDay(15, 0, 0) # Square off time
self.capital = 100000 # Capital to trade (This is the margin you allocate from your broker account for this strategy)
self.leverage = 0
self.maxTradesPerDay = 1 # Max number of trades per day under this strategy
self.isFnO = True # Does this strategy trade in FnO or not
self.capitalPerSet = 100000 # Applicable if isFnO is True (1 set means 1CE/1PE or 2CE/2PE etc based on your strategy logic)
def process(self):
now = datetime.now()
processEndTime = Utils.getTimeOfToDay(9, 50, 0)
if now < self.startTimestamp:
return
if now > processEndTime:
# We are interested in creating the symbol only between 09:45 and 09:50
      # since we are not using historical candles, we do not know the exact high and low of the first 30 mins
return
symbol = Utils.prepareMonthlyExpiryFuturesSymbol('BANKNIFTY')
quote = self.getQuote(symbol)
if quote == None:
logging.error('%s: Could not get quote for %s', self.getName(), symbol)
return
if symbol not in self.tradesCreatedSymbols:
self.generateTrade(symbol, Direction.LONG, quote.high, quote.low)
self.generateTrade(symbol, Direction.SHORT, quote.high, quote.low)
# add symbol to created list
self.tradesCreatedSymbols.append(symbol)
def generateTrade(self, tradingSymbol, direction, high, low):
trade = Trade(tradingSymbol)
trade.strategy = self.getName()
trade.isFutures = True
trade.direction = direction
trade.productType = self.productType
trade.placeMarketOrder = True
trade.requestedEntry = high if direction == Direction.LONG else low
trade.timestamp = Utils.getEpoch(self.startTimestamp) # setting this to strategy timestamp
# Calculate lots
numLots = self.calculateLotsPerTrade()
isd = Instruments.getInstrumentDataBySymbol(tradingSymbol) # Get instrument data to know qty per lot
trade.qty = isd['lot_size']
trade.stopLoss = low if direction == Direction.LONG else high
slDiff = high - low
# target is 1.5 times of SL
    if direction == Direction.LONG:
trade.target = Utils.roundToNSEPrice(trade.requestedEntry + 1.5 * slDiff)
else:
trade.target = Utils.roundToNSEPrice(trade.requestedEntry - 1.5 * slDiff)
trade.intradaySquareOffTimestamp = Utils.getEpoch(self.squareOffTimestamp)
# Hand over the trade to TradeManager
TradeManager.addNewTrade(trade)
def shouldPlaceTrade(self, trade, tick):
# First call base class implementation and if it returns True then only proceed
if super().shouldPlaceTrade(trade, tick) == False:
return False
if tick == None:
return False
if trade.direction == Direction.LONG and tick.lastTradedPrice > trade.requestedEntry:
return True
elif trade.direction == Direction.SHORT and tick.lastTradedPrice < trade.requestedEntry:
return True
return False
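  # Added illustrative numbers (not part of the strategy code): if the first
  # 30-minute BANKNIFTY futures candle has high=36000 and low=35800, then
  # slDiff = 200 and
  #   LONG trade : entry 36000, stop-loss 35800, target 36000 + 1.5*200 = 36300
  #   SHORT trade: entry 35800, stop-loss 36000, target 35800 - 1.5*200 = 35500
  # A trade is placed only once the traded price actually crosses its entry
  # level (see shouldPlaceTrade above).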
| [
"[email protected]"
] | |
7fdd464803df821f2821b9304cd413b9e8ef8c7e | 12412a9d3f3434d9a0f9d6db099a6cb75a35a77f | /bgx/consensus/pbft_python/bgx_pbft_common/utils.py | 8038c84491b0ec08a432c0a21700ad8579aa44e6 | [
"Zlib",
"MIT",
"Apache-2.0"
] | permissive | DGT-Network/DGT-Mississauga | 3cf6d79618d2b076ac75b771910cabc5bdd6431a | fb0a558f1304696cdf4278b8009001212ee17cb9 | refs/heads/master | 2023-03-05T08:58:09.141087 | 2022-05-10T15:48:04 | 2022-05-10T15:48:04 | 241,790,384 | 0 | 0 | Apache-2.0 | 2023-03-02T17:42:45 | 2020-02-20T04:10:38 | Python | UTF-8 | Python | false | false | 2,227 | py | # Copyright 2016, 2018 NTRlab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Utility methods.
"""
import json
from collections import OrderedDict
def _short_id(id):
return '/' + id[:8] + '..' + id[-8:] + '/'
def pretty_print_dict(dictionary):
"""Generates a pretty-print formatted version of the input JSON.
Args:
dictionary (str): the JSON string to format.
Returns:
str: pretty-print formatted string.
"""
return \
json.dumps(_ascii_encode_dict(dictionary), indent=2, sort_keys=True)
def json2dict(dictionary):
"""Deserializes JSON into a dictionary.
Args:
dictionary (str): the JSON string to deserialize.
Returns:
dict: a dictionary object reflecting the structure of the JSON.
"""
return _ascii_encode_dict(json.loads(dictionary))
def dict2json(dictionary):
"""Serializes a dictionary into JSON.
Args:
dictionary (dict): a dictionary object to serialize into JSON.
Returns:
str: a JSON string reflecting the structure of the input dict.
"""
return json.dumps(_ascii_encode_dict(dictionary))
def _ascii_encode_dict(item):
"""
Support method to ensure that JSON is converted to ascii since unicode
identifiers, in particular, can cause problems
"""
if isinstance(item, dict):
return OrderedDict(
(_ascii_encode_dict(key), _ascii_encode_dict(item[key]))
for key in sorted(item.keys()))
if isinstance(item, list):
return [_ascii_encode_dict(element) for element in item]
if isinstance(item, str):
return item
return item
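# Added usage sketch (not part of the original module):
#   >>> dict2json({'b': 2, 'a': 1})
#   '{"a": 1, "b": 2}'
#   >>> json2dict('{"b": 2, "a": 1}')
#   OrderedDict([('a', 1), ('b', 2)])
# Keys are sorted by _ascii_encode_dict, so the JSON output is deterministic.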
| [
"[email protected]"
] | |
77e29850d4c78ce54d481184506e6d1cf29c757d | 9b1308a9e200cb1ca4bbe1f5c7c53bc5cf42ee9a | /run_match_traj_met_script.py | 3d58a3ea342375b672b0ea198d74f17d8d476703 | [] | no_license | crterai/LagrangianAnalysis | 7349684e0dd3a29f78c1a3e3575ddbd74d5e032e | 3b3fb9fe8bf9ad26dcd2a4a3869b7a445dae2d22 | refs/heads/master | 2021-06-27T02:34:40.630928 | 2020-09-28T15:53:52 | 2020-09-28T15:53:52 | 145,752,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | #!/usr/bin/env python
"""
This script takes the Start_index and End_index passed through by run_match_traj_exec.sh and runs the match_traj_parallelized_general library routine that produces the text files with the output.
"""
import argparse,datetime,gc,re,sys,time
import cdms2 as cdm
import MV2 as MV #stuff for dealing with masked values.
import cdutil as cdu
import glob
import os
from string import replace
import numpy as np
from durolib import globalAttWrite,writeToLog,trimModelList
from socket import gethostname
import pandas
from match_traj_parallelized_general_library import match_traj_parallelized_metvariables,match_traj_parallelized_windSST
Start_index=int(sys.argv[1])
#End_index=Start_index+2500 #Add 2500 to get the End_index (this 2500 is an arbitrary choice and can be modified)
End_index=int(sys.argv[2])
match_traj_parallelized_metvariables(Start_index,End_index)
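# Example invocation (index values are placeholders):
#   python run_match_traj_met_script.py 0 2500
# passes Start_index=0 and End_index=2500 to the library call above.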
| [
"[email protected]"
] | |
bb4fb0abc4f215ff1f24bbd3d42b98f6ba977ffd | da690e5c9dd411039642ca366600a7a19eebcd97 | /TestCase.py | 69234c4c66315367ba7135f4e343ff79f58c76ec | [] | no_license | vijayrajarathinam/Carrom-Strike | 067724658cbb545fbb19d191a273cba69b281067 | 1a4409c7387037e8eaf26c15d7c9473b9c9a3c61 | refs/heads/master | 2020-06-02T02:48:34.593625 | 2019-06-09T13:45:19 | 2019-06-09T13:45:19 | 191,011,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | from index import Game
'''
__name__ = Clean Strike
'''
class GameTestCases(Game):
def __init__(self,):
super(GameTestCases,self).__init__("Player 1", "Player 2")
self.is_file = False
self.outputHeader = [
["Player 1","Player 2",
"Player 1 Points","Player 2 Points",
"Total Coins","Total Black","Total Red"]
]
# getter for input list
@property
def fileIO(self):
return self.__fileIO
# getter for input list
@fileIO.setter
def fileIO(self,name):
self.__fileIO = name
@property
def __eqIO(self):
a, b = [], []
try:
for IO in range(len(self.fileIO)):
a.append(self.fileIO[IO][0])
b.append(self.fileIO[IO][1])
return len(a) == len(b)
except IndexError:
            print("scenarios for both players do not match, please check the input")
return False
    #test cases start running
def run(self):
lists = []
if self.__eqIO:
for testCase in self.fileIO:
if self.fileIO.index(testCase) == 0: continue
try:
print(testCase)
self.playChance(self.player1, pstrike=int(testCase[0]))
self.playChance(self.player2, pstrike=int(testCase[1]))
lists.append([testCase[0], testCase[1], self.player1.get_point,
self.player2.get_point, self.get_all, self.get_black, self.get_red])
if self.mid_validation :break
except ValueError as ve:
print(ve)
break
except EnvironmentError as ee:
print(ee)
break
except TypeError as te:
print(te)
break
print("Player 1 scores = {p1} | Player 2 scores = {p2}".format(p1=self.player1.get_point,p2=self.player2.get_point))
print("coins Remaining Black : {b} | Red : {r} | total : {t}".format(b=self.get_black, r=self.get_red,t=self.get_all))
self.file(lists) if self.is_file else print(self.decide())
#set value in file
def file(self,lists):
with open("output.csv", 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(self.outputHeader + lists + [['RESULT',self.decide(),'','','','','']])
print("look into output.csv for result")
#get value from file
def get_file(self,file_name,per):
with open(file_name, per) as csvFile:
reader = csv.reader(csvFile)
self.fileIO,self.is_file = [row for row in reader],True
if __name__ =="__main__":
import sys,csv
#init the GameTestCases
testCase = GameTestCases()
    # try to fetch the file, otherwise fall back to the predefined variables below
try:
listarg = sys.argv
arg1 = listarg[1]
if arg1 is not None: testCase.get_file(arg1,"r")
except IndexError:
print("Match Inputs are taken from a variable(testCases) below")
testCase.fileIO = [
["player 1","player 2"],
[2, 2, ],[1, 3, ],
[2, 2, ],[4, 6, ],
[1, 1, ],[5, 1, ],
]
    # run the scenarios to get output
testCase.run() | [
"[email protected]"
] | |
ab3070ed80091961e4c958a272cc403253c8eaf9 | 7f0dac79f94a9e74ad5e8538ea25fe9ad49a7d9e | /analyzers/APT_book/apt_ips.py | a630ed92395ad767c965abe79d129a1a60289e45 | [] | no_license | wzr/gummer | a3df5f4105e12737858d307cc3cc7a0151460e65 | e1d29b81a243c6c9960e3317d810e63848c534fe | refs/heads/master | 2021-01-15T10:26:11.547059 | 2015-03-14T15:10:34 | 2015-03-14T15:10:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import db_loader
#Example of analyzer
aid = "apt_ips" # Must be unique
name = "TOP Direct connection to IPs"
desc = "Retrieves the top 10 HTTP/S most repeated connections to IP addresses "
desc += "instead of to domains.\n"
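#Illustrative result shape only (values are made up): db_query() is expected
#to return one row per URL as (Hits, ip_src, url), e.g.
#   [(42, '10.0.0.5', 'http://203.0.113.7/update'), ...]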
def launch(connector):
query = "select count(*) as Hits, ip_src, url "
query += "from squid3 "
query += "where ip_dst = domain "
query += "group by url "
query += "order by Hits desc "
query += "limit 10"
data = db_loader.db_query(connector, query)
return data | [
"[email protected]"
] | |
b5a2a28f4ee3a8cfe8b5fb8192e35f5770d3f712 | e91f216ea2cf9f2ecf2a770ebc6334d7034cbf94 | /Baekjoon/12865평범한배낭.py | 30036c49effd71e510e02b034165940c43252a73 | [] | no_license | HelloMandu/OnlineJudge | 4c870a737ba8c479adf0f7763ca38afd7761e9a3 | 2dd0abc4066fd0b1f28346f8913ece88167eee99 | refs/heads/master | 2023-01-09T22:53:27.036594 | 2022-12-28T09:38:39 | 2022-12-28T09:38:39 | 168,956,239 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | n, k = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(n)]
dp = [[0] * (k + 1) for _ in range(n + 1)]
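# (Comments added for clarity) 0/1 knapsack DP:
# dp[i][j] = best value achievable with the first i items and capacity j;
# each item (weight w, value v) is either skipped or, if it fits, taken once.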
for i in range(1, n + 1):
for j in range(1, k + 1):
w, v = arr[i - 1]
if j - w >= 0:
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - w] + v)
else:
dp[i][j] = dp[i - 1][j]
print(dp[n][k]) | [
"[email protected]"
] | |
4cf41cca444588c2b9063780949e82ccecc5e2bc | 0c0af1aaae764d16e4adda2432c05bd659fc4290 | /varclassifiers.py | 2732f273642a2f7d95e3af1c82da4c6ef9a6455a | [] | no_license | sreelathav/datascience | 31d74707013e29376d50587039d8c6d14026cd26 | 3d061e1894ad6f20049e4e82654819b284be2483 | refs/heads/master | 2020-06-20T16:02:16.493202 | 2018-10-16T13:51:16 | 2018-10-16T13:51:16 | 74,855,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_classification, make_circles, make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| [
"[email protected]"
] | |
cd49ce6e63bf72dcb48e1535dbdf32b3c3b35764 | 0dc3c1f447641c6f9c66e67ae8ec6be9a4e5eade | /inner_module/datetime_test.py | aea3dc14718224b159b4a07d49be2bd2bde0f377 | [] | no_license | wangwuuw/pythonTest | 41a6ada4111a56204492bf98bd8b101cc4d1e998 | 92c940032a7e4d0dee304aacfaf1dc3bee30b55f | refs/heads/main | 2023-07-13T20:37:12.718635 | 2021-08-30T06:01:43 | 2021-08-30T06:01:43 | 365,891,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | from datetime import datetime, timedelta, timezone
now = datetime.now()
dt = datetime(2015, 4, 19, 12, 20)
print(now)
print(dt)
print(type(now))
print(dt.timestamp())
cday = datetime.strptime('2015-6-1 18:19:59', '%Y-%m-%d %H:%M:%S')
print(cday)
print(now.strftime('%a, %b %d %H:%M'))
now = now + timedelta(hours=10)
print(now)
tz_utc_8 = timezone(timedelta(hours=8))  # create a UTC+8:00 timezone
now = datetime.now()
dt = now.replace(tzinfo=tz_utc_8)
print(dt)
| [
"[email protected]"
] | |
626712a3f27429ba2161e5bff01a4d2c343cd8c3 | 01d31e7e969be9977781ada4a77291f6a73080bb | /Programming101/week5/2-Cinema-Reservation-System/Magic-Cinema-No-OOP/cinema_reservation.py | b9e419e9eed42689c1f3533a7f827e7806382685 | [] | no_license | stanislavBozhanov/HackBulgaria | 93a337ad823a3f1ccd305dd1c57df26c4c7d49a2 | 0fd0744a48c337576ca4dfaedf6d8b417dea212a | refs/heads/master | 2021-01-20T12:12:46.582051 | 2015-06-21T20:40:27 | 2015-06-21T20:40:27 | 24,852,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,092 | py | import sqlite3
def create_table_movies(db, cursor):
cursor.execute('''CREATE TABLE IF NOT EXISTS movies(
id INTEGER PRIMARY KEY,
name TEXT,
rating REAL
)''')
db.commit()
def create_table_projections(db, cursor):
cursor.execute('''CREATE TABLE IF NOT EXISTS projections(
id INTEGER PRIMARY KEY,
movie_id INTEGER,
type_ TEXT,
date_ TEXT,
time_ TEXT,
FOREIGN KEY(movie_id) REFERENCES movies(id)
)''')
db.commit()
def create_table_reservations(db, cursor):
cursor.execute('''CREATE TABLE IF NOT EXISTS reservations(
id INTEGER PRIMARY KEY,
username TEXT,
projection_id INTEGER,
row INTEGER,
col INTEGER,
FOREIGN KEY(projection_id) REFERENCES projections(id)
)''')
db.commit()
def insert_movie(db, cursor, name, rating):
cursor.execute('''INSERT INTO movies(name, rating) VALUES(?,?)''', (name, rating))
db.commit()
def show_movies(db, cursor):
result = cursor.execute('SELECT id, name, rating FROM movies ORDER BY rating')
for row in result:
print('[{id}] - {name} ({rating})'.format(**row))
def insert_projection(db, cursor, movie_id, type_, date_, time_):
cursor.execute('''INSERT INTO projections(movie_id, type_, date_, time_)
VALUES(?,?,?,?)''', (movie_id, type_, date_, time_))
db.commit()
def show_projections(db, cursor, movie_id, date_=None):
if not date_:
result = cursor.execute(''' SELECT projections.id, projections.date_, projections.time_, movies.name, projections.type_
FROM projections
INNER JOIN movies
ON projections.movie_id = movies.id
WHERE movie_id = ?
ORDER BY date_''', (movie_id,))
for row in result:
#print("Projections for movie '{name}':".format(**row))
print('[{id}] - {date_} ({type_})'.format(**row))
else:
result = cursor.execute(''' SELECT projections.id, projections.date_, projections.time_, movies.name, projections.type_
FROM projections
INNER JOIN movies
ON projections.movie_id = movies.id
WHERE movie_id = ? AND date_ = ?
ORDER BY date_''', (movie_id, date_))
for row in result:
#print("Projections for movie '{name}' on date {date_}:".format(**row))
print('[{id}] - {date_} ({type_})'.format(**row))
def get_reserved_seats(db, cursor, projection_id):
seats = []
result = cursor.execute(''' SELECT row, col
FROM reservations
WHERE projection_id = ?''', (projection_id,))
for line in result:
seats.append((line['row'], line['col']))
return seats
def reserve_seats(reserved):
matrix = [
[' ', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[2, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[3, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[4, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[5, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[6, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[7, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[8, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[9, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
[10, '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],
]
for item in reserved:
matrix[item[0]][item[1]] = 'X'
return matrix
def print_seats(matrix):
for row in matrix:
print(row)
def get_projections_for_movie(db, cursor, movie_id):
result = cursor.execute(''' SELECT id, date_, time_, type_
FROM projections
WHERE movie_id = ?''', (movie_id,)).fetchall()
for line in result:
reserved = get_reserved_seats(db, cursor, line['id'])
print('[{id}] - {date_} {time_} ({type_})'.format(**line) + ' - {} spots available'.format(100 - len(reserved)))
def insert_reservation(db, cursor, username, projection_id, row, col):
cursor.execute('''INSERT INTO reservations(username, projection_id, row, col)
VALUES(?,?,?,?)''', (username, projection_id, row, col))
db.commit()
def create_all_tables(db, cursor):
create_table_movies(db, cursor)
create_table_projections(db, cursor)
create_table_reservations(db, cursor)
# def show_reservations():
# matrix = ['.' for x in range(1,11) ]
def main():
db = sqlite3.connect('cinema.db')
db.row_factory = sqlite3.Row
db.execute('PRAGMA foreign_keys = ON')
db.commit()
cursor = db.cursor()
create_all_tables(db, cursor)
while True:
command = input('command>')
command = command.split()
if command[0] == 'show_movies':
show_movies(db, cursor)
elif command[0] == 'insert_movie':
name = input('movie_name>')
rating = float(input('movie_rating>'))
insert_movie(db, cursor, name, rating)
elif command[0] == 'insert_projection':
movie_id = int(input('movie_id>'))
type_ = input('type>')
date_ = input('date>')
time_ = input('time>')
insert_projection(db, cursor, movie_id, type_, date_, time_)
elif command[0] == 'show_projections':
movie_id = int(command[1])
if len(command) == 3:
date_ = command[2]
show_projections(db, cursor, movie_id, date_)
elif len(command) == 2:
show_projections(db, cursor, movie_id)
elif command[0] == 'insert_reservation':
username = input('username>')
projection_id = int(input('projection_id>'))
row = int(input('row>'))
col = int(input('col>'))
insert_reservation(db, cursor, username, projection_id, row, col)
elif command[0] == 'make_reservation':
username = input('Choose name>')
number_of_tickets = int(input('Choose number of tickets>'))
print('Current movies:')
show_movies(db, cursor)
movie_id = int(input('Choose a movie>'))
movie_name = cursor.execute('SELECT name FROM movies WHERE id = ?', (movie_id,)).fetchone()['name']
print('Projections for movie {}'.format(movie_name))
get_projections_for_movie(db, cursor, movie_id)
elif command[0] == 'break':
db.close()
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ea99d17392d2ab82f43d373528db6b93b900ec56 | 678f961dc43513e7238ad33fe0bdbb50ec523caa | /cases/file/test_download_log_file_hk.py | b9b1c75ff687c6f40fe7bfa71298b9f681efba0a | [] | no_license | nevermorer1/NEMO | 5afd2784bd86ab33cf9e41cf121e6298ec98e41f | 4f83d90a227203da96b3342022d2818009f5390f | refs/heads/master | 2020-03-20T14:07:17.791530 | 2018-08-24T08:18:02 | 2018-08-24T08:18:02 | 137,476,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | from common.dataHandle import DataHandle
import unittest
from common.log import Log
from cases.login import Login
from cases.downloadfile import DownloadFile
from cases.userinfo import UserInfo
class TestDownloadLogFileHK(unittest.TestCase):
    """Log section - downloading the log file"""
@classmethod
def setUpClass(cls):
Log.info('TestDownloadLogFileHK START HK端')
cls.node = 1
cls.dh = DataHandle()
        # Hong Kong side
cls.L = Login(node=cls.node)
cls.admin_cookies = cls.L.get_cookie()
cls.df = DownloadFile(node=cls.node)
cls.ui = UserInfo(node=cls.node)
        # login cookie for the "auto" (ordinary) user
cls.AUTO_cookies = cls.ui.modify_cookies(data_id=3001, node=cls.node)
def setUp(self):
Log.debug('---------')
pass
    def test_download_log_file_success(self):
        """HK log section - downloading the log file succeeds"""
Log.debug('test_download_log_file_success start')
para_id = 43
data_id = 43001
res = self.df.base_download_log_file(para_id=para_id, data_id=data_id, cookies=self.admin_cookies,
flag=True)
self.assertTrue(res, msg='result check fail')
Log.debug('test_download_log_file_success end')
pass
    def test_download_log_file_no_session_fail(self):
        """HK log section - downloading the log file fails when not logged in"""
Log.debug('test_download_log_file_no_session_fail start')
para_id = 43
data_id = 43002
res = self.df.base_download_log_file(para_id=para_id, data_id=data_id, cookies=None,
flag=False)
self.assertTrue(res, msg='result check fail')
Log.debug('test_download_log_file_no_session_fail end')
pass
    def test_download_log_file_not_admin_fail(self):
        """HK ordinary (non-admin) user fails to download the log"""
Log.debug('test_download_log_file_not_admin_fail start')
para_id = 43
data_id = 43003
res = self.df.base_download_log_file(para_id=para_id, data_id=data_id, cookies=self.AUTO_cookies,
flag=False)
self.assertTrue(res, msg='result check fail')
Log.debug('test_download_log_file_not_admin_fail end')
pass
def tearDown(self):
Log.debug('---------')
pass
@classmethod
def tearDownClass(cls):
Log.info('TestDownloadLogFileHK END HK端')
| [
"nevermorer1"
] | nevermorer1 |
092b2ee98e997749a84f6261fe7e40ac394cd874 | e937e58d1041cd0cb93efe5d1197e7ced65dacf9 | /Homeworks/Homework2/ExerciseA/client.py | 13fc8037c0d870bf9f23421f513991072b7aac8a | [
"MIT"
] | permissive | secano97/cliente-servidor | 3c6b135bb43e6611a28efea6050568134c5a7215 | 8a19157ad723fc0384d687343b8093266d002544 | refs/heads/master | 2021-04-28T02:43:09.204906 | 2018-05-18T18:57:42 | 2018-05-18T18:57:42 | 122,123,622 | 1 | 2 | null | 2018-02-19T21:36:50 | 2018-02-19T21:36:50 | null | UTF-8 | Python | false | false | 3,964 | py | import argparse
import zmq
import os
import pyaudio
import wave
import _thread
from pynput.keyboard import Key, Listener
from src.helpers.utils import *
# CONSTANTS SECTION
PORTS = []
WAVE_OUTPUT_FILENAME = "voice.wav"
ID_USERINFO = {} # User information
CLIENT_LPORT = 4000
def receive_data():
while True:
try:
# data = s.recv(CHUNK) #Send socket
receive_stream.write(data)
except:
pass
def send_data():
while True:
try:
data = send_stream.read(CHUNK)
#s.sendall(data) #send socket sender
except:
pass
def main():
""" Client of the Chatvoice application
Evil-Labs, UTP
    By default, this client listens on local port 4000 (CLIENT_LPORT) for
    incoming voice data and connects to the remote ChatVoice server supplied
    on the command line.
    Args:
        raddress: IPv4 address of the remote server.
        rport: remote server port to connect to.
        username: name used to identify yourself against the server.
    Returns:
        An open socket/threaded connection bound to the local port and
        connected to the remote address.
    Raises:
        zmq.error.ZMQError: if the local socket cannot be created or the
        connection limit is reached.
"""
# Implement a Python arg parser using the standard lib
parser = argparse.ArgumentParser(
description=bannerc(),
prog='ChatVoice-Server',
epilog="Written by Hector F. Jimenez and Sebastian Cano"
)
parser.add_argument(
'raddress', help="Remote Ipv4 server socket.")
parser.add_argument(
'rport', help="Remote server port to connect.")
parser.add_argument(
'username', help="identify yourself against the server")
parser.add_argument('--otherusername',
help="Insert the name of your friend")
parser.add_argument('--ping',
help="Test your client and server connection. You should get an Ok acknowledgement")
args = parser.parse_args()
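    # Example invocation (address, port and name are placeholders):
    #   python client.py 192.0.2.10 9000 alice
    # connects to the server at 192.0.2.10:9000 and listens locally on
    # CLIENT_LPORT (4000) for incoming audio.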
# Creates a 0mq context, from docs You should create and use exactly one context
# in your process. Technically, the context is the container for all sockets
# in a single process
context = zmq.Context()
REMOTE_PORT = int(args.rport)
REMOTE_IP = args.raddress
ports = [REMOTE_PORT, CLIENT_LPORT]
# Create two different sockets
listener_client = context.socket(zmq.REP) # Listen data
sender_client = context.socket(zmq.REQ) # Send data to server
print('{} Setting Networking Context and Address Family{}'.format(ok, reset))
# sleep(1)
try:
# All client will be the same, but every client should be in a
# different ip, and using different ports
listener_client.bind("tcp://*:{}".format(ports[1]))
#print('{} Client is Listening on Port: {}{} {}for incomming voice message'.format(
# ok, bold, ports[1], reset))
print('{} Trying to contact server {}:{}'.format(atn,REMOTE_IP, REMOTE_PORT))
sender_client.connect("tcp://{}:{}".format(REMOTE_IP, REMOTE_PORT))
#Send an initial connection to the server
#sender_client.send_json({"cmdquery": "hello"})
#ToDo: Confirm Connection with server, something like an ack
except zmq.error.ZMQError as e:
print('{} Failed to create a Socket in Port {}, due to {}'.format(
err, REMOTE_PORT, e))
p = pyaudio.PyAudio()
receive_stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=CHUNK)
send_stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
print("Voice chat running")
_thread.start_new_thread(receive_data, ())
_thread.start_new_thread(send_data, ())
while True:
pass
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
16d1f16ca50e5d93a6391652f767d28400d8ace9 | d005737bd133debbd792ccd8364b2f5b04682292 | /Day01-15/04.py | 7e4fac8da885628518a3ff7ccc646b92dd0a6166 | [] | no_license | XJawher/Python-100 | 522a218136dd8479c83f1cdfbc57200238b81989 | abd65c4095f75d3bd744a6f75d7a4c86b368bf54 | refs/heads/master | 2020-06-19T02:19:32.958386 | 2019-07-25T00:25:49 | 2019-07-25T00:25:49 | 196,530,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # -*- coding: utf-8 -*
"""python 的循环结构大致分为两种,一种是 for in 一种是 while 循环"""
# sum = 0
# for x in range(20):
# sum += x
# print(sum)
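# (English for the module docstring above: Python's loop structures come in
# two main forms, the for-in loop and the while loop.)
# Minimal added illustration of both forms (kept commented out, like the
# other exercises in this file):
# for i in range(3):      # for-in: iterate over an iterable
#     print(i)            # -> 0, 1, 2
# count = 0
# while count < 3:        # while: repeat as long as the condition holds
#     count += 1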
# import random
# def random():
# counter = 0
# answer = 20
# while True:
# counter += 1
# number = int(input('请输入: '))
# if number < answer:
# print('大一点')
# elif number > answer:
# print('小一点')
# else:
# print('恭喜你猜对了!')
# break
# print('你总共猜了%d次' % counter)
# if counter > 7:
# print('你的智商余额明显不足')
# random()
# row = int(input('请输入行数: '))
# for i in range(row):
# for _ in range(i + 1):
# print('*', end='')
# print()
# for i in range(row):
# for j in range(row):
# if j < row - i - 1:
# print(' ', end='')
# else:
# print('*', end='')
# print()
# for i in range(row):
# for _ in range(row - i - 1):
# print(' ', end='')
# for _ in range(2 * i + 1):
# print('*', end='')
# print()
# def multiplication():
# for i in range(1, 10):
# for j in range(1, i + 1):
# print(('%d*%d=%d' % (i, j, i * j)), end='\t')
# print()
# multiplication()
# def testAbs():
# print('abs(-45)', file=open('../data.txt', 'w'))
# print('abs(-119L)', abs(110))
# testAbs()
# def iterable(parma):
# print(all(parma))
# iterable(['', ''])
# ascii
def pyAscii(parma):
print(ascii(parma))
print(parma)
pyAscii('')
| [
"[email protected]"
] | |
c063bdd4e660972e8aad8582c52327669c234386 | 383b8adf5a8b3140e8b2075be3021ac4b7fd400b | /2020_11_24/11000_강의실_배정/moonyeol.py | 5c56e9b19c538f30a6a1e656ced78036b7facbc1 | [] | no_license | aimhigh53/AlgoWing | eecd0ddb34221eb1ea8e6a92cd03afd612e13bb8 | 0804e4f57d87c19f861bb871fb9a39585b90f724 | refs/heads/master | 2023-03-13T17:00:22.171601 | 2021-03-08T14:42:31 | 2021-03-08T14:42:31 | 286,629,140 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import sys
import heapq
input = sys.stdin.readline
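# (Comments added for clarity) Greedy room assignment: lessons are sorted by
# start time and a min-heap keeps the earliest end time of each occupied room;
# a room is reused when the next lesson starts no earlier than that end time,
# otherwise a new room is opened.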
def solution():
global lessons, N
ans = 1
times = [lessons[0][1]]
for i in range(1,N):
now = lessons[i]
if now[0] >= times[0]:
heapq.heappop(times)
else:
ans +=1
heapq.heappush(times,now[1])
return ans
N = int(input())
lessons = []
for i in range(N):
lessons.append(list(map(int, input().split())))
lessons.sort()
print(solution())
| [
"[email protected]"
] | |
372588b33202d69d524844c22a983e70dc3faf58 | 548385de21cfc34f0af2543f77ef8af306a83ef6 | /tests/test_data_processor.py | 9b719130c3ba07fd1d5f465f95940d5e7ab0006d | [] | no_license | mwmichaelas/data_jump | 714cabb58f69a709cfd828449e4b94ed8280c010 | cc3b1d784737144251d78de6e2dd981889c0d7bf | refs/heads/main | 2023-03-04T04:31:47.651305 | 2021-02-16T10:48:17 | 2021-02-16T10:48:17 | 339,348,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | import csv
import sys
import os
from data_jump.app import app
from data_jump.data_processor import DataProcessor
sys.path.append(os.path.join(os.path.dirname(__file__),os.pardir,"data_jump"))
import pytest
import pandas as pd
from pandas import DataFrame
import requests_mock
def file_get_contents(filename):
with open(filename, 'r') as f:
return f.read()
response_1 = [{"date": "22-01-2021", "impressions": 1376}, {"date": "21-01-2021", "impressions": 1906},
{"date": "20-01-2021", "impressions": 2818}, {"date": "19-01-2021", "impressions": 1024},
{"date": "18-01-2021", "impressions": 646}, {"date": "17-01-2021", "impressions": 2885},
{"date": "16-01-2021", "impressions": 1889}, {"date": "15-01-2021", "impressions": 1534},
{"date": "14-01-2021", "impressions": 995}, {"date": "13-01-2021", "impressions": 1251}]
SERVICE_ENDPOINTS = {
'Service X': 'http://test.com/abc/1234'
}
class TestDataProcessor:
@pytest.fixture
def app_context(self):
with app.app_context():
yield
@requests_mock.Mocker(kw='mock')
def test_get_data_from_endpoints(self, **kwargs):
service_1_name = list(SERVICE_ENDPOINTS)[0]
service_1_url = SERVICE_ENDPOINTS[service_1_name]
kwargs['mock'].get(service_1_url,
headers={
"content-type": "application/json;charset=UTF-8",
},
json=response_1)
data_processor = DataProcessor(SERVICE_ENDPOINTS.items())
data = data_processor.get_data_from_endpoints()
assert data == [response_1]
def test_combine_dataframes(self):
data_frames: list = []
for segment in [response_1, response_1]:
data_frames.append(pd.DataFrame(segment))
concatenated_data_frames: DataFrame = pd.concat(data_frames, ignore_index=True)
data_processor = DataProcessor(SERVICE_ENDPOINTS.items())
combined_data: DataFrame = data_processor.combine_dataframes([response_1, response_1])
assert combined_data.to_string() == concatenated_data_frames.to_string()
def test_find_sum_and_mean(self):
data_frames: list = []
for segment in [response_1, response_1]:
data_frames.append(pd.DataFrame(segment))
concatenated_data_frames: DataFrame = pd.concat(data_frames, ignore_index=True)
data_processor = DataProcessor(SERVICE_ENDPOINTS.items())
output = data_processor.find_sum_and_mean(concatenated_data_frames)
assert output == {'mean': 1632.4, 'sum': 32648}
| [
"[email protected]"
] | |
657972b17491b37cf30ce08ca58445dd72f61ac7 | d56540f1a623de8222d8d92473796df1183850e5 | /LibraryManager/admin.py | 7287ff3590385a66cff2121c8fef5263815f32c3 | [] | no_license | LucasWagler/Django-Library-Manager | 340677b04dcbc6af14162bc9e0ff0c0af272b4db | 3855f6072e51d527711b3ed4c6a6c9dd8f39ec03 | refs/heads/master | 2023-06-21T21:42:56.946077 | 2021-07-12T21:07:53 | 2021-07-12T21:07:53 | 380,251,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from django.contrib import admin
from .models import Author, Book, BookBranchCopies, Branch, Department, Employee
# Register your models here.
class AuthorAdmin(admin.ModelAdmin):
# fields = ['name', 'country']
list_display = ('name', 'country')
search_fields = ['name', 'country']
class BookBranchCopiesInline(admin.TabularInline):
model = BookBranchCopies
extra = 1
class BookAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'price')
list_filter = ['price']
search_fields = ['title', 'author__name', 'author__country']
inlines = [BookBranchCopiesInline]
# fields = ['image_tag']
# readonly_fields = ['image_tag']
class BranchAdmin(admin.ModelAdmin):
list_display = ['name', 'city', 'phone']
search_fields = ['name', 'city', 'phone']
class DepartmentAdmin(admin.ModelAdmin):
list_display = ['name', 'budget']
search_fields = ['name']
# filter by budget
class EmployeeAdmin(admin.ModelAdmin):
list_display = ['user', 'department', 'branch']
search_fields = ['name', 'department__name', 'branch__name', 'email']
admin.site.register(Author, AuthorAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Branch, BranchAdmin)
admin.site.register(Department, DepartmentAdmin)
admin.site.register(Employee, EmployeeAdmin)
admin.AdminSite.site_header = 'Aesop Bookstore'
admin.AdminSite.site_title = 'Aesop Bookstore'
admin.AdminSite.site_url = '/admin'
admin.AdminSite.index_title = 'Bookstore Administration'
| [
"[email protected]"
] | |
87d6805f645884b7278039939c74e61c0db8c098 | a0b720271eb091f0f231c327f97135aeae311d29 | /sendengo/apps/shippers/views.py | ec43611dd78cae573876c8d9ca689e3258ea4d82 | [] | no_license | osharim/shippers | 138a39d1933afcedf4cacbb119d1556509ea00ae | ffcb79becac84ef48b9fa901494bd8e8dcdbe379 | refs/heads/master | 2020-05-21T02:16:56.942018 | 2019-05-13T02:06:07 | 2019-05-13T02:06:07 | 185,873,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | # enconding: utf-8
from rest_framework import viewsets
from .serializer import ShipperSerializer, ShipperRequirementSerializer
from .models import Shipper, ShipperRequirement
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
class ShipperViewSet(viewsets.ModelViewSet):
queryset = Shipper.objects.all()
serializer_class = ShipperSerializer
def list(self, request):
if self.request.user.is_authenticated:
serializer = self.serializer_class(self.queryset, many=True)
return Response(serializer.data)
else:
raise PermissionDenied()
class ShipperRequirementViewSet(viewsets.ModelViewSet):
queryset = ShipperRequirement.objects.all()
serializer_class = ShipperRequirementSerializer
def list(self, request, shipper_pk):
print(shipper_pk)
if self.request.user.is_authenticated:
serializer = self.serializer_class(self.queryset.filter(category=shipper_pk), many=True)
return Response(serializer.data)
else:
raise PermissionDenied()
| [
"[email protected]"
] | |
dd873a36e434f4eccf19aa5138c193a824b80b0e | bac5eec28e25a7320c71292cdc7f5935455c2dcf | /api_yamdb/settings.py | c68a58b8482e1219c3aabe63b20c0349b9054e9e | [] | no_license | Nadine45832/yamdb_final | 342886c0402581edbbadb2a63168ebd6385a9743 | a40cf281c489798f201fd2424babb8d9c13e3e5f | refs/heads/master | 2023-08-26T11:12:29.880986 | 2021-11-12T01:41:25 | 2021-11-12T01:41:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | import os
from datetime import timedelta
from distutils.util import strtobool
import dotenv
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
SECRET_KEY = os.environ.get('SECRET_KEY', default="SUP3R-S3CR3T-K3Y-F0R-MY-PR0J3CT")
DEBUG = bool(strtobool(os.getenv('DEBUG', 'False')))
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(',')
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework_simplejwt",
"rest_framework_simplejwt.token_blacklist",
"django_filters",
"api",
"reviews",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "api_yamdb.urls"
TEMPLATES_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [TEMPLATES_DIR],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "api_yamdb.wsgi.application"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('DB_HOST'),
'PORT': os.environ.get('DB_PORT'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "ru"
TIME_ZONE = "America/New_York"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework_simplejwt.authentication.JWTAuthentication",
],
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
}
AUTH_USER_MODEL = "reviews.User"
PASSWORD_RESET_TIMEOUT_DAYS = 1 / 24
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=100),
"REFRESH_TOKEN_LIFETIME": timedelta(days=5),
"ROTATE_REFRESH_TOKENS": False,
"BLACKLIST_AFTER_ROTATION": True,
"UPDATE_LAST_LOGIN": False,
"ALGORITHM": "HS256",
"SIGNING_KEY": SECRET_KEY,
"VERIFYING_KEY": None,
"AUDIENCE": None,
"ISSUER": None,
"JWK_URL": None,
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"USER_ID_FIELD": "id",
"USER_ID_CLAIM": "user_id",
"USER_AUTHENTICATION_RULE": "rest_framework_simplejwt.authentication.default_user_authentication_rule",
"AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
"TOKEN_TYPE_CLAIM": "token_type",
"JTI_CLAIM": "jti",
"SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
"SLIDING_TOKEN_LIFETIME": timedelta(minutes=10),
"SLIDING_TOKEN_REFRESH_LIFETIME": timedelta(days=5),
}
OLD_PASSWORD_FIELD_ENABLED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_UNIQUE_EMAIL = True
REST_USE_JWT = True
JWT_AUTH_COOKIE = "my-app-auth"
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
}
}
| [
"[email protected]"
] | |
9fc9bfc859d1abf43f7450af98bda58100accfc7 | f31e082967a104f621e9d2acdd0e2e472672a06e | /analysis/management/commands/get_matches_fromcsv.py | b6ec2f1c79057ed0bb80956d8228ad037c0589c7 | [] | no_license | PrajwalChigod/django_ipl | f9933bd6c300a97bac6e791ea89e87f4e8ccf948 | 0da488f4faf82d4358a1c74b9b9d1f1aa4644d85 | refs/heads/master | 2020-04-16T22:26:31.136251 | 2019-01-16T10:16:30 | 2019-01-16T10:16:30 | 165,967,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | from django.core.management.base import BaseCommand, CommandError
from analysis.models import Matches
import csv
class Command(BaseCommand):
help = 'Gets data from csv to the sqlite3 database'
def handle(self, *args, **options):
matches_file = 'matches.csv'
with open(matches_file) as matches_csv:
matches_reader = csv.DictReader(matches_csv)
for match in matches_reader:
# print(match)
match_created = Matches.objects.get_or_create(
id = match['id'],
season = match['season'],
city = match['city'],
date = match['date'],
team1 = match['team1'],
team2 = match['team2'],
toss_winner = match['toss_winner'],
result = match['result'],
dl_applied = match['dl_applied'],
winner = match['winner'],
win_by_runs = match['win_by_runs'],
win_by_wickets = match['win_by_wickets'],
player_of_match = match['player_of_match'],
venue = match['venue'],
umpire1 = match['umpire1'],
umpire2 = match['umpire2'],
umpire3 = match['umpire3']
)
| [
"[email protected]"
] | |
9a29d8a4fbcdcac971a68f6ca936be3acb5d1ca9 | 0ac6ba2a881e1bd9c3f1d602e3e280853be0a55f | /winrateprediction/urls.py | 93154eae5b652787eb80ce61b918ecd4cf6b34dc | [] | no_license | antiflee/DOTA_Oracle | b0aec55b2faed0ed5ea64a2c8448be62e36c79aa | 1744b9382b035d76948dcb4c4ae2cc33a56d9c34 | refs/heads/master | 2021-05-07T16:30:23.775415 | 2017-10-30T16:43:09 | 2017-10-30T16:43:09 | 108,560,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.conf.urls import url
from . import views
app_name = 'winrateprediction'
urlpatterns = [
url(r'^$', views.winrateHome, name='winrateHome'),
url(r'^result/', views.winrateResult, name='winrateResult'),
]
| [
"[email protected]"
] | |
3c07f2937e72991c56c1d37c7f4a29416844fca4 | 6d465ab8ec7994362ce69bd1b6e456a7f4ac9b97 | /results/format-covid-forecast/format_data_state_case.py | 3d544db444a286503d76af3b6bbb7778d6cb6006 | [
"MIT"
] | permissive | guizhen-wang/ReCOVER-COVID-19 | cbc8a09dc790e2cbb0d663e5b5339a3b2aaed705 | 466886b42467e505c812922ad632c3c1cae1656a | refs/heads/master | 2023-01-04T18:52:30.868428 | 2020-10-23T11:03:53 | 2020-10-23T11:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,252 | py | import datetime
import pandas as pd
import csv
import urllib.request
import io
FORECAST_DATE = datetime.datetime.today()
FIRST_WEEK = FORECAST_DATE + datetime.timedelta(5)
INPUT_FILENAME_STATE = "us_forecasts_current_0.csv"
INPUT_FILENAME_GLOBAL = "global_forecasts_current_0.csv"
OUTPUT_FILENAME = FORECAST_DATE.strftime("%Y-%m-%d") + "-USC-SI_kJalpha.csv"
COLUMNS = ["forecast_date", "target", "target_end_date", "location", "type", "quantile", "value"]
ID_STATE_MAPPING = {}
STATE_ID_MAPPING = {}
def load_state_id_mapping():
"""
Return a mapping of <state name, state id>.
"""
MAPPING_CSV = "./locations_state.csv"
with open(MAPPING_CSV) as f:
reader = csv.reader(f)
state_id_mapping = {}
# Skip the header
next(reader)
for row in reader:
state_id = row[1]
state_name = row[2]
state_id_mapping[state_name] = state_id
return state_id_mapping
def load_id_state_mapping():
"""
Return a mapping of <state id, state name>.
"""
MAPPING_CSV = "./locations.csv"
with open(MAPPING_CSV) as f:
reader = csv.reader(f)
id_state_mapping = {}
# Skip the header
next(reader)
for row in reader:
state_id = row[1]
state_name = row[2]
id_state_mapping[state_id] = state_name
return id_state_mapping
def load_truth_cumulative_cases():
dataset = {}
URL = "https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master/data-truth/truth-Cumulative%20Cases.csv"
f = io.StringIO(urllib.request.urlopen(URL).read().decode('utf-8'))
reader = csv.reader(f)
header = next(reader, None)
location_col = -1
date_col = -1
value_col = -1
for i in range(0, len(header)):
if (header[i] == "location"):
location_col = i
elif (header[i] == "date"):
date_col = i
elif (header[i] == "value"):
value_col = i
for row in reader:
state_id = row[location_col]
date = row[date_col]
val = int(row[value_col])
if date not in dataset:
dataset[date] = {}
dataset[date][state_id] = val
return dataset
def load_csv(input_filename_state, input_filename_global):
"""
Read our forecast reports and return a dictionary structuring of <date_str, <state_id, value>>
e.g.
{
"2020-06-22": {
'10': 2000.0,
'11': 3000.0,
...
},
"2020-06-23": {
'10': 800.0,
'11': 900.0,
...
},
...
}
"""
dataset = {}
with open(input_filename_state) as f:
reader = csv.reader(f)
header = next(reader, None)
for i in range(2, len(header)):
date_str = header[i]
# Initialize the dataset entry on each date.
dataset[date_str] = {}
for row in reader:
state = row[1]
# Skip the state if it is not listed in reichlab's state list.
if state not in STATE_ID_MAPPING:
continue
state_id = STATE_ID_MAPPING[state]
for i in range(2, len(header)):
date_str = header[i]
val = float(row[i])
dataset[date_str][state_id] = val
with open(input_filename_global) as f:
reader = csv.reader(f)
header = next(reader, None)
for row in reader:
country = row[1]
# Skip other countries.
if not country == "US":
continue
for i in range(2, len(header)):
date_str = header[i]
val = float(row[i])
dataset[date_str]["US"] = val
return dataset
def generate_new_row(forecast_date, target, target_end_date,
location, type, quantile, value):
"""
Return a new row to be added to the pandas dataframe.
"""
new_row = {}
new_row["forecast_date"] = forecast_date
new_row["target"] = target
new_row["target_end_date"] = target_end_date
new_row["location"] = location
new_row["type"] = type
new_row["quantile"] = quantile
new_row["value"] = value
return new_row
def add_to_dataframe(dataframe, forecast, observed):
"""
Given a dataframe, forecast, and observed data,
generate a pandas dataframe of incident cases.
"""
# Write incident forecasts.
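    # Added worked example (made-up numbers): if the cumulative forecast for a
    # state is 1800 on 2020-06-27 and the observed cumulative count was 1000 on
    # 2020-06-20, the "1 wk ahead inc case" value written below is
    # max(1800 - 1000, 0) = 800.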
cum_week = 0
forecast_date_str = FORECAST_DATE.strftime("%Y-%m-%d")
for target_end_date_str in sorted(forecast.keys()):
target_end_date = datetime.datetime.strptime(target_end_date_str, "%Y-%m-%d")
# Terminate the loop after 8 weeks of forecasts.
if cum_week >= 8:
break
# Skip forecasts before the forecast date.
if target_end_date <= FORECAST_DATE:
continue
if target_end_date >= FIRST_WEEK and target_end_date.weekday() == 5:
cum_week += 1
target = str(cum_week) + " wk ahead inc case"
last_week_date = target_end_date - datetime.timedelta(7)
last_week_date_str = last_week_date.strftime("%Y-%m-%d")
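            # Incident cases = this week's cumulative forecast minus last week's cumulative value
            # (observed truth when available, otherwise our own forecast), floored at 0.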
if last_week_date_str in observed:
for state_id in forecast[target_end_date_str].keys():
dataframe = dataframe.append(
generate_new_row(
forecast_date=forecast_date_str,
target=target,
target_end_date=target_end_date_str,
location=str(state_id),
type="point",
quantile="NA",
value=max(forecast[target_end_date_str][state_id]-observed[last_week_date_str][state_id], 0)
), ignore_index=True)
elif last_week_date_str in forecast:
for state_id in forecast[target_end_date_str].keys():
dataframe = dataframe.append(
generate_new_row(
forecast_date=forecast_date_str,
target=target,
target_end_date=target_end_date_str,
location=str(state_id),
type="point",
quantile="NA",
value=max(forecast[target_end_date_str][state_id]-forecast[last_week_date_str][state_id], 0)
), ignore_index=True)
return dataframe
# Main function
if __name__ == "__main__":
STATE_ID_MAPPING = load_state_id_mapping()
ID_STATE_MAPPING = load_id_state_mapping()
print("loading forecast...")
forecast = load_csv(INPUT_FILENAME_STATE, INPUT_FILENAME_GLOBAL)
observed = load_truth_cumulative_cases()
dataframe = pd.read_csv(OUTPUT_FILENAME, na_filter=False)
dataframe = add_to_dataframe(dataframe, forecast, observed)
print("writing files...")
dataframe.to_csv(OUTPUT_FILENAME, index=False)
print("done") | [
"[email protected]"
] | |
0ba89a7e990d1d019fb7a73a2ee89ae7710b387f | e53d9840342af43e7fb541bb014d61ebfd93c822 | /corona/my_app/migrations/0003_news.py | 358d2b35f12d1d70859a9f35b5c8cd38abcec5db | [] | no_license | webron9/corona_pandemic | 435dd5a7f3f2c635516388d89a59b15d18a82f84 | aea34b4d9dbb1e1d58f5d50064e6ac94a0024baa | refs/heads/master | 2023-09-05T19:18:55.049074 | 2023-08-18T06:50:34 | 2023-08-18T06:50:34 | 284,206,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Generated by Django 3.0.3 on 2020-05-14 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('my_app', '0002_stats_new_date'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headline', models.CharField(max_length=264)),
('link', models.URLField()),
],
),
]
| [
"[email protected]"
] | |
2b5581a82a00609c38b0340906dde5249b677912 | 6b7586d26c58eb680339fe69838849e94d569f8d | /bin/gprof2dot | 5bd594195c47b1441d92bcbe46d50f0f52780c1f | [] | no_license | dant00ine/csv-parser | 50355cc12c88feef2e254225f3f448c0ebbaf8cf | 6eabab5ad9e530ab270d7c665701e6997e63b7c7 | refs/heads/master | 2021-03-22T04:30:44.412547 | 2018-06-14T02:06:07 | 2018-06-14T02:06:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/Users/funglr-dan/CODE/csv_sample/django_app/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from gprof2dot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
e6d71dc0b14742f7bfd8673efacb6e2c4f5b510b | cef59836927e958edb8db5f9c7e3bb9f33897991 | /Словари_7.py | 615cccfe76b55182efc77f58fc51cf6ca76fae40 | [] | no_license | setishin/Homework | 73d93f91a7c30e9e70917bdd315e0ca390a8f7bf | 1e700f3e357c9d2963bca1411abd70f73348a61d | refs/heads/main | 2023-01-25T02:11:59.945398 | 2020-12-04T17:39:32 | 2020-12-04T17:39:32 | 316,254,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | dic1={1:10, 2:20}
dic2={3:30, 4:40}
dic3={5:50,6:60}
dic4={}
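# Merge the three dictionaries into dic4, then multiply all of its values together.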
dic4.update(dic1)
dic4.update(dic2)
dic4.update(dic3)
s=1
for i in dic4:
s=s*dic4[i]
print (s) | [
"[email protected]"
] | |
fe75eb46516cd3b3ecc95dc2faccbe419fdc9146 | 141c9714925dd95156460e583b8d711fcc9b8797 | /src/products/migrations/0002_auto_20190126_0028.py | 9c47b317c26c63d981047607c79206d3e21d9b1e | [] | no_license | sjb3/try_django | 5971211777d5fda9d1ab5bcbb5608237b10ddd28 | b81da0a01f3a29f1977afbb0fe532975f5c86609 | refs/heads/master | 2020-04-18T17:08:33.792225 | 2019-01-27T07:16:12 | 2019-01-27T07:16:12 | 167,648,134 | 0 | 0 | null | 2019-01-27T07:16:13 | 2019-01-26T04:07:04 | Python | UTF-8 | Python | false | false | 709 | py | # Generated by Django 2.1.5 on 2019-01-26 00:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='product',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=10000),
),
migrations.AlterField(
model_name='product',
name='summary',
field=models.TextField(),
),
]
| [
"[email protected]"
] | |
58d7514b96b34cee6b47d8dbecc807eacdf13fd8 | 01b7ad17cf0f214dc4c6be46337ce112e7eac840 | /healpixify/__init__.py | 7a58f411ea5dd82c1ba6b2396eba2335a825d085 | [
"MIT"
] | permissive | kadrlica/healpixify | ff91a9716664c8bb748e7d49be61b3ebf463e9ee | db44339071ee0e134b12dd938dab3101b729cdc9 | refs/heads/master | 2021-01-19T16:58:28.175393 | 2017-07-11T18:19:45 | 2017-07-11T18:19:45 | 88,294,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "Alex Drlica-Wagner"
import fitsio
import healpy as hp
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
args = parser.parse_args()
| [
"[email protected]"
] | |
6d9a119d11c145cf7d2aab6cc451ee96dac0a8e2 | b3c44b75cedd2694e20861b3a217e0e10926038d | /Python Exercises/Exercise_11.py | 3aa10d9a57f3ef40cea23523be35e80cb1e19b95 | [] | no_license | testautomation8/Learn_Python | a85c95ef9103f3d9198a9a3a9df0be073fa6ea70 | 0d79d772b0ebed56266b3393eb2018b926dfaaac | refs/heads/master | 2021-11-24T16:44:38.110007 | 2021-11-02T11:42:06 | 2021-11-02T11:42:06 | 159,313,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | """Ask the user for a number and determine whether the number is prime or not. Use Functions"""
def prime(number):
    # Numbers below 2 are not prime by definition.
    if number < 2:
        return str(number) + " is not a prime number."
    flag = True
    for x in range(2, number):
        if number % x == 0:
            # Found a divisor, so the number cannot be prime.
            flag = False
            break
if flag:
return str(number) + " is a prime number."
else:
return str(number) + " is not a prime number."
number = int(input("Please enter the integer: "))
print(prime(number))
| [
"Voltas83"
] | Voltas83 |
2e34bbdf6c4f21b23057859b9dbdf31976c85e41 | caeb9c7ad9606ed440dcb6f852e6de6d7eb0dd91 | /unqomp/examples/recursivecircuit.py | c1598bd17a54094418ebfcbb457b1ff61fa92183 | [
"MIT"
] | permissive | eth-sri/Unqomp | 2e05395d4d68ca643ccfa9a2fa09693cbc792fd4 | 9d7e885af1ebfdeab7e8059d13149aadeed8a6d6 | refs/heads/master | 2023-03-29T19:26:36.273288 | 2021-04-01T09:38:58 | 2021-04-01T09:38:58 | 344,804,721 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | from qiskit.circuit import QuantumRegister, QuantumCircuit
import numpy as np
from unqomp.ancillaallocation import AncillaRegister, AncillaCircuit
def makesFGate(m, auto_uncomp = True):
# takes as input m bits, outputs m bits.
# for now : 2x+1
qr_input = QuantumRegister(m, "input")
qr_output = QuantumRegister(m, "output")
circuit = QuantumCircuit(qr_input, qr_output, name = "f")
for i in range(m - 1):
circuit.cx(qr_input[i], qr_output[i + 1])
circuit.x(qr_output[0])
return circuit.to_gate()
def makeRecursiveCircuitQiskit(n, m):
# creates (recursively) a circuit to compute u_n where u_0 is the input on m bits, and u_(n+1) = f(u_n), with manual uncomputation
f_gate = makesFGate(m, False)
qr_input = QuantumRegister(m, "input")
qr_output = QuantumRegister(m, "output")
if n == 1:
circuit = QuantumCircuit(qr_input, qr_output)
circuit.append(f_gate, [*qr_input, *qr_output])
return (circuit, [])
qr_ancillas = QuantumRegister(m, "ancilla-" + str(n))
circuit = QuantumCircuit(qr_input, qr_output, qr_ancillas)
(rec_circ, ancilla_list) = makeRecursiveCircuitQiskit(n - 1, m)
rec_gate = rec_circ.to_gate()
old_ancillas = [anc for qr_anc in ancilla_list for anc in qr_anc]
for qr_anc in ancilla_list:
circuit.add_register(qr_anc)
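    # Compute u_(n-1) into the ancillas, apply f onto the output, then uncompute the ancillas (compute-copy-uncompute).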
circuit.append(rec_gate, [*qr_input, *qr_ancillas, *old_ancillas])
circuit.append(f_gate, [*qr_ancillas, *qr_output])
circuit.append(rec_gate.inverse(), [*qr_input, *qr_ancillas, *old_ancillas])
ancilla_list.append(qr_ancillas)
return (circuit, ancilla_list)
def makeRecursiveCircuitUnqomp(n, m):
# creates (recursively) a circuit to compute u_n where u_0 is the input on m bits, and u_(n+1) = f(u_n), returns an AncillaCircuit, without uncomputation
f_gate = makesFGate(m, True)
qr_input = QuantumRegister(m, "input")
qr_output = QuantumRegister(m, "output")
if n == 1:
circuit = AncillaCircuit(qr_input, qr_output)
circuit.append(f_gate, [*qr_input, *qr_output])
return circuit
qr_ancillas = AncillaRegister(m, "ancilla-" + str(n))
circuit = AncillaCircuit(qr_input, qr_output, qr_ancillas)
rec_circ = makeRecursiveCircuitUnqomp(n - 1, m)
rec_gate = rec_circ.to_ancilla_gate()
circuit.append(rec_gate, [*qr_input, *qr_ancillas])
circuit.append(f_gate, [*qr_ancillas, *qr_output])
return circuit
| [
"[email protected]"
] | |
d75d53b225a7d7185fe74255e67235960c9f244d | 6f0effa6451622a10e2e991508547db4ed6b813c | /stud_mgmt_sys/admin.py | bd2e5ba51f5b1370fe5f09caa2e57d1500063ca7 | [] | no_license | umanga24/studentmanagement | 61ec15206ec33477f87cb6314631465d1469a319 | 4e0336dc1af514535e1d776e7f4d8ffe48baeaf1 | refs/heads/master | 2023-03-11T22:33:20.517305 | 2021-03-03T04:57:10 | 2021-03-03T04:57:10 | 344,005,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django.contrib import admin
from stud_mgmt_sys.models import *
admin.site.register(Students)
admin.site.register(Supervisor)
admin.site.register(Faculty)
admin.site.register(Section)
admin.site.register(Batch)
| [
"[email protected]"
] | |
4464818865a84d247744c2d224e43f52e09f84b7 | d2ba9a9f7e9c984e492bb9a7a04e4144aacf888c | /core/mkdir.py | 41c3cd9f6cf25dae3e6c0f4de6b805591edb4660 | [] | no_license | lxorz/ftp_client | fb0d12051d30bab65ae210342df3e235d133d907 | 1d2f4cf85bf27773f78ee7cc548613d983d5bc41 | refs/heads/master | 2020-03-31T22:25:09.534815 | 2018-10-11T15:51:40 | 2018-10-11T15:51:40 | 152,617,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # -*- coding:utf-8 -*-
# Author: LiuXing
# date: 9/29/2018 12:04 AM
import json
from conf import settings
def client_mkdir(self, *args):
    """Let the user create a directory under the current working directory."""
cm_split = args[0].split()
if len(cm_split) > 1:
new_dir = cm_split[1]
msg_dic = {
"action":"mkdir",
"new_dir":new_dir,
"overriden":False,
}
self.client.send(json.dumps(msg_dic).encode())
server_response = self.client.recv(1024)
print(json.loads(server_response.decode()))
else:
        print("%s: command error" % settings.LOGIN_STATE["cmd_error"]) | [
"[email protected]"
] | |
34007e87bd6b9063ea1b6c3cdfee9585b0f06cb2 | 9d5bd76e93e263ec48e54f0cfbe46a5315c255dd | /help.py | b679b11061aa0e61859bf2b3e0c2b8ea9e8bd1f7 | [] | no_license | anipedia/AnimeBot | da92df35a404a3ff5679c15c3b59b0269ae757ae | bb4fda139b5a18fc23541083163cebb187e549e3 | refs/heads/master | 2020-08-16T14:21:37.232268 | 2019-10-16T09:46:38 | 2019-10-16T09:46:38 | 215,511,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,180 | py | import asyncio
import random
import time
import discord
import nekos
from discord.ext import commands
class HelpFormat:
def __init__(self):
self.raw_colors = ['discord.Colour.blue()', 'discord.Colour.blurple()', 'discord.Colour.dark_blue()', 'discord.Colour.dark_gold()', 'discord.Colour.dark_green()', 'discord.Colour.dark_grey()', 'discord.Colour.dark_magenta()', 'discord.Colour.dark_orange()', 'discord.Colour.dark_purple()', 'discord.Colour.dark_red()', 'discord.Colour.dark_teal()','discord.Colour.darker_grey()', 'discord.Colour.gold()', 'discord.Colour.green()', 'discord.Colour.greyple()', 'discord.Colour.light_grey()', 'discord.Colour.lighter_grey()', 'discord.Colour.magenta()', 'discord.Colour.orange()', 'discord.Colour.purple()', 'discord.Colour.red()', 'discord.Colour.teal()']
async def get_colors(self):
return self.raw_colors
async def commands(self):
return {'Anime':[],'Fun':[]}
class Mod(HelpFormat):
def __init__(self,obj):
self.msg=obj
async def over_all(self):
emb=discord.Embed(title='Mod Commands')
        emb.set_thumbnail(url=self.msg.guild.icon_url)
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.set_footer(text='Use s.help {command_name} to get more details on a command')
message=await self.msg.send(embed=emb)
return message.id
class Fun(HelpFormat):
def __init__(self,obj):
super().__init__()
self.msg=obj
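        # obj is the invoking command context (or channel) used to send the help embeds.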
async def command_name(self,name:str):
await eval(f"{'self'}.{name}()")
async def command_list(self):
return ['lizard', 'dog', 'neko_gif', 'lewd', 'holo', 'slap', 'why', 'waifu', '8ball', 'joke', 'erofeet', 'poke', 'avatar', 'cat', 'kemonomimi',
'pi', 'owoify', 'randomshow', 'randomfact', 'randomanime', 'textcat', 'randommovie', 'neko1', 'neko', 'fox_girl', 'fox', 'hug',
                'pfp', 'wallpaper', 'meow', 'tickle', 'gecg', 'kiss', 'add_status', 'feed']
async def over_all(self,owner_icon):
emb=discord.Embed(title='Fun Commands',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=self.msg.guild.icon_url)
emb.add_field(name='Lizard',value='s.lizard')
emb.add_field(name='Dog',value='s.dog')
emb.add_field(name='Neko Gif',value='s.neko-gif')
emb.add_field(name='Lewd',value='s.lewd')
emb.add_field(name='Holo',value='s.holo')
emb.add_field(name='Slap',value='s.slap @User1 @User2')
emb.add_field(name='Why',value='s.why')
emb.add_field(name='Waifu',value='s.waifu')
emb.add_field(name='8ball',value='s.8ball Will I pass the exam?')
emb.add_field(name='Joke',value='s.joke')
emb.add_field(name='Erofeet',value='s.erofeet')
emb.add_field(name='Poke',value='s.poke @User1 @User2')
emb.add_field(name='Avatar',value='s.avatar')
emb.add_field(name='Cat',value='s.cat')
emb.add_field(name='Kemonomimi',value='s.kemonomimi')
emb.add_field(name='Pi',value='s.pi')
emb.add_field(name='Owoify',value='s.owoify Hello there, how are you?')
emb.add_field(name='Random_Show',value='s.randomshow')
emb.add_field(name='Random_Fact',value='s.randomfact')
emb.add_field(name='Random_Anime',value='s.randomanime')
emb.add_field(name='Random_Movie',value='s.randommovie')
emb.add_field(name='TextCat',value='s.textcat')
emb.add_field(name='Neko1',value='s.neko1')
emb.add_field(name='Neko',value='s.neko')
emb.add_field(name='Fox_Girl',value='s.fox_girl')
emb.add_field(name='Fox',value='s.fox')
emb.add_field(name='Hug',value='s.hug @User1 @User2')
emb.add_field(name='Pfp', value='s.pfp @User1 @User2')
emb.add_field(name='Wallpaper', value='s.wallpaper')
emb.add_field(name='Meow',value='s.meow')
emb.add_field(name='Tickle',value='s.tickle @User1 @User2')
emb.add_field(name='Gecg',value='s.gecg')
emb.add_field(name='Kiss',value='s.kiss @User1 @User2')
# emb.add_field(name='Add_Status',value='s.add_status League of Legends')
emb.add_field(name='Feed',value='s.feed @User1 @User2')
emb.set_footer(text='Use s.help {command_name} to get more details on a command (s.help fox)',icon_url=owner_icon)
message=await self.msg.send(embed=emb)
return message.id
async def kiss(self):
emb=discord.Embed(title='Kiss',description='Sends a kiss gif to a person tagged',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('kiss'))
emb.add_field(name='s.kiss',value='s.kiss @Cheng Yue#2945')
emb.set_footer(text='More than one person can be mentioned for this command')
await self.msg.send(embed=emb)
async def add_status(self):
emb=discord.Embed(title='add_status',description='Submit a status for the bot',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.add_status',value='s.add_status *watching* Hello Kitty')
emb.set_footer(text='Indicate the status type before the status name')
await self.msg.send(embed=emb)
async def neko(self):
emb = discord.Embed(title='Neko', description='Sends a neko picture', colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('neko'))
emb.add_field(name='s.neko', value='s.neko')
await self.msg.send(embed=emb)
async def why(self):
emb=discord.Embed(title='Why',description='Sends a question starting with why',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.why',value='s.why')
await self.msg.send(embed=emb)
async def eball(self):
emb=discord.Embed(title='8ball',description='Ask a yes or no question',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.8ball',value='s.8ball Will I pass the exam tomorrow?')
await self.msg.send(embed=emb)
async def erofeet(self):
        emb=discord.Embed(title='erofeet',description='Sends an ero feet picture of a random anime character',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.erofeet',value='s.erofeet')
await self.msg.send(embed=emb)
async def avatar(self):
        emb=discord.Embed(title='s.avatar',description='Generates a random anime girl and sends it in an embed',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.avatar',value='s.avatar @Cheng Yue#2945')
emb.set_footer(text='You can mention more than 1 user by adding space')
await self.msg.send(embed=emb)
async def kemonomimi(self):
emb=discord.Embed(title='s.kemonomimi',description='Sends a kemonomimi, give it a try',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.kemonomimi', value='s.kemonomimi')
await self.msg.send(embed=emb)
async def owoify(self):
emb=discord.Embed(title='Owoify',description='Replaces certain words to make it look like owo, give it a try!',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.owoify',value='s.owoify Hello there Alice, how are you today?')
await self.msg.send(embed=emb)
async def randomfact(self):
emb=discord.Embed(title='randomfact',description='Sends a randomfact to you about something random',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.randomfact',value='s.randomfact')
await self.msg.send(embed=emb)
async def textcat(self):
emb=discord.Embed(title='Textcat',description='Sends a text cat emoji',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.textcat',value='s.textcat')
await self.msg.send(embed=emb)
async def neko1(self):
emb=discord.Embed(title='Neko1',description='Sends you a neko picture, can be a little lewd',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.neko1',value='s.neko1')
await self.msg.send(embed=emb)
async def fox_girl(self):
emb=discord.Embed(title='Fox Girl',description='Sends you a picture of a fox girl, like neko except fox',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.fox_girl',value='s.fox_girl or s.fox-girl')
await self.msg.send(embed=emb)
async def hug(self):
        emb=discord.Embed(title='Hug',description='Sends a hug gif and adds the names of the user(s) you mention',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.hug',value='s.hug @Cheng Yue#2945 @Alice#3545')
emb.set_footer(text='You can mention more than 1 user by adding space')
await self.msg.send(embed=emb)
async def wallpaper(self):
emb = discord.Embed(title='Wallpaper', description='Sends you a random picture for a wallpaper',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.wallpaper',value='s.wallpaper')
await self.msg.send(embed=emb)
async def meow(self):
emb = discord.Embed(title='Meow', description='Sends a random picture of a cat',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.meow', value='s.meow')
await self.msg.send(embed=emb)
async def tickle(self):
emb = discord.Embed(title='Tickle', description='Sends an embed tickle gif with the names of the users you mentioned',colour=eval(random.choice(self.raw_colors)))
        emb.add_field(name='s.tickle',value='s.tickle @User1 @User2')
await self.msg.send(embed=emb)
async def gecg(self):
emb = discord.Embed(title='Gecg', description='Sends a funny post about genetically engineered cat-girls',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.gecg',value='s.gecg, cat-girl, or cat_girl')
await self.msg.send(embed=emb)
async def holo(self):
emb = discord.Embed(title='Holo', description='Sends a picture of the anime character Holo',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.holo',value='s.holo')
await self.msg.send(embed=emb)
async def feed(self):
        emb = discord.Embed(title='Feed', description='Sends a gif of feeding someone and adds the users you mentioned in the embed',colour=eval(random.choice(self.raw_colors)))
        emb.add_field(name='s.feed',value='s.feed @Cheng Yue#2945 @Alice#2352')
emb.set_footer(text='More than 1 user can be mentioned by adding space')
await self.msg.send(embed=emb)
async def lizard(self):
emb = discord.Embed(title='Lizard', description='Sends a picture of a lizard :)',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('lizard'))
emb.add_field(name='s.lizard',value='s.lizard')
await self.msg.send(embed=emb)
async def dog(self):
emb = discord.Embed(title='Dog', description='Sends a picture of a dog, sometimes a gif',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.dog',value='s.dog')
await self.msg.send(embed=emb)
async def neko_gif(self):
emb = discord.Embed(title='Neko Gif', description='Sends a gif of a neko',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('neko'))
emb.add_field(name='s.neko-gif',value='s.neko_gif, s.nekogif, or s.neko-gif')
await self.msg.send(embed=emb)
async def lewd(self):
        emb = discord.Embed(title='Lewd', description='Sends a NSFW lewd (mostly anime) picture, sometimes a gif',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.lewd',value='s.lewd')
emb.set_footer(text='Channel must have NSFW enabled for this command to work')
await self.msg.send(embed=emb)
async def slap(self):
emb = discord.Embed(title='Slap', description='Sends a slap gif and adds the name(s) of the user(s) you mention',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('slap'))
emb.add_field(name='s.slap',value='s.slap @Cheng Yue#2945 @Jayce#2342')
emb.set_footer(text='More than one user can be mentioned')
await self.msg.send(embed=emb)
async def waifu(self):
emb = discord.Embed(title='Waifu', description='Sends a random anime waifu girl',colour=eval(random.choice(self.raw_colors)))
emb.set_thumbnail(url=nekos.img('waifu'))
emb.add_field(name='s.waifu',value='s.waifu')
await self.msg.send(embed=emb)
async def joke(self):
        emb = discord.Embed(title='Joke', description='Generates a random joke, sometimes funny, but not always',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.joke',value='s.joke')
await self.msg.send(embed=emb)
async def poke(self):
emb = discord.Embed(title='Poke', description='Generates a poke gif and adds the names of the users you mentioned',colour=eval(random.choice(self.raw_colors)))
        emb.add_field(name='s.poke',value='s.poke @Cheng Yue#2923 @Katie#2334')
await self.msg.send(embed=emb)
async def cat(self):
emb = discord.Embed(title='Cat', description='Generates a random cat picture',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.cat',value='s.cat')
await self.msg.send(embed=emb)
async def pi(self):
emb = discord.Embed(title='Pi', description='Sends a random pi fact',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.pi',value='s.pi')
await self.msg.send(embed=emb)
async def randomshow(self):
emb = discord.Embed(title='Random Show', description='Generates a random show',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.randomshow',value='s.randomshow')
await self.msg.send(embed=emb)
async def randomanime(self):
emb = discord.Embed(title='Random Anime', description='Generates a random anime',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.randomanime',value='s.randomanime, s.random_anime, or s.random-anime')
await self.msg.send(embed=emb)
async def randommovie(self):
emb = discord.Embed(title='Random Movie', description='Generates a random movie',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.randommovie',value='s.randommovie, s.random_movie, or s.random-movie')
await self.msg.send(embed=emb)
async def fox(self):
emb = discord.Embed(title='Fox', description='Generates a random fox picture',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.fox',value='s.fox')
await self.msg.send(embed=emb)
async def pfp(self):
emb = discord.Embed(title='Pfp', description='Sends the URL picture of the mentioned user(s)',colour=eval(random.choice(self.raw_colors)))
emb.add_field(name='s.pfp',value='s.pfp @Cheng Yue#2341 @Katie#2341')
emb.set_footer(text='More than one user can be mentioned by adding space')
await self.msg.send(embed=emb)
class League(HelpFormat):
def __init__(self,obj):
self.msg=obj
async def over_all(self):
emb = discord.Embed(title='League Commands')
        emb.set_thumbnail(url=self.msg.guild.icon_url)
emb.add_field(name='', value='')
emb.add_field(name='', value='')
emb.add_field(name='', value='')
emb.set_footer(text='Use s.help {command_name} to get more details on a command')
message=await self.msg.send(embed=emb)
return message.id
class Anime(HelpFormat):
def __init__(self,obj):
self.msg=obj
async def over_all(self,owner_icon):
emb=discord.Embed(title='Anime Commands')
        emb.set_thumbnail(url=self.msg.guild.icon_url)
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.add_field(name='',value='')
emb.set_footer(text='Use s.help {command_name} to get more details on a command',icon_url=owner_icon)
message=await self.msg.send(embed=emb)
return message.id
class Help(commands.Cog):
def __init__(self,client):
self.bot=client
#NOTE: Only fun commands are added in the list at the moment
        self.commands=['fun','mod','anime','lizard', 'dog', 'neko_gif', 'lewd', 'holo', 'slap', 'why', 'waifu', '8ball', 'joke', 'erofeet', 'poke', 'avatar', 'cat', 'kemonomimi', 'pi', 'owoify', 'cms', 'randomshow', 'randomfact', 'randomanime', 'textcat', 'randommovie', 'neko1', 'neko', 'fox_girl', 'fox', 'hug', 'pfp', 'wallpaper', 'meow', 'tickle', 'gecg', 'kiss', 'add_status', 'feed']
self.help_message={}
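        # Maps a user id to their most recently sent help menu message; used by the (currently disabled) cleanup and reaction handlers.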
self.raw_colors=None
self.command_list=None
asyncio.run(self.tasks())
async def set_color(self):
self.raw_colors=await HelpFormat().get_colors()
async def help_commands(self):
self.command_list=await HelpFormat().commands()
async def tasks(self):
tks=[self.set_color,self.help_commands]
for i in tks:
await i()
#TODO: Add a time of when the help command was requested from the user or when the help message was created
async def adding_command_list(self):
"""
Add all the command names into the self.commands variable
"""
command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names
#NOTE: fun command added
for i in self.bot.commands:
self.commands.append(i.name)
for i in command_aliases:
self.commands.append(i)
"""
External None Discord Events
"""
    async def clear_help_messages(self):
        while True:
            await asyncio.sleep(10)
            # Iterate over a copy so entries can be removed while looping.
            for i in list(self.help_message):
                if self.help_message[i]['time'] <= time.time():
                    # Delete the Discord message before dropping its bookkeeping entry.
                    await self.help_message[i]['msg'].delete()
                    del self.help_message[i]
@commands.Cog.listener('on_ready')
async def help_ready(self):
# background_events=asyncio.wait([self.clear_help_messages()])
# await background_events
print("Help commands ready!")
async def fun_commands(self,msg):
emb=discord.Embed(title='Anime Commands',colour=discord.Colour.blue())
        emb.set_thumbnail(url=msg.guild.icon_url)
emb.add_field(name='cmd1',value='description')
emb.set_footer(text=f'Requested by: {msg.author.name}',icon_url=msg.author.avatar_url)
# @commands.Cog.listener('on_reaction_add')
# async def help_reaction_add(self,emote,user):
# me = self.bot.get_user(185181025104560128).avatar_url
# if emote.emoji == '❤' and emote.message.id == self.help_message[user.id].id:
# await Fun(emote.channel).over_all(me)
# if emote.emoji == '💚' and emote.message.id == self.help_message[user.id].id:
# await Anime(emote.channel).over_all(me)
async def help_alone(self,msg):
# help_emotes = ['❤', '💚']
emb=discord.Embed(title='Help Menu',colour=eval(random.choice(self.raw_colors)),description='s.help {command_name}')
emb.set_thumbnail(url=msg.guild.icon_url)
# emb.add_field(name='Anime Commands', value='s.help anime',inline=True)
emb.add_field(name='Fun Commands', value='s.help fun')
emb.set_footer(text=f'{msg.message.created_at} UTC',icon_url=msg.author.avatar_url)
message=await msg.send(embed=emb)
self.help_message[msg.author.id]={'time':time.time()+300,'msg':message}
# for i in help_emotes:
# await message.add_reaction(i)
        # self.help_message[msg.author.id]=[message.id,time.time()+300]  # superseded by the dict entry stored above
async def help_command(self,msg,name):
me=self.bot.get_user(185181025104560128)
if name.lower() not in self.commands:
await msg.send(f"Command **{name}** is not found")
if name.lower() == 'mod':
await Mod(msg).over_all()
if name.lower() == 'anime':
            await Anime(msg).over_all(me.avatar_url)
if name.lower() == 'fun':
await Fun(msg).over_all(me.avatar_url)
else:
if name.lower() in await Fun(msg).command_list():
await Fun(msg).command_name(name.lower())
@commands.command()
async def help(self,msg,*,name=None):
if name == None:
await self.help_alone(msg)
if name != None:
await self.help_command(msg,name)
def setup(bot):
bot.add_cog(Help(bot))
| [
"[email protected]"
] | |
3431ac52f6eaf42511fba1e27741419740043526 | 23f3e1792c7ad71a31be5a1a6dcdb978b641b555 | /rest/restapi/admin.py | 28c555c69b084cde8ca7a249dbb3eca9b18e0893 | [] | no_license | andrejBel/REST-API-for-image-gallery-service | 2b03f5f263c1fe4aa0d7f1efe4713ada166cfe58 | a64aced12b3f28f51aec122fa56b6677ed4ea3e5 | refs/heads/master | 2022-10-02T14:27:26.416771 | 2020-06-07T20:32:52 | 2020-06-07T20:32:52 | 270,421,301 | 3 | 3 | null | 2020-06-07T20:30:47 | 2020-06-07T20:16:39 | Python | UTF-8 | Python | false | false | 1,334 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .models import Item, Image, Comment, MyUser, Vote, Favourite
class VotesInline(admin.TabularInline):
model = Image.votes.through
verbose_name = "Vote"
verbose_name_plural = "Votes"
extra = 1
class CommentsInline(admin.TabularInline):
model = Image.comments.through
verbose_name = "Comment"
verbose_name_plural = "Comments"
extra = 1
class FavouritesInline(admin.TabularInline):
model = Image.favourites.through
verbose_name = "Favourite"
verbose_name_plural = "Favourite"
extra = 1
class ReportsInline(admin.TabularInline):
model = Image.reports.through
verbose_name = "Report"
verbose_name_plural = "Report"
extra = 1
class ImageInline(admin.ModelAdmin):
model = Image
exclude = ("votes", "comments")
inlines = (
VotesInline, CommentsInline, FavouritesInline, ReportsInline
)
class UserAdmin(BaseUserAdmin):
exclude = ("votes", "comments")
inlines = (
VotesInline, CommentsInline, FavouritesInline, ReportsInline
)
admin.site.register(MyUser, UserAdmin)
admin.site.register(Item)
admin.site.register(Image, ImageInline)
admin.site.register(Favourite)
admin.site.register(Comment)
admin.site.register(Vote)
| [
"[email protected]"
] | |
288840689f07f7413473cee89a78c264d7391bd4 | 09769af3c5397e3b23b8ab28eb20e063178d1d3a | /Basics/Loops.py | dfdb82485f9dbcfb00ab6df7017011e55b950bc6 | [] | no_license | guptamadhur/Python_GettingStarted | f7c9144901f7695ad24589724c90e9abb66ede78 | d10f740ddf8cd6b6188b6cf51abd349feb1f1993 | refs/heads/master | 2020-04-24T14:33:20.927771 | 2019-02-23T07:42:15 | 2019-02-23T07:42:15 | 172,024,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # Author: Madhur Gupta
# Github: github.com/guptamadhur
# Project: Python Getting Started
#
# Example file for working with loops
#
def main():
x = 0
# define a while loop
while (x < 5):
print (x)
x = x + 1
# define a for loop
for x in range(5, 10):
print (x)
# use a for loop over a collection
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
for d in days:
print (d)
# use the break and continue statements
for x in range(5, 10):
# if (x == 7): break
# if (x % 2 == 0): continue
print (x)
# using the enumerate() function to get index
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
for i, d in enumerate(days):
print (i, d)
if __name__ == "__main__":
main()
| [
"guptamadhur"
] | guptamadhur |
e12613dfb2f4eaff7147038711bfa7c6b7cf944e | 5cd5332e073323dfdd0ff607504787239a6fb820 | /alphabetList.py | 92417226cebadcad636ad3308fc5a0c80c277dd2 | [] | no_license | stephenjmuth/PythonLessons | dabcdb9c3b90e9eb5fa9bd4e9da5a9cecc2655d0 | c2f04c75fa62d3667ad93e0593a5c06b33252c05 | refs/heads/master | 2020-09-06T20:46:00.962283 | 2019-12-26T16:03:18 | 2019-12-26T16:03:18 | 220,545,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from string import ascii_lowercase
print(ascii_lowercase)
letters=ascii_lowercase
LenOfAlphabet=len(letters)
print(LenOfAlphabet)
alphabet=list(letters)
print(alphabet)
i=0
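# Print each letter on its own line, indented one extra space per letter.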
for letter in letters:
spaces=' '*i
print(f"{spaces}{letter}")
i+=1
| [
"[email protected]"
] | |
2abb11573f51431a9b7bca690bdae3263cd98f57 | 64e9494b3eb5be8a65446dbffe848dff26409fc0 | /dataVisualization/main.py | 45e2a1c25a2ff35063af6255a04a65f554a3042b | [] | no_license | hexing2333/python3 | c3797a6efb355d8dd5e569d7a3d387504b583a39 | 1c32cd675ed7d826ef1a03f227368131e78d0493 | refs/heads/main | 2023-02-09T09:27:50.063960 | 2020-12-13T17:20:33 | 2020-12-13T17:20:33 | 321,117,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,834 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas as pd
# Assignment 1
def job1():
plt.style.use('seaborn-bright')
fig, ax = plt.subplots()
ax.set_title("function x*x+x")
x = np.arange(-10, 11, 1)
ax.set_xlim(-11, 11)
ax.set_xticks(x)
y = x * x + x
plt.bar(x, y, color='g', label='y=x*x+x')
plt.legend(loc='upper left')
plt.xlabel("x")
plt.ylabel("y")
plt.grid(True, linestyle='-.', alpha=1)
for a, b in zip(x, y):
plt.text(a, b / 2, '%d' % b, ha='center', va='bottom', fontsize=10)
plt.show()
# Assignment 2
def job2():
def count_elements(scores):
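        # Count how many scores fall into each 5-point bucket from 60-64 up to 90-94.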
scorescount = {}
for i in scores:
scorescount[int(i)] = scorescount.get(int(i), 0) + 1
_scorescount = {}
i = 60
while (i < 95):
for j in range(5):
_scorescount[str(i) + "-" + str(i + 4)] = _scorescount.get(str(i) + "-" + str(i + 4),
0) + scorescount.get(i + j, 0)
i += 5
return _scorescount
filename = "data1402.csv"
scores = []
with open(filename, 'r') as csvfile:
f_csv = csv.reader(csvfile)
for row in f_csv:
scores.append(float(row[0]))
counted = count_elements(scores)
plt.ylim(0, 100)
plt.bar(counted.keys(), counted.values(), 0.5, alpha=0.5, color='b')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.xlabel("成绩分布")
plt.ylabel("人数")
plt.title("分段成绩直方图")
plt.legend(["人数"],loc='upper left')
plt.grid(True, linestyle='-.', alpha=1)
for a, b in zip(counted.keys(), counted.values()):
plt.text(a, b + 0.3, '%d' % b, ha='center', va='bottom', fontsize=10)
plt.show()
# Assignment 3
def job3():
    # Randomly generate a 10x3 array of integers in the range 1-200
rank = np.random.randint(1, 201, size=(10, 3))
print(rank)
fig,ax=plt.subplots()
plt.rcParams['font.sans-serif']=['SimHei']
Semester1 = []
Semester2 = []
Semester3 = []
for row in rank:
Semester1.append(int(row[0]))
Semester2.append(int(row[1]))
Semester3.append(int(row[2]))
x= np.arange(1,11)
plt.bar(x-0.3,Semester1,0.3,alpha=0.5,color='g')
plt.bar(x, Semester2, 0.3, alpha=0.5, color='b')
plt.bar(x+0.3,Semester3,0.3,alpha=0.5,color='r')
for a, b in zip(x, Semester1):
plt.text(a - 0.3, b + 0.2, '%d' % b, ha='center', va='bottom', fontsize=10)
for a, b in zip(x, Semester2):
plt.text(a, b + 0.2, '%d' % b, ha='center', va='bottom', fontsize=10)
for a, b in zip(x, Semester3):
plt.text(a + 0.3, b + 0.2, '%d' % b, ha='center', va='bottom', fontsize=10)
ax.set_xticks(x)
plt.ylim(0, 250)
plt.legend(['第一学期', '第二学期', '第三学期'], loc='upper left')
plt.title('10名学生排名数据直方图')
plt.xlabel('学生序号')
plt.ylabel('名次')
plt.grid(True, linestyle='-.', alpha=1)
plt.show()
def job4():
fig,ax=plt.subplots()
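    # Plot 2*cos(x) and sin(3*x) over [-5*pi, 5*pi] with the axis spines moved to the origin.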
x=np.linspace(-5*np.pi,5*np.pi,640)
cos,sin=2*np.cos(x),np.sin(x*3)
ax.set_xticks([i * np.pi for i in range(-5, 6)])
plt.plot(x, cos, color="blue", linewidth=2, linestyle="-", label="cos")
plt.plot(x, sin, color="red", linewidth=2, linestyle="--", label="sin")
plt.legend(loc='lower left')
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines['bottom'].set_position(('data', 0))
ax.xaxis.set_ticks_position('bottom')
ax.spines['left'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
plt.show()
def job5():
fileNameStr = 'BeijingPM20100101_20151231.csv'
df = pd.read_csv(fileNameStr, encoding='utf-8')
df.drop(df.columns[[range(10, 18)]], axis=1, inplace=True)
    # Drop rows where the PM columns are all empty
df.dropna(axis=0, how='all', subset=['PM_Dongsi', 'PM_Dongsihuan', 'PM_Nongzhanguan', 'PM_US Post'], inplace=True)
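    # Row-wise mean PM value across the four monitoring stations, ignoring missing readings.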
df['sum'] = df[['PM_Dongsi', 'PM_Dongsihuan', 'PM_Nongzhanguan', 'PM_US Post']].sum(axis=1)
df['count'] = df[['PM_Dongsi', 'PM_Dongsihuan', 'PM_Nongzhanguan', 'PM_US Post']].count(axis=1)
df['ave'] = round(df['sum'] / df['count'], 2)
    bj2 = df.groupby(['year', 'month'])['ave'].mean()  # compute the monthly averages
am =pd.DataFrame(bj2).reset_index()
year_2010 = []
year_2011 = []
year_2012 = []
year_2013 = []
year_2014 = []
year_2015 = []
for i in range(0, 12):
print(am.iloc[i]['ave'])
year_2010.append(am.iloc[i]['ave'])
year_2011.append(am.iloc[i + 12]['ave'])
year_2012.append(am.iloc[i + 24]['ave'])
year_2013.append(am.iloc[i + 36]['ave'])
year_2014.append(am.iloc[i + 48]['ave'])
year_2015.append(am.iloc[i + 60]['ave'])
plt.rcParams['font.sans-serif'] = ['SimHei']
x = np.arange(1, 13)
plt.xticks(range(1, 13))
plt.title('10-15年PM指数月平均数据变化情况')
plt.xlabel('月份')
plt.ylabel('PM指数')
plt.plot(x, year_2010, color="b", linewidth=2, linestyle="-", label="2010")
plt.plot(x, year_2011, color="g", linewidth=2, linestyle="-", label="2011")
plt.plot(x, year_2012, color="r", linewidth=2, linestyle="-", label="2012")
plt.plot(x, year_2013, color="c", linewidth=2, linestyle="-", label="2013")
plt.plot(x, year_2014, color="m", linewidth=2, linestyle="-", label="2014")
plt.plot(x, year_2015, color="y", linewidth=2, linestyle="-", label="2015")
plt.legend(loc='upper right')
plt.show()
if __name__ == '__main__':
job1()
job2()
job3()
job4()
job5()
| [
"[email protected]"
] |