max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64)
---|---|---|---|---|---|---|
server.py | PaulsBecks/fc-reliable-messaging | 0 | 12797851 | from http.server import HTTPServer,BaseHTTPRequestHandler
import signal
import sys
class Server(BaseHTTPRequestHandler) :
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length'])
data = self.rfile.read(content_length).decode('utf-8')
dataSplit = data.split(" ")
peerId = str(dataSplit[0])
count = str(dataSplit[-1])
peerLogFile = "server-"+peerId+".log"
        # local helper; note it shadows the inherited send_response() by name
        def send_response():
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(("ACK "+count).encode("utf-8"))
try:
with open(peerLogFile, "r") as f:
lastEntry = f.readlines()[-1]
lastId = str(lastEntry.split(" ")[-1])
if int(lastId) > int(count):
send_response()
return
except (FileNotFoundError, IndexError):
print("No server.log file available yet.")
with open(peerLogFile, "a") as f:
print(str(data))
f.write(str(data) + "\n")
send_response()
def stop_server(server):
print("Stop server.")
server.server_close()
sys.exit(0)
def run(server_class=HTTPServer, handler_class=Server):
print("Start server on port 8000.")
server_address = ('', 8000)
httpd = server_class(server_address, handler_class)
    try:
        httpd.serve_forever()
except KeyboardInterrupt :
stop_server(httpd)
if __name__ == "__main__":
print("Create server")
run() | 2.859375 | 3 |
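A minimal client sketch for the handler above (not part of the original repository): the handler splits the POST body on spaces, treats the first token as the peer id and the last token as a message counter, appends the line to `server-<peerId>.log`, and replies with `ACK <count>`. The `requests` dependency and the payload values are assumptions.

```python
import requests  # assumed HTTP client; any client that can POST a raw body works

# space-separated body: "<peerId> ... <count>"
resp = requests.post("http://localhost:8000", data="peer-1 hello 7")
print(resp.status_code, resp.text)  # expected: 200 ACK 7
```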
reseaut/apps/profiles/serializers.py | koladev32/Beta | 0 | 12797852 | from rest_framework import serializers
from .models import Profile
class ProfileSerializer(serializers.ModelSerializer):
last_name = serializers.CharField(source='user.last_name')
bio = serializers.CharField(allow_blank=True,required=False)
#work_domain = serializers.CharField(max_length=50)
image = serializers.SerializerMethodField()
following = serializers.SerializerMethodField()
class Meta:
model = Profile
fields = ('last_name','bio','image','following')#,'work_domain')
read_only_fields = ('last_name',)
def get_image(self,obj):
if obj.image:
return obj.image
return 'https://image.flaticon.com/icons/svg/1738/1738691.svg'
def get_following(self,instance):
request = self.context.get('request',None)
if request is None:
return False
if not request.user.is_authenticated:
return False
follower = request.user.profile
followee = instance
return follower.is_following(followee) | 2.328125 | 2 |
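A hedged sketch of how this serializer is usually wired into a view so that `get_following` can read the request out of the serializer context; the view class and the `request.user.profile` reverse relation are assumptions, not part of the original app.

```python
from rest_framework.views import APIView
from rest_framework.response import Response

class ProfileDetailView(APIView):
    """Hypothetical view: passes the request via context so `following` resolves."""

    def get(self, request, *args, **kwargs):
        profile = request.user.profile  # assumes a one-to-one from User to Profile
        serializer = ProfileSerializer(profile, context={'request': request})
        return Response(serializer.data)
```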
loldib/getratings/models/NA/na_fiora/na_fiora_mid.py | koliupy/loldib | 0 | 12797853 | from getratings.models.ratings import Ratings
class NA_Fiora_Mid_Aatrox(Ratings):
pass
class NA_Fiora_Mid_Ahri(Ratings):
pass
class NA_Fiora_Mid_Akali(Ratings):
pass
class NA_Fiora_Mid_Alistar(Ratings):
pass
class NA_Fiora_Mid_Amumu(Ratings):
pass
class NA_Fiora_Mid_Anivia(Ratings):
pass
class NA_Fiora_Mid_Annie(Ratings):
pass
class NA_Fiora_Mid_Ashe(Ratings):
pass
class NA_Fiora_Mid_AurelionSol(Ratings):
pass
class NA_Fiora_Mid_Azir(Ratings):
pass
class NA_Fiora_Mid_Bard(Ratings):
pass
class NA_Fiora_Mid_Blitzcrank(Ratings):
pass
class NA_Fiora_Mid_Brand(Ratings):
pass
class NA_Fiora_Mid_Braum(Ratings):
pass
class NA_Fiora_Mid_Caitlyn(Ratings):
pass
class NA_Fiora_Mid_Camille(Ratings):
pass
class NA_Fiora_Mid_Cassiopeia(Ratings):
pass
class NA_Fiora_Mid_Chogath(Ratings):
pass
class NA_Fiora_Mid_Corki(Ratings):
pass
class NA_Fiora_Mid_Darius(Ratings):
pass
class NA_Fiora_Mid_Diana(Ratings):
pass
class NA_Fiora_Mid_Draven(Ratings):
pass
class NA_Fiora_Mid_DrMundo(Ratings):
pass
class NA_Fiora_Mid_Ekko(Ratings):
pass
class NA_Fiora_Mid_Elise(Ratings):
pass
class NA_Fiora_Mid_Evelynn(Ratings):
pass
class NA_Fiora_Mid_Ezreal(Ratings):
pass
class NA_Fiora_Mid_Fiddlesticks(Ratings):
pass
class NA_Fiora_Mid_Fiora(Ratings):
pass
class NA_Fiora_Mid_Fizz(Ratings):
pass
class NA_Fiora_Mid_Galio(Ratings):
pass
class NA_Fiora_Mid_Gangplank(Ratings):
pass
class NA_Fiora_Mid_Garen(Ratings):
pass
class NA_Fiora_Mid_Gnar(Ratings):
pass
class NA_Fiora_Mid_Gragas(Ratings):
pass
class NA_Fiora_Mid_Graves(Ratings):
pass
class NA_Fiora_Mid_Hecarim(Ratings):
pass
class NA_Fiora_Mid_Heimerdinger(Ratings):
pass
class NA_Fiora_Mid_Illaoi(Ratings):
pass
class NA_Fiora_Mid_Irelia(Ratings):
pass
class NA_Fiora_Mid_Ivern(Ratings):
pass
class NA_Fiora_Mid_Janna(Ratings):
pass
class NA_Fiora_Mid_JarvanIV(Ratings):
pass
class NA_Fiora_Mid_Jax(Ratings):
pass
class NA_Fiora_Mid_Jayce(Ratings):
pass
class NA_Fiora_Mid_Jhin(Ratings):
pass
class NA_Fiora_Mid_Jinx(Ratings):
pass
class NA_Fiora_Mid_Kalista(Ratings):
pass
class NA_Fiora_Mid_Karma(Ratings):
pass
class NA_Fiora_Mid_Karthus(Ratings):
pass
class NA_Fiora_Mid_Kassadin(Ratings):
pass
class NA_Fiora_Mid_Katarina(Ratings):
pass
class NA_Fiora_Mid_Kayle(Ratings):
pass
class NA_Fiora_Mid_Kayn(Ratings):
pass
class NA_Fiora_Mid_Kennen(Ratings):
pass
class NA_Fiora_Mid_Khazix(Ratings):
pass
class NA_Fiora_Mid_Kindred(Ratings):
pass
class NA_Fiora_Mid_Kled(Ratings):
pass
class NA_Fiora_Mid_KogMaw(Ratings):
pass
class NA_Fiora_Mid_Leblanc(Ratings):
pass
class NA_Fiora_Mid_LeeSin(Ratings):
pass
class NA_Fiora_Mid_Leona(Ratings):
pass
class NA_Fiora_Mid_Lissandra(Ratings):
pass
class NA_Fiora_Mid_Lucian(Ratings):
pass
class NA_Fiora_Mid_Lulu(Ratings):
pass
class NA_Fiora_Mid_Lux(Ratings):
pass
class NA_Fiora_Mid_Malphite(Ratings):
pass
class NA_Fiora_Mid_Malzahar(Ratings):
pass
class NA_Fiora_Mid_Maokai(Ratings):
pass
class NA_Fiora_Mid_MasterYi(Ratings):
pass
class NA_Fiora_Mid_MissFortune(Ratings):
pass
class NA_Fiora_Mid_MonkeyKing(Ratings):
pass
class NA_Fiora_Mid_Mordekaiser(Ratings):
pass
class NA_Fiora_Mid_Morgana(Ratings):
pass
class NA_Fiora_Mid_Nami(Ratings):
pass
class NA_Fiora_Mid_Nasus(Ratings):
pass
class NA_Fiora_Mid_Nautilus(Ratings):
pass
class NA_Fiora_Mid_Nidalee(Ratings):
pass
class NA_Fiora_Mid_Nocturne(Ratings):
pass
class NA_Fiora_Mid_Nunu(Ratings):
pass
class NA_Fiora_Mid_Olaf(Ratings):
pass
class NA_Fiora_Mid_Orianna(Ratings):
pass
class NA_Fiora_Mid_Ornn(Ratings):
pass
class NA_Fiora_Mid_Pantheon(Ratings):
pass
class NA_Fiora_Mid_Poppy(Ratings):
pass
class NA_Fiora_Mid_Quinn(Ratings):
pass
class NA_Fiora_Mid_Rakan(Ratings):
pass
class NA_Fiora_Mid_Rammus(Ratings):
pass
class NA_Fiora_Mid_RekSai(Ratings):
pass
class NA_Fiora_Mid_Renekton(Ratings):
pass
class NA_Fiora_Mid_Rengar(Ratings):
pass
class NA_Fiora_Mid_Riven(Ratings):
pass
class NA_Fiora_Mid_Rumble(Ratings):
pass
class NA_Fiora_Mid_Ryze(Ratings):
pass
class NA_Fiora_Mid_Sejuani(Ratings):
pass
class NA_Fiora_Mid_Shaco(Ratings):
pass
class NA_Fiora_Mid_Shen(Ratings):
pass
class NA_Fiora_Mid_Shyvana(Ratings):
pass
class NA_Fiora_Mid_Singed(Ratings):
pass
class NA_Fiora_Mid_Sion(Ratings):
pass
class NA_Fiora_Mid_Sivir(Ratings):
pass
class NA_Fiora_Mid_Skarner(Ratings):
pass
class NA_Fiora_Mid_Sona(Ratings):
pass
class NA_Fiora_Mid_Soraka(Ratings):
pass
class NA_Fiora_Mid_Swain(Ratings):
pass
class NA_Fiora_Mid_Syndra(Ratings):
pass
class NA_Fiora_Mid_TahmKench(Ratings):
pass
class NA_Fiora_Mid_Taliyah(Ratings):
pass
class NA_Fiora_Mid_Talon(Ratings):
pass
class NA_Fiora_Mid_Taric(Ratings):
pass
class NA_Fiora_Mid_Teemo(Ratings):
pass
class NA_Fiora_Mid_Thresh(Ratings):
pass
class NA_Fiora_Mid_Tristana(Ratings):
pass
class NA_Fiora_Mid_Trundle(Ratings):
pass
class NA_Fiora_Mid_Tryndamere(Ratings):
pass
class NA_Fiora_Mid_TwistedFate(Ratings):
pass
class NA_Fiora_Mid_Twitch(Ratings):
pass
class NA_Fiora_Mid_Udyr(Ratings):
pass
class NA_Fiora_Mid_Urgot(Ratings):
pass
class NA_Fiora_Mid_Varus(Ratings):
pass
class NA_Fiora_Mid_Vayne(Ratings):
pass
class NA_Fiora_Mid_Veigar(Ratings):
pass
class NA_Fiora_Mid_Velkoz(Ratings):
pass
class NA_Fiora_Mid_Vi(Ratings):
pass
class NA_Fiora_Mid_Viktor(Ratings):
pass
class NA_Fiora_Mid_Vladimir(Ratings):
pass
class NA_Fiora_Mid_Volibear(Ratings):
pass
class NA_Fiora_Mid_Warwick(Ratings):
pass
class NA_Fiora_Mid_Xayah(Ratings):
pass
class NA_Fiora_Mid_Xerath(Ratings):
pass
class NA_Fiora_Mid_XinZhao(Ratings):
pass
class NA_Fiora_Mid_Yasuo(Ratings):
pass
class NA_Fiora_Mid_Yorick(Ratings):
pass
class NA_Fiora_Mid_Zac(Ratings):
pass
class NA_Fiora_Mid_Zed(Ratings):
pass
class NA_Fiora_Mid_Ziggs(Ratings):
pass
class NA_Fiora_Mid_Zilean(Ratings):
pass
class NA_Fiora_Mid_Zyra(Ratings):
pass
| 1.390625 | 1 |
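The file above is one empty `Ratings` subclass per opposing champion. As a design note, the same classes could be produced at import time; the sketch below is an alternative, not what the original project does, and the champion list is truncated.

```python
from getratings.models.ratings import Ratings

CHAMPIONS = ["Aatrox", "Ahri", "Akali"]  # ...one entry per champion

for _champ in CHAMPIONS:
    _name = f"NA_Fiora_Mid_{_champ}"
    globals()[_name] = type(_name, (Ratings,), {})  # empty subclass, same as above
```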
ProcessDoc.py | CharlesG12/IndexCompression | 0 | 12797854 | import os
import Token
from xml.dom import minidom
class ProcessDoc:
def __init__(self, path, lemma, stem):
self.collection_dic = {}
self.doc_info = []
self.lemma = lemma
self.stem = stem
self.path = path
def run(self):
for filename in os.listdir(self.path):
doc_no, doclen, max_tf, doc_dic = self.load_file(os.path.join(self.path, filename))
for key, value in doc_dic.items():
# add token to dictionary if not exist
if key not in self.collection_dic:
self.collection_dic[key] = [1, value, [doc_no, value, doclen, max_tf]]
# increase the frequency by one if exist in the dictionary
else:
self.collection_dic[key][0] += 1
self.collection_dic[key][1] += value
self.collection_dic[key].append([doc_no, value, doclen, max_tf])
def load_file(self, url):
# parse xml doc from the url
mydoc = minidom.parse(url)
# read doc NO
doc = mydoc.getElementsByTagName('DOCNO')[0]
doc_no = int(doc.firstChild.data)
# read doc text file
text = mydoc.getElementsByTagName('TEXT')[0]
data = text.firstChild.data
token = Token.Token()
if self.lemma == 1:
token.apply_lemma()
elif self.stem == 1:
token.apply_stemming()
return token.tokenize(data, doc_no)
| 2.84375 | 3 |
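A hedged usage sketch for the indexer above: each file under `path` is expected to be an XML document with `DOCNO` and `TEXT` elements, and `Token.tokenize` is assumed to return `(doc_no, doclen, max_tf, doc_dic)` as unpacked in `run`. The directory name is an assumption.

```python
# build the collection dictionary from a folder of Cranfield-style XML docs
processor = ProcessDoc(path="./docs", lemma=1, stem=0)
processor.run()

# collection_dic maps a token to
# [document_frequency, collection_frequency, [doc_no, tf, doclen, max_tf], ...]
for term, postings in list(processor.collection_dic.items())[:5]:
    print(term, postings[0], postings[1])
```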
apis/watsonNLUTa.py | amol-m-deshpande/testing-remote | 0 | 12797855 | <gh_stars>0
from flask_restful import Resource
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson import ToneAnalyzerV3
from ibm_watson.natural_language_understanding_v1 \
import Features, EntitiesOptions, KeywordsOptions, \
SyntaxOptions, SyntaxOptionsTokens, CategoriesOptions, ConceptsOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from flask import request, jsonify
from operator import itemgetter
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from datetime import datetime
class WatsonNLUTA(Resource):
NLU_API_KEY_ID = ""
NLU_URL = ""
TONE_API_KEY_ID = ""
TONE_URL = ""
def __init__(self):
try:
with open('naturallanguageunderstanding.json', 'r') as credentialsFile:
credentials1 = json.loads(credentialsFile.read())
self.NLU_API_KEY_ID = credentials1.get('apikey')
self.NLU_URL = credentials1.get('url')
nlu_authenticator = IAMAuthenticator(self.NLU_API_KEY_ID)
natural_language_understanding = NaturalLanguageUnderstandingV1(
version='2021-08-01',
authenticator=nlu_authenticator
)
natural_language_understanding.set_service_url(self.NLU_URL)
self.natural_language_understanding = natural_language_understanding
except json.decoder.JSONDecodeError:
print("Natural Language Understanding credentials file is empty, please enter the credentials and try again.")
try:
with open('toneanalyzer.json', 'r') as credentialsFile:
credentials2 = json.loads(credentialsFile.read())
self.TONE_API_KEY_ID = credentials2.get('apikey')
self.TONE_URL = credentials2.get('url')
tone_analyzer_authenticator = IAMAuthenticator(self.TONE_API_KEY_ID)
tone_analyzer = ToneAnalyzerV3(
version='2017-09-21',
authenticator=tone_analyzer_authenticator
)
tone_analyzer.set_service_url(self.TONE_URL)
self.tone_analyzer = tone_analyzer
except json.decoder.JSONDecodeError:
print("Tone Analyzer credentials file is empty, please enter the credentials and try again.")
def get(self):
pass
def post(self):
if request.method == 'POST':
body = json.loads(request.get_data())
options = body
fileName = body.get('filename')
''' Prepare the text for Analysis'''
with open('static/transcripts/'+fileName, 'r') as text_file:
text = text_file.read()
''' Initialize a return variable '''
myJsonDict = {}
''' Extract Category with NLU '''
if options.get('category') == "True":
try:
response = self.natural_language_understanding.analyze(
language='en',
text=text,
features=Features(categories=CategoriesOptions(limit=1))).get_result()
category = response['categories'][0]
myJsonDict.update({"category": category})
except:
myJsonDict.update({"category": "Text too small to extract category"})
else:
pass
''' Extract Concepts with NLU '''
if options.get('concepts') == "True":
try:
response = self.natural_language_understanding.analyze(
language='en',
text=text,
features=Features(concepts=ConceptsOptions(limit=3))).get_result()
concepts = sorted(response['concepts'],
key=itemgetter('relevance'), reverse=True)
myJsonDict.update({"concepts": concepts})
except:
myJsonDict.update({"concepts": "Text too small to extract concepts"})
else:
pass
''' Extract Entity with NLU '''
if options.get('entity') == "True":
try:
response = self.natural_language_understanding.analyze(
language='en',
text=text,
features=Features(entities=EntitiesOptions(limit=1))).get_result()
entity = sorted(response['entities'],
key=itemgetter('relevance'), reverse=True)
myJsonDict.update({"entity": entity[0]})
except:
myJsonDict.update({"entity": "Text too small to extract entity"})
else:
pass
''' Extract Sentiments and Emotions with NLU '''
if options.get('sentiments') == "True":
try:
response = self.natural_language_understanding.analyze(
language='en',
text=text,
features=Features(keywords=KeywordsOptions(sentiment=True, emotion=True, limit=10))).get_result()
keywords = sorted(response['keywords'],
key=itemgetter('relevance'), reverse=True)
keywords_sentiments_emotions = []
for i in keywords:
keywords_sentiments_emotions_buffer = {
'keyword': i['text'],
'sentiment': i['sentiment']['label'],
'emotion': ''
}
                        # pick the dominant emotion with a running maximum;
                        # plain `if`s (not `elif`) so every emotion is compared
                        maximum = i['emotion']['sadness']
                        keywords_sentiments_emotions_buffer['emotion'] = 'sadness'
                        if i['emotion']['joy'] > maximum:
                            maximum = i['emotion']['joy']
                            keywords_sentiments_emotions_buffer['emotion'] = 'joy'
                        if i['emotion']['fear'] > maximum:
                            maximum = i['emotion']['fear']
                            keywords_sentiments_emotions_buffer['emotion'] = 'fear'
                        if i['emotion']['disgust'] > maximum:
                            maximum = i['emotion']['disgust']
                            keywords_sentiments_emotions_buffer['emotion'] = 'disgust'
                        if i['emotion']['anger'] > maximum:
                            maximum = i['emotion']['anger']
                            keywords_sentiments_emotions_buffer['emotion'] = 'anger'
keywords_sentiments_emotions.append(
keywords_sentiments_emotions_buffer)
myJsonDict.update({"sentiments": keywords_sentiments_emotions})
except:
myJsonDict.update({"sentiments": "Text too small to extract sentiments"})
else:
pass
''' Analyse tone to get top 5 positive sentences '''
if options.get('positiveSentences') == "True":
tone_analysis = self.tone_analyzer.tone(
{'text': text},
content_type='application/json'
).get_result()
sentences_with_joy = []
# print(json.dumps(tone_analysis, indent=2))
try:
for tone in tone_analysis['sentences_tone']:
try:
if tone['tones'][0]['tone_name'] == "Joy":
tempDict = {"sentence_id": tone['sentence_id'],
"text": tone['text'],
"score": tone['tones'][0]['score']}
sentences_with_joy.append(tempDict)
except:
continue
sentences_with_joy = sorted(
sentences_with_joy, key=itemgetter('score'), reverse=True)
myJsonDict.update(
{"positiveSentences": sentences_with_joy[:5]})
except:
tempDict = {"sentence_id": '',
"text": 'Text file too small to get positive sentences, please try again with a bigger document.',
"score": '100'}
myJsonDict.update(
{"positiveSentences": [tempDict]})
# return sentences_with_joy[:5] ['text'] ['score']
else:
pass
''' Pre-Processing parts of speech to plot Word Cloud '''
try:
response = self.natural_language_understanding.analyze(
language='en',
text=text,
features=Features(
syntax=SyntaxOptions(
sentences=True,
tokens=SyntaxOptionsTokens(
lemma=True,
part_of_speech=True,
)))).get_result()
verbs = []
for i in response['syntax']['tokens']:
if i['part_of_speech'] == 'VERB':
verbs.append(i['text'])
nouns = []
for i in response['syntax']['tokens']:
if i['part_of_speech'] == 'NOUN':
nouns.append(i['text'])
adj = []
for i in response['syntax']['tokens']:
if i['part_of_speech'] == 'ADJ':
adj.append(i['text'])
nouns_adjectives = []
for x in nouns:
nouns_adjectives.append(x)
for y in adj:
nouns_adjectives.append(y)
comment_words_verbs = ' '
comment_words_nouns_adj = ' '
stopwords = set(STOPWORDS)
for val in verbs:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
for words in tokens:
comment_words_verbs = comment_words_verbs + words + ' '
for val in nouns_adjectives:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
for words in tokens:
comment_words_nouns_adj = comment_words_nouns_adj + words + ' '
wordcloud_verbs = WordCloud(width=800, height=800,
background_color='white',
stopwords=stopwords,
min_font_size=10,
max_font_size=150,
random_state=42).generate(comment_words_verbs)
wordcloud_nouns_adj = WordCloud(width=800, height=800,
background_color='white',
colormap="Dark2",
stopwords=stopwords,
min_font_size=10,
max_font_size=150,
random_state=42).generate(comment_words_nouns_adj)
todayDate = datetime.today().strftime('%m-%d-%Y-%s')
verbsWC = "static/images/verbs"+todayDate+'.png'
plt.switch_backend('Agg')
plt.figure(figsize=(5, 5), facecolor=None)
plt.imshow(wordcloud_verbs)
plt.axis("off")
plt.tight_layout(pad=0)
                plt.savefig(verbsWC)
nounsAdjWC = "static/images/nouns_adjectives"+todayDate+'.png'
plt.switch_backend('Agg')
plt.figure(figsize=(5, 5), facecolor=None)
plt.imshow(wordcloud_nouns_adj)
plt.axis("off")
plt.tight_layout(pad=0)
                plt.savefig(nounsAdjWC)
wordclouds = [nounsAdjWC, verbsWC]
myJsonDict.update({"wordclouds": wordclouds})
except:
myJsonDict.update({"wordclouds": "Text too small to extract wordclouds"})
# print(json.dumps(myJsonDict, indent=2))
return jsonify(myJsonDict) | 2.796875 | 3 |
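A hedged sketch of exercising the resource once it is registered with Flask-RESTful; the route path, port, and transcript filename are assumptions. The body carries the analysis switches as the string values `post()` checks for, plus the transcript filename resolved under `static/transcripts/`.

```python
import requests

payload = {
    "filename": "meeting.txt",      # must exist under static/transcripts/
    "category": "True",
    "concepts": "True",
    "entity": "False",
    "sentiments": "True",
    "positiveSentences": "False",
}
resp = requests.post("http://localhost:5000/watson-nlu-ta", json=payload)
print(resp.json())   # category/concepts/sentiments plus word-cloud image paths
```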
applepushnotification/tests/test_basic.py | xiaohaifxb/applepushnotification | 7 | 12797856 | #!/usr/bin/env python
from applepushnotification import *
from unittest import TestCase
from applepushnotification.tests import TestAPNS
import struct, time
try:
import json
except ImportError:
import simplejson as json
class TestBasic(TestAPNS):
def test_construct_service(self):
service = self.create_service()
service.start()
service.stop()
self.assertTrue(service._send_greenlet is None)
self.assertTrue(service._error_greenlet is None)
def test_construct_message(self):
msg = self.create_message()
encoded = str(msg)
command, identifier, expiry, tok_length = struct.unpack("!bIIH",
encoded[0:11])
self.assertEquals(command, 1)
self.assertEquals(identifier, msg.identifier)
self.assertTrue(expiry > time.time())
self.assertEquals(tok_length, 32)
data = encoded[45:]
m = json.loads(data)
self.assertTrue("aps" in m)
def test_send_message(self):
service = self.create_service()
service.start()
service.send(self.create_message())
self.assertTrue(service.stop())
| 2.5625 | 3 |
tests/Unit/AutoML/test_feature_processing.py | nielsuit227/AutoML | 2 | 12797857 | <reponame>nielsuit227/AutoML
import unittest
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from Amplo.AutoML import FeatureProcesser
class TestFeatureProcessing(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def test_regression(self):
x, y = make_regression()
x, y = pd.DataFrame(x), pd.Series(y)
fp = FeatureProcesser(max_lags=2, mode='regression')
xt, sets = fp.fit_transform(x, y)
def test_classification(self):
x, y = make_classification()
x, y = pd.DataFrame(x), pd.Series(y)
fp = FeatureProcesser(max_lags=2, mode='classification')
xt, sets = fp.fit_transform(x, y)
def test_co_linearity(self):
y = pd.Series(np.linspace(2, 100, 100))
x = pd.DataFrame({'a': np.linspace(-4, 4, 100), 'b': np.linspace(-4, 4, 100)})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.coLinearFeatures) != 0, "Colinear feature not removed"
def test_multiply_features(self):
y = pd.Series(np.linspace(2, 100, 100))
b = pd.Series(np.linspace(-4, 4, 100) ** 2)
x = pd.DataFrame({'a': y / b, 'b': b})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.crossFeatures) != 0, "Multiplicative feature not spotted"
def test_division(self):
y = pd.Series(np.linspace(2, 100, 100))
b = pd.Series(np.linspace(-4, 4, 100) ** 2)
x = pd.DataFrame({'a': y * b, 'b': b})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.crossFeatures) != 0, "Division feature not spotted"
def test_trigonometry(self):
y = pd.Series(np.sin(np.linspace(0, 100, 100)))
x = pd.DataFrame({'a': np.linspace(0, 100, 100)})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.trigonometricFeatures) != 0, "Trigonometric feature not spotted"
def test_lagged(self):
y = pd.Series(np.random.randint(0, 100, 100))
x = pd.DataFrame({'a': np.roll(y, -5)})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.laggedFeatures) != 0, "Lagged feature not spotted"
def test_diff(self):
y = pd.Series(np.random.randint(1, 100, 100))
x = pd.DataFrame({'a': np.cumsum(y)})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert len(fp.diffFeatures) != 0, "Difference feature not spotted"
def test_select(self):
n = 1000
y = pd.Series(np.linspace(0, 100, n))
x = pd.DataFrame({'a': y, 'b': np.random.normal(0, 1, n)})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert all([len(i) == 1 for i in sets.values()]), f"Random Feature Selected: {sets}"
def test_settings(self):
y = pd.Series(np.random.randint(1, 100, 100))
b = pd.Series(np.linspace(-4, 4, 100))
x = pd.DataFrame({'a': np.cumsum(y), 'b': np.roll(y, -5), 'c': y / b, 'd': y * b})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
settings = fp.get_settings()
fpn = FeatureProcesser(mode='regression')
fpn.load_settings(settings)
for k, v in sets.items():
xtn = fpn.transform(x, k)
assert len(v) == len(xtn.keys()), "Incorrect number of keys"
assert all(xt[v].keys() == xtn.keys()), 'Keys are not correct'
assert np.allclose(xt[v], xtn), 'Transformed data not consistent for {} set'.format(k)
def test_get_required(self):
y = pd.Series(np.linspace(2, 100, 100))
b = pd.Series(np.linspace(-4, 4, 100) ** 2)
x = pd.DataFrame({'a': y / b, 'b': b, 'c': b / 2})
fp = FeatureProcesser(mode='regression')
xt, sets = fp.fit_transform(x, y)
assert set(fp.get_required_features(['a__x__b'])) == set(['b', 'a'])
| 2.953125 | 3 |
eval/read_code_table_and_train_text.py | YangXuepei/ime-eval | 0 | 12797858 | <reponame>YangXuepei/ime-eval
# -*- coding:utf-8 -*-#
import csv
def readfile(filename):
csvfile = open(filename, 'rb')
reader = csv.reader(csvfile)
result = {}
for item in reader:
result[item[0].decode('utf-8')] = item[1]
csvfile.close()
# print result
# return a directory {character:code}
return result
def load_test(testname):
# store the testdata in a string
test = ""
file = open(testname)
for line in file.readlines():
test += line.strip('\n')
test = test.decode('utf-8')
return test
def load_all_pinyin(filename):
pinyin = []
pyfile = open(filename)
for line in pyfile:
pinyin.append(line.strip())
return pinyin
| 3.234375 | 3 |
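A hedged sketch of the inputs these loaders expect: `readfile` reads a two-column CSV mapping each character to its code, `load_test` concatenates the lines of a plain-text corpus, and `load_all_pinyin` reads one pinyin syllable per line. The filenames and sample rows are assumptions.

```python
# code_table.csv (UTF-8), one "<character>,<code>" row per line, e.g.
#   的,d
#   了,l
code_table = readfile("code_table.csv")     # {u'的': 'd', u'了': 'l'}
test_text = load_test("train_text.txt")     # one long unicode string
pinyin = load_all_pinyin("all_pinyin.txt")  # ['a', 'ai', 'an', ...]
```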
src/dash/apps/pca.py | ijmiller2/COVID-19_Multi-Omics | 4 | 12797859 | <gh_stars>1-10
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import datetime
from data import get_omics_data, get_biomolecule_names, get_combined_data
from plot import biomolecule_bar, boxplot, pca_scores_plot, pca_loadings_plot
from nav import navbar
external_stylesheets=[dbc.themes.BOOTSTRAP]
"""app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets)
app.title = 'COVID-19 Multi-Omics'"""
from app import app
print()
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print("Loading data for pca...")
print()
# load metabolomics data matrix
print("Loading metabolomics data...")
from app import metabolomics_df, metabolomics_quant_range
print("Metabolomics data shape: {}".format(metabolomics_df.shape))
print("Loading lipidomics data...")
from app import lipidomics_df, lipidomics_quant_range
print("Lipidomics data shape: {}".format(lipidomics_df.shape))
print("Loading proteomics data...")
from app import proteomics_df, proteomics_quant_range
print("Proteomics data shape: {}".format(proteomics_df.shape))
print("Loading transcriptomics data...")
from app import transcriptomics_df, transcriptomics_quant_range
print("Transcriptomics data shape: {}".format(transcriptomics_df.shape))
available_datasets = ['Proteins', 'Lipids', 'Metabolites', 'Transcripts', 'Combined Biomolecules']
# define dataset dictionaries
from app import dataset_dict, df_dict, quant_value_range_dict, global_names_dict
from app import metabolomics_biomolecule_names_dict
from app import lipidomics_biomolecule_names_dict
from app import proteomics_biomolecule_names_dict
from app import transcriptomics_biomolecule_names_dict
# get combined omics df and quant value range
print("Creating combined omics df...")
df_dict, quant_value_range_dict = get_combined_data(df_dict, quant_value_range_dict)
# start with proteomics data
sorted_biomolecule_names_dict = {k: v for k, v in sorted(proteomics_biomolecule_names_dict.items(), key=lambda item: item[1])}
#available_biomolecules = proteomics_biomolecule_names_dict.values()
#available_biomolecules = proteomics_df.columns[:proteomics_quant_range].sort_values().tolist()
default_biomolecule = list(sorted_biomolecule_names_dict.keys())[0]
plotly_config = {"toImageButtonOptions":{'format':'svg',
'filename': 'dash_plot'},
"displaylogo": False}
first_card = dbc.Card(
[
dbc.CardHeader("PCA SCORES PLOT",
style={"background-color":"#5bc0de",
"font-weight":"bold",
"font-size":"large"}),
dbc.CardBody(dcc.Graph(id='pca-scores-figure',
config=plotly_config))
])
second_card = dbc.Card(
[
dbc.CardHeader("PCA LOADINGS PLOT",
style={"background-color":"#5bc0de",
"font-weight":"bold",
"font-size":"large"}),
dbc.CardBody(dcc.Graph(id='pca-loadings-figure',
config=plotly_config))
])
third_card = dbc.Card(
[
dbc.CardHeader("BIOMOLECULE BARPLOT",
style={"background-color":"#5bc0de",
"font-weight":"bold",
"font-size":"large"}),
dbc.CardBody(dcc.Graph(id='biomolecule-barplot',
config=plotly_config))
])
fourth_card = dbc.Card(
[
dbc.CardHeader("BIOMOLECULE BOXPLOT",
style={"background-color":"#5bc0de",
"font-weight":"bold",
"font-size":"large"}),
dbc.CardBody(dcc.Graph(id='biomolecule-boxplot',
config=plotly_config))
])
###
control_panel = dbc.Card(
[
dbc.CardHeader("CONTROL PANEL",
style={"background-color":"#5bc0de",
"font-weight":"bold",
"font-size":"large"}),
dbc.CardBody(
[html.P("Select Dataset", className="card-title", style={"font-weight":"bold"}),
dcc.Dropdown(
id='dataset_id',
options=[{'label': i, 'value': i} for i in available_datasets],
# only passing in quant value columns
value=available_datasets[0]),
html.Hr(),
html.P("Select Biomolecule", className="card-title", style={"font-weight":"bold"}),
# NOTE: This is dcc object not dbc
dcc.Dropdown(
id='biomolecule_id',
# label maps to biomolecule name, value to biomolecule_id
options=[{'label': value, 'value': key} for key, value in sorted_biomolecule_names_dict.items()],
# only passing in quant value columns
value=default_biomolecule,
className="dropdown-item p-0"),
])
])
#app.layout = dbc.Container([
layout = dbc.Container([
navbar,
html.Hr(),
dbc.Row(dbc.Col(html.H1("COVID-19 Multi-Omics Data Dashboard"), width={"size": 6, "offset": 3})),
html.Hr(),
dbc.Row(
[dbc.Col(
dbc.Nav(
[
html.H3("TYPE OF ANALYSIS", style={"font-weight":"bold", "color":"black"}),
dbc.NavItem(dbc.NavLink("PCA", active=True, href="pca", style={"background-color":"grey"})),
dbc.NavItem(dbc.NavLink(
html.Span(
"Linear Regression",
id="tooltip-lr",
style={"cursor": "pointer", "color":"grey"},
),disabled=False, href="linear_regression")),
dbc.NavItem(dbc.NavLink(
html.Span(
"Differential Expression",
id="tooltip-de",
style={"cursor": "pointer", "color":"grey"},
),disabled=False, href="differential_expression")),
dbc.NavItem(dbc.NavLink(
html.Span(
"Clustergrammer",
id="tooltip-cg",
style={"cursor":"pointer", "color":"grey"},
),disabled=False, href="clustergrammer")),
html.Hr(),
control_panel
],
vertical="md",
pills=True
), md=2, className="mb-3"),
#dbc.Col(control_panel, md=6)
dbc.Col(first_card, md=4),
dbc.Col(second_card, md=6)
],
className="mb-3"),
dbc.Row([dbc.Col(third_card, md=7, align="center"), dbc.Col(fourth_card, md=5, align="center")], className="mb-3")
], fluid=True)
@app.callback(
dash.dependencies.Output('biomolecule_id', 'options'),
[Input('dataset_id', 'value')])
def update_biomolecule_options(dataset_id):
dataset = dataset_dict[dataset_id]
biomolecule_names_dict = global_names_dict[dataset]
df = df_dict[dataset]
quant_value_range = quant_value_range_dict[dataset]
# get list of columns for dataset
available_biomolecules = df.columns[:quant_value_range].sort_values().tolist()
sorted_biomolecule_names_dict = {k: v for k, v in sorted(biomolecule_names_dict.items(), key=lambda item: item[1])}
options=[{'label': value, 'value': key} for key, value in sorted_biomolecule_names_dict.items() if key in available_biomolecules]
#print(options)
return options
@app.callback(
Output('biomolecule_id', 'value'),
[Input('dataset_id', 'value')])
def update_default_biomolecule(dataset_id):
dataset = dataset_dict[dataset_id]
biomolecule_names_dict = global_names_dict[dataset]
sorted_biomolecule_names_dict = {k: v for k, v in sorted(biomolecule_names_dict.items(), key=lambda item: item[1])}
default_biomolecule=list(sorted_biomolecule_names_dict.keys())[0]
return default_biomolecule
@app.callback(
Output('pca-scores-figure', 'figure'),
[Input('dataset_id', 'value')])
def update_pca_scores_plot(dataset_id):
dataset = dataset_dict[dataset_id]
df = df_dict[dataset]
quant_value_range = quant_value_range_dict[dataset]
fig = pca_scores_plot(df, quant_value_range)
return fig
@app.callback(
Output('pca-loadings-figure', 'figure'),
[Input('dataset_id', 'value'),
Input('biomolecule_id', 'value')])
def update_pca_loadings_plot(dataset_id, biomolecule_id):
dataset = dataset_dict[dataset_id]
df = df_dict[dataset]
biomolecule_names_dict = global_names_dict[dataset]
quant_value_range = quant_value_range_dict[dataset]
# build ome type list for coloring
if not dataset == 'combined':
ome_type_list = [dataset] * quant_value_range
else:
ome_type_list = ['proteomics'] * quant_value_range_dict['proteomics']
ome_type_list.extend(['lipidomics'] * quant_value_range_dict['lipidomics'])
ome_type_list.extend(['metabolomics'] * quant_value_range_dict['metabolomics'])
ome_type_list.extend(['transcriptomics'] * quant_value_range_dict['transcriptomics'])
# get biomolecule index
biomolecule_index = df.columns.tolist().index(biomolecule_id)
ome_type_list[biomolecule_index] = 'selected_biomolecule'
fig = pca_loadings_plot(df, quant_value_range, dataset_id, biomolecule_names_dict, ome_type_list)
return fig
@app.callback(
Output('biomolecule-barplot', 'figure'),
[Input('biomolecule_id', 'value'),
Input('dataset_id', 'value')])
def update_biomolecule_barplot(biomolecule_id, dataset_id):
dataset = dataset_dict[dataset_id]
df = df_dict[dataset]
biomolecule_names_dict = global_names_dict[dataset]
biomolecule_name = biomolecule_names_dict[biomolecule_id]
fig = biomolecule_bar(df, biomolecule_id, biomolecule_names_dict)
return fig
@app.callback(
Output('biomolecule-boxplot', 'figure'),
[Input('biomolecule_id', 'value'),
Input('dataset_id', 'value')])
def update_biomolecule_boxplot(biomolecule_id, dataset_id):
dataset = dataset_dict[dataset_id]
df = df_dict[dataset]
biomolecule_names_dict = global_names_dict[dataset]
biomolecule_name = biomolecule_names_dict[biomolecule_id]
fig = boxplot(df, biomolecule_id, biomolecule_names_dict)
return fig
print("Starting server...")
if __name__ == '__main__':
app.run_server(
debug=True,
host='0.0.0.0',
#port='8080'
)
| 2.328125 | 2 |
apps/availability/migrations/0001_initial.py | ExpoAshique/ProveBanking__s | 0 | 12797860 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UpdateRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('requested_by', models.ManyToManyField(related_name=b'requested_availability_updates', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(related_name=b'update_requests', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Week',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField()),
('allocation', models.IntegerField(default=0)),
('proposed', models.ManyToManyField(related_name=b'availability_weeks', to='projects.ProposedResource')),
('user', models.ForeignKey(related_name=b'availability_weeks', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('date',),
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='week',
unique_together=set([('user', 'date')]),
),
]
| 1.664063 | 2 |
region_cache/region.py | jheard-tw/region_cache | 2 | 12797861 | <reponame>jheard-tw/region_cache
import pickle
import redis
from collections.abc import MutableMapping
from datetime import datetime
from functools import wraps
import blinker
import logging
from logging import getLogger
_logger = getLogger('region_cache')
class Region(MutableMapping):
"""
A bound cache region. Do not instantiate these directly. Instead use the RegionCache.region() function.
This will make for proper nesting of cache structures.
"""
def __init__(self, region_cache, name, timeout=None, update_resets_timeout=True, serializer=pickle):
self._region_cache = region_cache
self.name = name
self._region_cache.conn.hset(name, '__cache_region_created_at__', datetime.utcnow().isoformat())
self._timeout = None
self._region_cache = region_cache
self._serializer = serializer
self._pipe = None
self._children_key = self.name + "::child_caches"
self._update_resets_timeout = update_resets_timeout
if timeout:
self._timeout = timeout
self._region_cache.conn.expire(name, timeout)
if '.' in name:
parent = name.rsplit('.', 1)[0]
parent = self._region_cache.region(parent)
parent.add_child(self)
def __repr__(self):
return "Region({})".format(self.name)
def region(self, name=None, timeout=None, update_resets_timeout=None, serializer=None):
"""
Get a sub-region from this region. When this region is invalidated, the subregion will be too.
:param name: The name of the subregion. If dots are included, then the dotted regions are treated as subregions
of this subregion.
:param timeout: The timeout in seconds for this region. Defaults to the parent region's timeout
:param update_resets_timeout: Whether updating the region resets the timeout. Defaults to the parent region's
setting.
:param serializer: The serializer to use. Must define loads and dumps(). Defaults to the parent region's setting
:return: Region
"""
return self._region_cache.region(
name=self.name + '.' + name,
timeout=timeout or self._timeout,
update_resets_timeout=(
                update_resets_timeout if update_resets_timeout is not None else self._update_resets_timeout),
serializer=serializer or self._serializer
)
def invalidate(self, pipeline=None):
"""
Delete this region's cache data and all its subregions.
:param pipeline: Used internally.
:return: None
"""
_logger.debug("Invalidating region %s", self.name)
if pipeline is None:
pipeline = self._region_cache.conn.pipeline()
is_root_call = True
else:
is_root_call = False
for child in self.children():
child.invalidate(pipeline)
pipeline.delete(self.name)
if is_root_call:
pipeline.execute()
def invalidate_on(self, *signals):
"""
Bind this cache region to blinker signals. When any of the signals have been triggered, invalidate the cache.
:param signals: blinker signal objects or string names for named signals.
:return: None
"""
def handler(sender, **kwargs):
try:
self.invalidate()
except redis.TimeoutError:
logging.getLogger('region_cache').exception(
"Invalidation of {self.name} in signal handler timed out. Flush it manually".format(self=self))
for sig in signals:
if isinstance(sig, str):
sig = blinker.signal(sig)
sig.connect(handler, weak=False)
def cached(self, f):
"""
Decorator that uses a serialized form of the input args as a key and caches the result of calling the method.
Subsequent calls to the method with the same arguments will return the cached result.
"""
@wraps(f)
def wrapper(*args, **kwargs):
key = self._serializer.dumps((args, kwargs))
try:
ret = self[key]
except KeyError:
ret = f(*args, **kwargs)
self[key] = ret
return ret
return wrapper
def get_or_compute(self, item, alt):
"""
Get the value, or if the value is not in the cache, compute it from `alt`. Alt can be a callable or a scalar.
:param item: The key to get
:param alt: Callable or scalar. The value to return. Will be stored in the cache on computation.
:return: The value in the cache.
"""
try:
return self[item]
except KeyError:
value = alt() if callable(alt) else alt
self[item] = value
return value
except redis.TimeoutError:
_logger.warning("Cannot reach cache. Using alternative")
if callable(alt):
return alt()
else:
return alt
def __getitem__(self, item):
timed_out = False
if self._region_cache.is_disconnected():
raise KeyError(item)
# pylint: disable=W0212
if self._region_cache._raise_on_timeout: # raise the redis timeout error instead of a key error
raw_value = self._region_cache.read_conn.hget(self.name, item)
else:
try:
raw_value = self._region_cache.read_conn.hget(self.name, item)
except redis.TimeoutError:
raw_value = None
timed_out = True
if timed_out:
# pylint: disable=W0212
if self._region_cache._reconnect_on_timeout:
self._region_cache.invalidate_connections()
raise KeyError(item)
if raw_value is not None:
return self._serializer.loads(raw_value)
else:
raise KeyError(item)
def __setitem__(self, key, value):
raw_value = self._serializer.dumps(value)
if not self._region_cache.is_disconnected():
should_reset_timeout = (not self._pipe and
self._timeout and
(self._update_resets_timeout or not len(self)))
if self._pipe:
self._pipe.hset(self.name, key, raw_value)
else:
self._region_cache.conn.hset(self.name, key, raw_value)
if should_reset_timeout:
self._region_cache.conn.expire(self.name, self._timeout)
def __delitem__(self, key):
if not self._region_cache.is_disconnected():
should_reset_timeout = (not self._pipe and
self._timeout and
(self._update_resets_timeout or (len(self) == 1 and key in self)))
if self._pipe:
self._pipe.hdel(self.name, key)
else:
self._region_cache.conn.hdel(self.name, key)
if should_reset_timeout:
self._region_cache.conn.expire(self.name, self._timeout)
else:
raise redis.TimeoutError(f"Cannot delete item {key} from {self.name} because we are disconnected.")
def __iter__(self):
for k in self._region_cache.read_conn.hkeys(self.name):
if not k.decode('utf-8').startswith('__'):
yield k
def __len__(self):
return self._region_cache.conn.hlen(self.name)
def __enter__(self):
if not self._pipe:
self._pipe = self._region_cache.conn.pipeline()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
# if we started with nothing in the cache, reset it
should_reset_timeout = (self._timeout and (self._update_resets_timeout or len(self) == 0))
self._pipe.execute()
if should_reset_timeout:
self._region_cache.conn.expire(self.name, self._timeout)
retval = True
else:
self._pipe.reset()
retval = False
self._pipe = None
return retval
def __eq__(self, other):
return other.name == self.name
def children(self):
return (self._region_cache.region(name.decode('utf-8'))
for name in self._region_cache.read_conn.smembers(self._children_key))
def add_child(self, child):
self._region_cache.conn.sadd(self._children_key, child.name)
def reset_timeout(self):
self._region_cache.conn.expire(self.name, self._timeout)
| 2.34375 | 2 |
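A hedged usage sketch for the `Region` API above. Regions are meant to be obtained from the owning `RegionCache` factory rather than constructed directly; the factory construction shown here is an assumption based on the surrounding package, so the whole block is kept as an illustrative, commented-out outline.

```python
# regions = RegionCache(host="localhost", port=6379)        # assumed factory
# users = regions.region("app.users", timeout=300)          # dotted names nest regions

# users["42"] = {"name": "Ada"}                              # pickled into a redis hash
# profile = users.get_or_compute("42", lambda: load_from_db("42"))

# @users.cached                                              # memoise on (args, kwargs)
# def expensive_lookup(user_id):
#     ...

# users.invalidate_on("user-updated")                        # blinker signal name
# users.invalidate()                                         # drops region + child regions
```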
tests/test_failure.py | kprzybyla/resultful | 0 | 12797862 | <gh_stars>0
import pytest
from hypothesis import (
given,
strategies as st,
)
from resultful import (
unsafe,
success,
failure,
unwrap_failure,
Result,
NoResult,
)
from .conftest import (
st_exceptions,
unreachable,
)
@given(error=st_exceptions())
def test_special_methods(error: BaseException) -> None:
result = failure(error)
assert bool(result) is False
assert repr(result) == f"resultful.Failure({error!r})"
@given(error=st_exceptions())
def test_unsafe(error: BaseException) -> None:
with pytest.raises(BaseException) as exception:
unsafe(failure(error))
assert exception.value is error
@given(error=st_exceptions())
def test_equality(error: BaseException) -> None:
assert failure(error) == failure(error)
@given(error=st_exceptions())
def test_inequality_with_success(error: BaseException) -> None:
assert failure(error) != success(error)
@given(error=st_exceptions())
def test_unwrap_failure_from_failure(error: BaseException) -> None:
result = unwrap_failure(failure(error))
assert result is error
@given(value=st.integers())
def test_unwrap_failure_from_success(value: int) -> None:
result = unwrap_failure(success(value))
assert result is NoResult
@given(error=st_exceptions())
def test_error(error: BaseException) -> None:
result = failure(error)
assert not result
assert result.error is error
@given(error=st_exceptions())
def test_error_wrapped_in_failure(error: BaseException) -> None:
result = failure(failure(error))
assert not result
assert result.error is error
@given(value=st.integers())
def test_error_wrapped_in_success(value: int) -> None:
result = failure(success(value))
assert result is NoResult
@given(error=st_exceptions())
def test_result_if_condition(error: BaseException) -> None:
def compute() -> Result[int, BaseException]:
return failure(error)
result = compute()
if not result:
assert result.error is error
else:
unreachable()
if result.is_failure:
assert result.error is error
else:
unreachable()
if result.is_success:
unreachable()
else:
assert result.error is error
@given(error=st_exceptions())
def test_result_if_condition_walrus_operator(error: BaseException) -> None:
def compute() -> Result[int, BaseException]:
return failure(error)
if not (result := compute()):
assert result.error is error
else:
unreachable()
if (result := compute()).is_failure:
assert result.error is error
else:
unreachable()
if (result := compute()).is_success:
unreachable()
else:
assert result.error is error
| 2.3125 | 2 |
data.py | hashmymind/ML-Final | 0 | 12797863 | <filename>data.py
import pytreebank
import pickle
import numpy as np
dataset = pytreebank.load_sst()
def get_sst(cate='5'):
train_X = []
train_y = []
for e in dataset['train']:
label, sentence = e.to_labeled_lines()[0]
if cate == '2' and label == 2:
continue
if cate == '2':
label = 1 if label >2 else 0
train_X.append(sentence)
train_y.append(label)
test_X = []
test_y = []
for e in dataset['test']:
label, sentence = e.to_labeled_lines()[0]
if cate == '2' and label == 2:
continue
if cate == '2':
label = 1 if label >2 else 0
test_X.append(sentence)
test_y.append(label)
return (train_X,train_y), (test_X, test_y)
if __name__ == '__main__':
pass | 2.8125 | 3 |
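A hedged usage sketch: loading the binary (2-class) split and inspecting one example; it assumes `pytreebank` downloads SST on first use.

```python
(train_X, train_y), (test_X, test_y) = get_sst(cate='2')
print(len(train_X), len(test_X))        # sentence counts per split
print(train_y[0], train_X[0][:60])      # 0/1 label and the start of the sentence
```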
config.py | Pandinosaurus/pnn.pytorch | 1 | 12797864 | # config.py
import os
import datetime
import argparse
result_path = "results/"
result_path = os.path.join(result_path, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S/'))
parser = argparse.ArgumentParser(description='Your project title goes here')
# ======================== Data Setings ============================================
parser.add_argument('--dataset-test', type=str, default='CIFAR10', metavar='', help='name of training dataset')
parser.add_argument('--dataset-train', type=str, default='CIFAR10', metavar='', help='name of training dataset')
parser.add_argument('--split_test', type=float, default=None, metavar='', help='percentage of test dataset to split')
parser.add_argument('--split_train', type=float, default=None, metavar='', help='percentage of train dataset to split')
parser.add_argument('--dataroot', type=str, default='../../data', metavar='', help='path to the data')
parser.add_argument('--save', type=str, default=result_path +'Save', metavar='', help='save the trained models here')
parser.add_argument('--logs', type=str, default=result_path +'Logs', metavar='', help='save the training log files here')
parser.add_argument('--resume', type=str, default=None, metavar='', help='full path of models to resume training')
parser.add_argument('--nclasses', type=int, default=10, metavar='', help='number of classes for classification')
parser.add_argument('--input-filename-test', type=str, default=None, metavar='', help='input test filename for filelist and folderlist')
parser.add_argument('--label-filename-test', type=str, default=None, metavar='', help='label test filename for filelist and folderlist')
parser.add_argument('--input-filename-train', type=str, default=None, metavar='', help='input train filename for filelist and folderlist')
parser.add_argument('--label-filename-train', type=str, default=None, metavar='', help='label train filename for filelist and folderlist')
parser.add_argument('--loader-input', type=str, default=None, metavar='', help='input loader')
parser.add_argument('--loader-label', type=str, default=None, metavar='', help='label loader')
# ======================== Network Model Setings ===================================
parser.add_argument('--nblocks', type=int, default=10, metavar='', help='number of blocks in each layer')
parser.add_argument('--nlayers', type=int, default=6, metavar='', help='number of layers')
parser.add_argument('--nchannels', type=int, default=3, metavar='', help='number of input channels')
parser.add_argument('--nfilters', type=int, default=64, metavar='', help='number of filters in each layer')
parser.add_argument('--avgpool', type=int, default=1, metavar='', help='set to 7 for imagenet and 1 for cifar10')
parser.add_argument('--level', type=float, default=0.1, metavar='', help='noise level for uniform noise')
parser.add_argument('--resolution-high', type=int, default=32, metavar='', help='image resolution height')
parser.add_argument('--resolution-wide', type=int, default=32, metavar='', help='image resolution width')
parser.add_argument('--ndim', type=int, default=None, metavar='', help='number of feature dimensions')
parser.add_argument('--nunits', type=int, default=None, metavar='', help='number of units in hidden layers')
parser.add_argument('--dropout', type=float, default=None, metavar='', help='dropout parameter')
parser.add_argument('--net-type', type=str, default='noiseresnet18', metavar='', help='type of network')
parser.add_argument('--length-scale', type=float, default=None, metavar='', help='length scale')
parser.add_argument('--tau', type=float, default=None, metavar='', help='Tau')
# ======================== Training Settings =======================================
parser.add_argument('--cuda', type=bool, default=True, metavar='', help='run on gpu')
parser.add_argument('--ngpu', type=int, default=1, metavar='', help='number of gpus to use')
parser.add_argument('--batch-size', type=int, default=64, metavar='', help='batch size for training')
parser.add_argument('--nepochs', type=int, default=500, metavar='', help='number of epochs to train')
parser.add_argument('--niters', type=int, default=None, metavar='', help='number of iterations at test time')
parser.add_argument('--epoch-number', type=int, default=None, metavar='', help='epoch number')
parser.add_argument('--nthreads', type=int, default=20, metavar='', help='number of threads for data loading')
parser.add_argument('--manual-seed', type=int, default=1, metavar='', help='manual seed for randomness')
parser.add_argument('--port', type=int, default=8097, metavar='', help='port for visualizing training at http://localhost:port')
# ======================== Hyperparameter Setings ==================================
parser.add_argument('--optim-method', type=str, default='Adam', metavar='', help='the optimization routine ')
parser.add_argument('--learning-rate', type=float, default=1e-3, metavar='', help='learning rate')
parser.add_argument('--learning-rate-decay', type=float, default=None, metavar='', help='learning rate decay')
parser.add_argument('--momentum', type=float, default=0.9, metavar='', help='momentum')
parser.add_argument('--weight-decay', type=float, default=1e-4, metavar='', help='weight decay')
parser.add_argument('--adam-beta1', type=float, default=0.9, metavar='', help='Beta 1 parameter for Adam')
parser.add_argument('--adam-beta2', type=float, default=0.999, metavar='', help='Beta 2 parameter for Adam')
args = parser.parse_args() | 2.421875 | 2 |
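A hedged sketch of consuming the parsed options from another module (the import path assumes this file is importable as `config`); the command-line override is just an example invocation.

```python
from config import args

print(args.dataset_train, args.net_type, args.batch_size, args.learning_rate)
# override defaults on the command line, e.g.:
#   python main.py --net-type resnet18 --batch-size 128 --nepochs 200
```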
researchutils/chainer/initializers/__init__.py | keio-ytlab/researchutils | 1 | 12797865 | from researchutils.chainer.initializers.normal_with_loc import NormalWithLoc
| 1.195313 | 1 |
exercicios/exercicio087.py | Helton-Rubens/Python-3 | 0 | 12797866 | matriz = [[], [], []]
par = 0
somatre = 0
for c in range(0, 3):
for i in range(0, 3):
        matriz[c].append(int(input(f'Enter a value for position [{c}/{i}]: ')))
if matriz[c][i] % 2 == 0:
par += matriz[c][i]
somatre = somatre + matriz[c][2]
print('=+'*28)
for c in range(0, 3):
print('|', end='')
for i in range(0, 3):
print(f'{matriz[c][i]:^5}', end='|')
print()
print('-'*30)
print(f'The sum of the even values is: {par}')
print(f'The sum of the values in the third column is: {somatre}')
print(f'The largest value in the second row is: {max(matriz[1])}')
| 3.65625 | 4 |
study/study8.py | tanyong-cq/pythonlearning | 0 | 12797867 | <reponame>tanyong-cq/pythonlearning
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
dict
'''
d1 = {'a':1, 'b':2, 'c':3}
print(d1)
print(d1.keys())
print(d1.values())
print(str(d1))
print(len(d1))
print(d1['a'])
d1['a'] = 10
print(d1['a'])
del d1['a']
print(d1)
d1.clear()
print(d1)
print(d1.get('a'))
| 3.84375 | 4 |
openprocurement/tender/cfaselectionua/models/submodels/organizationAndPocuringEntity.py | tarasvaskiv/openprocurement.tender.cfaselectionua | 0 | 12797868 | from openprocurement.api.models import Organization as BaseOrganization
from openprocurement.tender.cfaselectionua.models.submodels.contactpoint import ContactPoint
from schematics.types import StringType
from schematics.types.compound import ModelType
from openprocurement.api.roles import RolesFromCsv
from openprocurement.api.models import ListType
class Organization(BaseOrganization):
"""An organization."""
contactPoint = ModelType(ContactPoint)
class ProcuringEntity(Organization):
"""An organization."""
class Options:
roles = RolesFromCsv('ProcuringEntity.csv', relative_to=__file__)
kind = StringType(choices=['general', 'special', 'defense', 'other'])
additionalContactPoints = ListType(
ModelType(ContactPoint, required=True),
required=False
)
| 2.171875 | 2 |
src/contrail/vncNetwork.py | madhukar32/contrail-hybrid-cloud | 0 | 12797869 | <gh_stars>0
from vnc_api import vnc_api
from contrail.util import (readTenant, readNetwork)
from hybridLogger import hybridLogger
from exception import *
import uuid
class vncNetwork(hybridLogger):
def __init__(self, vnc, domain, tenantName ,logLevel='INFO'):
self.log = super(vncNetwork, self).log(level=logLevel, name=vncNetwork.__name__)
self.vnc = vnc
self.tenantName = tenantName
self.domain = domain
self.tenantObj = readTenant(self.vnc, domain=self.domain, tenantName=self.tenantName)
def createNetwork(self, **kwargs):
_requiredArgs = ['cidr', 'name']
try:
cidr = kwargs['cidr']
networkName = kwargs['name']
prefix, prefixLen = cidr.split('/')
except KeyError:
raise ArguementError(_requiredArgs, vncNetwork.createNetwork.__name__)
except Exception as e:
self.log.error("Function: createNetwork Message: cidr is not in correct format : {0}".format(e))
return False
prefixLen = int(prefixLen)
try:
allocationPool = kwargs['allocationPool']
        except (KeyError, AttributeError):
allocationPool = False
try:
routeTarget = kwargs['routeTarget']
        except (KeyError, AttributeError):
routeTarget = False
allocTypeList = []
if allocationPool:
if type(allocationPool) == list:
allocationPoolList = allocationPool
else:
allocationPoolList = [allocationPool]
for allocationPool in allocationPoolList:
try:
allocationPoolStart, allocationPoolStop = allocationPool.split('-')
allocType = vnc_api.AllocationPoolType()
allocType.set_start(allocationPoolStart)
allocType.set_end(allocationPoolStop)
allocTypeList.append(allocType)
except Exception as e:
self.log.error("Function: createNetwork Message: allocationPool error : {0}".format(e))
return False
try:
fipPoolName = kwargs['fipPoolName']
        except (KeyError, AttributeError):
fipPoolName = False
if fipPoolName:
routerExternal = True
else:
routerExternal = False
try:
networkObj = vnc_api.VirtualNetwork(name=networkName, parent_obj=self.tenantObj, router_external=routerExternal)
networkExists = self._checkIfNetworkExists(networkObj, self.tenantObj)
if networkExists:
self.log.warn("Network: {0} already exists".format(networkName))
return networkObj
subnet = vnc_api.SubnetType(prefix, prefixLen)
if not allocTypeList:
ipamSubnet = vnc_api.IpamSubnetType(subnet = subnet)
else:
ipamSubnet = vnc_api.IpamSubnetType(subnet = subnet, allocation_pools=allocTypeList)
networkObj.add_network_ipam(vnc_api.NetworkIpam(),vnc_api.VnSubnetsType([ipamSubnet]))
newNetworkId = self.vnc.virtual_network_create(networkObj)
self.log.info("Virtual Network: {0} created ".format(networkName))
except Exception as e:
self.log.error("Function: createNetwork Message: Error While Creating network : {0}".format(e))
return False
if routeTarget:
try:
updateNetwork = self._addRouteTarget(networkObj, routeTarget)
except Exception as e:
self.log.error("Function: createNetwork Message: Error While adding route target to the network: {0}".format(e))
return False
if fipPoolName:
fipObj = self.returnFipObj(fipPoolName, networkObj)
else:
fipObj = None
return networkObj, fipObj
def returnFipObj(self, fipPoolName, networkObj):
fipPoolId = str(uuid.uuid4())
fipPool = vnc_api.FloatingIpPool(name = fipPoolName, parent_obj = networkObj)
fipPool.uuid = fipPoolId
self.vnc.floating_ip_pool_create(fipPool)
self.tenantObj.add_floating_ip_pool(fipPool)
self.vnc.project_update(self.tenantObj)
return fipPool
def _checkIfNetworkExists(self, networkObj, tenantObj):
newFqName = networkObj.get_fq_name()
vnList = self.vnc.virtual_networks_list(parent_id=tenantObj.uuid)
if not vnList:
return False
else:
for elem in vnList['virtual-networks']:
if(elem['fq_name'] == newFqName):
return True
else:
continue
return False
def _addRouteTarget(self, networkObj, routeTarget):
try:
routeTargets = vnc_api.RouteTargetList(['target:' + routeTarget])
networkObj.set_route_target_list(routeTargets)
return self.vnc.virtual_network_update(networkObj)
except Exception as e:
raise ContrailError(vncApi='vnc_api.RouteTargetList', error=e)
| 2.296875 | 2 |
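A hedged sketch of driving `vncNetwork`: the API server address, credentials, domain, and tenant names are assumptions, as are the network parameters.

```python
from vnc_api import vnc_api

vnc = vnc_api.VncApi(username="admin", password="secret",
                     tenant_name="admin", api_server_host="10.0.0.10")

net_mgr = vncNetwork(vnc, domain="default-domain", tenantName="hybrid")

# creates the virtual network, attaches a route target, and returns a FIP pool
network_obj, fip_obj = net_mgr.createNetwork(
    name="blue-net", cidr="10.10.10.0/24",
    routeTarget="64512:100", fipPoolName="public-pool")
```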
main.py | j4g3/terminal-python | 0 | 12797870 | import os
import glob
files = open('dados.dll')
data = files.read()
files.close()
#desktop of user
user_info = os.path.expanduser('~')
location_default = os.path.expanduser('~\\Desktop')
location = os.path.expanduser('~\\Desktop')
desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}')
os.chdir(location)
help_git = '''
These are common Git commands used in various situations:
start a working area (see also: git help tutorial)
clone Clone a repository into a new directory
init Create an empty Git repository or reinitialize an existing one
work on the current change (see also: git help everyday)
add Add file contents to the index
mv Move or rename a file, a directory, or a symlink
reset Reset current HEAD to the specified state
rm Remove files from the working tree and from the index
examine the history and state (see also: git help revisions)
bisect Use binary search to find the commit that introduced a bug
grep Print lines matching a pattern
log Show commit logs
show Show various types of objects
status Show the working tree status
grow, mark and tweak your common history
branch List, create, or delete branches
checkout Switch branches or restore working tree files
commit Record changes to the repository
diff Show changes between commits, commit and working tree, etc
merge Join two or more development histories together
rebase Reapply commits on top of another base tip
tag Create, list, delete or verify a tag object signed with GPG
collaborate (see also: git help workflows)
fetch Download objects and refs from another repository
pull Fetch from and integrate with another repository or a local branch
push Update remote refs along with associated objects
'git help -a' and 'git help -g' list available subcommands and some
concept guides. See 'git help <command>' or 'git help <concept>'
to read about a specific subcommand or concept.
'''
command_list = '''
List of Commands
wget missing URL
git help -g' list available subcommands
cat list content in file
cd browse directories
ls listing files
clear clean terminal
'''
while True:
command = input(f'{desktop}:# ')
if command == 'ls':
#listing files
os.chdir(location)
print(location)
location = os.getcwd()
desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}')
for file in glob.glob("*"):
print(file)
    elif command.startswith('cd'):
#browsing the files
location = str(command).replace("cd", "").replace(" ","")
if(command.count("..")):
os.chdir('../')
location = os.getcwd()
desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}')
else:
command = command.replace("cd ", "")
os.chdir(command)
location = os.getcwd()
desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}')
elif command == 'clear':
#clean in terminal
os.system('cls' if os.name == 'nt' else 'clear')
elif command == 'refresh':
#restart of terminal
os.system('cls' if os.name == 'nt' else 'clear')
os.system('python refresh.py')
exit()
elif(command.count('cat ')):
command = command.replace("cat ", "")
cat = open(f'{command}', 'r')
content = cat.readlines()
#listing content of file
print('\n')
for line in content:
print(line)
cat.close()
print('\n')
    elif command.startswith('git'):
os.system(command)
elif command == '':
pass
elif command == 'help' or command == 'commands':
print(command_list)
elif(command.count('wget ')):
print(os.getcwd())
elif(command == 'pwd'):
print(location)
| 2.5 | 2 |
src/pactor/nodes_commands.py | kstrempel/pactor | 1 | 12797871 | <reponame>kstrempel/pactor
from pactor.vm import VM
from pactor.node_parent import AstNode
from pactor.node_stack_helper import pop_value, pop
class WhenNode(AstNode):
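    # Pops a quotation and a boolean from the stack and runs the quotation only when the boolean is truthy.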
def run(self, vm: VM):
quote = pop(vm)
is_true = pop_value(vm)
if is_true:
vm.run_ast(quote.ast)
def __repr__(self):
return 'when'
class IfNode(AstNode):
def run(self, vm: VM):
quote_false = pop(vm)
quote_true = pop(vm)
is_true = pop_value(vm)
if is_true:
vm.run_ast(quote_true.ast)
else:
vm.run_ast(quote_false.ast)
def __repr__(self):
return 'if'
class TimesNode(AstNode):
def run(self, vm: VM):
quote = pop(vm)
count = pop_value(vm)
for _ in range(0, count):
vm.run_ast(quote.ast)
def __repr__(self):
return 'times'
| 2.4375 | 2 |
GameObject.py | P3D-Space-Tech-Demo/Section2SpaceflightDocking | 0 | 12797872 | <gh_stars>0
from panda3d.core import Vec4, Vec3, Vec2, Plane, Point3, BitMask32
from direct.actor.Actor import Actor
from panda3d.core import CollisionSphere, CollisionCapsule, CollisionNode, CollisionRay, CollisionSegment, CollisionHandlerQueue
from direct.gui.OnscreenText import OnscreenText
from direct.gui.OnscreenImage import OnscreenImage
from panda3d.core import TextNode
from panda3d.core import AudioSound
from panda3d.core import PointLight
from panda3d.core import NodePath, PandaNode
from panda3d.core import Quat
from Section2SpaceflightDocking.CommonValues import *
from Section2SpaceflightDocking.Common import Common
import math, random
FRICTION = 10.0
class GameObject():
def __init__(self, pos, modelName, modelAnims, maxHealth, maxSpeed, colliderName, weaponIntoMask, size):
self.root = Common.framework.showBase.render.attachNewNode(PandaNode("obj"))
self.colliderName = colliderName
self.modelName = modelName
if modelName is None:
self.actor = NodePath(PandaNode("actor"))
elif modelAnims is None:
self.actor = Common.framework.showBase.loader.loadModel(modelName)
else:
self.actor = Actor(modelName, modelAnims)
self.actor.reparentTo(self.root)
if pos is not None:
self.root.setPos(pos)
self.maxHealth = maxHealth
self.health = maxHealth
self.healthRechargeRate = 2.0
self.healthRechargeSuppressionTimer = 0
self.healthRechargeSuppressionDuration = 0.5
self.maxSpeed = maxSpeed
self.terminalVelocity = 50
self.flinchCounter = 0
self.velocity = Vec3(0, 0, 0)
self.acceleration = 300.0
self.inControl = True
self.outOfControlTimer = 0
self.walking = False
self.size = size
if colliderName is not None:
colliderNode = CollisionNode(colliderName)
colliderNode.addSolid(CollisionSphere(0, 0, 0, size))
self.colliderNP = self.root.attachNewNode(colliderNode)
self.colliderNP.setPythonTag(TAG_OWNER, self)
colliderNode.setFromCollideMask(0)
colliderNode.setIntoCollideMask(weaponIntoMask)
#self.colliderNP.show()
else:
self.colliderNP = self.root.attachNewNode(PandaNode("stand-in"))
self.deathSound = None
def physicalImpact(self, surfaceNormal):
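        # Reflect the velocity about the collision surface normal (v' = v - 2 * proj_n(v)),
        # giving a simple elastic bounce.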
proj = self.velocity.project(surfaceNormal)
self.velocity -= proj*2
def update(self, dt, fluid = False):
speed = self.velocity.length()
if self.inControl:
if self.walking and speed > self.maxSpeed:
self.velocity.normalize()
self.velocity *= self.maxSpeed
speed = self.maxSpeed
else:
if speed > self.terminalVelocity:
self.velocity.normalize()
self.velocity *= self.terminalVelocity
speed = self.terminalVelocity
if Common.useFriction:
if not self.walking:
perc = speed/self.maxSpeed
frictionVal = FRICTION*dt/(max(1, perc*perc))
if not self.inControl:
frictionVal *= 0.8
if frictionVal > speed:
self.velocity.set(0, 0, 0)
else:
frictionVec = -self.velocity
frictionVec.normalize()
frictionVec *= frictionVal
self.velocity += frictionVec
if not self.inControl:
if speed < 0.1:
self.inControl = True
else:
self.outOfControlTimer -= dt
if self.outOfControlTimer <= 0:
self.inControl = True
if fluid:
self.root.setFluidPos(self.root.getPos() + self.velocity*dt)
else:
self.root.setPos(self.root.getPos() + self.velocity*dt)
if self.healthRechargeSuppressionTimer > 0:
self.healthRechargeSuppressionTimer -= dt
else:
self.alterHealth(self.healthRechargeRate*dt, None, 0, 0)
def alterHealth(self, dHealth, incomingImpulse, knockback, flinchValue, overcharge = False):
previousHealth = self.health
self.health += dHealth
if incomingImpulse is not None and knockback > 0.1:
self.velocity += incomingImpulse*knockback
self.inControl = False
self.outOfControlTimer = knockback*0.1
self.walking = False
if dHealth < 0:
self.healthRechargeSuppressionTimer = self.healthRechargeSuppressionDuration
if self.health < 0:
self.health = 0
if flinchValue > 0:
self.flinchCounter -= flinchValue
if dHealth > 0 and self.health > self.maxHealth and not overcharge:
self.health = self.maxHealth
if previousHealth > 0 and self.health <= 0 and self.deathSound is not None:
self.deathSound.play()
def turnTowards(self, target, turnRate, dt):
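        # Steer towards the target: build the rotation axis (forward x direction-to-target),
        # clamp the signed angle to turnRate*dt, and apply it as an axis-angle quaternion.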
if isinstance(target, NodePath):
target = target.getPos(Common.framework.showBase.render)
elif isinstance(target, GameObject):
target = target.root.getPos(Common.framework.showBase.render)
diff = target - self.root.getPos(Common.framework.showBase.render)
selfQuat = self.root.getQuat(Common.framework.showBase.render)
selfForward = selfQuat.getForward()
axis = selfForward.cross(diff.normalized())
axis.normalize()
if axis.lengthSquared() < 0.1:
return
angle = selfForward.signedAngleDeg(diff.normalized(), axis)
quat = Quat()
angle = math.copysign(min(abs(angle), turnRate*dt), angle)
quat.setFromAxisAngle(angle, axis)
newQuat = selfQuat*quat
self.root.setQuat(Common.framework.showBase.render, newQuat)
def getAngleWithVec(self, vec):
forward = self.actor.getQuat(Common.framework.showBase.render).getForward()
forward2D = Vec2(forward.x, forward.y)
vec = Vec2(vec.x, vec.y)
vec.normalize()
angle = forward2D.signedAngleDeg(vec)
return angle
def cleanup(self):
if self.colliderNP is not None and not self.colliderNP.isEmpty():
self.colliderNP.clearPythonTag(TAG_OWNER)
self.colliderNP.removeNode()
self.colliderNP = None
if self.actor is not None:
if isinstance(self.actor, Actor):
self.actor.cleanup()
self.actor.removeNode()
self.actor = None
if self.root is not None:
self.root.removeNode()
self.root = None
class ArmedObject():
def __init__(self):
self.weaponSets = []
self.weaponNPs = {}
self.lockedTarget = None
def weaponFired(self, weapon):
pass
def weaponReset(self, weapon):
pass
def addWeapon(self, weapon, setIndex, sourceNP):
while len(self.weaponSets) <= setIndex:
self.weaponSets.append([])
self.weaponSets[setIndex].append(weapon)
self.weaponNPs[weapon] = sourceNP
def startFiringSet(self, weaponSet):
if weaponSet < len(self.weaponSets):
for weapon in self.weaponSets[weaponSet]:
if not weapon.active:
weapon.triggerPressed(self)
def ceaseFiringSet(self, weaponSet):
if weaponSet < len(self.weaponSets):
for weapon in self.weaponSets[weaponSet]:
if weapon.active:
weapon.triggerReleased(self)
def update(self, dt):
for weaponSet in self.weaponSets:
for weapon in weaponSet:
weapon.update(dt, self)
def attackPerformed(self, weapon):
pass
def cleanup(self):
for weaponSet in self.weaponSets:
for weapon in weaponSet:
weapon.cleanup()
self.weaponSets = []
self.weaponNPs = {}
class Blast():
def __init__(self, model, minSize, maxSize, duration):
self.model = model
self.model.setTwoSided(True)
self.model.setTransparency(True)
self.model.setBillboardPointEye()
self.minSize = minSize
self.maxSize = maxSize
self.sizeRange = self.maxSize - self.minSize
self.duration = duration
self.timer = duration
def update(self, dt):
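        # Scale grows linearly from minSize to maxSize over the duration, while the alpha
        # follows sin(pi * progress) so the blast fades in and then back out.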
self.timer -= dt
if self.timer < 0:
self.timer = 0
perc = 1.0 - (self.timer / self.duration)
self.model.setScale(self.minSize + self.sizeRange*perc)
self.model.setAlphaScale(math.sin(perc*3.142))
def cleanup(self):
if self.model is not None:
self.model.removeNode()
self.model = None | 2.03125 | 2 |
synapse/dispatcher.py | mrmuxl/synapse-agent | 1 | 12797873 | import sys
import time
import signal
import socket
from Queue import Queue
from synapse.scheduler import SynSched
from synapse.amqp import AmqpSynapse, AmqpAdmin, AmqpError
from synapse.config import config
from synapse.controller import Controller
from synapse.logger import logger
from synapse.synapse_exceptions import ResourceException
@logger
class Dispatcher(object):
"""This module dispatches commands incoming from the command line to
specific transports. It is also responsible for starting threads and
catching signals like SIGINT and SIGTERM.
"""
def __init__(self, transport):
self.transport = transport
self.force_close = False
# Handle signals
#signal.signal(signal.SIGINT, self.stop)
signal.signal(signal.SIGTERM, self.stop)
# Threads instances variables
self.controller = None
self.sched = None
self.resourcefile = None
# These queues will be shared between the controller and the
# transport and are used for incoming tasks and responses
self.publish_queue = Queue()
self.tasks_queue = Queue()
def stop(self, signum, frame):
"""This method handles SIGINT and SIGTERM signals. """
self.logger.debug("Stopping due to signal #%d" % signum)
self.stop_synapse()
def stop_synapse(self):
"""Closes all threads and exits properly.
"""
if self.resourcefile:
self.resourcefile.done = True
# Close the controller and wait for it to quit
if self.controller:
if self.controller.isAlive():
self.controller.close()
self.controller.join()
self.logger.debug("Controller thread stopped")
# Shutdown the scheduler/monitor
if self.sched:
if self.sched.isAlive():
self.sched.shutdown()
self.sched.join()
self.logger.debug("Scheduler stopped")
self.force_close = True
self.logger.info("Successfully stopped.")
def dispatch(self):
"""This method actually dispatches to specific transport methods
according to command line parameters.
"""
self.logger.info('Starting on %s transport' %
self.transport.capitalize())
transports = {
'amqp': self.start_amqp,
'http': self.start_resourcefile,
'file': self.start_resourcefile,
}
try:
transports[self.transport]()
except (AttributeError, KeyError), err:
self.logger.error("Transport unknown. [%s]" % err)
self.stop_synapse()
sys.exit()
def start_amqp(self):
"""Starts all needed threads: scheduler, controller and AMQP transport
IOLOOP.
"""
retry_timeout = config.rabbitmq['retry_timeout']
try:
self.amqpadmin = AmqpAdmin(config.rabbitmq)
while not self.force_close:
try:
self.amqpadmin.connect()
break
except (socket.timeout, IOError) as err:
self.logger.error(err)
try:
self.logger.debug("Sleeping %d sec" % retry_timeout)
time.sleep(retry_timeout)
except KeyboardInterrupt:
self.stop_synapse()
raise SystemExit
except AmqpError as err:
break
except KeyboardInterrupt:
self.stop_synapse()
raise SystemExit
self.sched = SynSched()
self.controller = Controller(scheduler=self.sched,
tasks_queue=self.tasks_queue,
publish_queue=self.publish_queue)
# Start the controller
self.controller.start()
# Start the scheduler
self.sched.start()
self.amqpsynapse = AmqpSynapse(config.rabbitmq,
publish_queue=self.publish_queue,
tasks_queue=self.tasks_queue)
while not self.force_close:
try:
self.amqpsynapse.connect()
except (AmqpError, IOError) as err:
self.logger.error(err)
try:
self.logger.debug("Sleeping %d sec" % retry_timeout)
time.sleep(retry_timeout)
except KeyboardInterrupt:
self.stop_synapse()
except KeyboardInterrupt:
self.stop_synapse()
except SystemExit:
pass
except ResourceException as err:
self.logger.error(str(err))
def start_resourcefile(self):
"""This method handles the --uri file and --uri http commands.
"""
from synapse.resourcefile import ResourceFile
try:
self.resourcefile = ResourceFile(self.transport)
self.resourcefile.fetch()
except KeyboardInterrupt:
self.stop_synapse()
| 2.484375 | 2 |
explore_data.py | adamw00000/MAK-Datahub | 1 | 12797874 | # %%
import os
import sys
# os.chdir("../../..")
os.environ['DJANGO_SETTINGS_MODULE'] = 'MAKDataHub.settings'
import django
django.setup()
# %%
import math
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from MAKDataHub.services import Services
profile_service = Services.profile_service()
storage_service = Services.storage_service()
last_run = profile_service.get_last_profile_creation_run()
## New database
full_df: pd.DataFrame = pickle.load(last_run.unlock_data.open('rb'))
# full_df = full_df.loc[full_df.DeviceId != 3].reset_index(drop = True)
## Old database
# unlock_data_path = storage_service.download_file(last_run.unlock_data_uri)
# full_df: pd.DataFrame = pickle.load(open(unlock_data_path, 'rb'))
# full_df = full_df.loc[full_df.DeviceId != '1439cbc3ad71ac06'].reset_index(drop = True)
# %%
df = full_df.iloc[:, list(range(36)) + list(range(72, 108)) + list(range(108, 144)) + list(range(180, 216)) + [216]].reset_index(drop = True)
df = df.loc[(df.DeviceId != 4) & (df.DeviceId != 7), :].reset_index(drop = True)
X, y = df.iloc[:, 0:-1], df.iloc[:, -1]
#%%
full_df.shape
# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.mean]))
# %%
sns.boxplot(df.DeviceId, df.AccMgn_mean)
# %%
sns.boxplot(df.DeviceId, df.AccMgn_median)
# %%
sns.boxplot(df.DeviceId, df.GyrMgn_amax)
# %%
sns.pairplot(df.loc[df.DeviceId != 3, :], hue="DeviceId", vars=["AccMgn_mean", "AccMgn_median"], markers='.')
# %%
test = df.loc[df.DeviceId != '3', :]
sns.swarmplot(data = test, x="DeviceId", y="RotMgn_median")
# %%
test = full_df.loc[:, :]
sns.boxplot(data = test, x="DeviceId", y="GrvMgn_amax")
# %%
print('OneClassSVM')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.svm import OneClassSVM
estimator = OneClassSVM(random_state = 12369)
estimator.fit_predict(X_train)
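    # One-class convention: predict() returns +1 for inliers and -1 for outliers, so the rates
    # below are computed separately on device samples (tp/fn) and non-device samples (tn/fp).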
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_test) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('IsolationForest')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.ensemble import IsolationForest
estimator = IsolationForest(n_estimators = 10)
estimator.fit(X_train)
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('LOF')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = y[y == device_id]
X_device = X.loc[y == device_id, :]
X_non_device = X.loc[y != device_id, :]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
from sklearn.neighbors import LocalOutlierFactor
estimator = LocalOutlierFactor(n_neighbors = 10, novelty = True, contamination = 'auto')
estimator.fit(X_train)
tp = np.mean(estimator.predict(X_test) == 1)
fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == -1)
fp = np.mean(estimator.predict(X_non_device) == 1)
accuracy = (tp + tn) / (tp + tn + fn + fp)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * recall * precision / (recall + precision)
accuracies.append(accuracy if not np.isnan(accuracy) else 0)
precisions.append(precision if not np.isnan(precision) else 0)
recalls.append(recall if not np.isnan(recall) else 0)
fscores.append(fscore if not np.isnan(fscore) else 0)
print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
# print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('LinearSVC')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.svm import LinearSVC
estimator = LinearSVC(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('KNeighborsClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier()
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('GaussianNB')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.naive_bayes import GaussianNB
estimator = GaussianNB()
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier - global model')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
# %%
print('RandomForestClassifier - standardized')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_std, y_device, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RFECV')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from yellowbrick.model_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFECV(estimator, cv = 5, scoring='f1_weighted', step = 0.05)
selector.fit(X_train, y_train)
selector.show()
from sklearn.metrics import classification_report
print(classification_report(y_test, selector.predict(X_test)))
# %%
print('RandomForestClassifier + RFE20')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectFromModel')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = SelectFromModel(estimator, max_features = 20)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.estimator_.predict(X_test)))
report = classification_report(y_test, selector.estimator_.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + PCA')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.decomposition import PCA
pca = PCA(n_components=20).fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectKBest (f_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectKBest, f_classif
selector = SelectKBest(score_func = f_classif, k=20).fit(X_train, y_train)
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SelectKBest (mutual_info_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import SelectKBest, mutual_info_classif
selector = SelectKBest(score_func = mutual_info_classif, k=20).fit(X_train, y_train)
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, estimator.predict(X_test)))
report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RandomUnderSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.under_sampling import RandomUnderSampler
X_oversampled, y_oversampled = RandomUnderSampler().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + RandomOverSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.over_sampling import RandomOverSampler
X_oversampled, y_oversampled = RandomOverSampler().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTE')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.over_sampling import SMOTE
X_oversampled, y_oversampled = SMOTE().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTEENN')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTEENN
X_oversampled, y_oversampled = SMOTEENN().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('RandomForestClassifier + SMOTETomek')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('BalancedRandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from sklearn.feature_selection import RFE
from imblearn.ensemble import BalancedRandomForestClassifier
estimator = BalancedRandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
print('Hyperparameter tuning')
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369, \
n_estimators = 50,
min_samples_leaf = 1, \
min_samples_split = 2, \
bootstrap = False, \
max_features = 'sqrt', \
max_depth = 20)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
# from sklearn.model_selection import GridSearchCV
# param_grid = {
# 'estimator__n_estimators': [10, 50, 100, 200, 500],
# 'estimator__max_features': ['auto', 'sqrt', 'log2'],
# 'estimator__max_depth': [4, 5, 6, 7, 8],
# 'estimator__criterion': ['gini', 'entropy']
# }
from sklearn.model_selection import RandomizedSearchCV
param_grid = {
'estimator__n_estimators': [10, 20, 50, 100],
'estimator__max_features': ['auto', 'sqrt', 'log2'],
'estimator__max_depth': [int(x) for x in np.linspace(2, 20, num = 2)] + [None],
'estimator__min_samples_split': [2, 3, 4, 5],
'estimator__min_samples_leaf': [1, 2, 3],
'estimator__bootstrap': [True, False]
}
grid = RandomizedSearchCV(estimator = selector, \
param_distributions = param_grid, \
n_iter = 100, \
cv = 3, \
verbose = 2, \
random_state = 42, \
n_jobs = -1)
grid.fit(X_oversampled, y_oversampled)
print(grid.best_params_)
# %%
print('RandomForestClassifier + SMOTETomek + parameters')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
y_device = np.where(y == device_id, 1, 0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
from imblearn.combine import SMOTETomek
X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369, \
n_estimators = 50,
min_samples_leaf = 1, \
min_samples_split = 2, \
bootstrap = False, \
max_features = 'sqrt', \
max_depth = 20)
selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
selector.fit(X_oversampled, y_oversampled)
from sklearn.metrics import classification_report
print(f'Device {device_id}:')
print(classification_report(y_test, selector.predict(X_test)))
report = classification_report(y_test, selector.predict(X_test), output_dict=True)
accuracies.append(report['accuracy'])
precisions.append(report['1']['precision'])
recalls.append(report['1']['recall'])
fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')
# %%
| 1.9375 | 2 |
bytefall/_compat/tracing.py | NaleRaphael/bytefall | 1 | 12797875 | import sys
from pdb import Pdb, getsourcelines
from .utils import check_frame
from bytefall._modules import sys as py_sys
from bytefall._c_api import convert_to_builtin_frame
from bytefall.config import EnvConfig
__all__ = ['PdbWrapper']
class PdbWrapper(object):
@staticmethod
@check_frame
def set_trace(frame, *args, **kwargs):
return pdb_wrapper(frame)()
def pdb_wrapper(this_frame):
DEBUG_INTERNAL = EnvConfig().get('DEBUG_INTERNAL')
_pdb = Pdb() if DEBUG_INTERNAL else _Pdb()
def wrapper():
if DEBUG_INTERNAL:
_pdb.set_trace(sys._getframe(3))
else:
# Frame to be stepped in is not retrieved by `sys._getframe()`,
# so that we don't need to pass its `f_back` into `set_trace()`
_pdb.set_trace(this_frame)
return wrapper
class _Pdb(Pdb):
def do_longlist(self, arg):
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
# Here we need to convert `self.curframe` to builtin frame
# for `getsourcelines`, in which `inspect.findsource()`
# requires a builtin frame to work.
converted = convert_to_builtin_frame(self.curframe)
lines, lineno = getsourcelines(converted)
except OSError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe)
do_ll = do_longlist
def set_continue(self):
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# Here we need to replace the implementation of `sys.settrace()`
# and `sys._getframe()`.
py_sys.settrace(None)
# In the original implementation, here it calls
# `sys._getframe().f_back` to get the caller of this method.
# However, we cannot get caller `pyframe.Frame` that calling
# `py_sys._getframe()`, but it does not affect the result.
# Because the current running frame in vm is what we want here.
frame = py_sys._getframe()
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_trace(self, frame=None):
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
py_sys.settrace(self.trace_dispatch)
| 2.03125 | 2 |
func/model/status_processing.py | leonidasnascimento/sosi_func0007_company_finacial_report | 0 | 12797876 | class StatusProcessing():
success: bool
message: str
err_stack: str
def __init__(self, _success: bool, _message: str, _err_stack: str = ""):
self.success = _success
self.message = _message
self.err_stack = _err_stack
pass
pass | 2.390625 | 2 |
PathTracking/inverseKinematics/inv_kinematics.py | deeksha777/roboscience | 2 | 12797877 | import pylab as plt
import numpy as np
from math import *
N=100
t0 = 0.0
t1 = 2.0
t = np.linspace(t0,t1,N)
dt = (t1-t0)/N
one = np.ones((N))
xp = np.zeros((N))
yp = np.zeros((N))
th = np.zeros((N))
x = t*t
y = t
plt.figure()
plt.plot(x,y,'g-')
plt.legend(['Path'],loc='best')
plt.title('Quadratic Path')
plt.show()
doty=one
dotx=2*t
ddoty=0
ddotx=2*one
r = 1.0
L = 4.0
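# Differential-drive inverse kinematics along the path (x(t), y(t)):
#   v       = sqrt(dotx^2 + doty^2)            path speed
#   kappa   = (dotx*ddoty - doty*ddotx) / v^3  signed curvature
#   dotphi1 = (v/r) * ( kappa*L + 1)           right wheel angular speed
#   dotphi2 = (v/r) * (-kappa*L + 1)           left wheel angular speed
# with wheel radius r and half-axle length L.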
v = np.sqrt(dotx*dotx + doty*doty)
kappa = (dotx*ddoty - doty*ddotx)/(v*v*v)
dotphi1 = (v/r)*(kappa*L +1)
dotphi2 = (v/r)*(-kappa*L+1)
plt.plot(t,dotphi1,'b-', t,dotphi2,'g-')
plt.title('Wheel Speeds')
plt.legend(['Right', 'Left'],loc='best')
plt.show()
xp[0] = 0.0
yp[0] = 0.0
th[0] = 1.5707963267949
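# th[0] = pi/2: the robot starts facing +y, matching the path tangent of x = t^2, y = t at t = 0.
# The loop below Euler-integrates the differential-drive forward kinematics with step dt.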
for i in range(N-1):
xp[i+1] = xp[i] + (r*dt/2.0) * (dotphi1[i]+dotphi2[i]) * cos(th[i])
yp[i+1] = yp[i] + (r*dt/2.0)*(dotphi1[i]+dotphi2[i])* sin(th[i])
th[i+1] = th[i] + (r*dt/(2.0*L))*(dotphi1[i]-dotphi2[i])
plt.figure()
plt.plot(x,y,'g-', xp, yp, 'bx')
plt.legend(['Original Path', 'Robot Path'],loc='best')
plt.title('Path')
plt.show() | 2.875 | 3 |
docker/base/installation/source/mpi4py/gatherUpper.py | WCU-EDGE/TinySup | 0 | 12797878 | #!/usr/bin/env python
# gatherUpper.py
import numpy
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
LENGTH = 3
x = None
x_local = numpy.linspace(rank*LENGTH,(rank+1)*LENGTH, LENGTH)
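# Each rank builds LENGTH points spanning [rank*LENGTH, (rank+1)*LENGTH]; note that linspace
# includes both endpoints, so neighbouring ranks share a boundary value.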
print(x_local)
if rank == 0:
x = numpy.zeros(size*LENGTH)
print (x)
comm.Gather(x_local, x, root=0)
#you should notice that only the root process has a value for x that
#is not "None"
print ("process", rank, "x:", x)
print ("process", rank, "x_local:", x_local) | 2.34375 | 2 |
sort_anagrams.py | carlb15/Python | 2 | 12797879 | def sort_anagrams_1(strs):
"""
:type strs List[str]
:rtype List[List[str]]
"""
map = {}
for v in strs:
target = ''.join(sorted(v))
print(target)
if target not in map:
map[target] = []
map[target].append(v)
print('building map ', map[target])
result = []
for value in map.values():
print('anagrams ', value)
result += [sorted(value)]
return result
def sort_anagrams(myList):
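    # Groups words by the sum of their character ordinals. Note that this "hash" collides for
    # non-anagrams with the same character sum (e.g. "ad" vs "bc"), so unrelated words can land
    # in the same group; the sorted-string key used in sort_anagrams_1 above is collision-free.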
def convert_strs_to_hashes(myList):
d = dict()
for anagram in myList:
h = sum([ord(i) for i in anagram])
if h not in d:
d[h] = []
d[h].append(anagram)
return d
if not myList or len(myList) < 2:
return 0 if not myList else 1
d = convert_strs_to_hashes(myList)
outputList = []
for key, values in d.items():
outputList.extend(values)
return outputList
if __name__=="__main__":
anagrams = ["acme", "acre", "came", "care", "mace", "race"]
print("Before ", anagrams)
print("After ", sort_anagrams(anagrams))
anagrams = ["acme", "acre", "came", "care", "mace", "race"]
print("Before ", anagrams)
print("After ", sort_anagrams_1(anagrams))
| 4.0625 | 4 |
2_simulate_sdc.py | sztal/sda-model | 1 | 12797880 | """Run simulations for SDC model.
Parameters
----------
N_JOBS
Number of cores used for parallelization.
RANDOM_SEED
Seed for the random numbers generator.
SPACE
Types of social space.
Available values: 'uniform', 'lognormal', 'clusters_normal'.
N
Sizes of networks,
NDIM
Number of dimensions of simulated social spaces.
DATA_REP
Number of independent realizations of social spaces.
SDA_PARAMS
k
Expected average degree.
alpha
Homophily level.
directed
Directed/undirected networks.
p_rewire
Probability of random rewiring.
SDA_REP
Number of independent realizations of adjacency matrices.
SIM_PARAMS
degseq_type
Degree sequence type.
One of: 'poisson', 'negbinom', 'powerlaw'.
degseq_sort
Should degree sequence be sorted by expected node degrees.
"""
import os
import gc
import numpy as np
import pandas as pd
from sklearn.externals.joblib import Memory
import _
# Globals
ROOT = os.path.dirname(os.path.realpath(__file__))
HERE = ROOT
DATAPATH = os.path.join(HERE, 'raw-data')
# Persistence
MEMORY = Memory(location='.cache', verbose=1)
N_JOBS = 4
# Data generation params
RANDOM_SEED = 101
SPACE = ('uniform', 'lognormal', 'clusters_normal')
N = (1000, 2000, 4000, 8000)
NDIM = (1, 2, 4, 8, 16)
CENTERS = (4,)
DATA_REP = 2
# SDA params
SDA_PARAMS = {
'k': (30,),
'alpha': (2, 4, 8, np.inf),
'directed': (False,),
'p_rewire': (.01,)
}
SDA_REP = 3
SIM_PARAMS = {
'degseq_type': ('poisson', 'negbinom', 'powerlaw'),
'sort': (True, False)
}
@MEMORY.cache(ignore=['n_jobs'])
def simulate_cm(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs):
return _.simulate(space, dparams, drep, sdaparams, sdarep,
simparams, n_jobs, simfunc=_.run_sdac)
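# Memory.cache hashes the call arguments to decide whether to recompute; 'n_jobs' is listed in
# `ignore` so changing the core count still reuses previously cached simulation results.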
# Run simulations
if RANDOM_SEED is not None:
np.random.seed(RANDOM_SEED)
sim = lambda s: simulate_cm(
space=s,
dparams=(N, NDIM, CENTERS),
drep=DATA_REP,
sdaparams=SDA_PARAMS,
sdarep=SDA_REP,
simparams=SIM_PARAMS,
n_jobs=N_JOBS
)
df = None # main data frame
gdf = None # graph data frame
for s in SPACE:
sim(s)
gc.collect()
for s in SPACE:
print(f"\rloading and processing '{s}' space' ...", end="")
_df = sim(s)
_df.drop(columns=['A', 'labels'], inplace=True)
if df is None:
df = _df
else:
df = pd.concat((df, _df), ignore_index=True)
# Save data -------------------------------------------------------------------
# Standard data get saved as feather file, so it can be easily
# shared with R for data analysis and visualization.
# Adjacency matrices data is saved as a separate pickle file.
# It will be used for graph visualizations.
os.makedirs(DATAPATH, exist_ok=True)
# Save main data as a feather file
df.to_feather(os.path.join(DATAPATH, 'sda-data-cm.feather'))
# Save graph data as a pickle file
# joblib.dump(gdf, os.path.join(DATAPATH, 'sda-graphs-cm.pkl'))
| 2.5625 | 3 |
lib/piservices/fabops.py | creative-workflow/pi-setup | 1 | 12797881 | <reponame>creative-workflow/pi-setup<filename>lib/piservices/fabops.py
import os, color
from fabric import operations, api
from fabric.contrib import files
from policies import PiServicePolicies
class FabricTaskOperator:
def __init__(self, local_path, remote_path):
self.remote_path = remote_path
self.local_path = local_path
self.ops = operations
self.api = api
self.env = api.env
def run(self, command, *args, **kwargs):
try:
with color.for_run():
if self.is_local():
return self.ops.local('cd %s && %s' % (self.local_path, command), *args, **kwargs)
else:
return self.ops.run('cd %s && %s' % (self.remote_path, command), *args, combine_stderr=True, pty=False, **kwargs)
except Exception as e:
with color.for_error():
raise e
def local(self, command, *args, **kwargs):
try:
with color.for_run():
return self.ops.local('cd %s && %s' % (self.local_path, command), *args, **kwargs)
except Exception as e:
with color.for_error():
raise e
def sudo(self, command, *args, **kwargs):
return self.run('sudo %s' % command, *args, **kwargs)
def put(self, src, dest='', *args, **kwargs):
with color.for_put():
with self.api.settings(warn_only=True):
self.ops.put(src, self.normalize_path(dest), *args, **kwargs)
def get(self, src, dest='', *args, **kwargs):
with color.for_get():
with self.api.settings(warn_only=True):
self.ops.get(self.normalize_path(src), dest, *args, **kwargs)
def cd(self, path="~/"):
return self.run(path)
def file_exists(self, path):
if not path: return False
method = files.exists
if PiServicePolicies.is_local():
method = os.path.isfile
return method(self.normalize_path(path))
def cp(self, i, o):
self.sudo('cp %s %s' % (self.normalize_path(i), self.normalize_path(o)))
def normalize_path(self, path):
if path.startswith('/'): return path
return self.remote_path+'/'+path
def zip_files_and_copy(self, file_list, target):
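        # Tars the given files locally (paths made relative to local_path), copies the archive to
        # the remote host and extracts it into `target`. Note: this appears to rely on a `name`
        # attribute supplied by the consuming service class; it is not set in __init__ above.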
if not file_list or len(file_list) < 1: return
#make path relative, so they will be extracted correct on the remote end
file_list = [x.replace(self.local_path+'/', '') for x in file_list]
tmp_file_list = '/tmp/pisetup.%s.transfer.list' % self.name
tmp_archive = '/tmp/pisetup.%s.transfer.tar' % self.name
with open(tmp_file_list, 'w') as f:
f.write('\n'.join(file_list))
self.api.local('cd %s && tar cvf - -T %s > %s' % (self.local_path, f.name, tmp_archive))
self.put(tmp_archive, '/tmp/', use_sudo=True)
self.run('tar xvf %s -C %s ' % (tmp_archive, target))
os.unlink(f.name)
os.unlink(tmp_archive)
def clean_git_checkout(self, git_repo, target):
target = self.local_path+'/'+target
if os.path.isdir(target+'/.git'):
self.ops.local('cd %s && git checkout master' % target)
self.ops.local('cd %s && git reset --hard origin/master' % target)
else:
self.ops.local('mkdir -p '+target)
self.ops.local('cd '+target+" && git clone "+git_repo+" . && git submodule init && git submodule update")
| 2.109375 | 2 |
intel/ll_signals.py | YmonOy/lastline_api | 2 | 12797882 | <reponame>YmonOy/lastline_api<filename>intel/ll_signals.py<gh_stars>1-10
from django.contrib.auth.signals import user_logged_in
from request_provider.signals import get_request
import ll_logger
from ll_debug import __debugvar__
class SignalHandler:
def __init__(self, logger):
self.logger = logger
# self = sender in this context, when looked from the perspective of the
# Django signal system
def login(self, user, request, **kwargs):
self.logger.instance.addFilter(ll_logger.ContextFilter(request))
msg = 'User({0}) logged in'.format(request.user.username)
self.logger.info(self.logger.to_request('Login', msg))
def logout(self, user, request, **kwargs):
self.logger.instance.addFilter(ll_logger.ContextFilter(request))
msg = 'User({0}) logged out'.format(request.user.username)
self.logger.info(self.logger.to_request('Logout', msg))
# Must override logged username from POST since user is not logged in
def failed_login(self, credentials, **kwargs):
request = get_request()
username = request.POST.get('username', 'Undefined')
self.logger.instance.addFilter(
ll_logger.ContextFilter(request, overrides={'user':username})
)
msg = 'User({0}) failed login'.format(username)
self.logger.info(self.logger.to_request('Login.fail', msg))
| 2.015625 | 2 |
mapping/star/discretized_bath/symmetric_spquad.py | fhoeb/py-mapping | 1 | 12797883 | <filename>mapping/star/discretized_bath/symmetric_spquad.py
"""
Discretized bath for the generation of direct symmetric discretization coefficients, where the integrals for
the couplings and energies are evaluated using scipy quad
"""
import numpy as np
from scipy import integrate
from mapping.star.discretized_bath.base.symmetric import BaseDiscretizedSymmetricBath
from mapping.utils.integration_defaults import default_epsabs, default_epsrel, default_limit
from mapping.star.discretized_bath.stopcoeff import StopCoefficients
from mapping.star.discretized_bath.intervals import get_symmetric_interval_points
class SpQuadDiscretizedSymmetricBath(BaseDiscretizedSymmetricBath):
def __init__(self, J, domain, max_nof_coefficients=100, interval_type='lin', **kwargs):
"""
Generates direct discretization coefficients from a spectral density J, by computing the integrals:
gamma_i = sqrt(int_i^i+1 J(x) dx)
xi_i = int_i^i+1 J(x) * x dx/ gamma_i^2
:param J: Spectral density. A function defined on 'domain', must be >0 in the inner part of domain
:param domain: List/tuple of two elements for the left and right boundary of the domain of J. The
domain must contain 0.
:param max_nof_coefficients: Size of the buffers which hold gamma and xi coefficients (maximum number of
these coefficients that can be calculated)
:param interval_type: see star.get_discretized_bath for an explanation of the available types
:param kwargs: may contain 'ignore_zeros' If one gamma_i is numerically 0, the corresponding xi_i is also set 0,
default is False
'epsabs': absolute tolerance for the scipy integrations, default is 1e-11
'epsrel': relative tolerance for the scipy integrations, default is 1e-11
'limit': limit parameter for the scipy quad function, default is 100
"""
assert not np.isinf(domain[0]) and not np.isinf(domain[1])
if not domain[0] < 0 < domain[1]:
print('Domain must contain 0!')
raise AssertionError
try:
self.ignore_zeros = kwargs['ignore_zeros']
except KeyError:
self.ignore_zeros = False
try:
self.epsabs = kwargs['epsabs']
except KeyError:
self.epsabs = default_epsabs
try:
self.epsrel = kwargs['epsrel']
except KeyError:
self.epsrel = default_epsrel
try:
self.limit = kwargs['limit']
except KeyError:
self.limit = default_limit
self.interval_type = interval_type
self.x_pts_p, self.x_pts_m = get_symmetric_interval_points(domain, max_nof_coefficients,
interval_type=interval_type,
get_spacing=False, **kwargs)
self.J = J
super().__init__(self.compute_coefficients, max_nof_coefficients=max_nof_coefficients)
def compute_coefficients(self, stop_n):
"""
Calculates the discretization coefficients up to stop_n (actually calculates 2*stop_n - self.next_n
coefficients, since the indices are tailored for asymmetric discretizations)
:param stop_n: Index up to which new coefficients are calculated
"""
x_pts_p = self.x_pts_p[::-1] if self.interval_type == 'log' else self.x_pts_p
for n in range(2*self._next_n, 2*stop_n, 2):
int_index = n // 2
# Coefficients for the positive part of the domain:
try:
a, b = x_pts_p[int_index], x_pts_p[int_index + 1]
# Must invert the view, because for log-discretization the positive domain grid points are in
# inverted order
if self.interval_type == 'log':
b, a = a, b
except IndexError:
raise StopCoefficients
gamma_sq, err = \
integrate.quad(self.J, a, b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)
xi_numerator, err = \
integrate.quad(lambda x: x * self.J(x), a, b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)
self.gamma_buf[n] = np.sqrt(gamma_sq)
if self.ignore_zeros and gamma_sq == 0:
self.xi_buf[n] = 0
else:
self.xi_buf[n] = xi_numerator / gamma_sq
# Coefficients for the negative part of the domain:
try:
a, b = self.x_pts_m[int_index + 1], self.x_pts_m[int_index]
# Must invert the view, because for log-discretization the positive domain grid points are in
# inverted order
if self.interval_type == 'log':
b, a = a, b
except IndexError:
raise StopCoefficients
gamma_sq, err = \
integrate.quad(self.J, a, b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)
xi_numerator, err = \
integrate.quad(lambda x: x * self.J(x), a, b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)
self.gamma_buf[n+1] = np.sqrt(gamma_sq)
if self.ignore_zeros and gamma_sq == 0:
self.xi_buf[n+1] = 0
else:
self.xi_buf[n+1] = xi_numerator / gamma_sq
self._update_next_n(1)
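# Hedged usage sketch (not part of the original module): fills the first few
# coefficient pairs for a simple Lorentzian spectral density on a symmetric
# domain. Calling compute_coefficients and reading gamma_buf/xi_buf directly is
# an assumption about the base-class buffers used above; the base class may
# expose a friendlier accessor.
if __name__ == '__main__':
    J = lambda x: 1.0 / (1.0 + x ** 2)
    bath = SpQuadDiscretizedSymmetricBath(J, domain=[-5.0, 5.0],
                                          max_nof_coefficients=20,
                                          interval_type='lin')
    bath.compute_coefficients(5)
    print(bath.gamma_buf[:10])   # couplings, alternating +/- half of the domain
    print(bath.xi_buf[:10])      # energies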
| 2.46875 | 2 |
testsuite/pointcloud-fold/run.py | halirutan/OpenShadingLanguage | 2 | 12797884 | #!/usr/bin/env python
# `command` and `testshade` are supplied by the OSL testsuite driver
# (runtest.py), which executes this script; they are not defined locally.
command += testshade("-param radius 1000.0 -param filename data/cloud.geo rdcloud")
| 1.015625 | 1 |
mmrotate/core/bbox/iou_calculators/builder.py | williamcorsel/mmrotate | 449 | 12797885 | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import build_from_cfg
from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS
ROTATED_IOU_CALCULATORS = IOU_CALCULATORS
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, ROTATED_IOU_CALCULATORS, default_args)
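# Hedged usage sketch (not part of the original file): builds a rotated-IoU
# calculator from a config dict. The registered name 'RBboxOverlaps2D' and the
# (cx, cy, w, h, angle) box layout are assumptions about the surrounding
# mmrotate code base.
if __name__ == '__main__':
    import torch
    import mmrotate.core  # noqa: F401  -- registers the rotated IoU calculators
    iou_calculator = build_iou_calculator(dict(type='RBboxOverlaps2D'))
    boxes1 = torch.tensor([[10.0, 10.0, 4.0, 2.0, 0.0]])
    boxes2 = torch.tensor([[10.0, 10.0, 4.0, 2.0, 0.5]])
    print(iou_calculator(boxes1, boxes2))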
| 1.875 | 2 |
light_draft/views.py | zerc/django-light-draft | 8 | 12797886 | <gh_stars>1-10
# coding: utf-8
from __future__ import unicode_literals
from django.http import Http404
from django.views.generic.detail import DetailView
from .utils import load_from_shapshot
from .exceptions import DraftError
class BaseDraftView(DetailView):
"""
View for loading data from model `snapshot`
"""
def get_template_names(self):
names = super(BaseDraftView, self).get_template_names()
preview = names[0].replace('.html', '_preview.html')
names.insert(0, preview)
return names
    def get_object(self, *args, **kwargs):
        # Cache the loaded snapshot on the instance. A single-underscore name is
        # used because getattr(self, '__object') never matches the name-mangled
        # double-underscore attribute, so the original cache never hit.
        if getattr(self, '_object', None):
            return self._object
        if 'hash' in self.request.GET:
            try:
                self._object = load_from_shapshot(
                    self.model, self.request.GET.get('hash'))
            except DraftError:
                raise Http404('Snapshot does not exist')
            return self._object
        return super(BaseDraftView, self).get_object(*args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super(BaseDraftView, self).get_context_data(*args, **kwargs)
context['is_draft_preview'] = True
return context
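# Hedged usage sketch (not part of the original file): a concrete preview view
# for a hypothetical Article model and the URL that would expose it. The model,
# pattern and names are assumptions; only BaseDraftView comes from this module.
#
#   class ArticleDraftView(BaseDraftView):
#       model = Article
#
#   urlpatterns = [
#       url(r'^articles/(?P<pk>\d+)/$', ArticleDraftView.as_view(),
#           name='article_detail'),
#   ]
#
# Requesting /articles/1/?hash=<snapshot-hash> then renders the stored snapshot
# instead of the saved object, preferring an "*_preview.html" template when one
# exists.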
| 2.015625 | 2 |
DeleteNodeinaLinkedList.py | Bit64L/LeetCode-Python- | 0 | 12797887 | <reponame>Bit64L/LeetCode-Python-<filename>DeleteNodeinaLinkedList.py<gh_stars>0
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
        # Shift each value one node to the left, then drop the tail node.
        while node.next is not None:
node.val = node.next.val
pre = node
node = node.next
pre.next = None
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node1.next = node2
node2.next = node3
solution = Solution()
solution.deleteNode(node2)
while node1 is not None:
print(node1.val)
node1 = node1.next | 3.671875 | 4 |
tests/converter/test_url2netloc.py | Centaurioun/PyFunceble | 213 | 12797888 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of URL 2 Network Location converter.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import unittest.mock
from PyFunceble.converter.url2netloc import Url2Netloc
class TestUrl2Netloc(unittest.TestCase):
"""
Tests our internal URL converter.
"""
def setUp(self) -> None:
"""
Setups everything needed for the tests.
"""
self.converter = Url2Netloc()
def tearDown(self) -> None:
"""
Destroys everything previously created for the tests.
"""
del self.converter
def test_set_data_to_convert_no_string(self) -> None:
"""
Tests the method which let us set the data to work with for the case
that a non-string value is given.
"""
given = ["Hello", "World"]
self.assertRaises(TypeError, lambda: self.converter.set_data_to_convert(given))
def test_set_data_to_convert_empty_string(self) -> None:
"""
Tests the method which let us set the data to work with for the case
that an empty-string value is given.
"""
given = ""
self.assertRaises(ValueError, lambda: self.converter.set_data_to_convert(given))
def test_get_converted_nothing_to_decode(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no conversion is needed.
"""
given = "example.org"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that a full URL is given.
"""
given = "https://example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url_with_port(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that a full URL (with explicit port) is given.
"""
given = "https://example.org:8080/hello/world/this/is/a/test"
expected = "example.org:8080"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url_with_params(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that a full URL (with params) is given.
"""
given = "https://example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_scheme(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no scheme is given.
"""
given = "example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_scheme_and_with_params(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no scheme (but with params) is given.
"""
given = "example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no protocol is given.
"""
given = "://example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol_and_with_params(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no protocol (but params) is given.
"""
given = "://example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol_and_path(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that no protocol and path is given.
"""
given = "://example.org/"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_startswith_2_slashes(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that the given url starts with 2 slashes.
"""
given = "//example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_startswith_1_slash(self) -> None:
"""
Tests the method which let us extracts the netloc from a given URL for
the case that the given url starts with 1 slash.
"""
given = "/example.org/hello/world/this/is/a/test"
expected = ""
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
| 2.6875 | 3 |
ex02-odd_or_even.py | lew18/practicepython.org-mysolutions | 0 | 12797889 | <reponame>lew18/practicepython.org-mysolutions<filename>ex02-odd_or_even.py
"""
https://www.practicepython.org
Exercise 2: Odd or Even
1 chile
Ask the user for a number. Depending on whether the number is even or odd,
print out an appropriate message to the user.
Hint: how does an even / odd number react differently when divided by 2?
Extras:
1. If the number is a multiple of 4, print out a different message.
2. Ask the user for two numbers: one number to check (call it num) and
one number to divide by (check). If check divides evenly into num,
tell that to the user. If not, print a different appropriate message.
"""
number = int(input("Enter an integer: "))
if (number % 4) == 0:
print(str(number) + " is evenly divisible by 4.")
elif ((number %2) == 0):
print(str(number) + " is even.")
else:
print(str(number) + " is odd.")
divisor = int(input("Enter a second integer: "))
if number % divisor:
print("%d is not a divisor of %d." % (divisor, number))
else:
print("%d is a divisor of %d." % (divisor, number))
| 4.46875 | 4 |
musicrecs/spotify/item/spotify_artist.py | nknaian/album_recs | 2 | 12797890 | <filename>musicrecs/spotify/item/spotify_artist.py
from .spotify_item import SpotifyItem
class SpotifyArtist(SpotifyItem):
"""Class to hold selected information about a spotify artist"""
def __init__(self, spotify_artist):
# Initialize base class
super().__init__(spotify_artist)
| 2.546875 | 3 |
2020-04-10/test_log.py | feieryouyiji/learningpy | 0 | 12797891 | <gh_stars>0
# # err_logging.py
# import logging
# def foo(s):
# return 10 / int(s)
# def bar(s):
# return foo(s) * 2
# def main():
# try:
# bar('0')
# except Exception as e:
# print("----出错了")
# logging.exception(e)
class FooError(ValueError):
pass
def foo(s):
n = int(s)
if n == 0:
raise FooError('invalid value: %s' % s)
return 10 / n
foo('0')
print('END')
| 3.171875 | 3 |
pyigm/cgm/tests/test_galaxy.py | pyigm/pyigm | 16 | 12797892 | <filename>pyigm/cgm/tests/test_galaxy.py
# Module to run tests on GalaxyCGM
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import pytest
import numpy as np
from pyigm.cgm.galaxy import GalaxyCGM
def test_init_light():
mwcgm = GalaxyCGM(load=False)
def test_init_full():
mwcgm = GalaxyCGM()
assert len(mwcgm.abs.cgm_abs) > 0
# Cool
assert 'Richter+17' in mwcgm.refs
SiII_tbl = mwcgm.abs.ion_tbl((14,2))
assert (not np.any(np.isnan(SiII_tbl['logN'])))
assert np.sum(SiII_tbl['flag_N'] > 0) == 188
# Hot
assert 'Fang+15' in mwcgm.refs
# OVII table
ovii_tbl = mwcgm.abs.ion_tbl((8,7))
assert len(ovii_tbl['sig_logN'][0]) == 2
# OVI table
ovi_tbl = mwcgm.abs.ion_tbl((8,6))
# Usage
coords = mwcgm.abs.scoord
| 2.140625 | 2 |
Contributed/NeoPixelBarometer/Python/tempestbarometer.py | ucl-casa-ce/WindSpeedGauge | 4 | 12797893 | import requests
import json
import time
import neopixel
import board
#Set Colours
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
ORANGE = (100, 64, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
OFF = (0, 0, 0)
#Set NeoPixel Details - Pin/Number Pixels/Brightness etc
pixels = neopixel.NeoPixel(board.D18, 144, brightness=0.03, auto_write=False)
#Start up Lights
n = 1
t_end = time.time() + 22.32 * 1
while time.time() < t_end:
n = n + 1
if n >= 144:
n = 1
pixels[n] = (RED)
pixels[n-1] = (YELLOW)
pixels.show()
time.sleep (0.1)
pixels.fill((0, 0, 0))
pixels.show()
print ("Getting Conditions and Forecast")
def getconditions():
    # Get Data from Weather Flow for Station Location.
    # Retry until the request succeeds; otherwise `response` would be left
    # undefined and json.loads below would raise a NameError.
    while True:
        try:
            response = requests.get('https://swd.weatherflow.com/swd/rest/better_forecast?api_key=db55228a-b708-4325-9166-7f2d04c61baa&station_id=50216&units_temp=c&units_wind=mph&units_pressure=mb&units_precip=mm&units_distance=mi').text
            break
        except requests.exceptions.RequestException:
            time.sleep(60)
data = json.loads(response)
text = data['current_conditions']['conditions']
icon = data['current_conditions']['icon']
baro = int(data['current_conditions']['sea_level_pressure'])
trend = data['current_conditions']['pressure_trend']
print(text)
print(icon)
print(baro)
print(trend)
return trend, baro, icon
def barometer():
conditions = getconditions()
baro = conditions[1]
# Pressure top 1050 minus number of pixels to set top pixel
pixel = 906
pixelon = int(baro - pixel)
pixels[pixelon] = (RED)
def trendpixel():
conditions = getconditions()
trend = conditions[0]
if trend == 'steady':
pixels[14] = (GREEN)
else:
pixels[14] = (OFF)
if trend == 'rising':
pixels[16] = (BLUE)
else:
pixels[16] = (OFF)
if trend == 'falling':
pixels[12] = (RED)
else:
pixels[12] = (OFF)
def icon():
conditions = getconditions()
icon = str(conditions[2])
print("Icon")
print(icon)
if icon == 'clear-day':
pixels[36] = (YELLOW)
else:
pixels[36] = (OFF)
    # `icon == a or 'b'` is always true because a non-empty string is truthy,
    # so membership tests are used for the two-value checks below.
    if icon in ('partly-cloudy-day', 'partly-cloudy-night'):
        pixels[34] = (BLUE)
    else:
        pixels[34] = (OFF)
    if icon == 'cloudy':
        pixels[32] = (BLUE)
    else:
        pixels[32] = (OFF)
    # The two separate if/else blocks on pixel 30 meant the night check always
    # overwrote the day check; they are combined here.
    if icon in ('possibly-rainy-day', 'possibly-rainy-night'):
        pixels[30] = (BLUE)
    else:
        pixels[30] = (OFF)
if icon == 'clear-night':
pixels[22] = (BLUE)
else:
pixels[22] = (OFF)
while True:
getconditions()
barometer()
trendpixel()
icon()
pixels.show()
time.sleep(60)
| 3.265625 | 3 |
aiocloudflare/api/zones/amp/sxg/sxg.py | Stewart86/aioCloudflare | 2 | 12797894 | from aiocloudflare.commons.auth import Auth
class Sxg(Auth):
_endpoint1 = "zones"
_endpoint2 = "amp/sxg"
_endpoint3 = None
| 1.242188 | 1 |
projects/views.py | kylef-archive/django-projects | 2 | 12797895 | import cPickle as pickle
import datetime
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import Http404, HttpResponse
from django.core import urlresolvers
from unipath import FSPath as Path
from projects.models import Project
def document(request, project, url):
try:
project = Project.objects.get(slug=project)
except Project.DoesNotExist:
raise Http404
docroot = Path(project.get_pickle_path())
# First look for <bits>/index.fpickle, then for <bits>.fpickle
bits = url.strip('/').split('/') + ['index.fpickle']
doc = docroot.child(*bits)
if not doc.exists():
bits = bits[:-2] + ['%s.fpickle' % bits[-2]]
doc = docroot.child(*bits)
if not doc.exists():
raise Http404("'%s' does not exist" % doc)
bits[-1] = bits[-1].replace('.fpickle', '')
template_names = [
'docs/%s.html' % '-'.join([b for b in bits if b]),
'docs/doc.html'
]
return render_to_response(template_names, RequestContext(request, {
'doc': pickle.load(open(doc, 'rb')),
'env': pickle.load(open(docroot.child('globalcontext.pickle'), 'rb')),
'update_date': datetime.datetime.fromtimestamp(docroot.child('last_build').mtime()),
'home': project.get_absolute_url(),
'redirect_from': request.GET.get('from', None),
}))
def update(request, slug):
try:
project = Project.objects.get(slug=slug)
except Project.DoesNotExist:
raise Http404
project.update()
return HttpResponse('done') | 2.15625 | 2 |
decrypt_device/main.py | vnszero/Enigma_2.0 | 0 | 12797896 | CEIL = 122
END_LINE = '\n'
FLOOR = 32
FOG_NUM = 1
FOG_POS = 2
ROLLBACK = 90
SECURITY = 'access denied\n'
SHIFT = 0
def verify_code(message : str) -> list:
i = 0
shift = None
fog_num = None
fog_pos = None
code_message = ''
for alpha in message:
if i == SHIFT:
shift = alpha
elif i == FOG_NUM:
fog_num = alpha
elif i == FOG_POS:
fog_pos = alpha
else:
code_message += alpha
if alpha == END_LINE:
with open("code.txt", "w") as file:
file.write(SECURITY)
break
i+=1
return shift, fog_num, fog_pos, code_message
def clear(message : str, fog_num : int, fog_pos : int) -> str:
clear_message = ''
i = FLOOR
for alpha in message:
if i > fog_num:
i = FLOOR
if i == fog_pos:
clear_message += alpha
i += 1
return clear_message
def uncesar(message : str, shift : int) -> str:
uncesar_message = ''
for alpha in message:
ord_ascii = ord(alpha)
if ord_ascii <= shift + FLOOR:
uncesar_message += chr(ord_ascii+ROLLBACK-shift)
else:
uncesar_message += chr(ord(alpha)-shift)
return uncesar_message
def main():
try:
# open, read and verify
encrypt_message = ''
with open("code.txt", "r") as file:
encrypt_message = file.read()
shift, fog_num, fog_pos, code_message = verify_code(encrypt_message)
if shift != None and fog_num != None and fog_pos != None:
# clear
clear_message = clear(code_message, ord(fog_num), ord(fog_pos))
# uncesar
decrypt_message = uncesar(clear_message, ord(shift))
# export
with open('message.txt', 'w') as file:
file.write(decrypt_message)
except:
print('There is a problem with: code.txt, Tip: verify the path')
if __name__ == '__main__':
main() | 3.28125 | 3 |
spar_python/report_generation/ta1/ta1_section_performance_latency.py | nathanawmk/SPARTA | 37 | 12797897 | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Section class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Sep 2013 SY Original version
# *****************************************************************
# SPAR imports:
import spar_python.report_generation.ta1.ta1_section as section
import spar_python.report_generation.common.regression as regression
import spar_python.report_generation.common.latex_classes as latex_classes
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.ta1.ta1_analysis_input as t1ai
class Ta1LatencySection(section.Ta1Section):
"""The latency section of the TA1 report"""
def _store_query_latency_table(self):
"""Stores the LaTeX string representing the query latency table
on the output object."""
constraint_list = self._config.get_constraint_list(
require_correct=True)
categories = self._config.results_db.get_unique_query_values(
simple_fields=[(t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS),
(t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE),
(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS),
(t1s.DBF_TABLENAME, t1s.DBF_CAT)],
constraint_list=constraint_list)
# create the latency table:
latency_table = latex_classes.LatexTable(
"Query Latency vs. Number of Records Returned Best Fit Functions",
"lat_main",
["DBNR", "DBRS", "Select", "Query Type",
"Best-Fit Func", "R-Squared"])
# compute correctness for every query category:
for (dbnr, dbrs, selection_cols, query_cat) in categories:
inp = t1ai.Input()
inp[t1s.DBF_CAT] = query_cat
inp[t1s.DBF_NUMRECORDS] = dbnr
inp[t1s.DBF_RECORDSIZE] = dbrs
inp[t1s.DBP_SELECTIONCOLS] = selection_cols
this_constraint_list = constraint_list + inp.get_constraint_list()
[x_values, y_values] = self._config.results_db.get_query_values(
simple_fields=[
(t1s.DBP_TABLENAME, t1s.DBP_NUMNEWRETURNEDRECORDS),
(t1s.DBP_TABLENAME, t1s.DBP_QUERYLATENCY)],
constraint_list=this_constraint_list)
try:
inputs = [x_values]
outputs = y_values
function = regression.regress(
function_to_regress=self._config.ql_all_ftr,
outputs=outputs, inputs=inputs)
function_string = function.string
rsquared = function.get_rsquared(inputs, outputs)
except regression.BadRegressionInputError:
function_string = "-"
rsquared = "-"
latency_table.add_content(
[inp.test_db.get_db_num_records_str(),
inp.test_db.get_db_record_size_str(),
selection_cols, query_cat, function_string, rsquared])
self._outp["query_latency_table"] = latency_table.get_string()
def _populate_output(self):
"""Populates the output object which is passed to the Jinja tempalte
in get_string."""
self._store_query_latency_table()
| 1.929688 | 2 |
RhyAn/decomposer/medianNMF.py | kaveenr/rhyan | 0 | 12797898 | import numpy as np
import scipy.signal as sp
import scipy.spatial.distance as sp_dist
import librosa
class MedianNMF:
y, sr = None,None
n_components = None
def __init__(self,y,sr,n_components = 5):
self.y, self.sr = y,sr
self.n_components = n_components
def decompose(self):
#filter out precussive parts
hpss_y = self.hpss()
#Perform Short-time Fourier transform
D = librosa.stft(hpss_y)
# Separate the magnitude and phase
S, phase = librosa.magphase(D)
#NMF decompose to components
components, activations = self.decomposeNMF(hpss_y, S, self.n_components)
#reconstruct and return
return [self.reconstructComponent(
components[:, i], activations[i], phase) for i in range(0,len(activations))]
def hpss(self, margin=4.0):
#extract precussive components through median filtering
return librosa.effects.percussive(self.y, margin=margin)
def decomposeNMF(self, y, magnitude, n_components):
# Decompose by nmf
return librosa.decompose.decompose(magnitude, n_components, sort=True)
    def reconstructFull(self, components, activations, phase):
        #reconstruct all components into one signal
        #`components` is now an explicit argument; it was an undefined name before
        D_k = components.dot(activations)
y_k = librosa.istft(D_k * phase)
return y_k
def reconstructComponent(self, components, activation, phase):
D_k = np.multiply.outer(components, activation)
y_k = librosa.istft(D_k * phase)
#filter out noise using Savitzky-Golay filter
component_filtered = sp.savgol_filter(y_k,11,1)
return component_filtered
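# Hedged usage sketch (not part of the original file): decomposes an audio clip
# into percussive components and writes each one to disk. The input path and
# the use of soundfile for writing are assumptions.
if __name__ == '__main__':
    import soundfile as sf
    y, sr = librosa.load('drum_loop.wav', sr=None)
    nmf = MedianNMF(y, sr, n_components=5)
    for i, component in enumerate(nmf.decompose()):
        sf.write('component_%d.wav' % i, component, sr)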
| 2.203125 | 2 |
M1L10-Mini_Project-OpenAI_Taxi-v2/agent.py | felixrlopezm/Udacity_Deep_Reinforcement_Learning | 0 | 12797899 | import numpy as np
from collections import defaultdict
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.epsilon_start = 1.0
self.i_episode = 1.0
self.alpha = 0.04
self.gamma = 0.9
def epsilon_greedy_probs(self, state, epsilon):
''' Calculation of probabilities accordgin to a
epsilon greedy policy'''
probs = np.ones(self.nA) * epsilon / self.nA
best_action = np.argmax(self.Q[state])
probs[best_action] = 1 - epsilon + (epsilon / self.nA)
return probs
def select_action(self, state):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
# Random action
# action = np.random.choice(self.nA)
# Epsilon decay
epsilon = self.epsilon_start / self.i_episode
# Epsilon-greedy policy/probabilities
probs = self.epsilon_greedy_probs(state, epsilon)
# Action selection acc. to epsilon-greedy policy
action = np.random.choice(np.arange(self.nA), p = probs)
return action
def step(self, state, action, reward, next_state, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
- next_state: the current state of the environment
- done: whether the episode is complete (True or False)
"""
# SARSA method
next_action = self.select_action(next_state)
Gt = reward + self.gamma * self.Q[next_state][next_action]
# Q-learning (SARSAMAX) method
#best_action = np.argmax(self.Q[next_state])
#Gt = reward + self.gamma * self.Q[next_state][best_action]
self.Q[state][action] += self.alpha * (Gt - self.Q[state][action])
# i_episode update for calculation of epsilon decay
self.i_episode += 1.0 | 3.046875 | 3 |
attendees/persons/models/attending.py | xjlin0/-attendees30 | 0 | 12797900 | <filename>attendees/persons/models/attending.py
from django.db import models
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields.jsonb import JSONField
from django.contrib.postgres.indexes import GinIndex
from django.utils.functional import cached_property
from model_utils.models import TimeStampedModel, SoftDeletableModel
from . import Note, Utility, Attendee, Registration
class Attending(TimeStampedModel, SoftDeletableModel, Utility):
notes = GenericRelation(Note)
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
registration = models.ForeignKey(Registration, null=True, blank=True, on_delete=models.SET_NULL)
attendee = models.ForeignKey(Attendee, null=False, blank=False, on_delete=models.CASCADE, related_name="attendings")
gatherings = models.ManyToManyField('occasions.Gathering', through='occasions.Attendance')
category = models.CharField(max_length=20, null=False, blank=False, default="normal", help_text="normal, not_going, coworker, etc")
meets = models.ManyToManyField('occasions.Meet', through='AttendingMeet', related_name="meets")
infos = JSONField(null=True, blank=True, default=dict, help_text='Example: {"grade": 5, "age": 11, "bed_needs": 1, "mobility": 300}. Please keep {} here even no data')
# Todo: infos contains the following display data which are not for table join/query: age, bed_needs, mobility
def clean(self):
# fetching birthday from attendee record first
# Todo: check if meets' assemblies under attendee's organization
        # `infos` is a JSON dict, so read its keys with .get(); the previous
        # attribute access (self.infos.bed_needs / self.info.age) would raise.
        if self.registration and self.registration.assembly.need_age and self.infos.get('bed_needs', 0) < 1 and self.infos.get('age') is None:
raise ValidationError("You must specify age for the participant")
def get_absolute_url(self):
return reverse('attending_detail', args=[str(self.id)])
class Meta:
db_table = 'persons_attendings'
ordering = ['registration']
constraints = [
models.UniqueConstraint(fields=['attendee', 'registration'], condition=models.Q(is_removed=False), name="attending_attendee_registration")
]
indexes = [
GinIndex(fields=['infos'], name='attending_infos_gin', ),
]
@property
def main_contact(self):
return self.registration.registrant
@cached_property
def meet_names(self):
return ",".join([d.display_name for d in self.meets.all()])
@property
def attending_label(self):
return f'({self.registration}) {self.attendee.display_label}' # parentheses needed in datagrid_attendee_update_view.js
@cached_property
def all_addresses(self):
return '; '.join([a.street for a in self.attendee.places.all()])
def __str__(self):
return '%s %s' % (self.attendee, self.meet_names)
| 2.09375 | 2 |
tests/ts4/contracts/user_subscription.py | tonred/tonclick | 0 | 12797901 | <gh_stars>0
from tonos_ts4 import ts4
class UserSubscription(ts4.BaseContract):
def __init__(self, address: ts4.Address):
super().__init__('UserSubscription', {}, nickname='UserSubscription', address=address)
@property
def active(self) -> bool:
return self.call_getter('isActive', {'_answer_id': 0})
@property
def auto_renew(self) -> bool:
return self.call_getter('isAutoRenew', {'_answer_id': 0})
| 1.960938 | 2 |
thirdpart/pools.py | by46/geek | 0 | 12797902 | <reponame>by46/geek
from gevent import socket
from geventconnpool import ConnectionPool
class MyPool(ConnectionPool):
def _new_connection(self):
return socket.create_connection(("www.baidu.com", 80))
if __name__ == '__main__':
pool = MyPool(20)
with pool.get() as conn:
conn.send("PING\n")
| 2.734375 | 3 |
package/niflow/ants/brainextraction/workflows/atropos.py | rciric/poldracklab-antsbrainextraction | 0 | 12797903 | <gh_stars>0
from collections import OrderedDict
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.ants import Atropos, MultiplyImages
from ..interfaces.ants import ImageMath
ATROPOS_MODELS = {
'T1': OrderedDict([
('nclasses', 3),
('csf', 1),
('gm', 2),
('wm', 3),
]),
'T2': OrderedDict([
('nclasses', 3),
('csf', 3),
('gm', 2),
('wm', 1),
]),
'FLAIR': OrderedDict([
('nclasses', 3),
('csf', 1),
('gm', 3),
('wm', 2),
]),
}
def init_atropos_wf(name='atropos_wf',
use_random_seed=True,
omp_nthreads=None,
mem_gb=3.0,
padding=10,
in_segmentation_model=list(ATROPOS_MODELS['T1'].values())):
"""
Implements supersteps 6 and 7 of ``antsBrainExtraction.sh``,
which refine the mask previously computed with the spatial
normalization to the template.
**Parameters**
use_random_seed : bool
Whether ATROPOS should generate a random seed based on the
system's clock
omp_nthreads : int
Maximum number of threads an individual process may use
mem_gb : float
Estimated peak memory consumption of the most hungry nodes
in the workflow
padding : int
Pad images with zeros before processing
in_segmentation_model : tuple
A k-means segmentation is run to find gray or white matter
around the edge of the initial brain mask warped from the
template.
This produces a segmentation image with :math:`$K$` classes,
ordered by mean intensity in increasing order.
With this option, you can control :math:`$K$` and tell
the script which classes represent CSF, gray and white matter.
Format (K, csfLabel, gmLabel, wmLabel).
Examples:
- ``(3,1,2,3)`` for T1 with K=3, CSF=1, GM=2, WM=3 (default)
- ``(3,3,2,1)`` for T2 with K=3, CSF=3, GM=2, WM=1
- ``(3,1,3,2)`` for FLAIR with K=3, CSF=1 GM=3, WM=2
- ``(4,4,2,3)`` uses K=4, CSF=4, GM=2, WM=3
name : str, optional
Workflow name (default: atropos_wf)
**Inputs**
in_files
:abbr:`INU (intensity non-uniformity)`-corrected files.
in_mask
Brain mask calculated previously
**Outputs**
out_mask
Refined brain mask
out_segm
Output segmentation
out_tpms
Output :abbr:`TPMs (tissue probability maps)`
"""
wf = pe.Workflow(name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_mask', 'out_segm', 'out_tpms']), name='outputnode')
# Run atropos (core node)
atropos = pe.Node(Atropos(
dimension=3,
initialization='KMeans',
number_of_tissue_classes=in_segmentation_model[0],
n_iterations=3,
convergence_threshold=0.0,
mrf_radius=[1, 1, 1],
mrf_smoothing_factor=0.1,
likelihood_model='Gaussian',
use_random_seed=use_random_seed),
name='01_atropos', n_procs=omp_nthreads, mem_gb=mem_gb)
# massage outputs
pad_segm = pe.Node(ImageMath(operation='PadImage', op2='%d' % padding),
name='02_pad_segm')
pad_mask = pe.Node(ImageMath(operation='PadImage', op2='%d' % padding),
name='03_pad_mask')
# Split segmentation in binary masks
sel_labels = pe.Node(niu.Function(function=_select_labels,
output_names=['out_wm', 'out_gm', 'out_csf']),
name='04_sel_labels')
sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:]))
# Select largest components (GM, WM)
# ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM}
get_wm = pe.Node(ImageMath(operation='GetLargestComponent'),
name='05_get_wm')
get_gm = pe.Node(ImageMath(operation='GetLargestComponent'),
name='06_get_gm')
# Fill holes and calculate intersection
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
fill_gm = pe.Node(ImageMath(operation='FillHoles', op2='2'),
name='07_fill_gm')
mult_gm = pe.Node(MultiplyImages(dimension=3), name='08_mult_gm')
# MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} ME ${EXTRACTION_CSF} 10
relabel_wm = pe.Node(MultiplyImages(dimension=3, second_input=in_segmentation_model[-1]),
name='09_relabel_wm')
me_csf = pe.Node(ImageMath(operation='ME', op2='10'), name='10_me_csf')
# ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
# ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
add_gm = pe.Node(ImageMath(operation='addtozero'),
name='11_add_gm')
relabel_gm = pe.Node(MultiplyImages(dimension=3, second_input=in_segmentation_model[-2]),
name='12_relabel_gm')
add_gm_wm = pe.Node(ImageMath(operation='addtozero'),
name='13_add_gm_wm')
# Superstep 7
# Split segmentation in binary masks
sel_labels2 = pe.Node(niu.Function(function=_select_labels,
output_names=['out_wm', 'out_gm', 'out_csf']),
name='14_sel_labels2')
sel_labels2.inputs.labels = list(reversed(in_segmentation_model[1:]))
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP}
add_7 = pe.Node(ImageMath(operation='addtozero'), name='15_add_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2
me_7 = pe.Node(ImageMath(operation='ME', op2='2'), name='16_me_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK}
comp_7 = pe.Node(ImageMath(operation='GetLargestComponent'),
name='17_comp_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4
md_7 = pe.Node(ImageMath(operation='MD', op2='4'), name='18_md_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2
fill_7 = pe.Node(ImageMath(operation='FillHoles', op2='2'),
name='19_fill_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \
# ${EXTRACTION_MASK_PRIOR_WARPED}
add_7_2 = pe.Node(ImageMath(operation='addtozero'), name='20_add_7_2')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5
md_7_2 = pe.Node(ImageMath(operation='MD', op2='5'), name='21_md_7_2')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5
me_7_2 = pe.Node(ImageMath(operation='ME', op2='5'), name='22_me_7_2')
# De-pad
depad_mask = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding),
name='23_depad_mask')
depad_segm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding),
name='24_depad_segm')
depad_gm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding),
name='25_depad_gm')
depad_wm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding),
name='26_depad_wm')
depad_csf = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding),
name='27_depad_csf')
merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name='merge_tpms')
wf.connect([
(inputnode, pad_mask, [('in_mask', 'op1')]),
(inputnode, atropos, [('in_files', 'intensity_images'),
('in_mask', 'mask_image')]),
(atropos, pad_segm, [('classified_image', 'op1')]),
(pad_segm, sel_labels, [('output_image', 'in_segm')]),
(sel_labels, get_wm, [('out_wm', 'op1')]),
(sel_labels, get_gm, [('out_gm', 'op1')]),
(get_gm, fill_gm, [('output_image', 'op1')]),
(get_gm, mult_gm, [('output_image', 'first_input'),
(('output_image', _gen_name), 'output_product_image')]),
(fill_gm, mult_gm, [('output_image', 'second_input')]),
(get_wm, relabel_wm, [('output_image', 'first_input'),
(('output_image', _gen_name), 'output_product_image')]),
(sel_labels, me_csf, [('out_csf', 'op1')]),
(mult_gm, add_gm, [('output_product_image', 'op1')]),
(me_csf, add_gm, [('output_image', 'op2')]),
(add_gm, relabel_gm, [('output_image', 'first_input'),
(('output_image', _gen_name), 'output_product_image')]),
(relabel_wm, add_gm_wm, [('output_product_image', 'op1')]),
(relabel_gm, add_gm_wm, [('output_product_image', 'op2')]),
(add_gm_wm, sel_labels2, [('output_image', 'in_segm')]),
(sel_labels2, add_7, [('out_wm', 'op1'),
('out_gm', 'op2')]),
(add_7, me_7, [('output_image', 'op1')]),
(me_7, comp_7, [('output_image', 'op1')]),
(comp_7, md_7, [('output_image', 'op1')]),
(md_7, fill_7, [('output_image', 'op1')]),
(fill_7, add_7_2, [('output_image', 'op1')]),
(pad_mask, add_7_2, [('output_image', 'op2')]),
(add_7_2, md_7_2, [('output_image', 'op1')]),
(md_7_2, me_7_2, [('output_image', 'op1')]),
(me_7_2, depad_mask, [('output_image', 'op1')]),
(add_gm_wm, depad_segm, [('output_image', 'op1')]),
(relabel_wm, depad_wm, [('output_product_image', 'op1')]),
(relabel_gm, depad_gm, [('output_product_image', 'op1')]),
(sel_labels, depad_csf, [('out_csf', 'op1')]),
(depad_csf, merge_tpms, [('output_image', 'in1')]),
(depad_gm, merge_tpms, [('output_image', 'in2')]),
(depad_wm, merge_tpms, [('output_image', 'in3')]),
(depad_mask, outputnode, [('output_image', 'out_mask')]),
(depad_segm, outputnode, [('output_image', 'out_segm')]),
(merge_tpms, outputnode, [('out', 'out_tpms')]),
])
return wf
def _select_labels(in_segm, labels):
from os import getcwd
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
out_files = []
cwd = getcwd()
nii = nb.load(in_segm)
for l in labels:
data = (nii.get_data() == l).astype(np.uint8)
newnii = nii.__class__(data, nii.affine, nii.header)
newnii.set_data_dtype('uint8')
out_file = fname_presuffix(in_segm, suffix='class-%02d' % l,
newpath=cwd)
newnii.to_filename(out_file)
out_files.append(out_file)
return out_files
def _gen_name(in_file):
import os
from nipype.utils.filemanip import fname_presuffix
return os.path.basename(fname_presuffix(in_file, suffix='processed'))
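# Hedged usage sketch (not part of the original module): wires the refinement
# workflow to one INU-corrected T1w image and the initial mask warped from the
# template. The file paths and working directory are assumptions.
if __name__ == '__main__':
    wf = init_atropos_wf(omp_nthreads=4, mem_gb=3.0,
                         in_segmentation_model=list(ATROPOS_MODELS['T1'].values()))
    wf.inputs.inputnode.in_files = ['/data/sub-01_T1w_corrected.nii.gz']
    wf.inputs.inputnode.in_mask = '/data/sub-01_brainmask_prior.nii.gz'
    wf.base_dir = '/tmp/atropos_work'
    wf.run()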
| 2.171875 | 2 |
public/pylib/holdoutgroup.py | shaileshakarte28/SFMC | 0 | 12797904 | <filename>public/pylib/holdoutgroup.py
import requests
import json
import xmltodict
import datetime
from math import ceil
import jxmlease
import operator
import random
from operator import itemgetter
import time
from json import loads, dumps
def auth(clientId: str,
clientSecret: str,
accountId:str
) -> requests.Response:
end_point = "https://mc4pytkknrp1gsz0v23m93b3055y.auth.marketingcloudapis.com/v2/token"
headers = {'Content-type': 'application/json;charset=UTF-8'}
payload = {
"grant_type":"client_credentials",
"client_id": clientId,
"client_secret": clientSecret,
"account_id": accountId,
}
req = requests.post(
end_point,
payload,
{"headers" : headers}
# verify=False
)
# req.close()
return req.json()
cred = auth('<KEY>','<KEY>','6291063')
token = (cred["access_token"])
print("Access Token : ",token)
def dataextension2():
try:
accessToken = token
account_id = "6291063"
de_name = "Test_HoldOut_Group"
de_external_key = "5E4FE032-6C0E-42E8-8B81-99F167D7DFC9"
except Exception as e:
return "There is some problem with the Credentials Provided...",e
try:
descbody =f"""
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">Retrieve</a:Action>
<a:To s:mustUnderstand="1">https://webservice.s6.exacttarget.com/Service.asmx</a:To>
<fueloauth xmlns="http://exacttarget.com">{accessToken}</fueloauth>
</s:Header>
<s:Body xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<RetrieveRequestMsg xmlns="http://exacttarget.com/wsdl/partnerAPI">
<RetrieveRequest>
<ObjectType>DataExtensionObject[{de_name}]</ObjectType>
<Properties>NAME</Properties>
<Properties>Flag</Properties>
<Properties>Status</Properties>
<Filter xsi:type="SimpleFilterPart">
<Property>Status</Property>
<SimpleOperator>equals</SimpleOperator>
<Value>Unprocessed</Value>
</Filter>
</RetrieveRequest>
</RetrieveRequestMsg>
</s:Body>
</s:Envelope>
"""
url = "https://webservice.s6.exacttarget.com/Service.asmx"
headers = {'content-type': 'text/xml'}
body = descbody
resp = requests.post(url, data=body, headers=headers)
response = resp.text
# print(response)
data = jxmlease.parse(response)
status1=data["soap:Envelope"]["soap:Body"]["RetrieveResponseMsg"]["Results"]
status2 = loads(dumps(status1))
except Exception as e:
return "There are no records for holding out...",e
else:
cust_list=[]
# print(status2)
for item in status2:
cust_key= item["Properties"]["Property"][0]['Value']
cust_list.append(cust_key)
print("UnProcessed List",cust_list)
n= len(cust_list)%10
print(n)
cust_1 = []
for i in range(0,n):
cust_1.append(cust_list.pop())
print(cust_1)
cust_2 = [ele for ele in cust_list if ele not in cust_1]
print(cust_2)
if len(cust_2) > 9:
# hold_list = cust_list[::10]
hold_list = [cust_2[x*10-1] for x in range(1,len(cust_2)) if x*10<=len(cust_2)]
print(hold_list)
for element in hold_list:
soapbody = f"""
<s:Envelope
xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">Update</a:Action>
<a:MessageID>urn:uuid:7e0cca04-57bd-4481-864c-6ea8039d2ea0</a:MessageID>
<a:ReplyTo>
<a:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:Address>
</a:ReplyTo>
<a:To s:mustUnderstand="1">https://webservice.s6.exacttarget.com/Service.asmx</a:To>
<fueloauth xmlms="http://exacttarget.com">{accessToken}</fueloauth>
</s:Header>
<s:Body
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<UpdateRequest
xmlns="http://exacttarget.com/wsdl/partnerAPI">
<Objects xsi:type="DataExtensionObject">
<PartnerKey xsi:nil="true"/>
<Client>
<ID>{account_id}</ID>
</Client>
<ObjectID xsi:nil="true"/>
<CustomerKey>{de_external_key}</CustomerKey>
<Properties>
<Property>
<Name>Name</Name>
<Value>{element}</Value>
</Property>
<Property>
<Name>Flag</Name>
<Value>False</Value>
</Property>
<Property>
<Name>Status</Name>
<Value>Hold Out</Value>
</Property>
</Properties>
</Objects>
</UpdateRequest>
</s:Body>
</s:Envelope>
"""
url = "https://webservice.s6.exacttarget.com/Service.asmx"
headers = {'content-type': 'text/xml'}
body = soapbody
resp = requests.post(url, data=body, headers=headers)
print(resp.status_code)
# print(resp.text)
holdout_rec = hold_list
# print("HoldOut Records: ", holdout_rec)
res_list = tuple(set(holdout_rec)^set(cust_2))
print("Without Holdout: ", res_list)
for element in res_list:
soapbody = f"""
<s:Envelope
xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">Update</a:Action>
<a:MessageID>urn:uuid:7e0cca04-57bd-4481-864c-6ea8039d2ea0</a:MessageID>
<a:ReplyTo>
<a:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:Address>
</a:ReplyTo>
<a:To s:mustUnderstand="1">https://webservice.s6.exacttarget.com/Service.asmx</a:To>
<fueloauth xmlms="http://exacttarget.com">{accessToken}</fueloauth>
</s:Header>
<s:Body
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<UpdateRequest
xmlns="http://exacttarget.com/wsdl/partnerAPI">
<Objects xsi:type="DataExtensionObject">
<PartnerKey xsi:nil="true"/>
<Client>
<ID>{account_id}</ID>
</Client>
<ObjectID xsi:nil="true"/>
<CustomerKey>{de_external_key}</CustomerKey>
<Properties>
<Property>
<Name>Name</Name>
<Value>{element}</Value>
</Property>
<Property>
<Name>Flag</Name>
<Value>True</Value>
</Property>
<Property>
<Name>Status</Name>
<Value>Processed</Value>
</Property>
</Properties>
</Objects>
</UpdateRequest>
</s:Body>
</s:Envelope>
"""
url = "https://webservice.s6.exacttarget.com/Service.asmx"
headers = {'content-type': 'text/xml'}
body = soapbody
resp = requests.post(url, data=body, headers=headers)
print(resp.status_code)
# print(resp.text)
if len(cust_1) > 0:
for element in cust_1:
soapbody = f"""
<s:Envelope
xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">Update</a:Action>
<a:MessageID>urn:uuid:7e0cca04-57bd-4481-864c-6ea8039d2ea0</a:MessageID>
<a:ReplyTo>
<a:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:Address>
</a:ReplyTo>
<a:To s:mustUnderstand="1">https://webservice.s6.exacttarget.com/Service.asmx</a:To>
<fueloauth xmlms="http://exacttarget.com">{accessToken}</fueloauth>
</s:Header>
<s:Body
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<UpdateRequest
xmlns="http://exacttarget.com/wsdl/partnerAPI">
<Objects xsi:type="DataExtensionObject">
<PartnerKey xsi:nil="true"/>
<Client>
<ID>{account_id}</ID>
</Client>
<ObjectID xsi:nil="true"/>
<CustomerKey>{de_external_key}</CustomerKey>
<Properties>
<Property>
<Name>Name</Name>
<Value>{element}</Value>
</Property>
<Property>
<Name>Flag</Name>
<Value>True</Value>
</Property>
<Property>
<Name>Status</Name>
<Value>Unprocessed</Value>
</Property>
</Properties>
</Objects>
</UpdateRequest>
</s:Body>
</s:Envelope>
"""
url = "https://webservice.s6.exacttarget.com/Service.asmx"
headers = {'content-type': 'text/xml'}
body = soapbody
resp = requests.post(url, data=body, headers=headers)
print(resp.status_code)
# print(resp.text)
return "All Records Processed Sucessfully..."
dataextension2() | 2.625 | 3 |
python/submit_jobstream_byid.py | WorkloadAutomation/TWSzOS_REST_API_Python_samples | 2 | 12797905 | #!/usr/bin/python
#############################################################################
# Licensed Materials - Property of HCL*
# (C) Copyright HCL Technologies Ltd. 2017, 2018 All rights reserved.
# * Trademark of HCL Technologies Limited
#############################################################################
import waconn
import argparse
import datetime
# -----------------------------------------------------
# Define and parse command line arguments
# -----------------------------------------------------
parser = argparse.ArgumentParser(description='Submit a job stream to the TWSz plan')
parser.add_argument('-e','--engineName', help='name of the engine as defined in the TWSz Connector', required=True, metavar="<engine_name>")
parser.add_argument('-j','--jsName', help='job stream', required=True, metavar="<job_stream_name>")
args = parser.parse_args()
now = datetime.datetime.utcnow().isoformat()
# -----------------------------------------------------
# Intialize the client utility module
# -----------------------------------------------------
conn = waconn.WAConn('waconn.ini','/twsz/v1/'+args.engineName)
# -----------------------------------------------------
# Query the model and get the js id
# -----------------------------------------------------
resp = conn.post('/model/jobstream/header/query',
json={"filters": {"jobstreamFilter": {"jobStreamName": args.jsName,"validIn": now}}},
headers={'How-Many': '1'})
r = resp.json()
if len(r) == 0:
print('job stream not found')
exit(2)
jsId=r[0]["id"]
print("the js id is: " + jsId)
# -----------------------------------------------------
# Now submit the jobstream to the plan
# -----------------------------------------------------
submit = {"inputArrivalTime": now}
# now we can submit the js
print "submit parameters: " +str(submit)
resp = conn.post('/plan/current/jobstream/' + jsId + '/action/submit_jobstream', json=submit)
r = resp.json()
for js in r:
print ('Submitted: '+js)
| 2.15625 | 2 |
gans/ops/misc.py | gobrewers14/gans | 0 | 12797906 | <reponame>gobrewers14/gans
# ============================================================================
# Author: <NAME>
# Email: <<EMAIL>>
# Date: 2018-09-02 17:13:47
# Brief:
# ============================================================================
import tensorflow as tf
def attention(x, l=1.0, norm=None, name="att"):
"""
Args:
Returns:
Notes:
"""
with tf.variable_scope(name):
orig_shp = x.get_shape().as_list()
# make f, g, h
f = conv2d(x, num_filters=orig_shp[-1], norm=norm, filter_size=(1, 1), pad="SAME", name="fconv1x1")
g = conv2d(x, num_filters=orig_shp[-1], norm=norm, filter_size=(1, 1), pad="SAME", name="gconv1x1")
h = conv2d(x, num_filters=orig_shp[-1], norm=norm, filter_size=(1, 1), pad="SAME", name="hconv1x1")
f = tf.reshape(f, [-1, orig_shp[-1]])
g = tf.reshape(g, [-1, orig_shp[-1]])
h = tf.reshape(h, [-1, orig_shp[-1]])
beta = tf.nn.softmax(tf.matmul(g, tf.transpose(f, [1, 0])))
o = tf.matmul(beta, h)
o = tf.reshape(o, [-1] + orig_shp[1:])
out = l * o + x
return out
| 2.296875 | 2 |
training_dataset_maker1.py | deepakrana47/DT-RAE | 1 | 12797907 | <filename>training_dataset_maker1.py
import os
from utility1 import extract_batchfeature_using_senna, get_parents, get_dep, get_words_id, pdep_2_deporder_dep, dep_2_hid_var
from score_calc import get_rae_score_by_wd, get_rae_score_by_wd_dep
from dynamic_pooling import get_dynamic_pool
from text_process1 import line_processing, get_n_feature
import numpy as np
import pickle
def get_vect_data_by_dep(fname, pool_size, mtype, pf):
# # # print fname, 'file processing'
sent_fd = open(fname)
sents = sent_fd.read().split('\n')
slen = len(sents)
# slen = 3
schk = 20
scount = 0
asents_vect = []
asent_score = []
asents = []
nscore = []
nfeat = []
while scount < slen:
lscount = scount + schk if slen - scount > schk else slen
all_sents = []
score = []
try:
for sent in sents[scount:lscount]:
a = sent.split('\t')
if len(a) < 3:
continue
score.append(float(a[0]))
line1 = line_processing(a[1])
line2 = line_processing(a[2])
all_sents += [line1, line2]
nfeat.append(get_n_feature(line1, line2))
except IndexError:
pass
wds = extract_batchfeature_using_senna(all_sents)
wd_len = len(wds)
wd_count = 0
scor_count = 0
while wd_count < wd_len:
try:
temp_score, _, s_matrix = get_rae_score_by_wd_dep(wds[wd_count], wds[wd_count + 1],mtype=mtype)
except KeyError:
scor_count += 1
wd_count += 2
continue
except IndexError:
pass
sents_vect = get_dynamic_pool(s_matrix, pool_size=pool_size, pf=pf)
if not np.any(sents_vect):
scor_count += 1
wd_count += 2
continue
asents_vect.append(sents_vect)
asent_score.append(score[scor_count])
nscore.append(temp_score)
asents.append(all_sents[wd_count]+'\t'+all_sents[wd_count+1])
scor_count += 1
wd_count += 2
scount = lscount
# print scount,
# print
sent_fd.close()
return asents, asents_vect, asent_score, nscore, nfeat
def test_get_vect_data_by_dep(fsent, fpickle, fscore, fnfeat, pool_size, mtype, pf):
# print pf
# # # print fname, 'file processing'
asents_vect = []
asent_score = []
asents = []
nscore = []
nfeat = []
wds = pickle.load(open(fpickle,'rb'))
sents = pickle.load(open(fsent,'rb'))
scores = pickle.load(open(fscore,'rb'))
nfeats = pickle.load(open(fnfeat,'rb'))
for i in range(len(sents)):
temp_score, _, s_matrix = get_rae_score_by_wd_dep(wds[i][0], wds[i][1],mtype=mtype)
sents_vect = get_dynamic_pool(s_matrix, pool_size=pool_size, pf=pf)
if not np.any(sents_vect):
continue
asents_vect.append(sents_vect)
asent_score.append(scores[i])
nscore.append(temp_score)
asents.append(sents[i][0]+'\t'+sents[i][1])
nfeat.append(nfeats[i])
return asents, asents_vect, asent_score, nscore, nfeat
def get_vect_data(fname, pool_size, pf):
# # print fname, 'th sentence processing'
sent_fd = open(fname)
sents = sent_fd.read().split('\n')
slen = len(sents)
# slen = 3000
schk = 10
scount = 0
asents_vect = []
asent_score = []
nscore = []
asents = []
while scount < slen:
lscount = scount + schk if slen - scount > schk else slen
all_sents = []
score = []
try:
for sent in sents[scount:lscount]:
a = sent.split('\t')
if len(a) < 3:
continue
score.append(float(a[0]))
all_sents += [line_processing(a[1]), line_processing(a[2])]
except IndexError:
pass
wds = extract_batchfeature_using_senna(all_sents)
wd_len = len(wds)
wd_count = 0
scor_count = 0
while wd_count < wd_len:
try:
temp_score, _, s_matrix = get_rae_score_by_wd(wds[wd_count], wds[wd_count + 1], mtype='deep')
except KeyError:
scor_count += 1
wd_count += 2
continue
except IndexError:
pass
sents_vect = get_dynamic_pool(s_matrix, pool_size=pool_size, pf=pf)
if not np.any(sents_vect):
scor_count += 1
wd_count += 2
continue
asents_vect.append(sents_vect)
asent_score.append(score[scor_count])
nscore.append(temp_score)
asents.append(all_sents[wd_count]+'\t'+all_sents[wd_count+1])
scor_count += 1
wd_count += 2
scount = lscount
# # print scount,
# # print
sent_fd.close()
return asents, asents_vect, asent_score, nscore
def test_data_set_maker_by_wd(flag=None, base_dir = None, out_dir=None, pool_size=10, num_feat=1, stp=None, mtype='Normal', pf=None):
if flag == 'train':
v_csv_file = out_dir + 'train_vector_dataset.csv'
sent_file = out_dir + 'train_sent_dataset.txt'
nscore_txt = out_dir + 'training_orig_score.pickle'
src_dir = base_dir + 'train/'
tpickle = src_dir + 'msr_paraphrase_train' + str(stp) + '.pickle'
tsent= src_dir+ 'msr_paraphrase_trainsent'+ str(stp) + '.pickle'
tscore= src_dir+ 'msr_paraphrase_trainscore'+ str(stp) + '.pickle'
tnfeat= src_dir+ 'msr_paraphrase_trainnfeat'+ str(stp) + '.pickle'
elif flag == 'test':
v_csv_file = out_dir + 'test_vector_dataset.csv'
sent_file = out_dir + 'test_sent_dataset.csv'
nscore_txt = out_dir + 'test_orig_score.pickle'
src_dir = base_dir + 'test/'
tpickle = src_dir+'msr_paraphrase_test'+str(stp)+'.pickle'
tsent = src_dir + 'msr_paraphrase_testsent' + str(stp) + '.pickle'
tscore = src_dir + 'msr_paraphrase_testscore' + str(stp) + '.pickle'
tnfeat = src_dir + 'msr_paraphrase_testnfeat' + str(stp) + '.pickle'
if os.path.isfile(v_csv_file):
if open(v_csv_file,'r').readline():
# print "Already present :"
return v_csv_file, sent_file
data_csv_fd = open(v_csv_file,'w')
sents_fd = open(sent_file,'w')
all_nscore = []
all_nfeat = []
sents, sents_vect, score, nscore, nfeat = test_get_vect_data_by_dep(fpickle = tpickle,fsent=tsent, fscore=tscore, fnfeat=tnfeat, pool_size=pool_size, mtype=mtype, pf=pf)
all_nscore += score
all_nfeat += nfeat
csv_txt = ''
sent_txt = ''
for i in range(len(sents_vect)):
csv_txt += str(score[i])
sent_txt += sents[i]+'\n'
for j in sents_vect[i].reshape(pool_size*pool_size):
csv_txt += ','+str(j)
if num_feat == 1:
for j in nfeat[i]:
csv_txt += ',' + str(j)
csv_txt += '\n'
data_csv_fd.write(csv_txt)
sents_fd.write(sent_txt)
pickle.dump(all_nscore,open(nscore_txt, 'wb'))
data_csv_fd.close()
sents_fd.close()
return v_csv_file, sent_file
def data_set_maker_by_wd(flag=None, base_dir = None, out_dir=None, pool_size=10, num_feat=1, mtype='Normal', pf=None):
if flag == 'train':
v_csv_file = out_dir + 'train_vector_dataset.csv'
sent_file = out_dir + 'train_sent_dataset.txt'
nscore_txt = out_dir + 'training_orig_score.pickle'
src_dir = base_dir + 'train/'
file_list = os.listdir(src_dir)
elif flag == 'test':
v_csv_file = out_dir + 'test_vector_dataset.csv'
sent_file = out_dir + 'test_sent_dataset.csv'
nscore_txt = out_dir + 'test_orig_score.pickle'
src_dir = base_dir + 'test/'
file_list = os.listdir(src_dir)
if os.path.isfile(v_csv_file):
if open(v_csv_file,'r').readline():
# print "Already present :"
return v_csv_file, sent_file
data_csv_fd = open(v_csv_file,'w')
sents_fd = open(sent_file,'w')
all_nscore = []
all_nfeat = []
for i in range(len(file_list)):
sents, sents_vect, score, nscore, nfeat = get_vect_data_by_dep(src_dir +file_list[i], pool_size=pool_size, mtype=mtype, pf=pf)
all_nscore += score
all_nfeat += nfeat
csv_txt = ''
sent_txt = ''
for i in range(len(sents_vect)):
csv_txt += str(score[i])
sent_txt += sents[i]+'\n'
for j in sents_vect[i].reshape(pool_size*pool_size):
csv_txt += ','+str(j)
if num_feat == 1:
for j in nfeat[i]:
csv_txt += ',' + str(j)
csv_txt += '\n'
data_csv_fd.write(csv_txt)
sents_fd.write(sent_txt)
pickle.dump(all_nscore,open(nscore_txt, 'wb'))
data_csv_fd.close()
sents_fd.close()
return v_csv_file, sent_file
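# Pre-parses a raw MSR paraphrase file with SENNA and pickles the parses, the cleaned
# sentence pairs, the gold scores and the nfeat features next to the source file.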
def wd_making(fname,stp):
sent_fd = open(fname)
sents = sent_fd.read().rstrip(' |\n').split('\n')
count = 1
nfeat = []
wds = []
all_sents = []
score = []
for sent in sents:
a = sent.split('\t')
if len(a) < 3:
continue
score.append(float(a[0]))
line1 = line_processing(a[1])
line2 = line_processing(a[2])
all_sents.append([line1, line2])
nfeat.append(get_n_feature(line1, line2))
temp = extract_batchfeature_using_senna([line1, line2])
print count,
if len(temp) !=2:
print "not all sentences parsed !!"
pass
wds.append(temp)
count +=1
pickle.dump(wds,open(fname.split('.')[0]+str(stp)+'.pickle','wb'))
pickle.dump(all_sents,open(fname.split('.')[0]+'sent'+str(stp)+'.pickle','wb'))
pickle.dump(score,open(fname.split('.')[0]+'score'+str(stp)+'.pickle','wb'))
pickle.dump(nfeat,open(fname.split('.')[0]+'nfeat'+str(stp)+'.pickle','wb'))
return
import Global
if __name__=='__main__':
testf = '/media/zero/41FF48D81730BD9B/Final_Thesies/data/NN-dataset/MSRParaphraseCorpus/test/msr_paraphrase_test.txt'
trainf = '/media/zero/41FF48D81730BD9B/Final_Thesies/data/NN-dataset/MSRParaphraseCorpus/train/msr_paraphrase_train.txt'
Global.init()
# Global.init_wv_self(0,50)
# wd_making(testf,0)
# wd_making(trainf,0)
Global.init_wv_self(1,50)
wd_making(testf,1)
wd_making(trainf,1) | 2.15625 | 2 |
nemo_benchmark.py | KaySackey/Nemo | 9 | 12797908 | <reponame>KaySackey/Nemo<filename>nemo_benchmark.py<gh_stars>1-10
import sys
import timeit
from nemo.parser import NemoParser
from mako.template import Template
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
print 'A filename is required'
exit()
def nemo(str, debug=False):
NemoParser(debug=debug).parse(str)
# Return Nothing so mako won't render a result
return ''
def nemo_render(str, debug=False):
return NemoParser(debug=debug).parse(str)
mako_temp = Template(filename=filename,
input_encoding='utf-8',
output_encoding='utf-8',)
nemo_temp = Template(filename=filename,
preprocessor=nemo,
input_encoding='utf-8',
output_encoding='utf-8',)
nemo_temp_render = Template(filename=filename,
preprocessor=nemo_render,
input_encoding='utf-8',
output_encoding='utf-8',)
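# Three timers are compared: a plain Mako render, Nemo preprocessing alone (the
# preprocessor returns ''), and Nemo preprocessing followed by a full Mako render;
# each is averaged over 10000 runs.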
number = 10000
t_mako = timeit.Timer('mako_temp.render()', 'from __main__ import mako_temp')
t_nemo = timeit.Timer('nemo_temp.render()', 'from __main__ import nemo_temp')
t_nemo_render = timeit.Timer('nemo_temp_render.render()', 'from __main__ import nemo_temp_render')
mako_time = t_mako.timeit(number=number) / number
nemo_time = t_nemo.timeit(number=number) / number
nemo_time_render = t_nemo_render.timeit(number=number) / number
print 'Mako (full render w/o nemo): %.2f ms' % (1000 * mako_time)
print 'Nemo (w/o mako render): %.2f ms' % (1000 * nemo_time)
print 'Nemo (w/ mako render): %.2f ms' % (1000 * nemo_time_render)
| 2.625 | 3 |
main.py | piotrek-szczygiel/orchlang | 11 | 12797909 | import argparse
import sys
import copy
from graphviz import Digraph
from rply import LexingError, ParsingError
from lang.lexer import Lexer
from lang.parser import Parser
from lang.scope import Scope
lexer = Lexer()
parser = Parser(lexer.tokens)
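# execute() runs the whole pipeline: lex -> parse -> optional extra eval pass (the
# "Optimize" step) -> eval, and can additionally render the AST to ast.png via graphviz.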
def execute(scope, source, draw=False, lexer_output=False, opt=False):
try:
tokens = lexer.lex(source)
if lexer_output:
print("LEXER OUTPUT")
for token in copy.copy(tokens):
print(token)
print()
print("PROGRAM OUTPUT")
ast = parser.parse(tokens)
# Optimize
if opt:
ast.eval(True, scope)
result = ast.eval(False, scope)
# Draw AST graph
if draw:
g = Digraph()
ast.draw(g)
g.render("ast", format="png", view=True, cleanup=True)
return result
except ValueError as err:
print(err)
except LexingError:
print("Lexing error")
except ParsingError:
print("Parsing error")
def run_repl():
scope = Scope()
while True:
try:
source = input("> ")
result = execute(scope, source)
if result is not None:
print(result)
if scope.last_pop is not None:
scope.symbols_stack.insert(0, scope.last_pop)
except KeyboardInterrupt:
break
def run_file(path, draw=False, lexer_output=False):
scope = Scope()
with open(path, "r") as f:
source = f.read()
execute(scope, source, draw=draw, lexer_output=lexer_output)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("file", nargs="?", help="path to script")
arg_parser.add_argument(
"-a", "--ast", help="draw abstract syntax tree", action="store_true"
)
arg_parser.add_argument(
"-l", "--lexer", help="print lexer output", action="store_true"
)
args = arg_parser.parse_args()
if args.file:
run_file(args.file, draw=args.ast, lexer_output=args.lexer)
else:
run_repl()
| 2.609375 | 3 |
empty7v2.0.py | elecalle/Note-Taking_App | 0 | 12797910 | <filename>empty7v2.0.py
####################################################################################
# Version: January 2022
#
# Purpose:
# This is a note-taking app, useful for people working on several projects at
# the same time and who want to be able to update multiple documents at the same
# time and from the same window.
#
# Structure:
# There are six text widgets in total: 5 corresponding to 5 different projects, 1
# ("Misc", from "Miscellaneous"for taking impromptu notes on anything else that is
# not related to those 5 projects.
# You write your notes on the desired text widget. Once you're done, you click on
# the green tick button, which saves what you've written in a corresponding .txt
# file, and automatically clears the text widget for you. On the txt file, entries
# appear together with the date and time of writing, unless there is already an entry
# on that txt file with the same date, in which case only the time of writing is
# indicated. Above every widget there's a button with the name of the corresponding
# project. If clicked, that button opens the .txt document associated with that
# widget.
# The red cross button clears the text widget (note: I had that linked to a function in
# a previous version of this code, then I removed the function but left the button,
# and I still haven't gotten around to linking it again)
#
####################################################################################
import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
from tkinter import filedialog
import os
import ctypes
import functions
from functools import partial
import pdb; #pdb.set_trace()
ctypes.windll.shcore.SetProcessDpiAwareness(1) # this improves resolution of GUI
root = tk.Tk()
root.title("Elena's Notes")
#root.geometry('200x150') # this adjusts the size of the GUI to 200X150
# I want the border of the text widgets to change color if the cursor is over those widgets,
# so I am using two base64-encoded gifs of a rounded-corner square, one is green (accent color),
# the other one is black (no-selection color).
# CALLING A FUNCTION TO USE A BASE64-ENCODED GIF OF A ROUNDED-CORNER SQUARE WITH A GREEN BORDER
focusBorderImageData = functions.focusborder_string()
# CALLING A FUNCTION TO USE A BASE64-ENCODED GIF OF A ROUNDED-CORNER SQUARE WITH A BLACK BORDER
borderImageData = functions.border_string()
style = ttk.Style()
borderImage = tk.PhotoImage("borderImage", data=borderImageData)
focusBorderImage = tk.PhotoImage("focusBorderImage", data=focusBorderImageData)
style.element_create("RoundedFrame",
"image", borderImage,
("focus", focusBorderImage),
border=20, sticky="nsew")
style.layout("RoundedFrame", [("RoundedFrame", {"sticky": "nsew"})])
style.configure('TFrame', background= 'black')
# CREATING 2 FRAMES, A LEFT AND A RIGHT FRAME, WHERE TO HOST 3 TEXT WIDGETS EACH
frameL = tk.Frame(root, bg="#BAD9D0") # #D4F1B2 is a light green color
frameR = tk.Frame(root, bg="#BAD9D0")
# DECLARING LISTS TO POPULATE WITH OBJECTS THAT WILL BE ACCESSED LATER USING THE INDEX ("i")
frames = []
texts = []
# SETTING BUTTON IMAGES: MAIN BUTTON
img = functions.image(3)
# SETTING BUTTON IMAGES: SECONDARY BUTTONS
img_cross = Image.open('cross1.png')
img_cross = img_cross.resize((30,30), Image.ANTIALIAS)
img_photo_crs = ImageTk.PhotoImage(img_cross)
img_check = Image.open('check1.png')
img_check = img_check.resize((30,30), Image.ANTIALIAS)
img_photo_chk = ImageTk.PhotoImage(img_check)
for i in range(6):
# THE FIRST 3 SUBFRAMES GO TO THE LEFT
if i <= 2:
frame = ttk.Frame(frameL, style="RoundedFrame", padding=10)
# THE LAST 3 SUBFRAMES GO TO THE RIGHT
else:
frame = ttk.Frame(frameR, style="RoundedFrame", padding=10)
# APPEND EVERY SINGLE FRAME TO THE LIST TO ACCESS THEM LATER
frames.append(frame)
# CREATING THE TEXT WIDGETS (I.E. THE INPUT FIELDS)
text = tk.Text(frames[i], borderwidth = 0, highlightthickness = 0, wrap = "word", width = 10, height = 5)
text.pack(fill = "both", expand = True)
text.bind("<FocusIn>", lambda event: frames[i].state(["focus"]))
text.bind("<FocusOut>", lambda event: frames[i].state(["!focus"]))
text.insert("end", "")
texts.append(text)
# CREATING THE BUTTONS FOLLOWING THE GIVEN ORDER CHECKING THE LOOP INDEX
if i == 0:
main_button = functions.create_buttons(frameL, functions.openprobus, "Probus", i)
elif i == 1:
main_button = functions.create_buttons(frameL, functions.opensage, "Sage", i)
elif i == 2:
main_button = functions.create_buttons(frameL, functions.openJALL, "JALL", i) #Journal African Languages & Linguistics
elif i == 3:
main_button = functions.create_buttons(frameR, functions.openheaviness, "Heavi", i) # Topicalization Study
elif i == 4:
main_button = functions.create_buttons(frameR, functions.openacode, "ACoDe", i)
elif i == 5:
main_button = functions.create_buttons(frameR, functions.openmisc, "MISC", i)
main_button.config(image = img)
main_button.pack(pady = 5)
# SHOWING THE FRAMES (MOVE TO BOTTOM?)
frames[i].pack(side = tk.TOP, fill = "both", expand = True, padx = 30)
# ADDING THE BUTTONS INSIDE THE INPUT FIELDS
# PARTIAL (IN "command") ALLOW US TO SET A COMMAND FOR THE BUTTON USING THE INDEX ("i") AS ARGUMENT
button_check = tk.Button(frames[i], image = img_photo_chk, command = partial(functions.compile, texts , i), background = "white", borderwidth = 0, height = 30, width = 30)
button_check.image = img_photo_chk
button_check.pack(side = tk.LEFT)
button_cross = tk.Button(frames[i], image = img_photo_crs, background = "white", borderwidth = 0, height = 30, width = 30)
button_cross.image = img_photo_crs
button_cross.pack(side = tk.RIGHT)
root.configure(background="#BAD9D0") #BAD9D0 #cfffff #D4F1B2
frameL.pack(side=tk.LEFT, fill="both", expand=True) # side=tk.LEFT places frames side by side
frameR.pack(side=tk.LEFT, fill="both", expand=True) # side=tk.LEFT places frames side by side
# SET FOCUS TO THE FIRST FRAME
frames[0].focus_set()
font_tuple = ("Garamond", 14)
root.mainloop() | 3.515625 | 4 |
estimators/train_nocs.py | ArvindSoma/HumanEstimation | 0 | 12797911 | <reponame>ArvindSoma/HumanEstimation
"""
Train NOC class
"""
import os
import cv2
from recordclass import recordclass
from torchstat import stat
from math import log10
from models.networks import *
from utils.common import *
def visualize(batch, output, writer, name, niter, foreground=False, test=False):
output_image = output[0]
if foreground:
output_image = output[0][1]
foreground = output[0][0]
write_image(writer, name="{}/Ground Truth Foreground".format(name),
sample=((1 - batch['background']).long() * 2 - 1), niter=niter)
write_image(writer, name="{}/Output_Foreground".format(name),
sample=((torch.softmax(foreground, 1)[:, 1:2, :, :] > 0.5).long() * 2) - 1, niter=niter)
final_noc = output_image * (torch.softmax(output[0][0], 1)[:, 1:2, :, :] > 0.5).float()
write_image(writer, name="{}/Output_Final_NOC".format(name), sample=(final_noc * 2) - 1, niter=niter)
writer.add_scalar('L1-Loss', output[1].total_loss, niter)
writer.add_scalar('NOC-Loss', output[1].NOC_loss, niter)
writer.add_scalar('Background-Loss', output[1].background_loss, niter)
write_image(writer, name="{}/Output_NOC".format(name), sample=(output_image * 2) - 1, niter=niter)
write_image(writer, name="{}/Input".format(name), sample=batch['image'], niter=niter)
if not test:
write_image(writer, name="{}/Ground Truth NOC".format(name), sample=batch['noc_image'], niter=niter)
return True
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
class TrainNOCs:
def __init__(self, save_dir='Trial', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_downs=5, lr=2e-4,
betas=(0.5, 0.999), batch_size=8, checkpoint=None, model_type='res', output_heads='two'):
if output_heads == 'one':
self.forward = self.forward_sparse
self.seg_net = ResNetGenerator(out_channels=3, last_layer=nn.ReLU())
if model_type == 'res_unet':
self.seg_net = ResUnetGenerator(output_nc=3, last_layer=nn.ReLU())
# self.seg_net = UnetGenerator(input_nc=3, output_nc=3, num_downs=num_downs,
# use_dropout=False, norm_layer=torch.nn.BatchNorm2d,
# last_layer=nn.LeakyReLU(0.2))
self.foreground = False
elif output_heads == 'two':
self.forward = self.forward_2_heads
self.seg_net = ResNet2HeadGenerator(out_channels=3, last_layer=nn.ReLU())
if model_type == 'res_unet':
self.seg_net = ResUnet2HeadGenerator(output_nc=3, last_layer=nn.ReLU())
# self.seg_net = Unet2HeadGenerator(input_nc=3, output_nc=3, num_downs=num_downs,
# use_dropout=False, norm_layer=torch.nn.BatchNorm2d,
# last_layer=nn.ReLU())
self.foreground = True
else:
self.foreground = None
print("Error! Unknown number of heads!")
exit(256)
print("Using model {}.".format(self.seg_net.__class__.__name__))
# self.seg_net.apply(init_weights)
# stat(model=self.seg_net, input_size=(3, 256, 256))
self.sparse_l1 = torch.nn.SmoothL1Loss(reduction='none')
self.l1 = torch.nn.SmoothL1Loss()
self.l2 = torch.nn.MSELoss()
self.bce = torch.nn.BCEWithLogitsLoss()
self.seg_net.cuda()
self.sparse_l1.cuda()
self.l1.cuda()
self.bce.cuda()
self.seg_net.train()
self.criterion_selection = None
self.lr = lr
self.optimizer = torch.optim.Adam(params=self.seg_net.parameters(), lr=lr, betas=betas)
# self.optimizer = torch.optim.SGD(params=self.seg_net.parameters(), lr=lr, momentum=0.5)
if checkpoint is not None:
checkpoint = torch.load(checkpoint)
self.seg_net.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.save_path = os.path.join("../saves", save_dir)
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.batch_size = batch_size
self.mean = mean
self.std = std
self.loss_tuple = recordclass('losses', ('total_loss', 'NOC_loss', 'background_loss', 'NOC_mse'))
        self.ply_start = '''ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header\n'''
self.un_norm = UnNormalize(mean=self.mean, std=self.std)
def criterion_mse(self, output, batch):
batch_size = batch['num_points'].shape[0]
batch['num_points'] = batch['num_points'].long()
num = batch['num_points'].view(batch_size, 1)
batch['yx_loc'] = batch['yx_loc'].long()
sub = 0
for idx in range(batch_size):
if num[idx, 0] == 0:
batch_size -= 1
continue
noc_points = batch['noc_points'][idx, :num[idx, 0], :]
noc_points = torch.transpose(noc_points, 0, 1)
sub += self.l2(output[idx, :, batch['yx_loc'][idx, :num[idx, 0], 0], batch['yx_loc'][idx, :num[idx, 0], 1]],
noc_points)
return sub / batch_size
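    # Smooth-L1 evaluated only at the annotated pixel locations (yx_loc / noc_points),
    # averaged over the batch elements that actually have annotations.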
def criterion_l1_sparse(self, output, batch):
batch_size = batch['num_points'].shape[0]
# num_points[num_points == 0] = 1
# sub = torch.abs(output - target)
# sub = self.sparse_l1(output, target)
# sub_batch = torch.sum(sub, dim=(1, 2, 3))
# sub_batch = sub_batch / (num_points * 3)
batch['num_points'] = batch['num_points'].long()
num = batch['num_points'].view(batch_size, 1)
batch['yx_loc'] = batch['yx_loc'].long()
sub = 0
for idx in range(batch_size):
if num[idx, 0] == 0:
batch_size -= 1
continue
noc_points = batch['noc_points'][idx, :num[idx, 0], :]
noc_points = torch.transpose(noc_points, 0, 1)
sub += self.l1(output[idx, :, batch['yx_loc'][idx, :num[idx, 0], 0], batch['yx_loc'][idx, :num[idx, 0], 1]],
noc_points)
return sub / batch_size
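    # Single-head forward pass: sparse NOC regression plus an L1 penalty pushing the
    # same output map towards zero on background pixels.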
def forward_sparse(self, batch):
total_loss = 0
output = self.seg_net(batch['image'])
# if 'mask_image' in batch.keys():
# masked_output = output * (batch['mask_image'] > 0).float()
noc_loss = self.criterion_l1_sparse(output=output, batch=batch)
total_loss += noc_loss * 10
# loss = self.l1(masked_output, batch['noc_image'])
background_target = torch.zeros_like(output)
background_loss = self.l1(output * batch['background'], background_target)
total_loss += background_loss
mse = self.criterion_mse(output=output, batch=batch)
losses = self.loss_tuple(total_loss=total_loss, NOC_loss=noc_loss,
background_loss=background_loss, NOC_mse=mse)
return output, losses
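    # Two-head forward pass: one head regresses NOC coordinates, the other predicts a
    # two-channel background/foreground mask trained with BCE.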
def forward_2_heads(self, batch):
total_loss = 0
output = self.seg_net(batch['image'])
# masked_output = output[1] * (batch['mask_image'] > 0).float()
noc_loss = self.criterion_l1_sparse(output=output[1], batch=batch)
total_loss += noc_loss * 70
# print(torch.max(((1 - batch['background'][:, 0:1, :, :]) > 0).float()))
foreground = (1 - batch['background'][:, 0:2, :, :]).float()
foreground[:, 0, :, :] = batch['background'][:, 0, :, :]
background_loss = self.bce(input=output[0], target=foreground)
total_loss += background_loss
mse = self.criterion_mse(output=output[1], batch=batch)
losses = self.loss_tuple(total_loss=total_loss, NOC_loss=noc_loss,
background_loss=background_loss, NOC_mse=mse)
return output, losses
def train(self, batch):
output, losses = self.forward(batch=batch)
# output = self.seg_net(batch['image'])
#
# # if 'mask_image' in batch.keys():
# masked_output = output * batch['mask_image']
# # loss = self.criterion_l1(output=masked_output, batch=batch)
# l1_loss = self.loss_l1(masked_output, batch['noc_image']) / (
# torch.sum(batch['num_points']) + 1 * (torch.sum(batch['num_points'] == 0).float()))
#
self.optimizer.zero_grad()
losses.total_loss.backward()
self.optimizer.step()
return output, losses
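    # Writes, per batch element, the predicted and ground-truth NOC points as coloured
    # ASCII PLY files and saves the corresponding input image as a PNG.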
def write_noc_ply(self, output, batch, idx, ply_dir):
batch_size = batch['num_points'].shape[0]
idx = idx * self.batch_size
# masked_output = output[1] * (batch['mask_image'] > 0).float()
batch['num_points'] = batch['num_points'].long()
num = batch['num_points'].view(batch_size, 1)
for pdx in range(batch_size):
image = batch['image'][pdx, :, batch['yx_loc'][pdx, :num[pdx, 0], 0], batch['yx_loc'][pdx, :num[pdx, 0], 1]]
if self.foreground:
out_arr = output[1]
else:
out_arr = output
output_arr = out_arr[pdx, :, batch['yx_loc'][pdx, :num[pdx, 0], 0], batch['yx_loc'][pdx, :num[pdx, 0], 1]]
noc_gt = batch['noc_points'][pdx, :num[pdx, 0]].cpu().numpy()
image = image.cpu().numpy().T
image = image[:, [2, 1, 0]] * 255
output_arr = output_arr.cpu().numpy().T
start = self.ply_start.format(num[pdx, 0].item())
concatenated_out = np.concatenate((output_arr, image), axis=1)
concatenated_gt = np.concatenate((noc_gt, image), axis=1)
image = batch['image'][pdx, :, :, :].cpu().numpy()
image = image.transpose(1, 2, 0) * 255
cv2.imwrite(os.path.join(ply_dir, 'Output_{}.png'.format(idx + pdx)), image.astype('uint8'))
with open(os.path.join(ply_dir, 'Output_{}.ply'.format(idx + pdx)), 'w') as write_file:
write_file.write(start)
np.savetxt(write_file, concatenated_out, fmt=' '.join(['%0.8f'] * 3 + ['%d'] * 3))
with open(os.path.join(ply_dir, 'Ground_truth_{}.ply'.format(idx + pdx)), 'w') as write_file:
write_file.write(start)
np.savetxt(write_file, concatenated_gt, fmt=' '.join(['%0.8f'] * 3 + ['%d'] * 3))
def validate(self, test_loader, niter, test_writer, write_ply=False, ply_dir=''):
total_losses = self.loss_tuple(0, 0, 0, 0)
if write_ply:
if not os.path.exists(ply_dir):
os.mkdir(ply_dir)
with torch.no_grad():
self.seg_net.eval()
for idx, batch in enumerate(test_loader):
for keys in batch:
batch[keys] = batch[keys].float().cuda()
output, losses = self.forward(batch=batch)
# View NOC as PLY
if write_ply:
self.write_noc_ply(output=output, batch=batch, idx=idx, ply_dir=ply_dir)
for jdx, val in enumerate(losses):
                    if jdx == 3:
total_losses[jdx] += 10 * log10(1 / losses[jdx].item())
else:
total_losses[jdx] += losses[jdx].item()
# total_losses.total_loss += losses.total_loss.item()
if idx == (len(test_loader) - 1):
# print(len(test_loader))
for jdx, val in enumerate(total_losses):
total_losses[jdx] /= len(test_loader)
print("Validation loss: {}".format(total_losses.total_loss))
# batch['image'] = self.un_norm(batch['image'])
batch['image'] = batch['image'] * 2 - 1
test_writer.add_scalar('PSNR', total_losses.NOC_mse, niter)
visualize(writer=test_writer, batch=batch, output=(output, total_losses),
name="Validation", niter=niter, foreground=self.foreground)
def test(self, test_loader, niter, test_writer):
with torch.no_grad():
self.seg_net.eval()
for idx, batch in enumerate(test_loader):
for keys in batch:
batch[keys] = batch[keys].float().cuda()
output = self.seg_net(batch['image'])
batch['image'] = batch['image'] * 2 - 1
visualize(writer=test_writer, batch=batch, output=(output, self.loss_tuple(0, 0, 0, 0)),
name="Validation", niter=niter, foreground=self.foreground, test=True)
def run(self, opt, data_loader, writer, epoch=0):
total_losses = self.loss_tuple(0, 0, 0, 0)
data_length = len(data_loader.train)
self.seg_net.train()
if epoch > 0 and epoch % 15 == 0:
self.lr *= 0.9
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
for idx, batch in enumerate(data_loader.train):
for keys in batch:
batch[keys] = batch[keys].float().cuda()
# batch['image'] = batch['image'] * 2 - 1
output, losses = self.train(batch=batch)
# print(batch['num_points'], torch.sum(batch['num_points']))
for jdx, val in enumerate(losses):
                if jdx == 3:
total_losses[jdx] += 10 * log10(1 / losses[jdx].item())
else:
total_losses[jdx] += losses[jdx].item()
# total_loss += loss.item()
niter = idx + (epoch * data_length)
if idx % opt.log_iter == 0:
print("Epoch: {} | Iteration: {} | Train Loss: {}".format(epoch, niter, losses.total_loss.item()))
batch['image'] = batch['image'] * 2 - 1
visualize(writer=writer.train, batch=batch, output=(output, losses),
name="Train Total", niter=niter, foreground=self.foreground)
# batch['image'] = self.un_norm(batch['image'])
# visualize(writer=writer.train, batch=batch, output=(output, loss),
# name="Train", niter=niter)
# Last Iteration
if idx == (data_length - 1):
for jdx, val in enumerate(total_losses):
total_losses[jdx] /= data_length
print("Epoch: {} | Final Iteration: {} | Train Loss: {}".format(epoch, niter,
total_losses.total_loss))
batch['image'] = batch['image'] * 2 - 1
writer.train.add_scalar('PSNR', total_losses.NOC_mse, niter)
torch.save({'epoch': epoch,
'model': self.seg_net.state_dict(),
'optimizer': self.optimizer.state_dict()},
os.path.join(self.save_path, 'save_{}.pth'.format(niter)))
# visualize(writer=writer.train, batch=batch, output=(output, total_losses),
# name="Train Total", niter=(epoch + 1) * data_length)
# self.test(test_loader=data_loader.test, test_writer=writer.test, niter=(epoch + 1) * data_length)
self.validate(test_loader=data_loader.validate, test_writer=writer.validate, niter=(epoch + 1) * data_length + 1)
print("*" * 100)
if __name__ == "__main__":
noc_class = TrainNOCs()
| 2.421875 | 2 |
function/python/brightics/function/textanalytics/__init__.py | GSByeon/studio | 0 | 12797912 | <gh_stars>0
from .ngram import ngram
from .lda import lda
from .tfidf import tfidf
| 1.015625 | 1 |
src/ghaudit/__main__.py | scality/ghaudit | 1 | 12797913 | import logging
import os
from typing import Literal, Union
from ghaudit.cli import cli
LOGFILE = os.environ.get("LOGFILE")
LOGLEVEL = os.environ.get("LOGLEVEL", "ERROR")
# pylint: disable=line-too-long
LOG_FORMAT = "{asctime} {levelname:8s} ghaudit <{filename}:{lineno} {module}.{funcName}> {message}" # noqa: E501
STYLE = "{" # type: Union[Literal["%"], Literal["{"], Literal["$"]]
def main() -> None:
if LOGFILE:
handler = logging.FileHandler(LOGFILE)
formatter = logging.Formatter(LOG_FORMAT, style=STYLE)
handler.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(LOGLEVEL)
root.addHandler(handler)
else:
logging.basicConfig(level=LOGLEVEL, format=LOG_FORMAT, style=STYLE)
# pylint: disable=no-value-for-parameter
cli()
if __name__ == "__main__":
main()
| 2.375 | 2 |
crn_mc/mesh.py | elevien/crn-mc | 0 | 12797914 | import numpy as np
class Mesh:
""" Contains all the information about the spatial domain """
def __init__(self,dimension,topology,geometry):
self.Nvoxels = len(topology)
self.dimension = dimension
        self.topology = topology # adjacency matrix (numpy array), 0 along main diagonal, 1 elsewhere
# only really works for regular grids
self.geometry = geometry # numpy array of Nvoxels pairs (volume,x,(y,(z)))
def get_coarseMesh_voxel(voxel,coupling):
# returns the coarse mesh voxel associated with
# voxel by the coupling
# by convention I take the coarse mesh voxel to by the smallest
# index coupled to voxel according to coupling
i = 0
while coupling[voxel,i]<1:
i = i+1
return i
def make_lattice1d(Nx,L):
# generates uniform 1d lattice on [0,L]
topology = np.zeros((Nx,Nx))
d = np.ones(Nx-1)
topology = np.diag(d,1)+np.diag(d,-1)
geometry = np.zeros((Nx,2))
h = L/Nx
geometry[:,0] = h*np.ones(Nx)
geometry[:,1] = np.linspace(0,L-h,Nx)
mesh = Mesh(1,topology,geometry)
return mesh
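# Same uniform 1d lattice plus a block-diagonal coupling matrix grouping the Nx voxels
# into coarse cells of J consecutive fine voxels each.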
def make_lattice1d_coupled(Nx,L,J):
mesh = make_lattice1d(Nx,L)
coupling = np.zeros((Nx,Nx))
for i in range(int(Nx/J)):
coupling[i*J:(i+1)*J,i*J:(i+1)*J] = np.ones((J,J))
return mesh,coupling
# need to implement
def make_lattice2d(Nx,Ny,Lx,Ly):
topology = np.zeros((Nx*Ny,Nx*Ny))
d1 = np.ones(Nx-1)
d2 = np.ones(Nx*Ny-Ny)
for i in range(Ny):
topology[i*Ny:(i+1)*Ny,i*Ny:(i+1)*Ny] = np.diag(d1,1)+np.diag(d1,-1)
topology = topology + np.diag(d2,Nx)+np.diag(d2,-Nx)
geometry = np.zeros((Nx*Ny,2))
hx = Nx/Lx
hy = Ny/Ly
#geometry[:,0] = h*np.ones(Nx)
#geometry[:,1] = linspace(0,L-h,Nx)
mesh = Mesh(1,topology,geometry)
return mesh
def make_lattice3d(Nx,Ny):
return None
| 3.171875 | 3 |
css-test/launch.py | savithruml/contrail-provisioning-tool | 0 | 12797915 | <reponame>savithruml/contrail-provisioning-tool
from flask import Flask, flash, redirect, render_template, request, session, abort, Response
import os, subprocess
from shelljob import proc
application = Flask(__name__)
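# /provision pulls the requested contrail build onto the target node over ssh, copies the
# uploaded testbed file across, installs the server-manager package and streams the
# command output back to the browser.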
@application.route('/provision', methods=['POST', 'GET'])
def provision():
target_node_ip = str(request.form['target-ip'])
target_node_password = str(request.form['target-password'])
base_image = str(request.form['image'])
contrail_package_version = str(request.form['package']).split('contrail-')[1]
contrail_package_release = str(request.form['release'])
contrail_package_build = str(request.form['build'])
testbed_file = request.files['testbed-file'].read()
localfile = open("testbed.py", "w")
localfile.write(testbed_file)
localfile.close()
if request.form["commit"] == "Provision":
def generate():
for line in (subprocess.check_output("ssh root@{4} wget http://10.84.5.120/github-build/R{0}/{1}/{2}/{3}/artifacts/contrail-cloud-docker_4.0.0.0-{1}-{3}.tgz && \
ssh root@{4} wget http://10.84.5.120/github-build/R{0}/{1}/{2}/{3}/artifacts/contrail-server-manager-installer_4.0.0.0-{1}~{3}_all.deb && \
ssh root@{4} truncate -s 0 /etc/apt/sources.list && \
scp testbed.py root@{4}:~/ && \
ssh root@{4} dpkg -i contrail-server-manager*deb". \
format(contrail_package_version,contrail_package_build,base_image,contrail_package_release,target_node_ip), shell=True)).splitlines():
yield line + '\n'
os.remove("testbed.py")
return Response( generate(), mimetype= 'text/html' )
@application.route('/login', methods=['POST', 'GET'])
def login():
usr_name = str(request.form['username'])
usr_pass = str(request.form['password'])
if usr_name == 'admin' and usr_pass == '<PASSWORD>':
return render_template('provision.html')
else:
return render_template('login-fail.html')
@application.route('/')
def main():
return render_template('login.html')
if __name__ == "__main__":
application.run(debug=True, host='0.0.0.0', port=9000)
| 2.1875 | 2 |
python/snapy/netsnmp/unittests/test_netsnmp.py | marineam/nagcat | 0 | 12797916 | # snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
"""Container for async results"""
value = None
def set_result(value, result):
result.value = value
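# The async API takes a (callback, user-data) pair; set_result stores the reply on the
# Result holder so tests can inspect it after session.wait().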
class TestSessionV1(TestCase):
version = "1"
bulk = False
basics = [
(OID(".1.3.6.1.4.2.1.1"), 1),
(OID(".1.3.6.1.4.2.1.2"), -1),
(OID(".1.3.6.1.4.2.1.3"), 1),
(OID(".1.3.6.1.4.2.1.4"), "test value"),
]
def setUpSession(self, address):
self.session = Session(
version=self.version,
community="public",
peername=address,
_use_bulk=self.bulk)
self.session.open()
def tearDownSession(self):
self.session.close()
def test_sget(self):
result = self.session.sget([x for x,v in self.basics])
self.assertEquals(result, self.basics)
return self.finishGet()
def test_get_small(self):
result = Result()
self.session.get([x for x,v in self.basics], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishGet()
def test_get_big(self):
oids = []
for i in xrange(1, 100):
oids.append(OID((1,3,6,1,4,2,4,i)))
result = Result()
self.session.get(oids, set_result, result)
self.session.wait()
result = dict(result.value)
for oid in oids:
assert oid in result
assert result[oid] == "data data data data"
return self.finishGet()
def test_walk_tree(self):
result = Result()
self.session.walk([".1.3.6.1.4.2.1"], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishWalk()
def test_walk_leaf(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result)
self.session.wait()
self.assertEquals(result.value, [(oid, 1)])
return self.finishGet()
def test_walk_strict(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result, strict=True)
self.session.wait()
self.assertEquals(result.value, [])
return self.finishStrictWalk()
def test_sysDescr(self):
result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")])
self.assert_(result)
self.assertIsInstance(result[0][1], str)
self.assert_(len(result[0][1]) > 0)
return self.finishGet()
class TestSessionV2c(TestSessionV1):
version = "2c"
def test_hrSystemDate(self):
# This is a special string that gets formatted using the
# MIB's DISPLAY-HINT value. Also, strip off everything
# other than the date and hour to avoid a race condition.
# And one more quirk, these dates are not zero padded
# so we must format the date manually, whee...
now = time.localtime()
now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3])
result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")])
self.assert_(result)
value = result[0][1].split(':', 1)[0]
self.assertEquals(value, now)
return self.finishGet()
class TestSessionV2cBulk(TestSessionV2c):
bulk = True
class TestTimeoutsV1(unittest.TestCase):
version = "1"
def setUp(self):
self.session = Session(
version=self.version,
community="public",
peername="udp:127.0.0.1:9",
retries=0, timeout=0.1)
self.session.open()
def test_sget(self):
self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"])
def test_get(self):
result = Result()
self.session.get([".1.3.6.1.4.2.1.1"], set_result, result)
self.session.wait()
assert isinstance(result.value, SnmpTimeout)
def tearDown(self):
self.session.close()
class TestTimeoutsV2c(TestTimeoutsV1):
version = "2c"
class TestOID(unittest.TestCase):
def test_oid_name(self):
oid = OID("1.3.6.1.2.1.1.1.0")
self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0"))
self.assertEquals(oid, OID("sysDescr.0"))
| 2.1875 | 2 |
adafruit_rgb_display/rgb.py | caternuson/Adafruit_CircuitPython_RGB_Display | 0 | 12797917 | # The MIT License (MIT)
#
# Copyright (c) 2017 <NAME> and Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_rgb_display.rgb`
====================================================
Base class for all RGB Display devices
* Author(s): <NAME>, <NAME>
"""
import time
try:
import struct
except ImportError:
import ustruct as struct
import adafruit_bus_device.spi_device as spi_device
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_RGB_Display.git"
# This is the size of the buffer to be used for fill operations, in 16-bit
# units.
try:
# If we're on CPython, try to set as large as possible
import platform
if "CPython" in platform.python_implementation():
# check for FT232H special case
try:
import os
if os.environ['BLINKA_FT232H']:
# we are limited by pyftdi's max SPI payload
from pyftdi.spi import SpiController
_BUFFER_SIZE = SpiController.PAYLOAD_MAX_LENGTH // 2 # max bytes / bytes per pixel
except KeyError:
# otherwise set it to blit the whole thing
_BUFFER_SIZE = 320 * 240
else:
# in case CircuitPython ever implements platform
_BUFFER_SIZE = 256
except ImportError:
# Otherwise set smaller MCU friendly size
_BUFFER_SIZE = 256
def color565(r, g=0, b=0):
"""Convert red, green and blue values (0-255) into a 16-bit 565 encoding. As
a convenience this is also available in the parent adafruit_rgb_display
package namespace."""
try:
r, g, b = r # see if the first var is a tuple/list
except TypeError:
pass
return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3
class DummyPin:
"""Can be used in place of a ``DigitalInOut()`` when you don't want to skip it."""
def deinit(self):
"""Dummy DigitalInOut deinit"""
pass
def switch_to_output(self, *args, **kwargs):
"""Dummy switch_to_output method"""
pass
def switch_to_input(self, *args, **kwargs):
"""Dummy switch_to_input method"""
pass
@property
def value(self):
"""Dummy value DigitalInOut property"""
pass
@value.setter
def value(self, val):
pass
@property
def direction(self):
"""Dummy direction DigitalInOut property"""
pass
@direction.setter
def direction(self, val):
pass
@property
def pull(self):
"""Dummy pull DigitalInOut property"""
pass
@pull.setter
def pull(self, val):
pass
class Display: #pylint: disable-msg=no-member
"""Base class for all RGB display devices
:param width: number of pixels wide
:param height: number of pixels high
"""
_PAGE_SET = None
_COLUMN_SET = None
_RAM_WRITE = None
_RAM_READ = None
_X_START = 0 # pylint: disable=invalid-name
_Y_START = 0 # pylint: disable=invalid-name
_INIT = ()
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
def __init__(self, width, height):
self.width = width
self.height = height
self.init()
def init(self):
"""Run the initialization commands."""
for command, data in self._INIT:
self.write(command, data)
#pylint: disable-msg=invalid-name,too-many-arguments
def _block(self, x0, y0, x1, y1, data=None):
"""Read or write a block of data."""
self.write(self._COLUMN_SET, self._encode_pos(x0 + self._X_START, x1 + self._X_START))
self.write(self._PAGE_SET, self._encode_pos(y0 + self._Y_START, y1 + self._Y_START))
if data is None:
size = struct.calcsize(self._DECODE_PIXEL)
return self.read(self._RAM_READ,
(x1 - x0 + 1) * (y1 - y0 + 1) * size)
self.write(self._RAM_WRITE, data)
return None
#pylint: enable-msg=invalid-name,too-many-arguments
def _encode_pos(self, x, y):
"""Encode a postion into bytes."""
return struct.pack(self._ENCODE_POS, x, y)
def _encode_pixel(self, color):
"""Encode a pixel color into bytes."""
return struct.pack(self._ENCODE_PIXEL, color)
def _decode_pixel(self, data):
"""Decode bytes into a pixel color."""
return color565(*struct.unpack(self._DECODE_PIXEL, data))
def pixel(self, x, y, color=None):
"""Read or write a pixel at a given position."""
if color is None:
return self._decode_pixel(self._block(x, y, x, y))
if 0 <= x < self.width and 0 <= y < self.height:
self._block(x, y, x, y, self._encode_pixel(color))
return None
def image(self, img, rotation=0):
"""Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size."""
if not img.mode in ('RGB', 'RGBA'):
raise ValueError('Image must be in mode RGB or RGBA')
if rotation not in (0, 90, 180, 270):
raise ValueError('Rotation must be 0/90/180/270')
if rotation != 0:
img = img.rotate(rotation, expand=True)
imwidth, imheight = img.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display ({0}x{1}).' \
.format(self.width, self.height))
pixels = bytearray(self.width * self.height * 2)
# Iterate through the pixels
for x in range(self.width): # yes this double loop is slow,
for y in range(self.height): # but these displays are small!
pix = color565(img.getpixel((x, y)))
pixels[2*(y * self.width + x)] = pix >> 8
pixels[2*(y * self.width + x) + 1] = pix & 0xFF
#print([hex(x) for x in pixels])
self._block(0, 0, self.width-1, self.height - 1, pixels)
#pylint: disable-msg=too-many-arguments
def fill_rectangle(self, x, y, width, height, color):
"""Draw a rectangle at specified position with specified width and
height, and fill it with the specified color."""
x = min(self.width - 1, max(0, x))
y = min(self.height - 1, max(0, y))
width = min(self.width - x, max(1, width))
height = min(self.height - y, max(1, height))
self._block(x, y, x + width - 1, y + height - 1, b'')
chunks, rest = divmod(width * height, _BUFFER_SIZE)
pixel = self._encode_pixel(color)
if chunks:
data = pixel * _BUFFER_SIZE
for _ in range(chunks):
self.write(None, data)
self.write(None, pixel * rest)
#pylint: enable-msg=too-many-arguments
def fill(self, color=0):
"""Fill the whole display with the specified color."""
self.fill_rectangle(0, 0, self.width, self.height, color)
def hline(self, x, y, width, color):
"""Draw a horizontal line."""
self.fill_rectangle(x, y, width, 1, color)
def vline(self, x, y, height, color):
"""Draw a vertical line."""
self.fill_rectangle(x, y, 1, height, color)
class DisplaySPI(Display):
"""Base class for SPI type devices"""
#pylint: disable-msg=too-many-arguments
def __init__(self, spi, dc, cs, rst=None, width=1, height=1,
baudrate=12000000, polarity=0, phase=0, *,
x_offset=0, y_offset=0):
self.spi_device = spi_device.SPIDevice(spi, cs, baudrate=baudrate,
polarity=polarity, phase=phase)
self.dc_pin = dc
self.rst = rst
self.dc_pin.switch_to_output(value=0)
if self.rst:
self.rst.switch_to_output(value=0)
self.reset()
self._X_START = x_offset # pylint: disable=invalid-name
self._Y_START = y_offset # pylint: disable=invalid-name
super().__init__(width, height)
#pylint: enable-msg=too-many-arguments
def reset(self):
"""Reset the device"""
self.rst.value = 0
time.sleep(0.050) # 50 milliseconds
self.rst.value = 1
time.sleep(0.050) # 50 milliseconds
# pylint: disable=no-member
def write(self, command=None, data=None):
"""SPI write to the device: commands and data"""
if command is not None:
self.dc_pin.value = 0
with self.spi_device as spi:
spi.write(bytearray([command]))
if data is not None:
self.dc_pin.value = 1
with self.spi_device as spi:
spi.write(data)
def read(self, command=None, count=0):
"""SPI read from device with optional command"""
data = bytearray(count)
self.dc_pin.value = 0
with self.spi_device as spi:
if command is not None:
spi.write(bytearray([command]))
if count:
spi.readinto(data)
return data
| 1.734375 | 2 |
app.py | lmbejaran/sqlalchemy-challenge | 0 | 12797918 | import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import datetime as dt
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start_date><br/>"
f"/api/v1.0/<start_date>/<end_date><br/>"
)
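# Daily maximum precipitation for the last year of data, returned as a list of
# {date, prcp} dicts.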
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
first_date = session.query(Measurement.date).order_by(Measurement.date).first()
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
date_prcp = session.query(Measurement.date, func.max(Measurement.prcp)).group_by(Measurement.date).\
filter(Measurement.date > year_ago)
session.close()
# Create a dictionary from the row data and append to a list of all_passengers
all_dates = []
for date, prcp in date_prcp:
dict = {}
dict["date"] = date
dict["prcp"] = prcp
all_dates.append(dict)
return jsonify(all_dates)
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
stations = session.query(Station.id, Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()
all_stations = []
for id, station, name, latitude, longitude, elevation in stations:
dict = {}
dict["id"] = id
dict["station"] = station
dict["name"] = name
dict["latitude"] = latitude
dict["longitude"] = longitude
dict["elevation"] = elevation
all_stations.append(dict)
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
year_ago = dt.date(2017, 8, 18) - dt.timedelta(days=365)
most_active = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date > year_ago).all()
most_active_tobs = []
for date, tobs in most_active:
dict = {}
dict['date'] = date
dict['tobs'] = tobs
most_active_tobs.append(dict)
return jsonify(most_active_tobs)
@app.route("/api/v1.0/<start_date>")
def calc_temps_start(start_date):
tobs_stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).all()
tobs_stats_all = []
for min, avg, max in tobs_stats:
dict = {}
dict['min'] = min
dict['avg'] = avg
dict['max'] = max
tobs_stats_all.append(dict)
return jsonify(tobs_stats_all)
@app.route("/api/v1.0/<start_date>/<end_date>")
def calc_temps_start_end(start_date, end_date):
    session = Session(engine)
    tobs_stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
tobs_stats_all = []
for min, avg, max in tobs_stats:
dict = {}
dict['min'] = min
dict['avg'] = avg
dict['max'] = max
tobs_stats_all.append(dict)
return jsonify(tobs_stats_all)
if __name__ == '__main__':
app.run(debug=True) | 3.125 | 3 |
main.py | marcpinet/maze-generator-and-solver | 0 | 12797919 | <reponame>marcpinet/maze-generator-and-solver
import maze.maze as m
import maze.maze_tools as mt
import visual.colors as vc
from abc import abstractmethod
def ask_for_int(sentence: str) -> int:
"""
Ask the user for an integer.
"""
while True:
try:
return int(input(sentence))
except ValueError:
print("Invalid input. Please try again.")
def main():
# Intializing variables
    _ = mt.MazeGenerator(0) # Needed to init the MazeGenerator.ALGORITHMS list
_ = mt.MazeSolver(0) # Needed to init the MazeSolver.ALGORITHMS list
alg_gen, alg_sol, width, height = -1, -1, -1, -1
build_anim, solve_anim = "", ""
# Getting the user's inputs
for i in range(len(mt.MazeGenerator.ALGORITHMS)):
print(
f"{vc.CMDColors.YELLOW} {i}: {list(mt.MazeGenerator.ALGORITHMS.keys())[i]} {vc.CMDColors.RESET}"
)
print()
while alg_gen not in range(len(mt.MazeGenerator.ALGORITHMS)):
alg_gen = ask_for_int("Input the n° of the algorithm for the generation: ")
print()
for i in range(len(mt.MazeSolver.ALGORITHMS)):
print(
f"{vc.CMDColors.YELLOW} {i}: {list(mt.MazeSolver.ALGORITHMS.keys())[i]} {vc.CMDColors.RESET}"
)
print()
while alg_sol not in range(len(mt.MazeSolver.ALGORITHMS)):
alg_sol = ask_for_int("Input the n° of the algorithm for the solving: ")
print()
while width not in range(1000):
width = ask_for_int("Width of the maze: ")
while height not in range(1000):
height = ask_for_int("Height of the maze: ")
print()
while build_anim.lower() not in ["y", "n"]:
build_anim = input("Enable animation for building? (Y/N): ")
while solve_anim.lower() not in ["y", "n"]:
solve_anim = input("Enable animation for solving? (Y/N): ")
print()
# Setting animation properties for pygame window
mt.Window.GENERATE_ANIMATION = True if build_anim.lower() == "y" else False
mt.Window.SOLVE_ANIMATION = True if solve_anim.lower() == "y" else False
# Showing the maze on the pygame window
# Initializing
maze = m.Maze(width, height)
maze_generator = mt.MazeGenerator(alg_gen)
maze_solver = mt.MazeSolver(alg_sol)
# Drawing
print(
vc.CMDColors.CYAN
+ "Press "
+ vc.CMDColors.FAIL
+ "SPACE"
+ vc.CMDColors.CYAN
+ " to start building the maze.\nPress "
+ vc.CMDColors.FAIL
+ "SPACE"
+ vc.CMDColors.CYAN
+ " again to solve it.\nPress "
+ vc.CMDColors.HEADER
+ "CTRL+C"
+ vc.CMDColors.CYAN
+ " in the terminal to exit."
+ vc.CMDColors.RESET
)
# Starting the animations
maze_drawer = mt.MazeDrawer(maze_generator, maze_solver, maze)
maze_drawer.start()
if __name__ == "__main__":
main()
| 3.859375 | 4 |
utils.py | quanhua92/vietnam_investment_fund | 0 | 12797920 | <gh_stars>0
import requests
import json
from datetime import datetime
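# Thin wrappers around the public api.fmarket.vn endpoints: one lists the available fund
# products (NEW_FUND and TRADING_FUND), the other fetches the full NAV history of a product.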
def get_all_products():
url = "https://api.fmarket.vn/res/products/filter"
data = {"types":["NEW_FUND","TRADING_FUND"],"issuerIds":[],"page":1,"pageSize":1000,"fundAssetTypes":[],"bondRemainPeriods":[],"searchField":""}
headers = {"Content-Type": "application/json; charset=utf-8"}
x = requests.post(url, json=data, headers=headers)
return json.loads(x.text)
def get_history(product_id):
url = "https://api.fmarket.vn/res/product/get-nav-history"
toDate = datetime.now().strftime("%Y%m%d")
data = {"isAllData":1,"productId":product_id,"fromDate": None, "toDate": toDate}
headers = {"Content-Type": "application/json; charset=utf-8"}
x = requests.post(url, json=data, headers=headers)
return json.loads(x.text) | 2.6875 | 3 |
OpenGLCffi/GL/EXT/NV/half_float.py | cydenix/OpenGLCffi | 0 | 12797921 | <gh_stars>0
from OpenGLCffi.GL import params
@params(api='gl', prms=['x', 'y'])
def glVertex2hNV(x, y):
pass
@params(api='gl', prms=['v'])
def glVertex2hvNV(v):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glVertex3hNV(x, y, z):
pass
@params(api='gl', prms=['v'])
def glVertex3hvNV(v):
pass
@params(api='gl', prms=['x', 'y', 'z', 'w'])
def glVertex4hNV(x, y, z, w):
pass
@params(api='gl', prms=['v'])
def glVertex4hvNV(v):
pass
@params(api='gl', prms=['nx', 'ny', 'nz'])
def glNormal3hNV(nx, ny, nz):
pass
@params(api='gl', prms=['v'])
def glNormal3hvNV(v):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glColor3hNV(red, green, blue):
pass
@params(api='gl', prms=['v'])
def glColor3hvNV(v):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4hNV(red, green, blue, alpha):
pass
@params(api='gl', prms=['v'])
def glColor4hvNV(v):
pass
@params(api='gl', prms=['s'])
def glTexCoord1hNV(s):
pass
@params(api='gl', prms=['v'])
def glTexCoord1hvNV(v):
pass
@params(api='gl', prms=['s', 't'])
def glTexCoord2hNV(s, t):
pass
@params(api='gl', prms=['v'])
def glTexCoord2hvNV(v):
pass
@params(api='gl', prms=['s', 't', 'r'])
def glTexCoord3hNV(s, t, r):
pass
@params(api='gl', prms=['v'])
def glTexCoord3hvNV(v):
pass
@params(api='gl', prms=['s', 't', 'r', 'q'])
def glTexCoord4hNV(s, t, r, q):
pass
@params(api='gl', prms=['v'])
def glTexCoord4hvNV(v):
pass
@params(api='gl', prms=['target', 's'])
def glMultiTexCoord1hNV(target, s):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord1hvNV(target, v):
pass
@params(api='gl', prms=['target', 's', 't'])
def glMultiTexCoord2hNV(target, s, t):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord2hvNV(target, v):
pass
@params(api='gl', prms=['target', 's', 't', 'r'])
def glMultiTexCoord3hNV(target, s, t, r):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord3hvNV(target, v):
pass
@params(api='gl', prms=['target', 's', 't', 'r', 'q'])
def glMultiTexCoord4hNV(target, s, t, r, q):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord4hvNV(target, v):
pass
@params(api='gl', prms=['fog'])
def glFogCoordhNV(fog):
pass
@params(api='gl', prms=['fog'])
def glFogCoordhvNV(fog):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glSecondaryColor3hNV(red, green, blue):
pass
@params(api='gl', prms=['v'])
def glSecondaryColor3hvNV(v):
pass
@params(api='gl', prms=['weight'])
def glVertexWeighthNV(weight):
pass
@params(api='gl', prms=['weight'])
def glVertexWeighthvNV(weight):
pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttrib1hNV(index, x):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib1hvNV(index, v):
pass
@params(api='gl', prms=['index', 'x', 'y'])
def glVertexAttrib2hNV(index, x, y):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib2hvNV(index, v):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z'])
def glVertexAttrib3hNV(index, x, y, z):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib3hvNV(index, v):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z', 'w'])
def glVertexAttrib4hNV(index, x, y, z, w):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4hvNV(index, v):
pass
@params(api='gl', prms=['index', 'n', 'v'])
def glVertexAttribs1hvNV(index, n, v):
pass
@params(api='gl', prms=['index', 'n', 'v'])
def glVertexAttribs2hvNV(index, n, v):
pass
@params(api='gl', prms=['index', 'n', 'v'])
def glVertexAttribs3hvNV(index, n, v):
pass
@params(api='gl', prms=['index', 'n', 'v'])
def glVertexAttribs4hvNV(index, n, v):
pass
| 2.140625 | 2 |
mysite/core/views.py | gopal031119/Decision-tree-using-Gimi-index | 0 | 12797922 | from django.shortcuts import render, redirect
from django.views.generic import TemplateView, ListView, CreateView
from django.core.files.storage import FileSystemStorage
from django.urls import reverse_lazy
from django.core.files import File
from .forms import BookForm
from .models import Book
# Load libraries
import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.tree import export_graphviz
from six import StringIO
from IPython.display import Image
import os
import pydotplus
BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
class Home(TemplateView):
template_name = 'home.html'
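# Trains a DecisionTreeClassifier (Gini criterion by default) on the uploaded CSV, using
# the last column as the target, renders the fitted tree to a PNG with pydotplus and
# stores it via FileSystemStorage so the template can display it by name.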
def upload(request):
context = {}
upload_file_name = ""
image_url = ""
if request.method == 'POST':
uploaded_file = request.FILES['document']
image_url = "Tree_of_"+str(os.path.splitext(uploaded_file.name)[0]) + ".png"
dataset_cols_name = []
pima = pd.read_csv(uploaded_file , header=0)
dataset_cols_name = pima.columns.values.tolist()
transet_cols_name = dataset_cols_name[:len(dataset_cols_name)-1]
transet_cols_name.append("decisionCol")
pima.columns = transet_cols_name
#split dataset in features and target variable
feature_cols = transet_cols_name[:len(transet_cols_name)-1]
X = pima[feature_cols] # Features
y = pima.decisionCol # Target variable
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) # 70% training and 30% test
# Create Decision Tree classifer object
clf = DecisionTreeClassifier()
# Train Decision Tree Classifer
clf = clf.fit(X_train,y_train)
#Predict the response for test dataset
y_pred = clf.predict(X_test)
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data,
filled=True, rounded=True,
special_characters=True,feature_names = feature_cols,class_names=['0','1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png(image_url)
imge = Image(graph.create_png())
fs = FileSystemStorage()
upload_file_name = uploaded_file
path_to_generated_image = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+"\\mining-assignment-master\\"+ image_url
file = open(path_to_generated_image , "rb")
django_file = File(file)
name_of_image = "Tree_of_"+str(os.path.splitext(uploaded_file.name)[0]) + ".png"
name = fs.save(name_of_image, django_file)
print(path_to_generated_image)
context['image_name'] = name_of_image
return render(request, 'upload.html', context)
| 2.3125 | 2 |
ostaas.py | yadav19/simple-student-cloud | 2 | 12797923 | #!/usr/bin/python36
print("content-type: text/html")
print("")
import cgi
import subprocess as sp
form = cgi.FieldStorage()
user_name = form.getvalue('user_name')
lv_size = form.getvalue('lv_size')
print(user_name)
print(lv_size)
output=sp.getstatusoutput("sudo ansible-playbook ostaas.yml --extra-vars='user_name={u} lv_size={l}'".format(u=user_name, l=lv_size))
if output[0] == 0 :
print("<b> NFS-server succesfully created</b>")
client_mount=sp.getstatusoutput("sudo ansible-playbook ostaasclient.yml --extra-vars='user_name={u} lv_size={l}'".format(u=user_name, l=lv_size))
if client_mount[0] == 0 :
print("<b>Enjoy free cloud storage..</b>")
else:
print("Sorry, We're facing technical issue. please visit after some time")
| 2.375 | 2 |
src/backend/marsha/development/urls.py | insad/marsha | 0 | 12797924 | <gh_stars>0
"""Marsha Development app URLs configuration."""
from django.urls import path
from .views import DevelopmentLTIView
app_name = "development"
urlpatterns = [
path("development/", DevelopmentLTIView.as_view(), name="lti-development-view"),
]
| 1.335938 | 1 |
Neural Style Transfer/train_TensorFlow.py | Shashi456/Neural-Style | 31 | 12797925 |
import tensorflow as tf
from tensorflow.python.keras.preprocessing import image as kp_image
# Keras is only used to load VGG19 model as a high level API to TensorFlow
from keras.applications.vgg19 import VGG19
from keras.models import Model
from keras import backend as K
# pillow is used for loading and saving images
from PIL import Image
# numPy is used for manipulation of array of object i.e Image in our case
import numpy as np
##
##
##
# list of layers to be considered for calculation of Content and Style Loss
content_layers = ['block3_conv3']
style_layers = ['block1_conv1','block2_conv2','block4_conv3']
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
# path where the content and style images are located
content_path = 'content.jpg'
style_path = 'style.jpg'
# Save the result as
save_name = 'generated.jpg'
# path to where Vgg19 model weight is located
vgg_weights = "vgg_weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5"
############################################################################################################
############################################################################################################
# UTILS
############################################################################################################
############################################################################################################
def load_img(path_to_img):
max_dim = 512
img = Image.open(path_to_img)
img_size = max(img.size)
scale = max_dim/img_size
img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)
img = kp_image.img_to_array(img)
# We need to broadcast the image array such that it has a batch dimension
img = np.expand_dims(img, axis=0)
# preprocess raw images to make it suitable to be used by VGG19 model
out = tf.keras.applications.vgg19.preprocess_input(img)
return tf.convert_to_tensor(out)
def deprocess_img(processed_img):
x = processed_img.copy()
    # perform the inverse of the preprocessing step
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
############################################################################################################
############################################################################################################
# Loss Function
############################################################################################################
############################################################################################################
### Content Loss Function
def get_content_loss(content, target):
return tf.reduce_mean(tf.square(content - target)) /2
### Style Loss Function
def gram_matrix(input_tensor):
# if input tensor is a 3D array of size Nh x Nw X Nc
# we reshape it to a 2D array of Nc x (Nh*Nw)
channels = int(input_tensor.shape[-1])
a = tf.reshape(input_tensor, [-1, channels])
n = tf.shape(a)[0]
# get gram matrix
gram = tf.matmul(a, a, transpose_a=True)
return gram
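# Editor's note: an illustrative check, not part of the original script, showing the shape
# contract of gram_matrix(): a (Nh, Nw, Nc) activation map yields an (Nc, Nc) Gram matrix.
# It uses the same TF1-style session API as the rest of this file.
def _gram_matrix_shape_demo():
    demo_activation = tf.ones((4, 4, 3))      # pretend feature map: 4x4 spatial, 3 channels
    demo_gram = gram_matrix(demo_activation)  # reshaped to (16, 3), so the Gram matrix is (3, 3)
    with tf.Session() as demo_sess:
        print("Gram matrix shape:", demo_sess.run(tf.shape(demo_gram)))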
def get_style_loss(base_style, gram_target):
height, width, channels = base_style.get_shape().as_list()
gram_style = gram_matrix(base_style)
# Original eqn as a constant to divide i.e 1/(4. * (channels ** 2) * (width * height) ** 2)
return tf.reduce_mean(tf.square(gram_style - gram_target)) / (channels**2 * width * height) #(4.0 * (channels ** 2) * (width * height) ** 2)
### Use to pass content and style image through it
def get_feature_representations(model, content_path, style_path, num_content_layers):
# Load our images in
content_image = load_img(content_path)
style_image = load_img(style_path)
# batch compute content and style features
content_outputs = model(content_image)
style_outputs = model(style_image)
# Get the style and content feature representations from our model
style_features = [ style_layer[0] for style_layer in style_outputs[num_content_layers:] ]
content_features = [ content_layer[0] for content_layer in content_outputs[:num_content_layers] ]
return style_features, content_features
### Total Loss
def compute_loss(model, loss_weights, generated_output_activations, gram_style_features, content_features, num_content_layers, num_style_layers):
generated_content_activations = generated_output_activations[:num_content_layers]
generated_style_activations = generated_output_activations[num_content_layers:]
style_weight, content_weight = loss_weights
style_score = 0
content_score = 0
# Accumulate style losses from all layers
# Here, we equally weight each contribution of each loss layer
weight_per_style_layer = 1.0 / float(num_style_layers)
for target_style, comb_style in zip(gram_style_features, generated_style_activations):
temp = get_style_loss(comb_style[0], target_style)
style_score += weight_per_style_layer * temp
# Accumulate content losses from all layers
weight_per_content_layer = 1.0 / float(num_content_layers)
for target_content, comb_content in zip(content_features, generated_content_activations):
temp = get_content_loss(comb_content[0], target_content)
content_score += weight_per_content_layer* temp
# Get total loss
loss = style_weight*style_score + content_weight*content_score
return loss, style_score, content_score
############################################################################################################
############################################################################################################
# CREATE STYLE TRANFER
############################################################################################################
############################################################################################################
# Using Keras Load VGG19 model
def get_model(content_layers,style_layers):
# Load our model. We load pretrained VGG, trained on imagenet data
vgg19 = VGG19(weights=None, include_top=False)
    # We don't need to (or want to) train any layers of our pre-trained vgg model, so we set its trainable flag to False.
vgg19.trainable = False
style_model_outputs = [vgg19.get_layer(name).output for name in style_layers]
content_model_outputs = [vgg19.get_layer(name).output for name in content_layers]
model_outputs = content_model_outputs + style_model_outputs
# Build model
return Model(inputs = vgg19.input, outputs = model_outputs), vgg19
def run_style_transfer(content_path, style_path, num_iterations=200, content_weight=0.1, style_weight=0.9):
# Create a tensorflow session
sess = tf.Session()
# Assign keras back-end to the TF session which we created
K.set_session(sess)
model, vgg19 = get_model(content_layers,style_layers)
# Get the style and content feature representations (from our specified intermediate layers)
style_features, content_features = get_feature_representations(model, content_path, style_path, num_content_layers)
gram_style_features = [gram_matrix(style_feature) for style_feature in style_features]
# VGG default normalization
norm_means = np.array([103.939, 116.779, 123.68])
min_vals = -norm_means
max_vals = 255 - norm_means
    # In the original paper, the initial stylized image is a random matrix of the same size as the content image,
    # but later work initializes from the content image instead of random values
    # because it proved to reach a stylized result faster
generated_image = load_img(content_path)
# generated_image = np.random.randint(0,255, size=generated_image.shape)
# Create tensorflow variable to hold a stylized/generated image during the training
generated_image = tf.Variable(generated_image, dtype=tf.float32)
model_outputs = model(generated_image)
    # weights of the content and style images, i.e. alpha & beta
loss_weights = (style_weight, content_weight)
# Create our optimizer
loss = compute_loss(model, loss_weights, model_outputs, gram_style_features, content_features, num_content_layers, num_style_layers)
opt = tf.train.AdamOptimizer(learning_rate=9, beta1=0.9, epsilon=1e-1).minimize( loss[0], var_list = [generated_image])
sess.run(tf.global_variables_initializer())
sess.run(generated_image.initializer)
# loading the weights again because tf.global_variables_initializer() resets the weights
vgg19.load_weights(vgg_weights)
    # Put the loss at infinity before training starts and create a variable to hold the best image (i.e. the image with minimum loss)
best_loss, best_img = float('inf'), None
for i in range(num_iterations):
# Do optimization
sess.run(opt)
        # Make sure image values stay within the min-max range of the VGG normalisation
        clipped = tf.clip_by_value(generated_image, min_vals, max_vals)
        # run the assign op so the clipped values are actually written back to the variable
        sess.run(tf.assign(generated_image, clipped))
# Open the Tuple of tensors
total_loss, style_score, content_score = loss
total_loss = total_loss.eval(session=sess)
if total_loss < best_loss:
# Update best loss and best image from total loss.
best_loss = total_loss
# generated image is of shape (1, h, w, 3) convert it to (h, w, 3)
temp_generated_image = sess.run(generated_image)[0]
best_img = deprocess_img(temp_generated_image)
s_loss = sess.run(style_score)
c_loss = sess.run(content_score)
# print best loss
print('best: iteration: ', i ,'loss: ', total_loss ,' style_loss: ', s_loss,' content_loss: ', c_loss)
# Save image after every 100 iterations
if (i+1)%100 == 0:
output = Image.fromarray(best_img)
output.save(str(i+1)+'-'+save_name)
# after num_iterations iterations are completed, close the TF session
sess.close()
return best_img, best_loss
best, best_loss = run_style_transfer(content_path, style_path) | 2.90625 | 3 |
ctwalker/utils/__init__.py | aphearin/ctwalker | 0 | 12797926 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .robust_file_opener import _compression_safe_opener
| 0.902344 | 1 |
tests/test_ml.py | ChuaHanChong/MLOps-Project | 0 | 12797927 | """Test for loading ml module."""
import inspect
import unittest
from pathlib import Path
class TestImportModule(unittest.TestCase):
"""Module testing."""
def test_import_ml(self):
"""Test ml module."""
import ml
self.assertEqual(
inspect.getfile(ml),
str(Path.cwd().joinpath('src', 'ml', '__init__.py')),
)
if __name__ == '__main__':
unittest.main()
| 2.265625 | 2 |
Project 8/DeskNotification.py | ingwant/Python-Programs | 0 | 12797928 | # pip install plyer
from plyer import notification
def send_desk_message(title, message):
notification.notify(
title=title,
message=message,
app_icon="circle-48.ico",
timeout=5
)
send_desk_message("TITLE", "This is a message....")
| 2.4375 | 2 |
src/graphics/panels.py | ukitinu/ttfh | 1 | 12797929 | from __future__ import annotations
import tkinter as tk
from typing import Callable
from src.graphics import utils
from src.graphics.interfaces import Panel
from src.timer import Clock
class PanelStyle:
""" This class holds the immutable style properties of the TextPanel """
def __init__(self, width: int, height: int, bg_colour: str, font: str):
self.width: int = width
self.height: int = height
self.bg_colour: str = bg_colour
self.font: str = font
class TextPanel(Panel):
"""
Represents a canvas containing one or more texts with, possibly, some variable styles and content.
"""
def __init__(self, root: tk.Tk, clock: Clock, style: PanelStyle, var_callback: Callable[[Clock], str]):
self.root: tk.Tk = root
self.clock: Clock = clock
self.style: PanelStyle = style
self.var_callback: Callable = var_callback
self.style_var = tk.StringVar(self.root, self.var_callback(self.clock))
def draw(self) -> None:
canvas = tk.Canvas(self.root, width=self.style.width, height=self.style.height,
bg=self.style.bg_colour, highlightthickness=0)
text_id = canvas.create_text(self.style.width / 2, self.style.height / 2,
anchor=tk.CENTER,
text=self.style_var.get().split(":")[0],
fill=self.style_var.get().split(":")[1],
font=self.style.font)
canvas.pack()
def on_change(varname, index, mode):
"""
The signature of the method must stay as is to work properly with tkinter.
It also seems I can't move it from here to a more sensible place.
"""
canvas.itemconfigure(text_id,
text=self.style_var.get().split(":")[0],
fill=self.style_var.get().split(":")[1])
self.style_var.trace_add('write', on_change)
def tick(self) -> None:
self.style_var.set(self.var_callback(self.clock))
class ClockPanel(Panel):
"""
Represents the canvas containing the clock with the hours and the circle.
"""
def __init__(self, root: tk.Tk, clock: Clock, style: PanelStyle, var_callback: Callable[[Clock], str]):
self.root: tk.Tk = root
self.clock: Clock = clock
self.style: PanelStyle = style
self.var_callback: Callable = var_callback
self.style_var = tk.StringVar(self.root, self.var_callback(self.clock))
def draw(self) -> None:
canvas = tk.Canvas(self.root, width=self.style.width, height=self.style.height,
bg=self.style.bg_colour, highlightthickness=0)
text_id = canvas.create_text(self.style.width / 2, self.style.height / 2,
anchor=tk.CENTER,
text=self.style_var.get().split(":")[0],
# fill=self.style_var.get().split(":")[1], # 'white',
fill='white',
font=self.style.font)
utils.draw_circle(canvas, self.style.width // 2, self.style.height // 2, self.style.width // 3,
outline='white',
width=8)
arc_id = utils.draw_circle(canvas, self.style.width // 2, self.style.height // 2, self.style.width // 3,
outline='red',
width=6,
extent=-1 * int(self.style_var.get().split(":")[1]) * 6)
canvas.pack()
def on_change(varname, index, mode):
"""
The signature of the method must stay as is to work properly with tkinter.
It also seems I can't move it from here to a more sensible place.
"""
hour = self.style_var.get().split(":")[0]
canvas.itemconfigure(text_id, text=hour)
minutes = int(self.style_var.get().split(":")[1])
extent = utils.calc_arc_extent(self.clock.day, self.clock.hour, minutes)
canvas.itemconfigure(arc_id, extent=extent)
self.style_var.trace_add('write', on_change)
def tick(self) -> None:
self.style_var.set(self.var_callback(self.clock))
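# Editor's note: an illustrative wiring sketch, not part of the original module. It assumes
# Clock() exposes a `day` attribute (it does elsewhere in this file) and relies on the
# "text:colour" convention that TextPanel.draw() splits on; the font and sizes are made up.
def _example_text_panel(root: tk.Tk, clock: Clock) -> Panel:
    style = PanelStyle(width=200, height=60, bg_colour='black', font='Arial 20')
    panel = TextPanel(root, clock, style, lambda c: f"Day {c.day}:white")
    panel.draw()
    return panel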
| 3.015625 | 3 |
subt/octomap.py | robotika/osgar | 12 | 12797930 | """
ROS Binary Octomap parsing and processing
"""
import struct
import math
import numpy as np
import cv2
from osgar.node import Node
from osgar.lib.pplanner import find_path
# http://www.arminhornung.de/Research/pub/hornung13auro.pdf
# 00: unknown; 01: occupied; 10: free; 11: inner node with child next in the stream
STATE_UNKNOWN = 128 # color=(0, 0xFF, 0)
STATE_FREE = 255 # color=(0xFF, 0xFF, 0xFF)
STATE_OCCUPIED = 1 # color=(0x00, 0x00, 0xFF) ... just to be != 0, which is original unknown/black/undefined
STATE_FRONTIER = 196 # color=(0xFF, 0x00, 0xFF)
STATE_PATH = 64 # color=(0xFF, 0x00, 0x00)
SLICE_OCTOMAP_SIZE = 1024 # size of slice/image in XY octomap coordinates (for given Z)
def seq2xyz(seq_arr):
"""
Convert octomap sequence (0..7) into XYZ coordinate
:param seq_arr: list of parent-child sequences for one of given type (free, occupied, unknown)
:return: list of XYZ boxes with their "size category" (shorted the sequence bigger the voxel)
"""
xyz = []
if len(seq_arr) == 0:
return xyz
max_len = max([len(s) for s in seq_arr])
for seq in seq_arr:
d = 2 ** (max_len - 1)
x, y, z = -32767, -32767, -32767
for code in seq:
if code in [1, 3, 5, 7]:
x += d
if code in [2, 3, 6, 7]:
y += d
if code in [4, 5, 6, 7]:
z += d
d //= 2
xyz.append(((x, y, z), len(seq)))
return xyz
def xyz2img(img, xyz, color, level=2):
"""
Draw given list of voxels into existing image
:param img: I/O image
:param xyz: list of voxels (xyz and "size")
:param color: value 0..255 to be assigned to voxels at given level
:param level: Z-level for the cut
:return: updated image
"""
for pos, size in xyz:
x, y, z = pos
assert 1 <= size <= 16, size
d = 2 ** (16 - size)
if z <= level < z + d:
if d > 100:
# do not try to fill extra large (unknown) squares, for now
continue
px = SLICE_OCTOMAP_SIZE//2 + x
py = SLICE_OCTOMAP_SIZE//2 - y
img[max(0, py-d+1):min(SLICE_OCTOMAP_SIZE, py+1), max(0, px):min(SLICE_OCTOMAP_SIZE, px+d)] = color
return img
def data2stack(data):
"""
Convert binary ocotomap data into three lists (occupied, free, unknown)
:param data: binary octomap data (depth first)
:return: (occupied, free, unknown) lists of sequences
"""
stack = [[]]
unknown = []
free = []
occupied = []
for i in range(len(data) // 2):
prefix = stack.pop(0)
d = struct.unpack_from('<H', data, i * 2)[0]
for rot in range(14, -2, -2): # range(0, 16, 2):
val = (d & (0x3 << rot)) >> rot
if val == 3:
stack.insert(0, prefix + [rot // 2])
elif val == 2:
occupied.append(prefix + [rot // 2])
elif val == 1:
free.append(prefix + [rot // 2])
elif val == 0:
unknown.append(prefix + [rot // 2])
assert len(stack) == 0, len(stack)
return occupied, free, unknown
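# Editor's note: illustrative example, not part of the original module. A single 16-bit word
# whose eight 2-bit child fields are all 01 ("free") decodes to eight one-element sequences
# in the free list and nothing occupied or unknown.
def _data2stack_demo():
    word = struct.pack('<H', 0x5555)  # 0b01_01_01_01_01_01_01_01 -> every child is "free"
    occupied, free, unknown = data2stack(word)
    assert occupied == [] and unknown == [] and len(free) == 8
    return free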
def data2maplevel(data, level):
"""
Convert Octomap data to image/level
"""
img = np.zeros((SLICE_OCTOMAP_SIZE, SLICE_OCTOMAP_SIZE), dtype=np.uint8)
occupied, free, unknown = data2stack(data)
xyz = seq2xyz(free)
xyz2img(img, xyz, color=STATE_FREE, level=level)
xyz = seq2xyz(occupied)
xyz2img(img, xyz, color=STATE_OCCUPIED, level=level)
xyz = seq2xyz(unknown)
xyz2img(img, xyz, color=STATE_UNKNOWN, level=level)
return img
def frontiers(img, start, draw=False):
"""
Find path to the best frontier (free-unknown transition)
:param img: color image with free=white, unknown=green
:param start: start pixel
:param draw: debug frontiers in pyplot
:return: extended image with drawn start and path, path
"""
size = img.shape
green = img[:, :, :] == STATE_UNKNOWN
white = img[:, :, :] == STATE_FREE
mask_right = green[:, 2:, :] & white[:, 1:-1, :]
mask_left = green[:, :-2, :] & white[:, 1:-1, :]
mask = mask_left | mask_right
z = np.zeros((size[0], 1, size[2]), dtype=np.bool)
mask = np.hstack([z, mask, z])
mask_up = green[2:, :, :] & white[1:-1, :, :]
mask_down = green[:-2, :, :] & white[1:-1, :, :]
z = np.zeros((1, size[1], size[2]), dtype=np.bool)
mask2 = mask_up | mask_down
mask = np.vstack([z, mask2, z]) | mask
z_mask_up = green[:, :, 2:] & white[:, :, 1:-1]
z_mask_down = green[:, :, :-2] & white[:, :, 1:-1]
z = np.zeros((size[0], size[1], 1), dtype=np.bool)
mask3 = z_mask_up | z_mask_down
# mask = np.concatenate([z, mask3, z], axis=2) | mask
xy = np.where(mask)
if len(xy[0]) == 0:
# there are no frontiers, i.e. no exploration path
return img, None
score = np.zeros(len(xy[0]))
for i in range(len(xy[0])):
x, y = xy[1][i]-SLICE_OCTOMAP_SIZE//2, SLICE_OCTOMAP_SIZE//2-xy[0][i]
        if x > 4: # TODO - resolution and detect gate as one way only (0 is not sufficient due to imprecise position)
score[i] = math.hypot(x, y) * 0.03
else:
score[i] = 0 # too cruel cut for X positive semi-space, but let's move INSIDE!
for i in range(len(xy[0])):
x, y = xy[1][i]-SLICE_OCTOMAP_SIZE//2, SLICE_OCTOMAP_SIZE//2-xy[0][i]
for j in range(len(xy[0])):
x2, y2 = xy[1][j]-SLICE_OCTOMAP_SIZE//2, SLICE_OCTOMAP_SIZE//2-xy[0][j]
dist = math.hypot(x - x2, y - y2)
if dist < 10 and score[i] > 0: # ~ 5 meters, only inside
score[i] += 1.0
if draw:
import matplotlib.pyplot as plt
line = plt.plot(xy[1]-SLICE_OCTOMAP_SIZE//2, SLICE_OCTOMAP_SIZE//2-xy[0], 'bo')
m = score > 3*max(score)/4
plt.plot(xy[1][m] - SLICE_OCTOMAP_SIZE//2, SLICE_OCTOMAP_SIZE//2 - xy[0][m], 'ro')
plt.axes().set_aspect('equal', 'datalim')
plt.show()
drivable = white
# use 1 pixel surrounding
drivable_safe_y = drivable[2:, :] & drivable[1:-1, :] & drivable[:-2, :]
drivable_safe_xy = drivable_safe_y[:, 2:] & drivable_safe_y[:, 1:-1] & drivable_safe_y[:, :-2]
# add non-drivable frame to match original image size
z = np.zeros((size[0] - 2, 1, size[2]), dtype=np.bool)
tmp = np.hstack([z, drivable_safe_xy, z])
z = np.zeros((1, size[1], size[2]), dtype=np.bool)
drivable = np.vstack([z, tmp, z])
for limit_score in [3*max(score)/4, max(score)/4, 0]:
# select goal positions above the limit_score
# note, that the "safe path" does not touch external boundary so it would never find path
# to frontier. As a workaround add also all 8-neighbors of frontiers.
goals = []
xy2 = np.array(xy)[:, score > limit_score]
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
for dz in [0]: #[-1, 0, 1]:
goals.append(xy2 + np.repeat(np.asarray([[dy], [dx], [dz]]), xy2.shape[1], axis=1))
goals = np.hstack(goals).T[:, [1, 0, 2]]
# the path planner currently expects goals as tuple (x, y) and operation "in"
goals = set(map(tuple, goals))
path = find_path(drivable, start, goals, verbose=False)
if path is not None:
break
img[mask] = STATE_FRONTIER
if path is not None:
for x, y, z in path:
img[y][x] = STATE_PATH
else:
print('Path not found!')
return img, path
class Octomap(Node):
def __init__(self, config, bus):
super().__init__(config, bus)
bus.register('waypoints', 'dropped')
self.prev_data = None
self.time_limit_sec = None # initialized with the first sim_time_sec
self.debug_arr = []
self.waypoints = None # experimental trigger of navigation
self.waypoints_sent_time = None # unknown
self.sim_time_sec = None
self.pose3d = None
self.video_writer = None
self.video_outfile = None # 'octo.mp4' # optional video output generation
self.min_z = config.get('min_z', 0.5) # should be multiply of "resolution"
self.max_z = config.get('max_z', 0.5) # the limits are included
self.resolution = config.get('resolution', 0.5)
self.verbose = False
def on_sim_time_sec(self, data):
if self.time_limit_sec is None:
self.time_limit_sec = data
def on_pose3d(self, data):
if self.waypoints is not None:
print('Waypoints', data[0], self.waypoints[0], self.waypoints[-1])
self.waypoints_sent_time = self.publish('waypoints', self.waypoints)
self.waypoints = None
def on_octomap(self, data):
if self.sim_time_sec is None or self.pose3d is None or self.sim_time_sec < self.time_limit_sec:
return
self.time_limit_sec += 1 # simulated seconds
if self.waypoints_sent_time is not None and self.waypoints_sent_time > self.time:
self.publish('dropped', self.time_limit_sec) # log skipped sim_time
return
# bit unlucky conversion from existing Python2 data
assert len(data) % 2 == 0, len(data)
data = bytes([(d + 256) % 256 for d in data])
x, y, z = self.pose3d[0]
start = int(SLICE_OCTOMAP_SIZE//2 + x/self.resolution), int(SLICE_OCTOMAP_SIZE//2 - y/self.resolution), int((z - self.min_z)/self.resolution)
num_z_levels = int(round((self.max_z - self.min_z)/self.resolution)) + 1
img3d = np.zeros((SLICE_OCTOMAP_SIZE, SLICE_OCTOMAP_SIZE, num_z_levels), dtype=np.uint8)
for level in range(num_z_levels):
img3d[:, :, level] = data2maplevel(data, level=level + int(round(self.min_z/self.resolution)))
if self.verbose:
for i in range(num_z_levels):
cv2.imwrite('octo_%03d.png' % i, img3d[:, :, i])
img2 = np.zeros((SLICE_OCTOMAP_SIZE, SLICE_OCTOMAP_SIZE, 3), dtype=np.uint8)
level = max(0, min(num_z_levels - 1, start[2]))
img2[:, :, 0] = img3d[:, :, level]
img2[:, :, 1] = img3d[:, :, level]
img2[:, :, 2] = img3d[:, :, level]
__, path = frontiers(img3d, start) # this image is modified in place anyway
if self.verbose:
f = (img3d == STATE_FRONTIER).nonzero()
for x, y, z in zip(f[1], f[0], f[2]):
if z == start[2]:
cv2.circle(img2, (x, y), radius=0, color=(255, 0, 255), thickness=-1)
if path is not None:
for x, y, z in path:
cv2.circle(img2, (x, y), radius=0, color=(255, 0, 0), thickness=-1)
cv2.circle(img2, start[:2], radius=0, color=(39, 127, 255), thickness=-1)
cv2.imwrite('octo_cut.png', img2) # used for replay debugging
if self.video_outfile is not None:
if self.video_writer is None:
fps = 1
height, width = img2.shape[:2]
self.video_writer = cv2.VideoWriter(self.video_outfile,
cv2.VideoWriter_fourcc(*"mp4v"),
fps,
(width, height))
self.video_writer.write(img2)
if path is not None:
self.waypoints = [[(x - SLICE_OCTOMAP_SIZE//2)/2,
(SLICE_OCTOMAP_SIZE//2 - y)/2,
z * self.resolution + self.min_z]
for x, y, z in path]
def update(self):
channel = super().update()
handler = getattr(self, "on_" + channel, None)
if handler is not None:
handler(getattr(self, channel))
else:
assert False, channel # unknown channel
def draw_iteration(data, waypoints=None, paused=False):
global level, scaled
if waypoints is None:
waypoints = []
while True:
img = data2maplevel(data, level=level)
cv2.circle(img, start[:2], radius=0, color=(39, 127, 255), thickness=-1)
if scaled:
img = cv2.resize(img[256 + 128:-256 - 128, 256 + 128:-256 - 128], img.shape)
cv2.imshow('Octomap', img)
pose_str = '(%.02f, %.02f, %.02f)' % tuple(pose3d[0]) if pose3d is not None else 'None'
cv2.setWindowTitle('Octomap', f'Octomap {time}, {pose_str}, level={level}' + (' (paused)' if paused else ''))
key = cv2.waitKey(1) & 0xFF
KEY_Q = ord('q')
if key == KEY_Q:
break
if key == ord(' '):
paused = not paused
if ord('0') <= key <= ord('9'):
level = key - ord('0')
if key == ord('m'):
level -= 1
if key == ord('p'):
level += 1
if key == ord('s'):
scaled = not scaled
if key == ord('d'):
import open3d as o3d
res = 0.5
all = []
for lev in range(-3, 10):
img = data2maplevel(data, level=lev)
xy = np.where(img == STATE_OCCUPIED)
xyz = np.array(
[xy[1] - SLICE_OCTOMAP_SIZE / 2, SLICE_OCTOMAP_SIZE / 2 - xy[0], np.full(len(xy[0]), lev)]).T * res
all.extend(xyz.tolist())
pcd = o3d.geometry.PointCloud()
xyz = np.array(all)
pcd.points = o3d.utility.Vector3dVector(xyz)
voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.1)
lines = [[i, i + 1] for i in range(len(waypoints) - 1)]
colors = [[1, 0, 0] for i in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(waypoints)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
o3d.visualization.draw_geometries([line_set, voxel_grid])
if not paused:
break
return key
if __name__ == "__main__":
import argparse
from osgar.lib.serialize import deserialize
from osgar.logger import LogReader, lookup_stream_id
    parser = argparse.ArgumentParser("Analyze octomap data")
parser.add_argument('logfile', help='path to logfile with octomap data')
parser.add_argument('--out', help='output path to PNG image', default='out.png')
parser.add_argument('--draw', action='store_true', help='draw pyplot frontiers')
args = parser.parse_args()
octomap_stream_id = lookup_stream_id(args.logfile, 'fromrospy.octomap')
pose3d_stream_id = lookup_stream_id(args.logfile, 'fromrospy.pose3d')
waypoints_stream_id = lookup_stream_id(args.logfile, 'octomap.waypoints')
pose3d = None
x, y, z = 0, 0, 0
resolution = 0.5
waypoints = None
last_octo_data = None
with LogReader(args.logfile,
only_stream_id=[octomap_stream_id, pose3d_stream_id, waypoints_stream_id]) as logreader:
level = 2
scaled = True
for time, stream, data in logreader:
data = deserialize(data)
if stream == pose3d_stream_id:
pose3d = data
x, y, z = pose3d[0]
start = int(SLICE_OCTOMAP_SIZE//2 + x/resolution), int(SLICE_OCTOMAP_SIZE//2 - y/resolution), int(z / resolution)
continue
if stream == waypoints_stream_id:
waypoints = data
continue
if waypoints is None:
# speed up display/processing - maybe optional?
continue
assert len(data) % 2 == 0, len(data) # TODO fix this in cloudsim2osgar
data = bytes([(d + 256) % 256 for d in data])
last_octo_data = data
key = draw_iteration(data, waypoints)
if key == ord('q'):
break
waypoints = None # force wait for next waypoints message
if last_octo_data is not None:
draw_iteration(last_octo_data, waypoints, paused=True)
# vim: expandtab sw=4 ts=4
| 2.5625 | 3 |
src/complex_constraints/sushi_net.py | samarthbhargav/symstat | 0 | 12797931 | import argparse
import sys
import numpy as np
from sushi_data import SushiData, to_pairwise_comp
from compute_mpe import CircuitMPE
import tensorflow as tf
FLAGS = None
def weight_variable(shape):
return tf.Variable(tf.truncated_normal(shape, 0.1))
def bias_variable(shape):
return tf.Variable(tf.truncated_normal(shape, 0.1))
def main(_):
# Get data
sushi_data = SushiData('sushi.soc')
INPUT_SIZE = sushi_data.train_data.shape[1]
OUTPUT_SIZE = sushi_data.train_labels.shape[1]
# Create the model
# Input(25) - Layer 1(units)
x = tf.placeholder(tf.float32, [None, INPUT_SIZE])
W = []
b = []
ys = []
W.append(weight_variable([INPUT_SIZE, FLAGS.units]))
b.append(bias_variable([FLAGS.units]))
ys.append(tf.nn.sigmoid(tf.matmul(x, W[0]) + b[0]))
for i in range(1, FLAGS.layers):
# Layer i(units) - Layer i+1(units)
W.append(weight_variable([FLAGS.units, FLAGS.units]))
b.append(bias_variable([FLAGS.units]))
ys.append(tf.nn.sigmoid(tf.matmul(ys[i-1], W[i]) + b[i]))
# Layer n(units) - Output(25)
W.append(weight_variable([FLAGS.units, OUTPUT_SIZE]))
b.append(bias_variable([OUTPUT_SIZE]))
y = tf.matmul(ys[-1], W[-1]) + b[-1] + np.finfo(float).eps * 10
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, OUTPUT_SIZE])
yu = tf.unstack(tf.nn.sigmoid(y), axis=1)
# Create AC
# yleaves = [[ny, 1.0 - ny] for ny in yu]
# ac = AC('permutation-4.ac',yleaves)
# Create CircuitMPE instance for our predictions
cmpe = CircuitMPE('permutation-4.vtree', 'permutation-4.sdd')
wmc = cmpe.get_tf_ac([[1.0 - ny,ny] for ny in yu])
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y))
loss = cross_entropy - FLAGS.wmc * tf.log(tf.reduce_mean(wmc))
# train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# train_step = tf.train.AdagradOptimizer(0.1).minimize(loss)
# train_step = tf.train.MomentumOptimizer(0.1, 0.5).minimize(loss)
train_step = tf.train.AdamOptimizer().minimize(loss)
# Train loop
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Should only compute pairwise comparisons once for labels
train_pw = to_pairwise_comp(sushi_data.train_labels, np.sqrt(OUTPUT_SIZE))
valid_pw = to_pairwise_comp(sushi_data.valid_labels, np.sqrt(OUTPUT_SIZE))
test_pw = to_pairwise_comp(sushi_data.test_labels, np.sqrt(OUTPUT_SIZE))
print train_pw.shape
print train_pw
# For early stopping
prev_loss = 1e15
# Train
for i in range(FLAGS.iters):
# batch_xs, batch_ys = sushi_data.get_batch(400)
batch_xs, batch_ys = sushi_data.train_data, sushi_data.train_labels
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Every 1k iterations check accuracy
if i % 100 == 0:
print("After %d iterations" % i)
# Computing "true" accuracies
correct_prediction = tf.equal(tf.reduce_sum(tf.abs(tf.to_int32(tf.nn.sigmoid(y)+0.5) - tf.to_int32(y_)), 1), 0)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print "Train accuracy: %f" % sess.run(accuracy, feed_dict={x: sushi_data.train_data, y_: sushi_data.train_labels})
print "Validation accuracy: %f" % sess.run(accuracy, feed_dict={x: sushi_data.valid_data, y_: sushi_data.valid_labels})
            # Computing MPE instantiation accuracies
pred = sess.run(tf.nn.sigmoid(y), feed_dict={x: sushi_data.train_data, y_: sushi_data.train_labels})
mpe_pred = np.array([cmpe.compute_mpe_inst([(1-p, p) for p in o]) for o in pred])
print "Train mpe accuracy: %f" % (float(np.sum(np.equal(np.sum(np.abs(mpe_pred - sushi_data.train_labels), axis=1), 0))) / float(sushi_data.train_data.shape[0]))
valid_pred = sess.run(tf.nn.sigmoid(y), feed_dict={x: sushi_data.valid_data, y_: sushi_data.valid_labels})
valid_mpe_pred = np.array([cmpe.compute_mpe_inst([(1-p, p) for p in o]) for o in valid_pred])
print "Validation mpe accuracy: %f" % (float(np.sum(np.equal(np.sum(np.abs(valid_mpe_pred - sushi_data.valid_labels), axis=1), 0))) / float(sushi_data.valid_data.shape[0]))
# Percentage of individual labels that are right
print("Percentage of individual labels in training that are right: %f" % (1. - np.sum(np.abs(np.array(pred + 0.5, int) - sushi_data.train_labels))/float(sushi_data.train_labels.shape[0] * sushi_data.train_labels.shape[1])))
print("Percentage of individual labels in validation that are right: %f" % (1. - np.sum(np.abs(np.array(valid_pred + 0.5, int) - sushi_data.valid_labels))/float(sushi_data.valid_labels.shape[0] * sushi_data.valid_labels.shape[1])))
# Compute pairwise accuracies using MPE
mpe_pred_pw = to_pairwise_comp(mpe_pred, 5)
print "Train pairwise mpe accuracy: %f" % (1. - float(np.sum(np.abs(mpe_pred_pw - train_pw)))/float(10*sushi_data.train_data.shape[0]))
valid_mpe_pred_pw = to_pairwise_comp(valid_mpe_pred, 5)
print "Validation pairwise mpe accuracy: %f" % (1. - float(np.sum(np.abs(valid_mpe_pred_pw - valid_pw)))/float(10*sushi_data.valid_data.shape[0]))
# Print loss
print "Train loss: %f" % sess.run(loss, feed_dict={x: sushi_data.train_data, y_: sushi_data.train_labels})
valid_loss = sess.run(loss, feed_dict={x: sushi_data.valid_data, y_: sushi_data.valid_labels})
print "Validation loss: %f" % valid_loss
# Print WMC
print "Train WMC: %f" % sess.run(tf.reduce_mean(wmc), feed_dict={x: sushi_data.train_data, y_: sushi_data.train_labels})
print "Validation WMC: %f" % sess.run(tf.reduce_mean(wmc), feed_dict={x: sushi_data.valid_data, y_: sushi_data.valid_labels})
print("Percentage of predictions that follow constraint: %f" % (float(np.sum([cmpe.weighted_model_count([(1-p, p) for p in o]) for o in np.array(valid_pred + 0.5, int)]))/float(sushi_data.valid_data.shape[0])))
# Early stopping
if prev_loss < valid_loss:
print "Stopping early"
print "Test accuracy: %f" % sess.run(accuracy, feed_dict={x: sushi_data.test_data, y_: sushi_data.test_labels})
test_pred = sess.run(tf.nn.sigmoid(y), feed_dict={x: sushi_data.test_data, y_: sushi_data.test_labels})
test_mpe_pred = np.array([cmpe.compute_mpe_inst([(1-p, p) for p in o]) for o in test_pred])
print "Test mpe accuracy: %f" % (float(np.sum(np.equal(np.sum(np.abs(test_mpe_pred - sushi_data.test_labels), axis=1), 0))) / float(sushi_data.test_data.shape[0]))
print("Percentage of individual labels in test that are right: %f" % (1. - np.sum(np.abs(np.array(test_pred + 0.5, int) - sushi_data.test_labels))/float(sushi_data.test_labels.shape[0] * sushi_data.test_labels.shape[1])))
test_mpe_pred_pw = to_pairwise_comp(test_mpe_pred, 5)
print "Test pairwise mpe accuracy: %f" % (1. - float(np.sum(np.abs(test_mpe_pred_pw - test_pw)))/float(10*sushi_data.test_data.shape[0]))
sys.exit()
else:
prev_loss = valid_loss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Args go here
parser.add_argument('--units', type=int, default=100,
help='Number of units per hidden layer')
parser.add_argument('--layers', type=int, default=3,
help='Number of hidden layers')
parser.add_argument('--wmc', type=float, default=0.0,
help='Coefficient of WMC in loss')
parser.add_argument('--iters', type=int, default=10000,
help='Number of minibatch steps to do')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 2.3125 | 2 |
auth-center/App/decorator/role_check_dec.py | Basic-Components/auth-center | 1 | 12797932 | from functools import wraps
from sanic.response import json
from App.model import User
def role_check():
def decorator(func):
@wraps(func)
async def handler(request, *args, **kwargs):
if (request.app.name not in request.args['auth_roles']):
                return json({"message": "You do not have permission to view this"}, 401)
else:
return await func(request, *args, **kwargs)
return handler
return decorator
def role_or_self_check():
def decorator(func):
@wraps(func)
async def handler(request, *args, **kwargs):
try:
_id = kwargs["_id"]
except:
                return json({"message": "The URL must contain `_id`"}, 400)
            if not ((request.app.name in request.args['auth_roles']) or _id == request.args['auth_id']):
                return json({"message": "You do not have permission to view this"}, 401)
else:
return await func(request, *args, **kwargs)
return handler
return decorator
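# Editor's note: illustrative usage only, not part of the original module. The handler names
# are made up; the decorators are applied exactly as defined above, and Sanic is expected to
# pass `_id` extracted from the route as a keyword argument.
@role_check()
async def _example_admin_handler(request):
    return json({"message": "role check passed"}, 200)
@role_or_self_check()
async def _example_profile_handler(request, _id):
    return json({"message": "allowed for role holders or the user themselves"}, 200)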
| 2.375 | 2 |
moire/array.py | speedcell4/moire | 2 | 12797933 | import dynet as dy
import numpy as np
import moire
from moire import Expression
__all__ = [
'zeros', 'ones', 'full', 'normal', 'bernoulli', 'uniform', 'gumbel',
'zeros_like', 'ones_like', 'full_like', 'normal_like', 'bernoulli_like', 'uniform_like', 'gumbel_like',
'eye', 'diagonal',
'where',
]
def zeros(*dim, batch_size: int = 1) -> Expression:
a = np.zeros((*dim, batch_size), dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def zeros_like(x: Expression) -> Expression:
dim, batch_size = x.dim()
return zeros(*dim, batch_size=batch_size)
def ones(*dim, batch_size: int = 1) -> Expression:
a = np.ones((*dim, batch_size), dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def ones_like(x: Expression) -> Expression:
dim, batch_size = x.dim()
return ones(*dim, batch_size=batch_size)
def eye(N: int, M: int = None, k: int = 0) -> Expression:
return dy.inputTensor(np.eye(N, M, k), batched=False, device=moire.config.device)
def diagonal(x: Expression) -> Expression:
(dim0, dim1), batch_size = x.dim()
return dy.cmult(x, eye(dim0, dim1))
def full(*dim, value, batch_size: int = 1) -> Expression:
a = np.full((*dim, batch_size), fill_value=value, dtype=np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def full_like(x: Expression, value) -> Expression:
dim, batch_size = x.dim()
return full(*dim, value=value, batch_size=batch_size)
def normal(*dim, mean: float = 0.0, stddev: float = 1.0, batch_size: int = 1) -> Expression:
a = np.random.normal(loc=mean, scale=stddev, size=(*dim, batch_size)).astype(np.float32)
return dy.inputTensor(a, batched=True, device=moire.config.device)
def normal_like(x: Expression, mean: float = 0.0, stddev: float = 1.0) -> Expression:
dim, batch_size = x.dim()
return normal(*dim, mean=mean, stddev=stddev, batch_size=batch_size)
def bernoulli(*dim, p: float, batch_size: int = 1) -> Expression:
a = np.random.uniform(low=0, high=1.0, size=(*dim, batch_size)) < p
return dy.inputTensor(a.astype(np.int32), batched=True, device=moire.config.device)
def bernoulli_like(x: Expression, p: float) -> Expression:
dim, batch_size = x.dim()
return bernoulli(*dim, p=p, batch_size=batch_size)
def uniform(*dim, low: float, high: float, batch_size: int = 1) -> Expression:
a = np.random.uniform(low=low, high=high, size=(*dim, batch_size))
return dy.inputTensor(a, batched=True, device=moire.config.device)
def uniform_like(x: Expression, low: float, high: float) -> Expression:
dim, batch_size = x.dim()
    return uniform(*dim, low=low, high=high, batch_size=batch_size)
def gumbel(*dim, mu: float = 0.0, beta: float = 1.0, batch_size: int = 1) -> Expression:
a = np.random.gumbel(loc=mu, scale=beta, size=(*dim, batch_size))
return dy.inputTensor(a, batched=True, device=moire.config.device)
def gumbel_like(x: Expression, mu: float = 0.0, beta: float = 1.0) -> Expression:
dim, batch_size = x.dim()
return gumbel(*dim, mu=mu, beta=beta, batch_size=batch_size)
def where(cond: Expression, x: Expression, y: Expression) -> Expression:
return dy.cmult(cond, x) + dy.cmult(1.0 - cond, y)
if __name__ == '__main__':
a = dy.inputTensor([[1, 2, 3], [2, 3, 4], ])
moire.debug(f'a :: {a.dim()} => {a.value()}')
b = diagonal(a)
moire.debug(f'b :: {b.dim()} => {b.value()}')
| 2.125 | 2 |
nemo/collections/common/losses/aggregator.py | hamjam/NeMo | 4,145 | 12797934 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import LossType, NeuralType
__all__ = ['AggregatorLoss']
class AggregatorLoss(Loss):
"""
Sums several losses into one.
Args:
num_inputs: number of input losses
weights: a list of coefficient for merging losses
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
input_types = {}
for i in range(self._num_losses):
input_types["loss_" + str(i + 1)] = NeuralType(elements_type=LossType())
return input_types
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, num_inputs: int = 2, weights: List[float] = None):
super().__init__()
self._num_losses = num_inputs
if weights is not None and len(weights) != num_inputs:
raise ValueError("Length of weights should be equal to the number of inputs (num_inputs)")
self._weights = weights
@typecheck()
def forward(self, **kwargs):
values = [kwargs[x] for x in sorted(kwargs.keys())]
loss = torch.zeros_like(values[0])
for loss_idx, loss_value in enumerate(values):
if self._weights is not None:
loss = loss.add(loss_value, alpha=self._weights[loss_idx])
else:
loss = loss.add(loss_value)
return loss
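# Editor's note: an illustrative usage sketch, not part of the original file. It assumes the
# aggregator is called with keyword arguments loss_1 ... loss_N (matching input_types above);
# whether raw tensors pass typecheck() unchanged depends on the NeMo version in use.
def _aggregator_usage_sketch():
    agg = AggregatorLoss(num_inputs=2, weights=[0.7, 0.3])
    loss_1 = torch.tensor(1.0)
    loss_2 = torch.tensor(2.0)
    return agg(loss_1=loss_1, loss_2=loss_2)  # expected: 0.7 * 1.0 + 0.3 * 2.0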
| 2.25 | 2 |
PythonExercicios/ex070.py | Luis-Emanuel/Python | 0 | 12797935 | # Create a program that reads the name and price of several products. The program should ask whether the user wants to continue.
# At the end, show: A) the total spent on the purchase, B) how many products cost more than 1000, C) the name of the cheapest product.
cont = soma = produto_mais = produtovalor = 0
produto_menos = ''
print(20*'-')
print('{:^20}'.format('<NAME>'))
print(20*'-')
while True:
    nome = str(input('Product name: ')).strip()
    preco = float(input('Price R$: '))
soma += preco
cont += 1
if preco > 1000:
produto_mais += 1
if cont == 1 or produtovalor > preco:
produto_menos = nome
produtovalor = preco
    resp = str(input('Do you want to continue? [Y/N] ')).upper().strip()[0]
if resp == 'N':
break
print(20*'=')
print(f'The total spent was R${soma:.2f}')
print(f'{produto_mais} products cost more than R$1000.00')
print(f'The cheapest product was {produto_menos}, which costs R${produtovalor:.2f}')
| 4 | 4 |
cinder/backup/drivers/sheepdog.py | AO-AO/cmss-cinder | 0 | 12797936 | #coding:utf-8
import time
import json
import urllib2
from oslo.config import cfg
from cinder import exception
from oslo_log import log as logging
from cinder.backup.driver import BackupDriver
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('cinder_ip',
default='172.16.172.250:8776',
help='ebs management node ip.'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
class SheepdogBackupDriver(BackupDriver):
def __init__(self, context, db_driver=None):
super(SheepdogBackupDriver, self).__init__(db_driver)
self.context = context
self._server_ip = self._utf8(CONF.cinder_ip)
@staticmethod
def _utf8(s):
"""Ensure string s is utf8 (i.e. not unicode)."""
if isinstance(s, str):
return s
return s.encode('utf8')
def backup(self, backup, volume_file):
LOG.info('Starting backup...... Creating a new backup for volume:%s.' % backup['volume_id'])
backup_id = backup['id']
url = 'http://' + self._server_ip + '/v2/admin/backups'
data = {
"backup":{
"container" : backup['container'],
"description": backup['display_description'],
"name" : backup['display_name'],
"volume_id" : backup['volume_id'],
"backupid" : backup_id
}
}
jdata = json.dumps(data)
req = urllib2.Request(url, jdata)
req.add_header('Content-type', 'application/json')
try:
response = urllib2.urlopen(req)
LOG.debug(response.read())
except urllib2.HTTPError, e:
LOG.debug(e.code)
msg = "redirect backup cmd failed!"
raise exception.BackupOperationError(msg)
while True:
url = 'http://' + self._server_ip + '/v2/admin/backups/' + backup_id
try:
response = urllib2.urlopen(url)
ret = response.read()
LOG.debug("RET: %r" % ret)
data = json.loads(ret)
except urllib2.HTTPError, e:
LOG.debug(e.code)
msg = "confirm backup cmd failed!"
raise exception.BackupOperationError(msg)
if data['backup']['status'] == 'available':
size = data['backup']['object_count']
LOG.debug("size %s MB." % size)
LOG.info('backup finished.')
break
time.sleep(3)
return size
def restore(self, backup, target_volume_id, volume_file):
LOG.info('Starting restore...... restore from src_volume:%(src)s to dst_volume:%(dst)s' %
{'src': backup['volume_id'], 'dst': str("volume-" + target_volume_id)})
backup_id = backup['id']
url = 'http://' + self._server_ip + '/v2/admin/backups/' + backup_id + '/restore'
data = {
"restore":{
"volume_id": target_volume_id
}
}
jdata = json.dumps(data)
req = urllib2.Request(url, jdata)
req.add_header('Content-type', 'application/json')
try:
response = urllib2.urlopen(req)
LOG.debug(response.read())
except urllib2.HTTPError, e:
LOG.debug(e.code)
msg = "redirect restore cmd failed!"
raise exception.BackupOperationError(msg)
while True:
url = 'http://' + self._server_ip + '/v2/admin/backups/' + backup_id
try:
response = urllib2.urlopen(url)
ret = response.read()
LOG.debug("RET: %r" % ret)
data = json.loads(ret)
except urllib2.HTTPError, e:
LOG.debug(e.code)
msg = "confirm restore cmd failed!"
raise exception.BackupOperationError(msg)
if data['backup']['status'] == 'available':
LOG.info('restore finished.')
break
time.sleep(3)
def delete(self, backup):
LOG.info('Starting delete...... backupid:%s' % backup['id'])
backup_id = backup['id']
url = 'http://' + self._server_ip + '/v2/admin/backups/' + backup_id
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
req.get_method = lambda:'DELETE'
try:
response = urllib2.urlopen(req)
LOG.debug(response.read())
except urllib2.HTTPError, e:
LOG.debug(e.code)
if e.code == 404:
msg = "backup does not exist!"
LOG.info(msg)
raise exception.BackupOperationError(msg)
#help to decide the volume whether belongs to ebs
else:
msg = "redirect delete cmd failed!"
raise exception.BackupOperationError(msg)
while True:
url = 'http://' + self._server_ip + '/v2/admin/backups/' + backup_id
try:
urllib2.urlopen(url)
except urllib2.HTTPError, e:
LOG.debug(e.code)
if e.code == 404:
"""backup does not exist! already success!"""
LOG.info('delete finished.')
break
else:
msg = "confirm delete cmd failed!"
raise exception.BackupOperationError(msg)
time.sleep(3)
def get_backup_driver(context):
return SheepdogBackupDriver(context)
if __name__ == '__main__':
    driver = SheepdogBackupDriver(context=None)  # __init__ requires a context; None suffices for a local smoke test
| 2.125 | 2 |
app/common/helpers.py | citizensvs/cvc19backend | 1 | 12797937 | from app.common.conf import PHONE_NUMBER_VALIDATOR
def phone_number_valid(phone_number):
if phone_number.isdigit():
if PHONE_NUMBER_VALIDATOR.match(phone_number):
return True
raise ValueError
def normalize_phone_number(phone):
phone = phone.replace("+", "")
phone = phone.replace(" ", "")
if len(phone) == 10:
phone = f"91{phone}"
return phone
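# Editor's note: illustrative examples, not part of the original module. The sample numbers
# are made up; the exact pattern accepted by PHONE_NUMBER_VALIDATOR lives in app.common.conf.
def _helpers_examples():
    assert normalize_phone_number("+91 98765 43210") == "919876543210"
    assert normalize_phone_number("9876543210") == "919876543210"
    # phone_number_valid() returns True for numbers matching PHONE_NUMBER_VALIDATOR
    # and raises ValueError otherwise.
    return phone_number_valid("919876543210")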
| 2.734375 | 3 |
sandbox/DS_registration_scripts/register_ap_ds.py | ska-sa/mkat-tango | 0 | 12797938 | # register_ap_ds.py
# -*- coding: utf8 -*-
# vim:fileencoding=utf8 ai ts=4 sts=4 et sw=4
# Copyright 2016 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
import PyTango
ap_name = "mkat_sim/ap/1"
dev_info = PyTango.DbDevInfo()
dev_info.server = "mkat-tango-AP-DS/test"
dev_info._class = "MkatAntennaPositioner"
dev_info.name = ap_name
db = PyTango.Database()
db.add_device(dev_info)
print("Registration of antenna positioner Successful")
| 1.96875 | 2 |
src/get_gov.py | cannibalcheeseburger/covid-19-tracker | 0 | 12797939 | import urllib.request
from bs4 import BeautifulSoup
def get_go():
url = "https://www.mohfw.gov.in/"
uClient = urllib.request.urlopen(url)
page_html = uClient.read()
uClient.close()
page_soup = BeautifulSoup(page_html,"html.parser")
news = page_soup.find_all('div',class_ = 'update-box')
newz = []
for new in news:
newz.append([new.strong.text,new.a.text.strip(),new.a['href']])
return newz | 3.34375 | 3 |
Chap10/hello.py | RiddhiDamani/Python | 0 | 12797940 | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Exceptions are a powerful runtime error-reporting mechanism commonly used in object-oriented systems.
# ValueError is the token, i.e. the name of the error that is being raised here.
# sys has a lot of constants in it.
import sys
def main():
print('Hello, World.')
try:
# x = int('foo')
# x = 5 / 3
x = 5 / 0
# capturing the error gracefully
# if you don't catch the errors it will stop the execution of your python script.
except ValueError:
print('I caught a ValueError!')
except ZeroDivisionError:
print('Don\'t divide by zero')
# if you don't know which error it is!
except:
print(f'unknown error! : {sys.exc_info()[1]}')
# else block gets executed only if you don't have any errors!
else:
print('Good Job!')
print(x)
if __name__ == '__main__':
main()
| 4.03125 | 4 |
utils_batch.py | gregor8003/text_word_embed | 0 | 12797941 | from bisect import bisect_left
import itertools
from numpy.lib.npyio import load as npload
import numpy as np
from utils import (
get_mdsd_csv_cbow_data_input_files_iter,
get_mdsd_csv_cbow_data_output_files_iter,
grouper,
MDSD_MAIN_PATH,
)
def closest_batch_size(total_size, seed_size):
# produce all factors of total_size
factors = [d for d in range(1, total_size // 2 + 1) if not total_size % d]
# select number closest to seed size
# https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
pos = bisect_left(factors, seed_size)
if pos == 0:
return factors[0]
if pos == len(factors):
return factors[-1]
before = factors[pos - 1]
after = factors[pos]
closest = before
if after - seed_size < seed_size - before:
closest = after
return closest
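# Editor's note: illustrative example, not part of the original module. For 1000 samples the
# factors run 1, 2, 4, 5, ..., 500, and the factor nearest a seed of 128 is 125, so every
# batch divides the data evenly with no ragged final batch.
def _closest_batch_size_demo():
    assert closest_batch_size(1000, 128) == 125
    return closest_batch_size(1000, 128)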
def gen_array(array):
for els in np.nditer(array, flags=['external_loop']):
for el in grouper(els, 2):
yield el
def get_mdsd_csv_cbow_data_input_iter(batch_size, main_path=MDSD_MAIN_PATH):
iterators = []
for path in get_mdsd_csv_cbow_data_input_files_iter(main_path=main_path):
data = npload(path, mmap_mode='r')
iterators.append(gen_array(data))
main_it = itertools.chain.from_iterable(iterators)
for batch in grouper(main_it, batch_size):
batch = np.stack(batch, axis=0)
yield batch
def get_mdsd_csv_cbow_data_output_iter(batch_size, main_path=MDSD_MAIN_PATH):
iterators = []
for path in get_mdsd_csv_cbow_data_output_files_iter(main_path=main_path):
data = npload(path, mmap_mode='r')
iterators.append(gen_array(data))
main_it = itertools.chain.from_iterable(iterators)
for batch in grouper(main_it, batch_size):
batch = np.stack(batch, axis=0)
yield batch
def fit_generator(batch_size, main_path=MDSD_MAIN_PATH):
while True:
for res in fit_generator_it(batch_size, main_path):
yield res
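# Editor's note: illustrative training sketch, not part of the original module. `model` is an
# assumed, already-compiled Keras model; only the generator wiring around fit_generator() is
# shown here.
def _fit_generator_usage(model, batch_size):
    total = get_mdsd_csv_cbow_data_input_size()
    return model.fit_generator(
        fit_generator(batch_size),
        steps_per_epoch=total // batch_size,
        epochs=1,
    )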
def fit_generator_it(batch_size, main_path=MDSD_MAIN_PATH):
gens = zip(
get_mdsd_csv_cbow_data_input_iter(batch_size, main_path=main_path),
get_mdsd_csv_cbow_data_output_iter(batch_size, main_path=main_path)
)
for tr, lb in gens:
yield tr, lb
def get_mdsd_csv_cbow_data_input_size(main_path=MDSD_MAIN_PATH):
total_size = 0
for path in get_mdsd_csv_cbow_data_input_files_iter(main_path=main_path):
data = npload(path, mmap_mode='r')
total_size = total_size + data.shape[0]
return total_size
def get_mdsd_csv_cbow_data_output_size(main_path=MDSD_MAIN_PATH):
total_size = 0
for path in get_mdsd_csv_cbow_data_output_files_iter(main_path=main_path):
data = npload(path, mmap_mode='r')
total_size = total_size + data.shape[0]
return total_size
| 2.78125 | 3 |
scripts/deploy_stack.py | sfuruya0612/snatch | 3 | 12797942 | # -*- coding: utf-8 -*-
import glob
import os
import re
import sys
import logging
from boto3.session import Session
from botocore.exceptions import ClientError
from argparse import ArgumentParser
TEMPLATES = [
"/scripts/ec2.yml",
]
logger = logging.getLogger()
formatter = '%(levelname)s : %(asctime)s : %(message)s'
logging.basicConfig(level=logging.INFO, format=formatter)
class DeployStack:
# Option parser.
def get_option(self):
usage = "python " + sys.argv[0] + " [-h | --help] [-a | --app <APP_NAME>] [-p | --profile <AWS_PROFILE>] [-r | --region <AWS_REGION>]"
argparser = ArgumentParser(usage=usage)
argparser.add_argument("-a", "--app", type=str,
default="snatch",
help="Target app name.")
argparser.add_argument("-p", "--profile", type=str,
default="default",
help="~/.aws/config.")
argparser.add_argument("-r", "--region", type=str,
default="ap-northeast-1",
help="AWs regions. e.g. ap-northeast-1, us-east-1, ...")
return argparser.parse_args()
# Update CFn stacks.
def update_stack(self, stack_name, cfn, input):
w = cfn.get_waiter("stack_update_complete")
try:
cfn.update_stack(**input)
logger.info("Update %s.", stack_name)
w.wait(
StackName = stack_name,
)
return logger.info("Update %s complete.", stack_name)
except ClientError as e:
return logger.warning("%s", e.response["Error"]["Message"])
# Create CFn stacks.
def create_stack(self, stack_name, cfn, input):
w = cfn.get_waiter("stack_create_complete")
try:
cfn.create_stack(**input)
logger.info("Create %s.", stack_name)
w.wait(
StackName = stack_name,
)
return logger.info("Create %s complete.", stack_name)
except ClientError as e:
if e.response["Error"]["Code"] == "AlreadyExistsException":
self.update_stack(stack_name, cfn, input)
return
else:
return logger.warning("%s", e.response["Error"]["Message"])
# Valid CFn template.
def valid_template(self, template, body, cfn):
logger.info("Validate checks %s", template)
try:
cfn.validate_template(
TemplateBody = body,
)
return logger.info("%s is validation OK.", template)
except ClientError as e:
return logger.warning("%s", e.response["Error"]["Message"])
# Create EC2 keypair.
    # Write the private key under ~/.ssh/ (file permission: 0600)
def create_keypair(self, app_name, session):
logger.info("Create %s KeyPair.", app_name)
ec2 = session.client("ec2")
try:
ec2.describe_key_pairs(
KeyNames=[
app_name,
],
)
return logger.info("%s KeyPair already exists.", app_name)
except ClientError as e:
if e.response["Error"]["Code"] == "InvalidKeyPair.NotFound":
res = ec2.create_key_pair(
KeyName=app_name,
)
private_key = res["KeyMaterial"]
pem_file = open(os.environ["HOME"] + "/.ssh/" + app_name + ".pem", "w")
pem_file.write(private_key)
                pem_file.close()
os.chmod(os.environ["HOME"] + "/.ssh/" + app_name + ".pem", 0o600)
return logger.info("%s KeyPair created.", app_name)
else:
return logger.warning("%s", e.response["Error"]["Message"])
    # Provision stacks.
def provisiond(self, app_name, profile, region):
session = Session(profile_name=profile, region_name=region)
self.create_keypair(app_name, session)
cfn = session.client("cloudformation")
for t in TEMPLATES:
path = os.getcwd() + t
body = open(path).read()
stack_name = app_name + "-" + re.sub('\/(.*)\/(.*)\.yml', '\\1-\\2', t)
self.valid_template(t, body, cfn)
input = {
"StackName": stack_name,
"TemplateBody": body,
"Capabilities": [
'CAPABILITY_NAMED_IAM',
],
"Parameters": [
{
"ParameterKey": "AppName",
"ParameterValue": app_name,
},
],
}
try:
self.create_stack(stack_name, cfn, input)
except ClientError as e:
logger.warning("%s", e.response["Error"]["Message"])
return
@staticmethod
def main():
logger.info("Start provision stacks.")
self = DeployStack()
options = self.get_option()
app_name = options.app
profile = options.profile
region = options.region
self.provisiond(app_name, profile, region)
return logger.info("Finish provision stacks.")
if __name__ == '__main__':
DeployStack.main()
| 2.078125 | 2 |
tartiflette/directive/builtins/non_introspectable.py | alexchamberlain/tartiflette | 0 | 12797943 | from typing import Any, Callable, Dict, Optional
from tartiflette import Directive
class NonIntrospectable:
async def on_introspection(
self,
_directive_args: Dict[str, Any],
_next_directive: Callable,
_introspected_element: Any,
_ctx: Optional[Dict[str, Any]],
_info: "Info",
) -> None:
return None
class NonIntrospectableDeprecated:
async def on_introspection(
self,
_directive_args: Dict[str, Any],
_next_directive: Callable,
_introspected_element: Any,
_ctx: Optional[Dict[str, Any]],
_info: "Info",
) -> None:
print(
"@non_introspectable is deprecated, please use @nonIntrospectable, will be removed in 0.12.0"
)
return None
def bake(schema_name, _config):
sdl = """
directive @nonIntrospectable on FIELD_DEFINITION
directive @non_introspectable on FIELD_DEFINITION
"""
Directive(name="nonIntrospectable", schema_name=schema_name)(
NonIntrospectable()
)
Directive(name="non_introspectable", schema_name=schema_name)(
NonIntrospectableDeprecated()
)
return sdl
| 2.078125 | 2 |
reading/book/migrations/0003_auto_20180613_1926.py | Family-TreeSY/reading | 2 | 12797944 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-06-13 11:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0002_auto_20180613_1914'),
]
operations = [
migrations.AlterField(
model_name='story',
name='image',
field=models.ImageField(blank=True, upload_to=b'', verbose_name='\u56fe\u7247'),
),
]
| 1.359375 | 1 |
TyphoonApi/suncreative/migrations/0008_auto_20210306_0049.py | ZhangDubhe/Tropical-Cyclone-Information-System | 9 | 12797945 | # Generated by Django 2.2.13 on 2021-03-06 00:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('suncreative', '0007_postrecord_state'),
]
operations = [
migrations.CreateModel(
name='MediaFolder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('parent', models.IntegerField(default=None, null=True)),
],
),
migrations.AddField(
model_name='media',
name='folder',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='文件夹', to='suncreative.MediaFolder'),
),
]
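# Illustrative sketch of model definitions that would produce this migration, reconstructed
# from the operations above rather than taken from the app's models.py:
#
#     class MediaFolder(models.Model):
#         name = models.CharField(max_length=100)
#         parent = models.IntegerField(default=None, null=True)
#
#     class Media(models.Model):
#         ...
#         folder = models.ForeignKey(MediaFolder, default=None, null=True,
#                                    on_delete=models.CASCADE, related_name='文件夹')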
| 1.835938 | 2 |
lcms/parse_mzml.py | NetherlandsForensicInstitute/msmatcher | 0 | 12797946 | import glob
from collections import namedtuple
import dateutil.parser
import numpy as np
import pandas as pd
import pymzml
import config
import lcms.utils as utils
def create_spectrum_and_peak_tables(msrun_list, experiment_id):
    """
    Fill the Spectrum table and, for each spectrum, the Peak table.
    :param msrun_list: list of spectra from an open pymzml runner
    :param experiment_id: id of the experiment the spectra belong to
    :return:
    """
spectrum = namedtuple('spectrum',
'experiment_id ' +
'spectrum_id ' +
'total_ion_current ' +
'time_passed_since_start ' +
'ms_level ' +
'highest_observed_mz ' +
'lowest_observed_mz ' +
'scan_window_upper_limit ' +
'scan_window_lower_limit')
measurement = namedtuple('measurement',
'experiment_id ' +
'spectrum_id' +
' mz' +
' intensity')
spectrum_list = []
measurement_list = []
for i, spc in enumerate(msrun_list):
if i % 100 == 0:
print("Spectrum {}".format(i))
s = spectrum(experiment_id=experiment_id, spectrum_id=spc.ID,
total_ion_current=spc['total ion current'],
time_passed_since_start=spc['scan start time'], ms_level=spc['ms level'],
highest_observed_mz=spc['highest observed m/z'],
lowest_observed_mz=spc['lowest observed m/z'],
scan_window_upper_limit=spc['scan window upper limit'],
scan_window_lower_limit=spc['scan window lower limit'])
spectrum_list.append(s)
if i == 0:
m = measurement(experiment_id=experiment_id, spectrum_id=spc.ID, mz=np.nan,
intensity=np.nan)
else:
m = measurement(experiment_id=experiment_id, spectrum_id=spc.ID, mz=spc.mz,
intensity=spc.i)
# Fill peak table if experiment_id + spectrum_id do not already exist in table Peak
check_peak = config.db_connection.execute(
"""SELECT experiment_id from "Peak"
WHERE experiment_id = '{}'
AND spectrum_id = '{}'"""\
.format(experiment_id, spc.ID)).fetchone()
if check_peak is not None:
print(
("Experiment_id {} + spectrum_id {} combination already exists in Peak table. " +
"To avoid duplicates the spectra won't be added to the Peak table").format(experiment_id, spc.ID))
else:
peak_table = pd.DataFrame({"mz": spc.mz, "intensity": spc.i})
peak_table['experiment_id'] = experiment_id
peak_table['spectrum_id'] = spc.ID
peak_table.to_sql('Peak', con=config.db_connection, index=False, if_exists='append')
print("Appended to Peak table from experiment_id: {}, spectrum_id: {}".format(experiment_id, spc.ID))
measurement_list.append(m)
# check if experiment_id already exists in Spectrum table. If not, append data to Spectrum table
check_spectrum = config.db_connection.execute(
"""SELECT experiment_id from "Spectrum" WHERE experiment_id = '{}' """
.format(experiment_id)).fetchone()
if check_spectrum is not None:
print(("Experiment_id {} already exists in Spectrum table. " +
"To avoid duplicates the spectra won't be added to the Spectrum table")
.format(experiment_id))
else:
spectrum_table = pd.DataFrame(spectrum_list)
spectrum_table.to_sql('Spectrum', con=config.db_connection, index=False, if_exists='append')
print("Appended to Spectrum table with info from experiment_id: {}".format(experiment_id))
def create_experiment(msrun_list, filename):
"""
Create a new experiment structure based on the information in the msrun_list.
:param msrun_list: an open pymzml runner
:param filename: name of the pymzml file
:return: a dictionary containing the initialized experiment
"""
experiment = dict.fromkeys(['run_id', 'run_start_time', 'human_run_start_time', 'spectra_count',
'experimental_state_id', 'match_type_id', 'filename'])
    # TODO: the values in the table below still need to be filled in
experiment['run_id'] = msrun_list.info['run_id']
if "start_time" in msrun_list.info.keys():
start_time_str = msrun_list.info["start_time"]
start_time = dateutil.parser.parse(start_time_str)
experiment['run_start_time'] = start_time.timestamp()
experiment['human_run_start_time'] = start_time
else:
experiment['run_start_time'] = None
experiment['human_run_start_time'] = None
experiment['spectra_count'] = msrun_list.info['spectrum_count']
experiment['experimental_state_id'] = None
experiment['match_type_id'] = None
experiment['filename'] = filename.split('/')[-1]
return experiment
def create_experiment_table(msrun_list, filename):
"""
fills the Experiment table.
:param msrun_list: an open pymzml runner
:param filename: name of the pymzml file
:return:
"""
experiment = create_experiment(msrun_list, filename)
utils.append_to_experiment('Experiment', experiment)
experiment_id = config.db_connection.execute(
"""SELECT experiment_id from "Experiment" WHERE filename = '{}' """.format(
filename.split('/')[-1])).fetchone()[0]
return experiment_id
def read_file(filename):
msrun = pymzml.run.Reader(filename)
msrun_list = list(msrun)
# check if filename already in Experiment table
check = config.db_connection.execute(
"""SELECT experiment_id from "Experiment" WHERE filename = '{}' """.format(
filename.split('/')[-1])).fetchone()
if check is not None:
print("File already exists in DB. Continue to filling Spectrum table")
experiment_id = check[0]
else:
# fill the Experiment table with data from file
experiment_id = create_experiment_table(msrun_list, filename)
# fill the Spectrum and Peak table with data from file
create_spectrum_and_peak_tables(msrun_list, experiment_id)
if __name__ == "__main__":
for n, filename in enumerate(glob.iglob('{}Geconverteerd/*.mzML'.format(config.data_dir))):
print("reading file {} ({})".format(n, filename.split('/')[-1]))
if '+' in filename.split('/')[-1]:
print("Raw data, will be skipped for now")
continue
read_file(filename)
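# Illustrative sketch of the `config` module this script assumes, inferred from the
# `config.db_connection` and `config.data_dir` usages above; the connection URL and the
# directory path are placeholders, not values taken from the project:
#
#     from sqlalchemy import create_engine
#     data_dir = "/path/to/mzml/files/"
#     db_connection = create_engine("postgresql://user:password@localhost/msmatcher").connect()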
| 2.390625 | 2 |
setup.py | ddolzhenko/dirutil | 0 | 12797947 | from setuptools import setup, find_packages
ver = "0.4"
setup(
name = 'dirutil',
version = ver,
description = 'High level directory utilities',
keywords = ['dir', 'directory', 'workdir', 'tempdir'],
author = '<NAME>',
author_email = '<EMAIL>',
packages = find_packages(),
test_suite = 'dirutil.get_tests',
url = 'https://github.com/ddolzhenko/dirutil',
download_url = 'https://github.com/ddolzhenko/dirutil/archive/v{}.tar.gz'.format(ver),
classifiers = [],
install_requires = [
# "checksumdir==1.0.5",
],
)
| 1.34375 | 1 |
ingine/examples/eight_queens_puzzle.py | sqarrt/Ingine | 0 | 12797948 | import random
from pyeasyga import pyeasyga
from ingine import ga
# setup seed data
data = [0, 1, 2, 3, 4, 5, 6, 7]
# initialise a pyeasyga GA for reference with the same settings; the ingine optimizer configured below is what actually runs
gaa = pyeasyga.GeneticAlgorithm(data,
population_size = 200,
generations = 100,
crossover_probability = 0.8,
mutation_probability = 0.2,
elitism = True,
maximise_fitness = False)
# define and set function to create a candidate solution representation
def create_individual(data):
individual = data[:]
random.shuffle(individual)
return individual
def crossover(parent_1, parent_2):
crossover_index = random.randrange(1, len(parent_1))
child_1a = parent_1[:crossover_index]
child_1b = [i for i in parent_2 if i not in child_1a]
child_1 = child_1a + child_1b
child_2a = parent_2[crossover_index:]
child_2b = [i for i in parent_1 if i not in child_2a]
child_2 = child_2a + child_2b
return child_1, child_2
def mutate(individual):
mutate_index1 = random.randrange(len(individual))
mutate_index2 = random.randrange(len(individual))
individual[mutate_index1], individual[mutate_index2] = individual[mutate_index2], individual[mutate_index1]
def selection(population):
return random.choice(population)
def fitness(individual, data):
collisions = 0
for item in individual:
item_index = individual.index(item)
for elem in individual:
elem_index = individual.index(elem)
if item_index != elem_index:
if item - (elem_index - item_index) == elem\
or (elem_index - item_index) + item == elem:
collisions += 1
return collisions
def print_board(board_representation):
def print_x_in_row(row_length, x_position):
print('-' + ''.join(['----' for _ in range(row_length)]))
        print('|' + ''.join([' X |' if i == x_position else '   |' for i in range(row_length)]))
def print_board_bottom(row_length):
print('-' + ''.join(['----' for _ in range(row_length)]))
num_of_rows = len(board_representation)
row_length = num_of_rows #rows == columns in a chessboard
for row in range(num_of_rows):
print_x_in_row(row_length, board_representation[row])
print_board_bottom(row_length)
print('\n')
optimizer = ga.get_optimizer(data,
population_size = 200,
generations = 100,
crossover_probability = 0.8,
mutation_probability = 0.2,
elitism = True,
maximise_fitness = False,
create_individual = create_individual,
crossover = crossover,
mutate = mutate,
fitness = fitness,
selection = selection)
res = optimizer()
# print the GA's best solution; a solution is valid only if there are no collisions
if res[0] == 0:
print(res)
print_board(res[1])
else:
print(None)
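# Quick sanity check of the fitness function above (illustrative addition, not part of the
# original example): [0, 4, 7, 5, 2, 6, 1, 3] is a known valid 8-queens placement, so it has no
# diagonal collisions, while the identity permutation puts every ordered pair of queens on a
# shared diagonal (8 * 7 = 56 collisions as counted here).
assert fitness([0, 4, 7, 5, 2, 6, 1, 3], data) == 0
assert fitness(list(range(8)), data) == 56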
| 2.96875 | 3 |
dyn_agent.py | Journerist/PokerJohn | 0 | 12797949 | from collections import deque
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
class DYNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount future rewards less
self.exploration_rate = 1
self.exploration_decay = 0.995
        self.exploration_min = 0.01
        self.learning_rate = 0.001  # assumed value; _build_model() below expects this attribute
self.model = self._build_model()
def _build_model(self):
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
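    # Illustrative sketch of the replay-memory and epsilon-greedy helpers an agent like this is
    # typically paired with; these methods are not part of the original file and follow the
    # common DQN pattern, using only the attributes initialised in __init__ above.
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        # Explore with probability exploration_rate, otherwise act greedily on predicted Q-values.
        if np.random.rand() <= self.exploration_rate:
            return random.randrange(self.action_size)
        q_values = self.model.predict(state)
        return int(np.argmax(q_values[0]))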
| 2.546875 | 3 |
tests/test_pandas_marc.py | cmsetzer/pandas-marc | 2 | 12797950 | #!/usr/bin/env python3
"""Test suite for pandas-marc."""
from pandas_marc import MARCDataFrame
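# The `dataframe` and `records` arguments below are pytest fixtures, presumably provided by a
# conftest.py that is not part of this file: a pandas DataFrame whose cells hold MARC field data
# (subfields delimited with '$', repeated occurrences with '\\') and the list of equivalent
# pymarc.Record objects used as the expected output.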
def test_instantiate_marcdataframe(dataframe):
kwargs = {
'dataframe': dataframe,
'occurrence_delimiter': '|',
'subfield_delimiter': '‡'
}
mdf = MARCDataFrame(**kwargs)
for key, value in kwargs.items():
assert getattr(mdf, key) is value
def test_marcdataframe_produces_correct_marc_records(dataframe, records):
mdf = MARCDataFrame(dataframe)
output = [record.as_marc() for record in mdf.records]
expected = [record.as_marc() for record in records]
assert output == expected
def test_marcdataframe_with_other_occurrence_delimiter(dataframe, records):
# Find and replace backslashes in original dataframe with pipes
dataframe = dataframe.replace(r'\\', '|', regex=True)
mdf = MARCDataFrame(dataframe, occurrence_delimiter='|')
output = [record.as_marc() for record in mdf.records]
expected = [record.as_marc() for record in records]
assert output == expected
def test_marcdataframe_with_other_subfield_delimiter(dataframe, records):
# Find and replace double daggers in original dataframe with dollar signs
dataframe = dataframe.replace(r'\$', '‡', regex=True)
mdf = MARCDataFrame(dataframe, subfield_delimiter='‡')
output = [record.as_marc() for record in mdf.records]
expected = [record.as_marc() for record in records]
assert output == expected
| 3.078125 | 3 |