blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba190de8b4de0297f1e0b734184bf21440191948
|
31cdde2bda93df838399f26fd4fcf0f2f7989a5c
|
/budget/migrations/0006_auto__add_field_budgetuser_coop.py
|
495a4e661790ba21a7819cec4a769a2b2936fc84
|
[] |
no_license
|
SteveXian/budget_app
|
e2ca586db1de8d2af392cfb254a0222059262f10
|
3c610eddfd0aa447cc0299c96762321ce1e2b397
|
refs/heads/master
| 2021-01-01T19:21:18.127896 | 2013-12-02T12:11:12 | 2013-12-02T12:11:12 | 12,254,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,383 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the boolean ``coop`` flag to BudgetUser."""

    def forwards(self, orm):
        # Adding field 'BudgetUser.coop'
        db.add_column(u'budget_budgetuser', 'coop',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'BudgetUser.coop'
        db.delete_column(u'budget_budgetuser', 'coop')

    # Frozen ORM state South uses to reconstruct the models at migration time.
    models = {
        u'budget.budgetplanningdata': {
            'Meta': {'object_name': 'BudgetPlanningData'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '19', 'decimal_places': '10'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'user_id': ('django.db.models.fields.IntegerField', [], {}),
            'year': ('django.db.models.fields.IntegerField', [], {})
        },
        u'budget.budgetuser': {
            'Meta': {'object_name': 'BudgetUser'},
            'coop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            'current_year': ('django.db.models.fields.IntegerField', [], {}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'program': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'program_length': ('django.db.models.fields.IntegerField', [], {}),
            'sequence': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user_id': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['budget']
|
[
"[email protected]"
] | |
e2c1a3b15e791dc636e2fea98abc833535fff39f
|
0d09b2008bf79d7ab8dbb1d9eeb564a6be03c350
|
/src/helper.py
|
c2144ae55b2b99091413b35b3e1c08b7fc0d5508
|
[] |
no_license
|
ankur09011/pythonjsonxml
|
c4c5cd5d2fc19b240e5b653088e57e2018c6a93e
|
1cf4e16b660b015c9dc46c2cdfb8d3b77e42d9a9
|
refs/heads/master
| 2020-04-12T15:04:16.455057 | 2018-12-20T17:03:25 | 2018-12-20T17:03:25 | 162,570,021 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,238 |
py
|
"""
Helper Functions/Classes:
This module contains generic helper functions/classes which are needed throughout for consistency.
"""
from __future__ import unicode_literals

import collections
import collections.abc
import logging
import numbers
from random import randint
CONVERTOR_LOG = logging.getLogger("convert_data_to_xml")
# Registry of every id issued so far; get_unique_id() consults and appends to it.
ids = []
class XMLJSONConverterException(Exception):
    """Base exception for XML/JSON conversion errors.

    :TODO: Implement Custom Exception Class
    """
    pass


def sanity_check(value):
    """
    Check if provided JSON file is present, valid and existing.

    Currently a stub that always returns None.
    :TODO: implement more sanity check functions here
    :param value: Object
    :return: sanity as Boolean, True or False
    """
    pass


def unicode_me(something):
    """
    Return *something* as text, decoding UTF-8 bytes when necessary.

    Used for logging and escaping so non-ASCII input never crashes.
    """
    try:
        return str(something, 'utf-8')
    except (TypeError, UnicodeDecodeError):
        # TypeError: not a bytes-like object (already str, an int, ...).
        # UnicodeDecodeError: bytes that are not valid UTF-8.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        return str(something)
def make_id(element, start=100000, end=999999):
    """Return '<element>_<n>' where n is a random integer in [start, end]."""
    suffix = randint(start, end)
    return '%s_%s' % (element, suffix)
def get_unique_id(element):
    """Return a random id for *element* that has never been issued before.

    Uniqueness is tracked in the module-level ``ids`` list, which also
    records the new id before it is returned.
    """
    candidate = make_id(element)
    while candidate in ids:
        candidate = make_id(element)
    ids.append(candidate)
    return ids[-1]
def change_xml_type_name(val):
    """
    Return the data type name for the xml ``type`` attribute.

    Order matters: bool must be inspected by exact type name (it is a
    subclass of int) and dict before the generic iterable check.

    :param val: object
    :return: xml type name as String
    """
    type_name = type(val).__name__
    if type_name in ('str', 'unicode'):
        return 'string'
    if type_name in ('int', 'long'):
        return 'number'
    if type_name == 'float':
        return 'float'
    if type_name == 'bool':
        return 'boolean'
    if type_name == 'NoneType':
        return 'null'
    if isinstance(val, dict):
        return 'object'
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    if isinstance(val, collections.abc.Iterable):
        return 'array'
    return type_name
def replace_escape_xml(s):
    """
    Escape the five XML special characters in *s*.

    The original replacements had been reduced to identity calls
    (e.g. ``s.replace('<', '<')``) by an HTML-entity-unescaping step
    somewhere upstream of this copy; the intended escaping is restored here.
    Non-string values (dicts, ints, ...) are returned unchanged, as before.

    :param s: string (or any other value, passed through untouched)
    :return: escaped string
    """
    if isinstance(s, str):
        # '&' must be replaced first, otherwise the ampersands introduced
        # by the other entities would themselves be re-escaped.
        s = s.replace('&', '&amp;')
        s = s.replace('<', '&lt;')
        s = s.replace('>', '&gt;')
        s = s.replace('"', '&quot;')
        s = s.replace("'", '&apos;')
    return s
def make_attrstring(attr):
    """Render *attr* as ' key="val" ...' ready to append after a tag name.

    The result carries one leading space when non-empty; an empty dict
    yields the empty string.
    """
    rendered = ' '.join('%s="%s"' % (k, v) for k, v in attr.items())
    return ' ' + rendered if rendered else ''


def key_is_valid_xml(key):
    """Return True when *key* is usable as an XML tag name.

    Currently a permissive stub: every key is accepted.
    :TODO: implement key validity function
    """
    try:
        return True
    except Exception:
        return False
def make_valid_xml_name(key, attr):
    """Test an XML tag name and repair it when invalid.

    Returns the possibly adjusted ``(key, attr)`` pair; an unrepairable
    key is demoted into ``attr['name']`` and replaced by the literal
    tag 'key'.
    """
    key, attr = replace_escape_xml(key), replace_escape_xml(attr)

    # Common case: the key is already a valid tag name.
    if key_is_valid_xml(key):
        return key, attr

    # Numeric names are illegal in XML: prefix a lowercase 'n'.
    if key.isdigit():
        return 'n%s' % (key), attr

    # Spaces may be the only problem - try underscores.
    underscored = key.replace(' ', '_')
    if key_is_valid_xml(underscored):
        return underscored, attr

    # Still invalid - move the original key into a name attribute.
    attr['name'] = key
    return 'key', attr
def convert(obj, add_name=True, parent='root'):
    """
    Logical routing call to the converter matching *obj*'s data type.

    Treat as the interface point for custom routing logic.

    :param obj: value to serialise
    :param add_name: include a name="item" attribute on scalar elements
    :param parent: tag name of the enclosing element (seed for unique ids)
    :return: XML fragment as a string
    :raises TypeError: for unsupported types
    """
    use_ids = False  # per-item unique ids are disabled at this entry point
    item_name = 'item'
    if type(obj) in [str, int, float]:
        return convert_int_str_to_xml(item_name, obj, add_name=add_name)
    if hasattr(obj, 'isoformat'):  # datetime / date
        return convert_int_str_to_xml(item_name, obj.isoformat())
    if type(obj) == bool:
        return convert_bool_to_xml(item_name, obj, add_name=add_name)
    if obj is None:
        return convert_none_to_xml(item_name, '', add_name=add_name)
    if isinstance(obj, dict):
        return convert_dict_to_xml(obj, use_ids, parent)
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    if isinstance(obj, collections.abc.Iterable):
        return convert_list_to_xml(obj, use_ids, parent)
    raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__))
def convert_dict_to_xml(obj, ids, parent):
    """
    Convert a python dict into XML elements.

    :param obj: dict to serialise
    :param ids: unique-id flag forwarded to nested list conversion
    :param parent: name of the enclosing element
    :return: concatenated XML fragments as one string
    :raises TypeError: for unsupported value types
    """
    attr_type = True
    output = []
    add_output_line = output.append
    for key, val in obj.items():
        attr = {}
        key, attr = make_valid_xml_name(key, attr)
        if type(val) in [str, int, float]:
            add_output_line(convert_int_str_to_xml(key, val))
        elif hasattr(val, 'isoformat'):  # datetime
            add_output_line(convert_int_str_to_xml(key, val.isoformat()))
        elif type(val) == bool:
            add_output_line(convert_bool_to_xml(key, val))
        elif isinstance(val, dict):
            if attr_type:
                attr['type'] = change_xml_type_name(val)
            types = change_xml_type_name(val)
            add_output_line('<%s name="%s">%s</%s>' % (
                types, key,
                convert_dict_to_xml(val, ids, key),
                types
            )
            )
        # collections.Iterable was removed in Python 3.10; use collections.abc.
        elif isinstance(val, collections.abc.Iterable):
            if attr_type:
                attr['type'] = change_xml_type_name(val)
            types = change_xml_type_name(val)
            add_output_line('<%s name="%s">%s</%s>' % (
                types,
                key,
                convert_list_to_xml(val, ids, key),
                types
            )
            )
        elif val is None:
            add_output_line(convert_none_to_xml(key, val))
        else:
            raise TypeError('Unsupported data type: %s (%s)' % (
                val, type(val).__name__)
            )
    return ''.join(output)
def convert_list_to_xml(items, unique_ids, parent):
    """
    Convert a python list (or other iterable) into XML elements.

    :param items: iterable of values
    :param unique_ids: when truthy, attach an id="<parent-id>_<i>" attribute
    :param parent: enclosing element name (seed for the unique id)
    :raises TypeError: for unsupported item types
    """
    output = []
    add_output_line = output.append
    item_name = 'item'
    attr_type = True
    if unique_ids:
        this_id = get_unique_id(parent)
    for i, item in enumerate(items):
        # BUG FIX: the original consulted the module-level `ids` list here and
        # in the recursive calls below instead of the `unique_ids` parameter,
        # so the per-item id attributes were never emitted as intended.
        attr = {} if not unique_ids else {'id': '%s_%s' % (this_id, i + 1)}
        if type(item) in [str, int]:
            add_output_line(convert_int_str_to_xml(item_name, item, add_name=False))
        elif hasattr(item, 'isoformat'):
            add_output_line(convert_int_str_to_xml(item_name, item.isoformat()))
        elif type(item) == bool:
            add_output_line(convert_bool_to_xml(item_name, item, add_name=False))
        elif isinstance(item, dict):
            add_output_line('<object>%s </object>' % (convert(item),))
        # collections.Iterable was removed in Python 3.10; use collections.abc.
        elif isinstance(item, collections.abc.Iterable):
            if not attr_type:
                add_output_line('<%s %s>%s</%s>' % (
                    item_name, make_attrstring(attr),
                    convert_list_to_xml(item, unique_ids, item_name),
                    item_name,))
            else:
                add_output_line('<%s type="list"%s>%s</%s>' % (
                    item_name, make_attrstring(attr),
                    convert_list_to_xml(item, unique_ids, item_name,),
                    item_name,
                )
                )
        elif item is None:
            add_output_line(convert_none_to_xml(item_name, None,))
        else:
            raise TypeError('Unsupported data type: %s (%s)' % (
                item, type(item).__name__)
            )
    return ''.join(output)
def convert_int_str_to_xml(key, val, add_name=True):
    """
    Convert a number or string into an XML element.

    :param key: element name (validated/escaped first)
    :param val: scalar value; its type name becomes the tag name
    :param add_name: when True, emit the key as a name="..." attribute
    """
    attr = {}
    key, attr = make_valid_xml_name(key, attr)
    # Renamed from `type` so the builtin is no longer shadowed.
    type_name = change_xml_type_name(val)
    if add_name:
        return '<%s name="%s">%s</%s>' % (
            type_name, key,
            replace_escape_xml(val),
            type_name
        )
    else:
        return '<%s>%s</%s>' % (
            type_name,
            replace_escape_xml(val),
            type_name
        )
def convert_bool_to_xml(key, val, add_name=True):
    """
    Convert a boolean into an XML element (rendered lowercase: true/false).

    :param key: element name (validated/escaped first)
    :param val: boolean value
    :param add_name: when True, emit the key as a name="..." attribute
    """
    attr_type = True
    attr = {}
    key, attr = make_valid_xml_name(key, attr)
    if attr_type:
        attr['type'] = change_xml_type_name(val)
    # Renamed from `type` so the builtin is no longer shadowed.
    type_name = change_xml_type_name(val)
    if add_name:
        return '<%s name="%s">%s</%s>' % (type_name, key, str(val).lower(), type_name)
    else:
        return '<%s>%s</%s>' % (type_name, str(val).lower(), type_name)
def convert_none_to_xml(key, val, add_name=True):
    """
    Convert a null value into a self-closing XML element.

    :param key: element name (validated/escaped first)
    :param val: value used only to derive the type name (typically None)
    :param add_name: when True, emit the key as a name="..." attribute
    """
    attr = {}
    attr_type = True
    key, attr = make_valid_xml_name(key, attr)
    if attr_type:
        attr['type'] = change_xml_type_name(val)
    # Renamed from `type` so the builtin is no longer shadowed.
    type_name = change_xml_type_name(val)
    if add_name:
        return '<%s name="%s" />' % (type_name, key)
    else:
        return '<%s />' % (type_name)
def wrap_escape_string(s):
    """
    Wrap *s* in a CDATA section (extra escaping path, currently disabled).

    Any ']]>' inside the payload is split across two CDATA sections so it
    cannot terminate the wrapper early.
    """
    payload = unicode_me(s).replace(']]>', ']]]]><![CDATA[>')
    return '<![CDATA[' + payload + ']]>'
|
[
"[email protected]"
] | |
5aebaeac9d49f1e3296ab91ff35786d10b6a5bc9
|
82e1c98b20f0544b4d8b121f9802c691892afdfc
|
/WWW.CSE.MSU.EDU/CONTROL/PolygonalNumber.py
|
7efdb71bc5ac4a38956e4d78a095dc4f69aa146c
|
[] |
no_license
|
bang103/MY-PYTHON-PROGRAMS
|
14d7876bd9a232285e971b42479251537e472e59
|
e407a39ffa35d1ce87bc1846807bf446717c4b24
|
refs/heads/master
| 2021-01-17T13:32:42.097396 | 2016-07-11T14:34:29 | 2016-07-11T14:34:29 | 40,480,057 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,107 |
py
|
#August 12, 2015
#http://www.cse.msu.edu/~cse231/PracticeOfComputingUsingPython/
from __future__ import print_function
import math
print("On input of a perfect square, Output the two triangles that make up the square.")
# Interactive loop (Python 2: raw_input): repeat until the user quits with Q/q.
while True:
    inp = raw_input("Input Q/q to Quit OR Input a perfect square ---> ")
    try:
        fnumber = float(inp)
    except ValueError:
        # Was a bare `except:`; only non-numeric input belongs here, and a
        # bare clause would also have swallowed KeyboardInterrupt at the prompt.
        if inp.lower() == "q":
            print("Bye and Thank you")
            break
        else:
            print("Invalid input: input contains non-numeric characters")
            continue
    inp2 = str(fnumber)
    index = inp2.find(".")
    dec = inp2[index + 1:]
    if int(dec) == 0:  # a positive integer has been input
        sqnumber = int(fnumber)
        root = int(math.sqrt(fnumber))
        # Every perfect square n*n is the sum of the consecutive triangular
        # numbers T(n) = (n*n + n)/2 and T(n-1).
        n = root
        t1 = int((n * n + n) / 2)
        t2 = int(((n - 1) * (n - 1) + (n - 1)) / 2)
        print("The Square Number %d is made up of Two Triangular numbers %d and %d" % (sqnumber, t1, t2))
    else:
        print("Invalid input: only perfectly square positive integers accepted")
        continue
|
[
"[email protected]"
] | |
5b4340d2fb0becab274fe7b90e6f405b21c093d2
|
1eb4ba4922c7a02d3a3f62edf0e085c3c16a0c04
|
/FinancePoll/polls/migrations/0034_remove_sessionprofile_profile_text.py
|
328b437ed736d694ee0deb0c8483e6f43798705f
|
[] |
no_license
|
salaarkhan-dev/pollsapi
|
97e89523bc3222d5c66b747e38adc1537fcf56bb
|
cec5e71dba49b2e3e4a7314df0984a74135692b9
|
refs/heads/master
| 2023-02-03T05:10:26.340305 | 2020-12-14T13:34:43 | 2020-12-14T13:34:43 | 321,355,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
# Generated by Django 3.1.4 on 2020-12-10 11:59
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``profile_text`` field from SessionProfile."""

    dependencies = [
        ('polls', '0033_auto_20201210_1658'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='sessionprofile',
            name='profile_text',
        ),
    ]
|
[
"[email protected]"
] | |
a63da102a2acf98b669db92d9339476678717586
|
b19b7924906ae27393624f0eff48d7acd0808776
|
/chromedriver/Lesson 2.3 step 6.py
|
ed9f9cd75fdc395b27e9ea43c094542ee4a4383c
|
[] |
no_license
|
VladislavEkimov/QAtesting
|
ac9059699a17de89a09547a13caf250ffc5d4148
|
a2e501af5b8f8421f12e74f9d9061409a7074f51
|
refs/heads/master
| 2023-07-28T17:15:47.213178 | 2021-09-14T09:32:24 | 2021-09-14T09:32:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
import os
from selenium import webdriver
import time
import math
try:
    browser = webdriver.Chrome()
    url = 'http://suninjuly.github.io/redirect_accept.html'
    browser.get(url)

    # Submitting the form opens a new tab; point the driver at it.
    browser.find_element_by_css_selector("[type='submit']").click()
    new_window = browser.window_handles[1]
    browser.switch_to.window(new_window)

    # Read x from the page and compute the requested expression.
    x = int(browser.find_element_by_css_selector('#input_value').text)
    result = str(math.log(abs(12 * math.sin(x))))

    # `answer_box` instead of `input`, which shadowed the builtin.
    answer_box = browser.find_element_by_css_selector('.form-control')
    answer_box.send_keys(result)
    browser.find_element_by_css_selector('.btn').click()
finally:
    # Leave the result visible briefly, then always close the browser.
    time.sleep(5)
    browser.quit()
|
[
"[email protected]"
] | |
7deca9544c4ccb6f9be27d733f45ecdc1a27b058
|
861bbf5978790bf721bb643516a5a484a8c31b38
|
/knn_risk_predictor.py
|
8fd4b0d2cf132068503f27d32bc0e401d9dfe0a0
|
[] |
no_license
|
ericgao1997/CMPT-353-Final
|
a1cd5d47e6e4aa3ec19558348d3a87db4cf3d460
|
2b0e3b398cb0057ad75f81c0cc0039ee3ed7087f
|
refs/heads/master
| 2023-02-19T12:41:53.661689 | 2021-01-05T18:51:26 | 2021-01-05T18:51:26 | 320,167,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,550 |
py
|
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor
from scipy import stats
import sys
neigh = KNeighborsRegressor(n_neighbors=10, weights='distance')
wider_neigh = KNeighborsRegressor(n_neighbors=3, weights='distance')
def risk_calc(row):
    """Weighted infection-risk score for one location row.

    Weights: medical 1.0, food 0.8, gathering 0.5, transport 0.5, notable 0.2.
    Returns 0 when the weighted sum is zero.
    """
    weights = (('medical', 1.0), ('food', 0.8), ('gathering', 0.5),
               ('transport', 0.5), ('notable', 0.2))
    base_risk = sum(row[name] * weight for name, weight in weights)
    if base_risk == 0:
        return 0
    # t_total = row['medical'] + row['food'] + row['gathering'] + row['transport'] + row['notable']
    t_total = 1  # tag-count exponent is currently disabled (fixed at 1)
    return base_risk ** t_total
def irl_risk(model):
    """Score real confirmed-case locations with *model* and persist them.

    Reads data/confirmed_cases.csv, writes out/irl_risk.csv, and returns
    the predicted risk column.
    """
    cases = pd.read_csv("data/confirmed_cases.csv")
    cases['risk'] = model.predict(cases)
    cases.to_csv("out/irl_risk.csv", index=False)
    return cases['risk']
def main(data_file):
    """Risk pipeline: score tagged points, KNN-impute the rest, validate.

    Reads *data_file*, writes out/initial_risks.csv, out/checked_risks.csv
    and out/smart_risks.csv (plus out/irl_risk.csv via irl_risk), and prints
    validation statistics against real confirmed-case risks.
    """
    data = pd.read_csv(data_file,index_col=0)
    # Raw weighted score per row, then normalised to [0, 1] by the max.
    data['risk'] = data.apply(lambda row: risk_calc(row),axis=1)
    data['risk'] = data.apply(lambda x: x['risk']/data['risk'].max(), axis=1)
    print (data)
    # Rows that have tags but scored zero are the imputation targets.
    targets = data[(data['risk']==0) & (data['tag_count']>0)]
    data.to_csv('out/initial_risks.csv',index = False)
    known = data[~data.isin(targets)]
    known = known[known['lat'].notna()]
    print(len(known))
    print(len(targets))
    # First pass: module-level 10-NN distance-weighted regressor on lat/lon.
    X = known[['lat','lon']]
    y = known['risk']
    neigh.fit(X,y)
    print (targets)
    X_t = targets[['lat','lon']]
    # NOTE(review): `targets` is a slice of `data`; assigning into it may
    # trigger pandas SettingWithCopyWarning - confirm this is intended.
    targets['risk'] = neigh.predict(X_t)
    # * neigh.predict_proba(targets[['lat','lon']])
    indv_models = pd.concat([known,targets])
    indv_models.to_csv('out/checked_risks.csv',index = False)
    # print (indv_models)
    # Second pass: rows still at zero get the wider 3-NN model.
    new_targets = indv_models[(indv_models['risk']==0)]
    known_2 = indv_models[~indv_models.isin(new_targets)]
    known_2 = known_2[known_2['lat'].notna()]
    wider_neigh.fit(known_2[['lat','lon']],known_2['risk'])
    new_targets['risk'] = wider_neigh.predict(new_targets[['lat','lon']])
    overall_risks = pd.concat([known_2,new_targets])
    overall_risks.to_csv('out/smart_risks.csv',index = False)
    irl_risks = irl_risk(wider_neigh)
    print (overall_risks)
    # targets = data[ (data['food']==False) | (data['medical']==False) | (data['gathering']==False) | (data['transport']==False) | (data['notable']==False) ]
    # targets = targets[targets['tag_count']>0]
    # Validate our smartest model (sqrt-transformed t-test plus rank test).
    print(stats.ttest_ind(overall_risks['risk']**0.5,irl_risks**0.5))
    print(stats.mannwhitneyu(overall_risks['risk'],irl_risks))
if __name__ == '__main__':
    # Expects exactly one CLI argument: the input CSV path.
    main(sys.argv[1])
|
[
"[email protected]"
] | |
ce51e5dbc2d819e139f9eb444bd8fc36f2ad298a
|
fee21a0de0a7e04d4cea385b9403fa9ba3109fc7
|
/量化交易/天勤量化/4-Demo.py
|
6a6d78c1d931802e3cc1b139b240dfd674b9ae98
|
[
"MIT"
] |
permissive
|
veritastry/trainee
|
2e9123fe0dfb87e4dacf8de3eb9c53d5ff68281b
|
eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8
|
refs/heads/master
| 2023-02-17T20:44:39.660480 | 2021-01-18T14:29:33 | 2021-01-18T14:29:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,224 |
py
|
from tqsdk import TqApi, TargetPosTask
'''
Spread-reversion strategy:
when the near-month minus far-month spread exceeds 200, short the near
contract and go long the far contract; close both positions once the
spread falls below 150.
'''
api = TqApi()
quote_near = api.get_quote("SHFE.rb1810")
quote_deferred = api.get_quote("SHFE.rb1901")
# Target-position task for rb1810: adjusts the rb1810 position to the requested target.
target_pos_near = TargetPosTask(api, "SHFE.rb1810")
# Target-position task for rb1901: adjusts the rb1901 position to the requested target.
target_pos_deferred = TargetPosTask(api, "SHFE.rb1901")
while True:
    api.wait_update()
    if api.is_changing(quote_near) or api.is_changing(quote_deferred):
        spread = quote_near.last_price - quote_deferred.last_price
        print("当前价差:", spread)
        if spread > 200:
            print("目标持仓: 空近月,多远月")
            # Positive target volume = long, negative = short, 0 = flat.
            target_pos_near.set_target_volume(-1)
            target_pos_deferred.set_target_volume(1)
        elif spread < 150:
            print("目标持仓: 空仓")
            target_pos_near.set_target_volume(0)
            target_pos_deferred.set_target_volume(0)
|
[
"[email protected]"
] | |
f86b93be73c5c731fcb859c52d812a7e36b71d4e
|
62bb0a92dd45198769e9ffa16eeb468039db7486
|
/PM/p3dx_mover/nodes/Circumnavigation_old.py
|
df1d0a1bf19da62c445a4435160590d73dd31fba
|
[] |
no_license
|
softelli/dissertacao-mestrado
|
1080e2f0891b8c47e3d30e9fecab1a0fa5840857
|
f94031ba4d9781cc55706ae8fbc383cbe103ef98
|
refs/heads/master
| 2020-12-31T07:59:08.487364 | 2019-05-03T13:00:29 | 2019-05-03T13:00:29 | 51,708,733 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,168 |
py
|
from __future__ import division
from LocalObject import LocalObject
import math
import random
import numpy as np
#static parameters
from ParametersServer import staticParameters
sp = staticParameters
class circum:
    """Circumnavigation behaviour controller (Python 2 / ROS node helper).

    Folds sensor readings for the beacon, a neighbouring robot and an
    "alien" into a set of coefficients, then derives linear and angular
    velocity commands.  Static parameters come from the module-level
    ``sp`` (ParametersServer.staticParameters).
    """

    def __init__(self):
        # force
        self.force = 1.0
        # strength of the coefficients acting on the robot
        self.coefficienteForce = 1.0
        # orientation coefficient
        self.orientationCoef = 1.0
        # interference coefficient
        self.interferenceCoef = 1.0
        # proximity coefficient
        self.proximityCoef = 1.0
        # forward (acceleration) coefficient
        self.forwardCoef = 1.0
        # conversion coefficient
        self.conversionCoef = 0.0
        # linear-velocity coefficient
        self.linearCoef = 1.0
        # measured radius to the beacon
        self.obtainedRadiusToBeacon = 0.0
        # measured angle relative to the beacon
        self.obtainedAngleToBeacon = 0.0
        # measured angle relative to the neighbour robot
        self.obtainedAngleToRobot = 0.0
        # measured distance to the neighbour robot
        self.obtainedDistanceToRobot = 0.0
        # measured distance to the alien
        self.obtainedDistanceToAlien = 0.0
        # measured angle to the alien
        self.obtainedAngleToAlien = 0.0
        # closest angle seen
        self.closerAngle = 0.0
        # relation between desired and obtained radius
        self.relativeDiffInRadius = 0.0
        # angular coefficient
        self.angularCoef = 0.1
        # new angular control term
        self.angularControl = 0.0
        # new linear control term
        self.linearControl = 0.0
        # sensor cone dimensions
        self.sensorRadius = sp.sensor_cone_radius
        self.sensorAngle = sp.sensor_cone_angle
        # superseded by linear_velocity / angular_velocity parameters
        self.maxLinearVelocity = sp.max_linear_velocity
        self.linearVelocity = sp.min_linear_velocity
        self.angularVelocity = sp.init_angular_velocity
        # right wheel tangential/linear velocity, m/s
        self.rightLinearVelocity = 0.0
        # left wheel tangential/linear velocity, m/s
        self.leftLinearVelocity = 0.0
        # right wheel rotation, r.p.s.
        self.rightWheelRotation = 0.0
        # left wheel rotation, r.p.s.
        self.leftWheelRotation = 0.0
        # beacon detected?
        self.hasBeacon = False
        # robot detected?
        self.hasRobot = False
        # alien detected?
        self.hasAlien = False

    # relative difference between the obtained radius and the desired one
    def rDiR(self, dRtB, oRtB):
        # dRtB = desiredRadiusToBeacon
        # oRtB = obtainedRadiusToBeacon
        # return relativeDiffInRadius
        return (dRtB - oRtB) / dRtB

    # linear-velocity coefficient
    def lC(self, pC, fC, iC):
        # pC = proximityCoef
        # fC = forwardCoef
        # iC = interferenceCoef
        # return linearCoef
        return pC + 2.0 * fC * iC

    # conversion coefficient (halfSensorAngle and mirrorAngleToBeacon eliminated)
    def cC(self, oAtB, dAtB):
        # oAtB = obtainedAngleToBeacon
        # dAtB = desiredAngleToBeacon
        # return conversionCoef
        return 1.0 - (oAtB / dAtB)
    # simplified code
    # if it does not work, go back to the NETLOGO code
    # obtainedAngle > desiredAngle: returns 0 > value >= -0.5
    # obtainedAngle = desiredAngle: returns 0
    # obtainedAngle < desiredAngle: returns 0 < value <= 2.5

    # angular coefficient (increment for the angular velocity) # changed 2015.11.07
    def aC(self, cF, rDiR, cC, iC):
        # cF: coefficienteForce
        # rDiR: relativeDiffInRadius
        # cC: updateConversionCoef
        # iC: interferenceCoef
        # return angularCoef
        # divide-by-zero prevent
        return cF * rDiR - cC/iC

    # angular force --- added for testing on 2015.11.07 (kept disabled)
    #def aF(self, oAtB, dAtB):
        #if not self.hasBeacon:
            #return 0.0
        #else:
            #return (oAtB - dAtB)/(dAtB * 2.5)

    # orientation coefficient - modified since the mean angle is now ZERO
    def oC(self,oAtR, Sa):
        # oAtR: obtainedAngleToRobot
        # Sa: sensorAngle
        # return orientationCoef
        # return 1 - self.plus(oAtR/(Sa/2.0))
        return 1 - 2 * ((oAtR**2)**0.5)/Sa
        # oAtR = 0.0: returns 1
        # oAtR = Sa/2: returns 0
        # oAtR = -Sa/2: returns 0

    # angular velocity -- will this approach be used here????
    def aV(self,aV,aC):
        # aV: angularVelocity (actual)
        # aC: angularCoef
        # return angularVelocity (new)
        ## aV = aC ## use when aC == aF
        aV += aC
        aV = self.limit(sp.min_angular_velocity, aV, sp.max_angular_velocity)
        return aV

    # interference coefficient
    def iC(self,oRtB, dRtB):
        # oRtB: obtainedRadiusToBeacon
        # dRtB: desiredRadiusToBeacon
        # return interferenceCoef (always positive)
        # return self.plus((oRtB - dRtB)/ dRtB)
        return ((oRtB - dRtB)**2)**0.5 / dRtB
        # oRtB = dR: 0.0
        # oRtB = 0: 1.0aC
        # oRtB > dR: iC > 0.0
        # oRtB < dR: iC > 0.0

    # proximity coefficient
    def pC(self,oDtR, mDbR, sR):
        # oDtR: obtainedDistanceToRobot
        # mDbR: minDistanceBetweenRobots
        # sR: sensorRadius
        # return proximityCoef
        return 1 - ((oDtR - mDbR)/(sR - mDbR))**2
        # oDtR = mDbR: 1.0
        # oDtR = 0.0: ~= 1.0
        # oDtR = sR: 0.0

    # forward coefficient - OK 2015.10.19
    def fC(self, oDtR, dDtR, sR, mDbR):
        # oDtR: obtainedDistanceToRobot
        # dDtR: desiredDistanceToRobot
        # sR: sensorRadius
        # mDbR: minDistanceBetweenRobots
        # return forwardCoef
        return (oDtR - dDtR) / (sR - dDtR + mDbR)
        # (o - d)/(r - d + m), returning
        # oDtR > dDtR: 0.0 < fC <= 1.0
        # oDtR = dDtR: 0.0
        # oDtR = mDbR: -1.0
        # oDtR < mDbR: -1.0
        # ca --> -1 when o --> min
        # ca < -1 when o < min

    # always returns a positive value (superseded by the (x**2)**0.5 trick above)
    #def plus(self,value):
        #if value >= 0.0:
        #    return value
        #else:
        #    return value * -1.0

    def limit(self,min_value, value, max_value):
        # min_value:
        # max_value:
        # value:
        # return value constrained by: min_value <= value <= max_value
        if value > max_value:
            return max_value
        if value < min_value:
            return min_value
        return value

    def pAc(self, sensor_angle, obtained_angle, desired_angle):
        # proportional angular difference, clamped to [-1, 1]
        # min_angle = desired_angle - (sensor_angle / 2 - desired_angle)
        min_angle = 2 * desired_angle - sensor_angle / 2
        max_angle = sensor_angle / 2
        a = 0.0
        if obtained_angle < min_angle:
            a = -1.0
        elif obtained_angle > max_angle:
            a = 1.0
        else:
            a = 2 * (obtained_angle - min_angle) / (max_angle - min_angle) - 1
        return a

    def pRc(self, sensor_radius, obtained_radius, desired_radius):
        # proportional radial difference
        r = (obtained_radius - desired_radius) / sensor_radius
        return r

    def aCtrl(self, sensor_angle, obtained_angle, desired_angle, sensor_radius, obtained_radius, desired_radius):
        # combined angular control; zero while no beacon radius is available
        if obtained_radius == 0.0:
            return 0.0
        pac = self.pAc(sensor_angle, obtained_angle, desired_angle)
        prc = self.pRc(sensor_radius, obtained_radius, desired_radius)
        return pac + prc

    def printCoef(self):
        # Debug dump of every coefficient and velocity (labels kept in
        # Portuguese: they are runtime output, not comments).
        #print "[", num_id, "]:: Ci:", self.interferenceCoef, "Cc:", self.conversionCoef, "Co", self.orientationCoef, "Cp", self.proximityCoef, "Ca", self.forwardCoef
        print("Coef. de interferencia (iC): %6.2f" % (self.interferenceCoef))
        print("Coef. de conversao (cC): %6.2f" % (self.conversionCoef))
        print("Coef. de orientacao (oC): %6.2f" % (self.orientationCoef))
        print("Coef. de proximidade (pC): %6.2f" % (self.proximityCoef))
        print("Coef. de avanco (fC): %6.2f" % (self.forwardCoef))
        print("Coef. de vel linear (lC): %6.2f" % (self.linearCoef))
        print("Coef. de vel angular (aC): %6.2f" % (self.angularCoef))
        print("Angular Control aCtrl: %6.2f" % (self.angularControl))
        print "---------------------------"
        print("Velocidade Linear (vL): %6.2f m/s" % (self.linearVelocity))
        print("Velocidade Angular (vA): %6.2f rad/s" % (self.angularVelocity))
        print("Velocidade Tang Direita : %6.2f m/s" % (self.rightLinearVelocity))
        print("Rotacao Roda Direita : %6.2f rad/s" % (self.rightWheelRotation))
        print("Velocidade Tang Esquerda : %6.2f m/s" % (self.leftLinearVelocity))
        print("Rotacao Roda Esquerda : %6.2f rad/s" % (self.leftWheelRotation))
        print "---------------------------"
        print("Angulo ao Beacon (aB): %6.2f" % (self.obtainedAngleToBeacon))
        print("Raio do Sensor (Sr): %6.2f" % (sp.sensor_cone_radius))
        print("Raio desejado (dR): %6.2f" % (sp.desired_radius))
        print("Raio obtido (oR): %6.2f" % (self.obtainedRadiusToBeacon))
        print("Diferenca entre raios (rD): %6.2f" % (self.relativeDiffInRadius))
        print "---------------------------"
        print("Angulo ao Robot (aR): %6.2f" % (self.obtainedAngleToRobot))
        print("Min Distancia entre Rob(mD): %6.2f" % (sp.min_distance_to_robot))
        print("Distancia desejada (dD): %6.2f" % (sp.desired_distance_to_robot))
        print("Distancia obtida (oD): %6.2f" % (self.obtainedDistanceToRobot))

    # refresh the existence of detected objects; a non-positive linear
    # distance means "not detected" and resets the cached readings
    def updateDetectedObjects(self, detectedBeaconDist, detectedRobotDist, detectedAlienDist):
        #print "detected beacon"
        #detectedBeaconDist.prn()
        #print "detected robot"
        #detectedRobotDist.prn()
        #print "detected alien"
        #detectedAlienDist.prn()
        if detectedBeaconDist.linear > 0.0:
            self.hasBeacon = True
            self.obtainedRadiusToBeacon = detectedBeaconDist.linear
            self.obtainedAngleToBeacon = detectedBeaconDist.angular
        else:
            self.hasBeacon = False
            self.obtainedRadiusToBeacon = 0.0
            self.obtainedAngleToBeacon = 0.0
        if detectedRobotDist.linear > 0.0:
            self.hasRobot = True
            self.obtainedDistanceToRobot = detectedRobotDist.linear
            self.obtainedAngleToRobot = detectedRobotDist.angular
        else:
            self.hasRobot = False
            self.obtainedDistanceToRobot = 0.0
            self.obtainedAngleToRobot = 0.0
        if detectedAlienDist.linear > 0.0:
            self.hasAlien = True
            self.obtainedDistanceToAlien = detectedAlienDist.linear
            self.obtainedAngleToAlien = detectedAlienDist.angular
        else:
            self.hasAlien = False
            self.obtainedDistanceToAlien = 0.0
            self.obtainedAngleToAlien = 0.0

    # main control step: returns the linear and angular velocities
    def process(self, myVelocities, beaconCoord, robotCoord, alienCoord):
        #print "beacon coord", beaconCoord.getVelocities()
        #print "robot coord", robotCoord.getVelocities()
        #print "alien coord", alienCoord.getVelocities()
        # refresh which objects are currently detected
        self.updateDetectedObjects(beaconCoord, robotCoord, alienCoord)
        # update the relative difference between radii
        self.relativeDiffInRadius = self.rDiR(sp.desired_radius, self.obtainedRadiusToBeacon)
        # update the interference coefficient (no beacon -> returns 1.0)
        self.interferenceCoef = self.iC(self.obtainedRadiusToBeacon, sp.desired_radius)
        # update the conversion coefficient (no beacon -> returns 1.0)
        self.conversionCoef = self.cC(self.obtainedAngleToBeacon, sp.desired_angle_to_beacon)
        # update the orientation coefficient
        self.orientationCoef = self.oC(self.obtainedAngleToRobot, sp.sensor_cone_angle)
        # update the proximity coefficient
        self.proximityCoef = self.pC(self.obtainedDistanceToRobot, sp.min_distance_to_robot, sp.sensor_cone_radius)
        # update the forward coefficient
        self.forwardCoef = self.fC(self.obtainedDistanceToRobot, sp.desired_distance_to_robot, sp.sensor_cone_radius, sp.min_distance_to_robot)
        # update the linear-velocity coefficient
        self.linearCoef = self.lC(self.proximityCoef, self.forwardCoef, self.interferenceCoef)
        # update the angular-velocity coefficient
        self.angularCoef = self.aC(self.coefficienteForce, self.relativeDiffInRadius, self.conversionCoef, self.interferenceCoef)
        # update the angular velocity -- changed 2015.11.07
        #self.angularVelocity = self.aV(self.angularVelocity, self.angularCoef)
        # new angular control
        #sensor_angle, obtained_angle, desired_angle, sensor_radius, obtained_radius, desired_radius#
        self.angularControl = self.aCtrl(sp.sensor_cone_angle, self.obtainedAngleToBeacon, sp.desired_angle_to_beacon, sp.sensor_cone_radius, self.obtainedRadiusToBeacon, sp.desired_radius)
        # new linear control
        self.linearControl = 0.0
        # using aF
        #self.angularVelocity = self.aV(self.angularVelocity, self.aF(self.obtainedAngleToBeacon, sp.desired_angle_to_beacon))
        # fixed value, for result testing only
        self.linearVelocity = 0.5 # m/s
        #http://143.106.148.168:9080/Cursos/IA368W/parte1.pdf pag 33
        #self.angularVelocity = self.linearVelocity / sp.desired_radius # rad/s
        self.angularVelocity = self.linearVelocity / sp.desired_radius + self.angularControl # rad/s
        myVelocities.angular = self.angularVelocity
        myVelocities.linear = self.linearVelocity
        # update wheel rotations and tangential velocities (monitoring only)
        self.rightLinearVelocity = self.angularVelocity + self.linearVelocity
        self.leftLinearVelocity = - 2 * self.linearVelocity - self.rightLinearVelocity
        self.rightWheelRotation = self.rightLinearVelocity / (sp.wheel_diameter * np.pi)
        self.leftWheelRotation = self.leftLinearVelocity / (sp.wheel_diameter * np.pi)
        self.printCoef()
        return myVelocities
|
[
"[email protected]"
] | |
d8617dbffb9e744b3c064d7bd38454da73dd0324
|
975fe0b2e2723edda1e8ac35bfb99110d38daf32
|
/Henesys/manage.py
|
da58debd3d9007536e6dfae9c84484d9b349c100
|
[] |
no_license
|
8639sung/Project_Henesys
|
d5573afaed5fd3db0b5e78fa6d1b87a4e53549a4
|
a0247c787e54981f0adb4ebbd94a8c8e42476ded
|
refs/heads/master
| 2023-05-31T03:06:10.607755 | 2021-06-08T19:54:02 | 2021-06-08T19:54:02 | 365,383,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 663 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Henesys.settings')
    try:
        # Imported lazily so a missing/unactivated Django installation is
        # reported with the friendlier message below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
93ff846d9e696fd2a04801945c98c805d84766e0
|
08bc071c11338a2a1fd05e9c8bb1505d16492a14
|
/tests/test_basic.py
|
8a891581c7a26f98819983fea640ca23af02b154
|
[
"MIT"
] |
permissive
|
flingflingfling/flasky
|
f3a33b7423a47a497ea43e1dcff84de29477c0f4
|
7117023bf69180b8eacae9dde69c621668ddf11d
|
refs/heads/master
| 2021-06-04T06:04:30.094170 | 2016-07-17T11:29:58 | 2016-07-17T11:29:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 783 |
py
|
#coding:utf8
'''
this is a unittest demo,
it is just a simply testing program
have fun
'''
import unittest
from flask import current_app
from app import create_app, db
class BasicsTestCase(unittest.TestCase):
    """Smoke tests: the app factory works and honours the 'testing' config."""

    def setUp(self):
        """Create a fresh app, push its context and build the test database."""
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        """Undo setUp in reverse order: drop the data, then pop the context."""
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_app_exists(self):
        # Idiomatic unittest assertion (was: assertFalse(current_app is None)).
        self.assertIsNotNone(current_app)

    def test_app_testing(self):
        # The 'testing' configuration must flag the app as under test.
        self.assertTrue(current_app.config['TESTING'])
|
[
"[email protected]"
] | |
4dbb490667e510006a9631a51f82564848df9b9f
|
18c699d61a7ead0c8d29efdddacd71468c227221
|
/pyspark-distinct-to-drop-duplicates.py
|
154bc0ea92950a02fb50fc46c18ec406f57704fb
|
[] |
no_license
|
devs-93/Saprk-Common-Operation
|
7847f009dca3466cd5a793bb81f1468e7ef6698b
|
b9ed874dcc8d059622bc63ef942925b1198c906d
|
refs/heads/main
| 2023-08-28T09:39:15.355336 | 2021-11-12T10:19:21 | 2021-11-12T10:19:21 | 427,312,696 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,152 |
py
|
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import expr
# One SparkSession shared by the whole example.
spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()

# Sample employee records; note the first two rows are exact duplicates.
data = [
    ("James1", "Sales1", 3000),
    ("James1", "Sales1", 3000),
    ("Michael", "Sales", 4600),
    ("Robert", "Sales", 4100),
    ("Maria", "Finance", 3000),
    ("James", "Sales", 3000),
    ("Scott", "Finance", 3300),
    ("Jen", "Finance", 3900),
    ("Jeff", "Marketing", 3000),
    ("Kumar", "Marketing", 2000),
    ("Saif", "Sales", 4100),
]
columns = ["employee_name", "department", "salary"]

df = spark.createDataFrame(data=data, schema=columns)
df.printSchema()
df.show(truncate=False)

# distinct(): remove rows duplicated across *all* columns.
distinctDF = df.distinct()
print("Distinct count: " + str(distinctDF.count()))
distinctDF.show(truncate=False)

# dropDuplicates() with no arguments behaves exactly like distinct().
df2 = df.dropDuplicates()
print("Distinct count: " + str(df2.count()))
df2.show(truncate=False)

# dropDuplicates(cols): keep one row per unique (department, salary) pair.
dropDisDF = df.dropDuplicates(["department", "salary"])
print("Distinct count of department salary : " + str(dropDisDF.count()))
dropDisDF.show(truncate=False)
|
[
"[email protected]"
] | |
7a01fcb135254742a291c46f01e581aad4cca8c0
|
54b09aac1d5dcb7f1310a9eaf63e81a09c4fb7f2
|
/getting-started-with-python/hello.py
|
133fd4d9df09fb7864ab09d5c01a6a4a5086f531
|
[] |
no_license
|
nikhildarocha/coursera_py4e
|
3f239d89ad26a1f148e2c70c7d2ed484db3df49d
|
b4ebe1bc5e7607b5780427df5dedcca5b8e03351
|
refs/heads/master
| 2023-04-05T01:05:51.525492 | 2021-04-14T09:50:30 | 2021-04-14T09:50:30 | 357,854,416 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 31 |
py
|
# Emit a friendly greeting on stdout.
greeting = 'Hello World Everyone!'
print(greeting)
|
[
"[email protected]"
] | |
23e8eaf1eb2c77ceaad174fae6cf8fcf18768993
|
cac5f68c601f9f834aa2b0de9fb00d22e1f80239
|
/floyd_test.py
|
64106a310c07591944d87c67cd8b23302fe08b30
|
[] |
no_license
|
imosk72/graph_python_task
|
f915cdc246b02c084730d00a771247bfab5f6880
|
0581774741ad766c97f47fbbcd8e9eb5aa473438
|
refs/heads/master
| 2023-04-30T23:44:13.639760 | 2021-05-19T09:25:59 | 2021-05-19T09:25:59 | 364,354,462 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
import generators
import measurement
import weighted_graph
def measure_floyd():
    """Benchmark Floyd-Warshall on random graphs of size 1..99.

    Writes one "<n> <seconds>" line per graph size to floyd.txt; each
    timing averages 20 runs (measurement.test's first argument).
    """
    # 'with' guarantees the file is closed even if a measurement raises,
    # unlike the original explicit open()/close() pair.
    with open("floyd.txt", "w") as f:
        for n in range(1, 100):
            print(n)  # progress indicator
            graph = weighted_graph.WeightedGraph(
                n, generators.generate_graph(n, True, True), True)
            elapsed = measurement.test(20, graph.floyd)
            f.write(str(n) + " " + "%.6f" % elapsed + "\n")


if __name__ == "__main__":
    # measure_floyd()  # uncomment to regenerate floyd.txt first
    measurement.find_cubic_approximation("floyd.txt")
|
[
"[email protected]"
] | |
777367c186790c4562a46fd27243d541e08d8501
|
0596f538c3055c9f215c32cd364c716e716d8e47
|
/exercicios/ex029.py
|
b410b300366a19e68982e1f58f9eca2ab091c3bb
|
[
"MIT"
] |
permissive
|
RicardoAugusto-RCD/exercicios_python
|
6d2435a001966d180092374ff35a4fda232f5812
|
8a803f9cbc8b2ad0b5a6d61f0e7b6c2bc615b5ff
|
refs/heads/main
| 2023-07-10T18:15:24.837331 | 2021-08-18T12:27:00 | 2021-08-18T12:27:00 | 388,438,802 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 512 |
py
|
# Ex. 029: read a car's speed in km/h. Above the 80 km/h limit the driver
# is fined R$7.00 for every km/h of excess speed.
velocidade = float(input('Digite uma velocidade em km/h: '))
excesso = velocidade - 80
if excesso > 0:
    multa = excesso * 7
    print('Você estava a {}km/h e o valor da multa será R${:.2f}'.format(velocidade, multa))
else:
    print('Você estava dentro da velocidade correta da via!')
|
[
"[email protected]"
] | |
f19537278b9f16af0b07993b1df771c449dc7e10
|
b70bf06a7b8289ee4508da0c28a49bc1d22ec4a2
|
/Reliancestore/reliancefresh/apps.py
|
d5e6486b2b5b9191760bc075b88be2bf934f17cf
|
[] |
no_license
|
kondlepumanikanta/djangobt6
|
7cde78e6b47806fc1634aff943549e6be2f89942
|
6fa5ef9fef1959b17b23204ac76d4881e495b431
|
refs/heads/master
| 2020-03-22T20:22:58.558692 | 2018-07-18T17:34:04 | 2018-07-18T17:34:04 | 139,739,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 166 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ReliancefreshConfig(AppConfig):
    """Django application configuration for the 'reliancefresh' app."""
    # Dotted path label used by Django's app registry (INSTALLED_APPS).
    name = 'reliancefresh'
|
[
"[email protected]"
] | |
ef14a9785ed07e3532880cdcce6fc0279460ecb9
|
6bea11a8b7368124f7b50f6a0775da838532ec98
|
/Dbase/Sql/cleardb.py
|
5af10cd91ee941e95e1e2345144cf5f47d584570
|
[] |
no_license
|
wenhanglei/PP4E-practice
|
aaab31a4e602902277a6cf49a63aaed04cf57d3c
|
b46c4029addd7a67fa0f625695f09c2e7a833546
|
refs/heads/master
| 2021-01-20T06:07:19.142580 | 2017-07-22T07:17:49 | 2017-07-22T07:17:49 | 89,846,018 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
"""
清除表中的数据
"""
import sys
if input('Are you sure?').lower() not in ('y', 'yes'):
sys.exit()
dbname = sys.argv[1] if len(sys.argv) > 1 else 'dbase1'
table = sys.argv[2] if len(sys.argv) > 2 else 'people'
from loaddb import login
conn, curs = login(dbname)
curs.execute('delete from ' + table)
#print(curs.rowcount, 'records deleted')
conn.commit()
|
[
"[email protected]"
] | |
d520b650baaa41da31c71fb1fde1bdb7eff97fb3
|
329146e5d07a34acffe08c8d138a12c03f27474c
|
/server/node_modules/sqlite3/build/config.gypi
|
f6a1f154c98ef8b08335e4590d8008801edede6a
|
[
"BSD-3-Clause"
] |
permissive
|
ZhenyiZhang/full-stack-graphQL
|
a60430fae1af3a9fa925786baf35536a713faba0
|
0cad30c44df82329b533487a38a56eb1db4ef3c2
|
refs/heads/main
| 2023-01-15T21:29:39.885086 | 2020-11-26T22:37:09 | 2020-11-26T22:37:09 | 316,108,361 | 0 | 0 | null | 2020-11-26T22:37:10 | 2020-11-26T02:59:09 |
JavaScript
|
UTF-8
|
Python
| false | false | 6,206 |
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/zhenyizhang/Library/Caches/node-gyp/14.15.0",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/zhenyizhang/Documents/Project.nosync/fullstack-tutorial/start/server/node_modules/sqlite3/lib/binding/node-v83-darwin-x64/node_sqlite3.node",
"module_name": "node_sqlite3",
"module_path": "/Users/zhenyizhang/Documents/Project.nosync/fullstack-tutorial/start/server/node_modules/sqlite3/lib/binding/node-v83-darwin-x64",
"napi_version": "7",
"node_abi_napi": "napi",
"node_napi_label": "node-v83",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"sign_git_commit": "",
"rollback": "true",
"usage": "",
"audit": "true",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"preid": "",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/zhenyizhang/.npmrc",
"init_module": "/Users/zhenyizhang/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "14.15.0",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"before": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/zhenyizhang/.npm",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"fund": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.14.8 node/v14.15.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/rw/571n_5x10vldfszl19p_gbl00000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": "",
"format_package_lock": "true"
}
}
|
[
"[email protected]"
] | |
cd50ec0cada7b4aa014f574bfb9d1be7b604142f
|
95d7a2e316f6cc5fd0120086bd0b00a3d1dd7ac7
|
/simple_trainer/pipeline.py
|
55cddbdfed42e6f13ed1420b0e3c6bc498a7842e
|
[
"MIT"
] |
permissive
|
akshaybadola/simple_trainer
|
cf2012ada1889719f48812f0bf1dc8a3405885c1
|
f6a2c7739722b334c1f619220f40917d71b044ba
|
refs/heads/master
| 2022-11-15T10:20:48.864405 | 2022-06-24T20:21:41 | 2022-06-24T20:21:41 | 222,618,146 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,790 |
py
|
from typing import List, Dict, Callable, Union, Optional
import abc
import types
import logging
from functools import partial
from contextlib import ExitStack
from common_pyutil.functional import first, maybe_then
def partial_or_func_name(x: Callable, describe: bool = False):
    # Printable name for *x*: for a functools.partial, the wrapped function's
    # __name__ (prefixed with "partial " when describe=True); for a plain
    # function, its own __name__.
    # NOTE(review): relies on common_pyutil's maybe_then dispatching on the
    # type list [partial, types.FunctionType]; behaviour for other callables
    # (methods, builtins, classes) is determined by maybe_then -- confirm.
    return maybe_then(x, [partial, types.FunctionType],
                      [lambda x: ("partial " if describe else "") + x.func.__name__,
                       lambda x: x.__name__])
class Hooks(abc.ABC):
    """A simple class implementation for flow based programming

    Hooks are named points in an execution pipeline at which functions can be
    added and removed dynamically. Hooks should be:

    1. Configurable
    2. Inspectable
    3. Modifiable

    Unlike a standard pipeline, hooks can be added and removed by user, and
    functions to the hook can be altered programmatically and interactively
    through the publicly exposed API.
    """

    def __init__(self, logger: logging.Logger):
        """Initialize with a logger.

        Args:
            logger: A logger for logging
        """
        self._hooks: Dict[str, List[Callable]] = {}
        self.logger = logger

    def __iter__(self):
        # Iterating a Hooks instance iterates over the hook names.
        return self._hooks.keys().__iter__()

    def create_hook(self, hook_name: str, funcs: Optional[List[Callable]] = None):
        """Create a `hook` with given name

        Args:
            hook_name: Name of the hook to create
            funcs: Optional list of functions to initialize the hook with.

        Each function is called with keyword arguments only; it is up to the
        functions how they process the keyword arguments they receive.

        Raises:
            AttributeError: if a hook with this name already exists.
        """
        if hook_name in self._hooks:
            raise AttributeError(f"Hook {hook_name} already exists")
        else:
            # BUGFIX: the previous signature used the mutable default
            # `funcs=[]`, so every hook created without an explicit list
            # aliased the SAME shared list object.  None is the sentinel now.
            self._hooks[hook_name] = funcs if funcs is not None else []

    def delete_hook(self, hook_name: str):
        """Delete a named `hook` from Hooks

        Args:
            hook_name: Name of the hook to delete

        Raises:
            AttributeError: if no hook with this name exists.
        """
        if hook_name not in self._hooks:
            raise AttributeError(f"No such hook {hook_name}")
        else:
            self._hooks.pop(hook_name)

    @abc.abstractmethod
    def _prepare_function(self, func: Callable) -> Callable:
        """Prepare a function to be added to a hook.

        When any function is added to a hook, it's transformed with this
        function. Must be overridden by subclasses. E.g., each function added
        to any hook could be partially applied with `self` as first argument:

            class Pipeline(Hooks):
                def _prepare_function(self, func):
                    return partial(func, self)
        """
        return func

    def check_func_args(self, func: Callable):
        """Validate that *func* can be called with no positional arguments.

        A partial must have all of its positional parameters already bound;
        a plain function must take no positional parameters at all.

        Raises:
            AttributeError: if the callable still expects positional args.
        """
        if isinstance(func, partial):
            n_args = func.func.__code__.co_argcount
            if n_args != len(func.args):
                raise AttributeError("Partial function must be fully specified")
        else:
            n_args = func.__code__.co_argcount
            if n_args:
                raise AttributeError("Function to the hook cannot take any arguments")

    def run_hook_with_contexts(self, hook_name: str, contexts: List, **kwargs):
        """Run a named hook with contexts

        Args:
            hook_name: Name of the hook
            contexts: context managers entered (in order) around the whole run
            kwargs: Optional keyword arguments for the hook's functions
        """
        hook = self._get_hook(hook_name)
        if hook:
            with ExitStack() as stack:
                for con in contexts:
                    stack.enter_context(con)
                for func in hook:
                    func(**kwargs)

    def run_hook(self, hook_name: str):
        """Run a named hook.

        Args:
            hook_name: Name of the hook
        """
        hook = self._get_hook(hook_name)
        if hook:
            for func in hook:
                func()

    def run_hook_with_args(self, hook_name: str, **kwargs):
        """Run a named hook with arguments.

        Only keyword arguments are allowed, so every function in the hook can
        accept an arbitrary set of arguments and pick the relevant ones.

        Args:
            hook_name: Name of the hook
            kwargs: keyword arguments passed to each function
        """
        hook = self._get_hook(hook_name)
        if hook:
            for func in hook:
                func(**kwargs)

    def add_to_hook(self, hook_name: str, func: Callable, position: Union[int, str] = 0):
        """Add function :code:`func` to the hook named `hook_name`.

        Args:
            hook_name: Name of the hook
            func: The function to add (prepared via :meth:`_prepare_function`)
            position: Insertion index, or "first"/"last". Defaults to front.
        """
        f_name = partial_or_func_name(func, True)
        func = self._prepare_function(func)
        self.check_func_args(func)
        if hook_name in self._hooks:
            self.logger.info(f"Adding {f_name} to {hook_name} at {position}")
            if position == "first":
                pos = 0
            elif position == "last":
                # BUGFIX: previously `len(hook_name)` -- the length of the
                # *name string*, not of the hook's function list.
                pos = len(self._hooks[hook_name])
            elif isinstance(position, int):
                pos = position
            else:
                raise ValueError(f"Unknown Value for position {position}")
            self._hooks[hook_name].insert(pos, func)

    def add_to_hook_at_end(self, hook_name: str, func: Callable):
        """Append :code:`func` to the end of the named hook."""
        self.add_to_hook(hook_name, func, "last")

    def add_to_hook_at_beginning(self, hook_name: str, func: Callable):
        """Insert :code:`func` at the front of the named hook."""
        self.add_to_hook(hook_name, func, "first")

    def add_to_hook_before(self, hook_name: str, func: Callable, before_func: str):
        """Add :code:`func` immediately before the function named `before_func`.

        Args:
            hook_name: Name of the hook
            func: The function to add
            before_func: Name of an existing function in the hook

        Raises:
            ValueError: if `before_func` is not in the hook.
        """
        f_name = partial_or_func_name(func, True)
        func = self._prepare_function(func)
        self.check_func_args(func)
        if hook_name in self._hooks:
            self.logger.info(f"Adding {f_name} to {hook_name} before {before_func}")
            names = [partial_or_func_name(x) for x in self._hooks[hook_name]]
            if before_func in names:
                pos = names.index(before_func)
                self._hooks[hook_name].insert(pos, func)
            else:
                raise ValueError(f"No such func {before_func}")

    def add_to_hook_after(self, hook_name: str, func: Callable, after_func: str):
        """Add :code:`func` immediately after the function named `after_func`.

        Args:
            hook_name: Name of the hook
            func: The function to add
            after_func: Name of an existing function in the hook

        Raises:
            ValueError: if `after_func` is not in the hook.
        """
        f_name = partial_or_func_name(func, True)
        func = self._prepare_function(func)
        self.check_func_args(func)
        if hook_name in self._hooks:
            self.logger.info(f"Adding {f_name} to {hook_name} after {after_func}")
            names = [partial_or_func_name(x) for x in self._hooks[hook_name]]
            if after_func in names:
                pos = names.index(after_func) + 1
                self._hooks[hook_name].insert(pos, func)
            else:
                raise ValueError(f"No such func {after_func}")

    def remove_from_hook(self, hook_name: str, function_name: str):
        """Remove from named hook the named function.

        Args:
            hook_name: The name of the hook
            function_name: The name of the function to remove

        If there are multiple functions with same name, remove only the first
        one from the list.
        """
        hook = self._get_hook(hook_name)
        if hook:
            func = first(hook, lambda x: partial_or_func_name(x) == function_name)
            self._hooks[hook_name].remove(func)

    def remove_from_hook_at(self, hook_name: str, position: int):
        """Remove from named hook the function at position.

        Args:
            hook_name: The name of the hook
            position: The index of the function to remove
        """
        hook = self._get_hook(hook_name)
        if hook:
            hook.pop(position)

    def _get_hook(self, hook_name: str) -> Optional[List[Callable]]:
        """Return the hook's function list, or None if no such hook.

        Args:
            hook_name: Name of the hook
        """
        if hook_name in self.hooks:
            return self.hooks[hook_name]
        else:
            return None

    @property
    def hooks(self) -> Dict[str, List[Callable]]:
        """Return all the hooks keyed by name."""
        return self._hooks

    def describe_hook(self, hook_name: str) -> List[str]:
        """Describe the hook with given name.

        Args:
            hook_name: Name of the hook

        Returns a human-readable name per function: partials are rendered via
        :func:`partial_or_func_name`, plain functions via their ``__name__``.
        """
        hook = self._get_hook(hook_name)
        retval = []
        if hook:
            for x in hook:
                if isinstance(x, partial):
                    retval.append(f"{partial_or_func_name(x, True)}")
                else:
                    retval.append(f"{x.__name__}")
        return retval
|
[
"[email protected]"
] | |
52793a05086193090d0b2d2851abe1075600a7d7
|
649417ac89aa4917eeecf00ad7aa2d9ddaa15bf6
|
/PhaseMatchingBiphotonFWM.py
|
b4c39e2c3123be323950df49fef38a7e65ef84ab
|
[] |
no_license
|
damienbonneau/sources
|
70bb514e384571f922b044306f6dfd81ac459bed
|
60d0aa605bbd6f9e6ea30e4a369d12dd4ed1a83b
|
refs/heads/master
| 2021-01-20T21:15:45.454573 | 2016-08-04T18:01:49 | 2016-08-04T18:01:49 | 64,950,261 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 54,609 |
py
|
# -*- coding: utf-8 -*-
from numpy import *
import matplotlib as mpl
from matplotlib import cm,colors
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.optimize import leastsq
import os,time
# -----------------------------------------------------------------------------#
# Plot functions
# -----------------------------------------------------------------------------#
# Lattice: bidimensional numpy array, example : lattice = ones((size, size), dtype=float )
# extent: axis extent for each axis [begin_x,end_x,begin_y,end_y]
def plotcolormap(lattice, extent, fname=None):
    """Render a 2D array as a black-red-yellow heat map.

    lattice: 2D numpy array of non-negative values.
    extent:  axis bounds [begin_x, end_x, begin_y, end_y].
    fname:   if given, save the figure to this path instead of showing it.
    """
    fig = plt.figure()
    heat_cmap = colors.LinearSegmentedColormap.from_list(
        'bla', ['#000000', '#FF0000', '#FFFF00'])
    begin_x, end_x, begin_y, end_y = extent
    # Keep the rendered cells square regardless of the axis ranges.
    aspect = (end_x - begin_x) / (end_y - begin_y)
    plt.imshow(lattice, heat_cmap, vmin=0, interpolation='nearest',
               extent=extent, aspect=aspect)
    plt.gca().xaxis.set_major_locator(MaxNLocator(nbins=7, prune='lower'))
    plt.gca().yaxis.set_major_locator(MaxNLocator(nbins=6))
    if fname is None:
        plt.show()
    else:
        plt.savefig(fname)
        plt.close()
def plot(plots):
    """Draw a list of (x, y, style) curves on one gridded figure and show it."""
    for xs, ys, fmt in plots:
        plt.plot(xs, ys, fmt)
    plt.grid(True)
    plt.title('')
    plt.xlabel('')
    plt.ylabel('')
    plt.show()
def plotcolormapphase(lattice, extent):
    """Show a phase map clipped to [-pi, pi] with a blue-black-red colormap."""
    fig = plt.figure()
    phase_cmap = colors.LinearSegmentedColormap.from_list(
        'bla', ['#0000FF', '#000000', '#FF0000'])
    plt.imshow(lattice, phase_cmap, vmin=-pi, vmax=pi,
               interpolation='nearest', extent=extent)
    plt.show()
# -----------------------------------------------------------------------------#
# MISC FUNCTIONS (helpers for classes)
# -----------------------------------------------------------------------------#
def funcpeak(lbda, lbda0):
    """Amplitude of a normalized Gaussian spectral peak centred at lbda0 (um)."""
    T = 1. * 10 ** (-9)            # pulse duration [s]
    signu = 0.441 / T              # spectral width in frequency [Hz]
    # Convert the frequency width to a wavelength width around lbda0.
    siglbda = signu / (c * 10 ** 6) * lbda0 ** 2
    gauss = 1. / (sqrt(2 * pi) * siglbda) * exp(-(lbda - lbda0) ** 2 / (2 * siglbda ** 2))
    return sqrt(gauss)
"""
input state as a 2D matrix
!! the input state is not given as a density matrix
it's a pure state given in a matrix
"""
def schmidtnumber(state):
N,M = state.shape
ror=zeros((N,N)) # reduced density matrix
for l in xrange(N):
for n in xrange(N):
for p in xrange(N):
ror[l,n]+=state[p,l]*state[p,n]
ror2 = dot(ror,ror)
# compute the trace of ror2
tmp = 0
for k in xrange(N):
tmp+= ror2[k,k]
schn = 1.0/tmp
return schn
def parse_extent(line):
    # Pull "<wlmin>-<wlmax>" out of a header line shaped like
    # "label: <wlmin>-<wlmax>, ..." and return both bounds as floats.
    after_label = line.split(":")[1]
    span = after_label.split(",")[0]
    lo, hi = span.split("-")
    return float(lo), float(hi)
def parse_biphoton_data(line):
    """Parse one line of whitespace-separated floats into a numpy array."""
    # str.split() with no separator tolerates repeated spaces, tabs and the
    # trailing newline, where the original split(" ") produced empty tokens
    # and crashed on float('').
    return array([float(tok) for tok in line.split()])
# -----------------------------------------------------------------------------#
# CONSTANTS
# -----------------------------------------------------------------------------#
# Physical constants (SI units unless noted in the trailing comment).
I = 1.0j  # imaginary unit shorthand
HPLANCK = 6.626068*10**(-34) #m2 kg / s  (Planck constant)
HBAR = HPLANCK/(2*pi)  # reduced Planck constant [J s]
EPSILON0 = 8.85418782*10**(-12)#m-3 kg-1 s4 A2 or C.V-1.M-1 (vacuum permittivity)
c = 299792458.0 # CLIGHT = 299792458. # m/s (speed of light in vacuum)
n2_Si = 6.3* 10**(-18) # m2/W (Semicond. Sci. Technol. 23 (2008) 064007 (9pp)) -- Kerr index of silicon
# -----------------------------------------------------------------------------#
# CLASS Waveguide
# -----------------------------------------------------------------------------#
# Init (width, height):
# * Take the width and height of the waveguide cross section as parameters
# * Loads a file containing lbda vs neff
# * fits a dispersion curve to the data loaded
# This class has methods to obtain the effective index, the group index, and wave number when given a wavelength
#
class Waveguide(object):
    """Dispersion model of one waveguide cross section.

    Loads a tabulated (wavelength, neff) data file matching the given
    width x height from the 'waveguide_data_noslab' directory, then fits a
    cubic dispersion curve to it.  Provides effective index, group index
    and wave-number lookups at a given wavelength (um).
    """
    def __init__(self,width,height):
        # width, height: cross-section dimensions used to select the data
        # file whose name contains "<width>x<height>".
        self.rootname = "waveguide_data_noslab"
        self.width = width
        self.height = height
        s = "%dx%d" % (width,height)
        files = os.listdir(self.rootname)
        for fname in files:
            if fname.find(s) >=0:
                self.__load__(fname)
        self.__fit__()
        # We fix the FWM effective area that we calculate using the overlap between the four fields
        self.Aeff = 0.03 # um^2
    def __load__(self,fname):
        """Read a tab-separated (wavelength, neff) table into self.lbdas/self.neffs."""
        # NOTE(review): Windows-only path separator, and the file handle is
        # never closed.  Also 'lbda' is still a *string* here, so in
        # Python 2 'lbda>0' is always True (str > int) -- confirm intent.
        path = self.rootname+"\\"+fname
        f = open(path)
        line = f.readline()
        lbdas = []
        neffs = []
        while(len(line))>0:
            splitted = line.split("\t")
            lbda,neff = splitted[0:2]
            line = f.readline()
            if lbda>0:
                lbdas.append(float(lbda))
                neffs.append(float(neff))
        self.lbdas = array(lbdas)
        self.neffs = array(neffs)
        return
    def __fit__(self):
        """Least-squares fit of a cubic in wavelength to the loaded neff data."""
        p0 = [1,0,0,0]  # initial guess: constant index 1
        plsqwl2n = leastsq(self.__residuals__, p0, args=(self.neffs, self.lbdas))
        self.pwl2n = plsqwl2n[0] # wavelength to neff fit coefficients
        #print self.p
    def __func__(self,p,x):
        """Evaluate the cubic polynomial with coefficients p (constant term first) at x."""
        d,c,b,a = p
        return a*x**3+b*x**2+c*x+d
    def __residuals__(self,p,y, x):
        """Fit residuals y - poly(p, x) for scipy.optimize.leastsq."""
        err = y-self.__func__(p,x)
        return err
    def getneff(self,lbda):
        """Effective index at wavelength lbda (um) from the fitted cubic."""
        return self.__func__(self.pwl2n,lbda)
    # lbda in um
    def wl2kv(self,a_lbda):
        """Wavelength (um) to propagation constant k = 2*pi*neff/lbda."""
        return 2*pi*self.getneff(a_lbda)/(a_lbda) # the kvector z component is returned in um-1
    def kv2wl(self,a_kv):
        """Inverse of wl2kv -- not implemented (requires solving the fit)."""
        pass # not as easy ...
    def plotneff(self):
        """Plot the raw neff data together with the fitted dispersion curve."""
        x = arange(min(self.lbdas),max(self.lbdas),0.1)
        plots = [(self.lbdas,self.neffs,"-"),(x,self.getneff(x),"-")]
        plot(plots)
    def getng(self,lbda):
        """Group index ng = neff - lbda * dneff/dlbda via central difference."""
        lbda_step = 0.00001  # central-difference half step [um]
        lbda1 = lbda - lbda_step
        lbda2 = lbda + lbda_step
        neff1 = self.getneff(lbda1)
        neff2 = self.getneff(lbda2)
        neff = self.getneff(lbda)
        ng = neff -lbda*(neff2-neff1)/(2*lbda_step)
        return ng
# -----------------------------------------------------------------------------#
# CLASS FWM_Simu
# -----------------------------------------------------------------------------#
# This class calculates the joint spectral distribution obtained for a straight
# waveguide with a given set of parameters
# Init (
# * Waveguide cross section
# * Waveguide length (Meters)
# * Pump power (Watts)
# * Pump wavelength (um)
# * Pulse duration (Seconds)
# * Repetition rate (Hz)
# )
#
# computeJS: Does the simulation
#
class FWM_Simu(object):
def __init__(self,wg = Waveguide(550,220),
length = 0.03, # 0.03 ->3cm
pumppower = 0.1*10**-3,pumpwl = 1.55,pulseduration=1.*10**(-12),reprate = 40*10**6, N= 200
):
self.T = pulseduration # in seconds
self.wg = wg # waveguide crosssection (Waveguide object)
self.length = length # Propagation length in the waveguide
self.L = length
self.pumppower = pumppower # in W
#self.gamma = 3*10**2 # W^-1 m^-1 ; non linear coeff IEEE JOURNAL OF SELECTED TOPICS IN QUANTUM ELECTRONICS, VOL. 16, NO. 1, JANUARY/FEBRUARY 2010
self.lbda_p = pumpwl
#self.pumpenvelop(self.lbda_p)
self.pumpenvelop(pumpwl) # computes siglbda
self.gamma = 460. # 2*pi/(self.lbda_p*10**(-6))*n2_Si/(self.wg.Aeff*10**(-12)) #W-1 M-1
#print "Gamma", self.gamma
self.reprate = reprate # Hz
self.Epulse = self.pumppower/self.reprate #Energy per pulse in J
self.N = N
def setPumpwl(self,x):
self.lbda_p = x
def setPulseDuration(self,x):
self.T = x
self.pumpenvelop(self.lbda_p)
# Define phase matching function
def getdk(self,p1,p2,lbda_p1,lbda_p2,lbda_s,lbda_i):
kp1,kp2,ki,ks = map(self.wg.wl2kv,[lbda_p1,lbda_p2,lbda_i,lbda_s])
ga = self.gamma*10**(-6) # to put gamma in um
dk = kp1+kp2-ks-ki-ga*(p1+p2) # When putting gamma, the phase matching bandwidth changes dramatically
return dk
# **************
# Pump envelopes
# **************
def pumpenvelop(self,lbda):
return self.gaussppumpenvelop(lbda) #self.gaussppumpenvelop(lbda)
#return self.rectpumpenvelop(lbda) #self.gaussppumpenvelop(lbda)
def toplotCWGain(self,lbda_s = arange(1.5,1.6,0.0001)):
lbda_i = 1./(2/self.lbda_p-1/lbda_s)
a_dk = self.getdk(0,0,self.lbda_p,self.lbda_p,lbda_s,lbda_i) # um-1
a_phasematching = sinc(self.length*10**6/2*a_dk)
return a_phasematching**2
def gausspulsedpumpenvelop(self,lbda,dlbda = 0.4*10**(-4)):
return self.gaussppumpenvelop(lbda) *(sin(2*pi*(lbda)/dlbda))**2# From laser textbook
def rectpumpenvelop(self,lbda):
signu = 0.441/self.T # self.linewidth #0.441/sigma_t # From laser book, in Hz
sigomega = 2*pi*signu
lbda0 = self.lbda_p
siglbda = signu/(c*10**6)*(lbda0)**2
w = sqrt(2*pi)*siglbda
self.siglbda = siglbda
a = 1/sqrt(w)
lbda_min = lbda0-w/2
lbda_max = lbda0+w/2
#print "lbdas", lbda_min,lbda_max
step = w / 400
self.pumprange = arange(lbda_min,lbda_max,step)
#print "min ", lbda_min,lbda[0]
#print "max ", lbda_max,lbda[-1]
output = (lbda>=lbda_min)*(lbda<=lbda_max)*a
#if type(lbda) == type(zeros(5)):
# print min(lbda),lbda_min,lbda_max,max(lbda)," ---> ", output.sum()
return output
def gaussppumpenvelop(self,lbda):
lbda0 = self.lbda_p
k0,k = map(self.wg.wl2kv,[lbda0,lbda])
signu = 0.441/self.T # self.linewidth #0.441/sigma_t # From laser book, in Hz
sigomega = 2*pi*signu
siglbda = signu/(c*10**6)*(lbda0)**2
ng = self.wg.getng(lbda0)
sigk = siglbda/(lbda0)**2*2*pi*ng
self.siglbda = siglbda
omega0 = 2*pi*c/lbda0
omega = 2*pi*c/lbda
#return exp(-2*log(2)*((lbda0-lbda)*10**-6)**2/(siglbda**2)) # From laser textbook
return sqrt(1./(sqrt(2*pi)*siglbda) * exp(-(lbda-lbda0)**2/(2*siglbda**2))) # this gauss envelop is on lambda which is probably not very physical ...
#return sqrt(1./(sqrt(2*pi)*sigomega) * exp(-(omega-omega0)**2/(2*sigomega**2)))*sqrt(2*pi*c)/lbda
# Rectangular pulse in the temporal domain
# lbda in um
# T : pulse length [S]
def sincpumpenvelop(self,lbda):
T = self.T
om = 2*pi*c/(lbda*10**-6)
om0 = 2*pi*c/(self.lbda_p*10**(-6))
dom = om - om0
#return sinc(dom*T/2) * sqrt(T/(2*pi)) # this normalization works when integrating over omega
# *** WARNING, in python, sinc(x) = sin(pi*x)/(pi*x) which is already normalized to one ! ***
return sinc(dom*T/2) * sqrt(T*pi*c*10**6/(lbda**2)) # c in um/s, lbda in um, T in s; this normalization is for lambda
    # **************
    #
    # **************
    # This provides the range of lbdas which should be used to accurately span the pump
    def updatepumprange(self):
        """Find a wavelength window containing >=95% of the pump power.

        Doubles deltalbda until the integrated |envelope|^2 exceeds the
        target fraction, then resamples that window on a 400-point grid.
        Stores the grid in self.pumprange and returns it.
        """
        print "Get pump range ..."
        lbda_p = self.lbda_p
        lbda_step= 0.00000001 # step for finding the pump range
        P = 0.
        targetfraction = 0.95
        deltalbda = 0.5*10**(-6) # initialize deltalbda at 1pm
        while (P<targetfraction):
            deltalbda = 2*deltalbda
            lbdas = arange(lbda_p-deltalbda,lbda_p+deltalbda,lbda_step)
            #print P
            # Riemann sum of the pump intensity over the candidate window
            P=(self.pumpenvelop(lbdas)*self.pumpenvelop(lbdas).conjugate()).sum()*lbda_step
            print P
        # NOTE(review): this second print duplicates the one in the loop — debug leftover?
        print P
        N = 400
        step = (lbdas[-1]-lbdas[0])/N # Step for the returned pump range
        res = arange(lbdas[0],lbdas[-1],step)
        #print "Size of pump lbdas" ,lbdas.size
        #print self.pumpenvelop(lbda_p)
        print "Pump range : (um)",lbdas[0],lbdas[-1]
        self.pumprange = res
        return res
    def setRangeJS(self,lbda_s_min,lbda_s_max,lbda_i_min,lbda_i_max):
        """Set the signal/idler wavelength windows (um) for the joint-spectrum scan.

        Also precomputes self.extent (in nm) in the [idler_min, idler_max,
        signal_min, signal_max] order used by the plotting routines.
        """
        self.lbda_s_min = lbda_s_min
        self.lbda_s_max = lbda_s_max
        self.lbda_i_min = lbda_i_min
        self.lbda_i_max = lbda_i_max
        self.extent = [x*1000 for x in [self.lbda_i_min,self.lbda_i_max,self.lbda_s_min,self.lbda_s_max]] # um to nm
        print self.extent
    def setRangeScanResonance(self,lbda_s_min,lbda_s_max):
        """Choose the idler window matching a given signal window via energy conservation.

        Idler bounds come from 2/lbda_pump = 1/lbda_s + 1/lbda_i, evaluated at
        the extremes of the current pump range, then both windows are passed
        to setRangeJS.
        """
        # Get the range for signal centered on the resonance
        lsm,lsM = lbda_s_min,lbda_s_max
        # Get the range for idler using rough energy conservation
        lp = self.lbda_p
        lp_min = min(self.pumprange)
        lp_max = max(self.pumprange)
        lim = 1./(2./lp_min - 1./lsM)
        liM = 1./(2./lp_max - 1./lsm)
        print "avg_pumps", (lim+lsm)/2,(liM+lsM)/2
        #print "%.2f %.2f ; %.2f %.2f (pm)" % (lsm*10**6,lsM*10**6,lim*10**6,liM*10**6)
        print lsm,lsM,lim,liM
        self.setRangeJS(lsm,lsM,lim,liM)
    def computeJS_old(self,begin=1.545,end=1.555): # begin=1.545,end=1.555,step=0.0001
        """Legacy joint-spectrum computation on a square [begin, end] grid (um).

        Superseded by computeJS; kept for reference.  For every (signal, idler)
        pair it integrates the two-pump amplitude product (with phase factor)
        over the pump range, filling self.lattice (magnitude), self.phases,
        self.extent and self.normlattice.
        """
        #size = int((end-begin)/step)
        size = self.N
        step = (end-begin) / self.N
        P = self.pumppower
        L = self.length
        lattice = ones((size, size), dtype=float )
        phases = ones((size, size), dtype=float )
        for i in xrange(size):
            print i
            lbda_i = i*step+begin
            for j in xrange(size):
                lbda_s = j*step+begin
                a_lbda_p1 = self.pumprange
                # second pump wavelength fixed by energy conservation
                a_lbda_p2 = 1./(1/lbda_s+1/lbda_i-1/a_lbda_p1)
                a_p1 = P*self.pumpenvelop(a_lbda_p1) # pump amplitude 1
                a_p2 = P*self.pumpenvelop(a_lbda_p2) # pump amplitude 2
                a_dk = self.getdk(a_p1,a_p2,a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
                a_phasematching = 1
                a_expi = 1
                #a_phasematching = sinc(L/2*a_dk)
                a_expi = exp(I*L/2*a_dk)
                a_res = a_phasematching*a_expi*a_p1*a_p2
                res = a_res.sum()*a_res.size*step
                # note the column flip: j counts up while the stored column counts down
                lattice[i,size-1-j]= sqrt(abs(res.real**2+res.imag**2)) #res res #
                phases[i,size-1-j] = angle(res)
        #N = sqrt((lattice*conjugate(lattice)).max())
        #lattice = lattice/N
        self.lattice = lattice
        self.phases = phases
        self.extent = [begin*1000,end*1000,begin*1000,end*1000]
        Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
        self.normlattice = sqrt(abs(lattice/Z))
# Override these methods to add custom filters on signal and idler arm
def filter_idler(self,lbda):
return ones(lbda.size)
def filter_signal(self,lbda):
return ones(lbda.size)
    def getPurityAfterFilter(self):
        """Return the heralded-photon purity after applying the arm filters.

        Builds signal/idler filter matrices, re-normalises the filtered joint
        spectrum, temporarily swaps it into self.normlattice to reuse
        computeHeraldedPhotonPurity, then restores the unfiltered matrix.
        """
        Ni = self.Ni
        Ns = self.Ns
        # Apply custom filters:
        m_filter_signal =zeros((Ni,Ns))
        m_filter_idler =zeros((Ni,Ns))
        for i in arange(Ni):
            m_filter_signal[i,:] = self.filter_signal(self.a_lbda_s)
        for j in arange(Ns):
            m_filter_idler[:,j] = self.filter_idler(self.a_lbda_i)
        lattice = self.normlattice*m_filter_signal*m_filter_idler
        # Multiply by the appropriate missing constants
        Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
        normlattice = sqrt(abs(lattice/Z))
        self.normlattice_unfiltered = self.normlattice[:,:] # Save the previous matrix
        self.normlattice = normlattice # assign the new filtered matrix
        purity = self.computeHeraldedPhotonPurity() # computes the purity after filtering
        self.normlattice = self.normlattice_unfiltered # restore the previous matrix
        return purity
    def computeJS(self):
        """Compute the joint spectral intensity for the straight-waveguide source.

        Scans the (signal, idler) grid set by setRangeJS and, for each pair,
        integrates the product of the two pump amplitudes over self.pumprange
        (energy conservation fixes the second pump wavelength).  Phase
        matching is approximated as 1 here (see inline note).  Fills
        self.lattice (intensity), self.phases, self.normlattice (amplitude,
        normalised to unit total) plus grid bookkeeping attributes.
        """
        P = self.pumppower
        L = self.L # Cavity length
        N = self.N
        lbda_s_min = self.lbda_s_min
        lbda_s_max = self.lbda_s_max
        lbda_i_min = self.lbda_i_min
        lbda_i_max = self.lbda_i_max
        step_i = (lbda_i_max-lbda_i_min)/N
        step_s = (lbda_s_max-lbda_s_min)/N
        # the [0:N] slice guards against arange occasionally yielding N+1 points
        a_lbda_i = arange(lbda_i_min,lbda_i_max,step_i)[0:N]
        a_lbda_s = arange(lbda_s_min,lbda_s_max,step_s)[0:N]
        self.a_lbda_i = a_lbda_i
        self.a_lbda_s = a_lbda_s
        Ni = a_lbda_i.size
        Ns = a_lbda_s.size
        print Ni, Ns
        self.Ni = Ni
        self.Ns = Ns
        self.step_i = step_i
        self.step_s = step_s
        rangepump = self.pumprange
        M = rangepump.size
        dlbda_pump = (rangepump.max()-rangepump.min())/M
        lattice = zeros((Ni,Ns))
        a_lbda_p1 = rangepump
        a_p1 = self.pumpenvelop(a_lbda_p1) # pump amplitude 1
        ng = self.wg.getng(self.lbda_p)
        print "Steps" ,step_i,step_s
        #dbgpm = 0.
        pumpmax = self.pumpenvelop(self.lbda_p)
        phases = zeros((Ni,Ns))
        print max(a_p1)
        for j in xrange(Ns):
            #rint j
            lbda_s = a_lbda_s[j] # lbda_s_min+j*step_s
            for i in xrange(Ni):
                lbda_i = a_lbda_i[i] # lbda_i_min+i*step_i
                a_lbda_p2 = 1./(1./lbda_s+1./lbda_i-1./a_lbda_p1)
                a_p2 = self.pumpenvelop(a_lbda_p2) # pump amplitude 2
                #print a_lbda_p2[0],a_lbda_p2[-1]," ---> ", a_p2.sum()
                #print max(a_p2)
                # In order to save computation time we can take a_pm = 1. for small cavities
                a_dk = 1.
                a_pm = 1.
                #a_dk = self.getdk(P*a_p1*conjugate(a_p1),P*a_p2*conjugate(a_p2),a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
                #a_pm = sinc(L/2*a_dk/pi) # the L will be added later in the global constant
                a_res = a_p1*a_p2*a_pm
                # Jacobian-like factor from the change of pump variable
                a_res = a_res * a_lbda_p2/a_lbda_p1
                # Multiply by the dlambda;
                # The pump function is i um^(-1/2), dlbda_pump is in um
                a_res = a_res*dlbda_pump
                res = a_res.sum() # unitless
                #res = res
                # Multiply by the dlambda
                # Since the formula was derived for domega, we have to remember that domega = -2*pi*c/lbda**2 * dlbda
                lattice[i,Ns-1-j]= abs(res.real**2+res.imag**2)* (step_i/(lbda_i**2)) * (step_s/(lbda_s**2))
                #print angle(res)
                phases[i,Ns-1-j] = angle(res)
        # Check what should be the proper formula which keeps the joint spectral amplitude instead of joint spectral probability distribution
        # Apply custom filters:
        # m_filter_signal =zeros((Ni,Ns))
        # m_filter_idler =zeros((Ni,Ns))
        # for i in arange(Ni):
        #     m_filter_signal[i,:] = self.filter_signal(a_lbda_s)
        # for j in arange(Ns):
        #     m_filter_idler[:,j] = self.filter_idler(a_lbda_i)
        # lattice = lattice*m_filter_signal*m_filter_idler
        # Multiply by the appropriate missing constants
        lattice = lattice*(c*self.Epulse*self.gamma*(self.L))**2/(2*pi**2) #/ (2*pi*ng)
        Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
        self.normlattice = sqrt(abs(lattice/Z))
        self.lattice = lattice
        self.phases = phases
    def plotBiphoton(self,fname = None):
        """Render self.lattice as a colour map (saved to `fname` if given)."""
        plotcolormap(self.lattice,self.extent,fname)
    def __g__(self,i,j):
        """First-order correlation between rows i and j of the normalised JSA, including phases."""
        #return (self.normlattice[i,:]*conjugate(self.normlattice[j,:])).sum()
        return (self.normlattice[i,:]*exp(I*self.phases[i,:])*conjugate(self.normlattice[j,:]*exp(I*self.phases[j,:]))).sum()
    def __g_nophase__(self,i,j):
        """Same correlation as __g__ but ignoring the stored phase matrix."""
        return (self.normlattice[i,:]*conjugate(self.normlattice[j,:])).sum()
    def __G_nophase__(self,i,j,k,l):
        """Second-order correlation (product of two phase-less first-order terms)."""
        return self.__g_nophase__(i,j)*self.__g_nophase__(k,l)
    # Vectorised variants so the purity sums can be evaluated over index grids.
    vectg = vectorize(__g__)
    def __G__(self,i,j,k,l):
        """Second-order correlation (product of two phase-aware first-order terms)."""
        return self.__g__(i,j)*self.__g__(k,l)
    vectG = vectorize(__G__)
    vectG_nophase = vectorize(__G_nophase__)
    # Purity = Tr(ro**2)
    def computenaivepurity(self):
        """Direct O(N^2) evaluation of Tr(rho^2) for the heralded photon.

        Stores self.purity and the Schmidt number self.schn = 1/P and
        returns P.  NOTE(review): applies sqrt to normlattice, which is
        itself already an amplitude — hence "naive"; confirm against
        computeHeraldedPhotonPurity before relying on it.
        """
        lattice = sqrt(self.normlattice)
        N = self.N
        P = 0
        for n in xrange(self.N):
            for m in xrange(self.N):
                P+= (lattice[:,n]*conjugate(lattice[:,m])).sum()*(lattice[:,m]*conjugate(lattice[:,n])).sum()
        self.purity = abs(P)
        self.schn = 1./P
        return P
    # Computes the probability of getting coincidences between two heralded photons from different sources
    def computePcoincfrom2photons(self):
        """HOM-type coincidence probability for two independent heralded photons.

        Evaluates the G(n,n,m,m) and G(n,m,m,n) correlation sums over the
        full index grid; Pcoinc = 0.5*(sum Gnnmm - sum Gnmmn) (labbook
        27/01/2012 per the comment).  Stores and returns the visibility
        1 - Pcoinc/0.5.
        """
        lattice = sqrt(self.normlattice)
        #print "State Norm:", abs(lattice*conjugate(lattice)).sum() # equivalent to the trace
        print "Computing proba coincidence"
        N = self.N
        omega1 = zeros((N,N))
        omega2 = zeros((N,N))
        for i in range(N):
            omega1[:,i]= arange(N)
            omega2[i,:]= arange(N)
        Gnnmm = self.vectG(self,omega1,omega1,omega2,omega2)
        Gnmmn = self.vectG(self,omega1,omega2,omega2,omega1)
        print "Gnnmm: ",Gnnmm.sum()
        print "Gnmmn: ",Gnmmn.sum()
        Pcoinc = 0.5*(Gnnmm.sum()-Gnmmn.sum()) # See proof in my labbook from 2012 (27/01/2012)
        print "Pcoinc: ",Pcoinc
        print "Visibility: ", 1.-Pcoinc/0.5
        self.visibility= 1.-Pcoinc/0.5
        return 1.-Pcoinc/0.5
    def computeHeraldedPhotonPurity(self):
        """Compute the purity Tr(rho^2) of the heralded photon from the JSA.

        Sums G(i,j,j,i) over the full index grid using the phase-aware
        vectorised correlator.  Stores self.purity (magnitude) and
        self.schn = 1/purity (Schmidt number), returns |purity|.  The
        triple-quoted block below is dead code kept by the author for
        reference.
        """
        #self.computePcoincfrom2photons()
        lattice = self.normlattice
        N = self.N
        omega1 = zeros((N,N))
        omega2 = zeros((N,N))
        for i in range(N):
            omega1[:,i]= arange(N)
            omega2[i,:]= arange(N)
        #print "State Norm:", abs(lattice*conjugate(lattice)).sum() # equivalent to the trace
        purity = self.vectG(self,omega1,omega2,omega2,omega1).sum()
        #purity2 = self.vectG_nophase(self,omega1,omega2,omega2,omega1).sum()
        # print "Purity: ", purity,purity2
        self.purity = abs(purity)
        self.schn = 1/purity
        """
        print "Computing heralded photon purity"
        N = self.N
        omega1 = zeros((N,N))
        omega2 = zeros((N,N))
        for i in range(N):
            omega1[:,i]= arange(N)
            omega2[i,:]= arange(N)
        x = self.vectg(self,arange(N),arange(N))
        print "Tr_ro1: ",x.sum()
        g12 = self.vectg(self,omega1,omega2)
        purity = (g12*g12).sum() # no dot product here, the formula (g12*g12).sum() provides exactly the trace over
        # the reduced density matrix squared.
        #print schn, schmidtnumber(lattice)
        """
        return abs(purity)
###
# -----------------------------------------------------------------------------#
# CLASS FWM_RingSimu
# -----------------------------------------------------------------------------#
# This class calculates the joint spectral distribution obtained in a ring
# resonator for a given set of parameters
# Init (
# * Waveguide cross section
# * Waveguide length (Meters)
# * Pump power (Watts)
# * Pump wavelength (um)
# * Pulse duration (Seconds)
# * Repetition rate (Hz)
# * N: grid sampling (the JSA is stored in a NxN grid)
# * r: ring coupling (r = 1 means no coupling, while r = 0 means full coupling)
# * tau: round trip transmission which accounts for the loss in the ring resonator
# )
#
# setPumpToClosestRes(lambda) : Sets the pump to the closest resonance to the given wavelength
# setRangeScanResonance(p) : Sets the resonance to be used for collecting the idler photon. p is the resonance number.
# p = 0 is the same as the pump resonance
# p = +1 or -1 are the next nearest resonance to the pump
# p = +M or -M ....
#
# plotcavityresponse() : Shows the transmission spectrum of the cavity
# computeJS() : Does the simulation
#
# applycavity(lambda) : This is the function which applies the cavity. By default, this function applies a ring resonator.
# Different cavities can however be used.
# save(filename) : Saves the result of the simulation including all the parameters, the full state, and the derived parameters such as the Schmidt number
#
class FWM_RingSimu(FWM_Simu):
    """Joint-spectrum simulation of FWM pair generation in a ring resonator.

    Extends FWM_Simu with a ring cavity (coupling r, round-trip transmission
    tau, length L in um).  Typical workflow (see the block comment above this
    class): setPumpToClosestRes -> setRangeScanResonance -> updatepumprange
    -> computeJS -> save.
    """
    def __init__(self,wg = Waveguide(550,220),
            length = 80., # um
            pumppower = 45.*10**-3,pumpwl = 1.55,pulseduration=1.*10**(-12),N = 200,r = 0.98,tau = 1.0): # 300*10**3 -> 300 kHz linewidth
        FWM_Simu.__init__(self,wg = wg,
                length = length, # 0.03 ->3cm
                pumppower = pumppower,pumpwl = pumpwl,pulseduration=pulseduration)
        self.lbda_p = pumpwl # in um # We take the cavity resonance wavelength equal to the pump central wavelength
        self.mpump = -1 # resonance number closest to the pump
        # Ring parameters
        self.L = length # Length of the ring in um
        self.r = r
        self.tau = tau # tau = 1. -> No loss
        #self.tau = self.r # critical coupling
        self.N = N
        self.lattice = zeros((N,N))
        # For loading purpose : Params
        self.purity = -1
        self.schn = -1
        self.geneeff = -1
        # Maps header labels in saved files to the setter used when re-loading.
        self.setters = {"Purity" : self.__setPurity__,
                "Schmidt number" : self.__setSchn__,
                "r" : self.__setr__,
                "Nb pairs per pulse" : self.__setgeneeff__,
                "Pulse duration (ps)" : self.__setT__ ,
                "N" : self.__setN__,
                }
        self.resonancenumber = 0 # Resonance scanned for signal
    # Setters when loading
    def __setPurity__(self,x):
        self.purity = x
    def __setSchn__(self,x):
        self.schn = x
    def __setr__(self,x):
        self.r = x
    def __setgeneeff__(self,x):
        self.geneeff = x
    def __setT__(self,x):
        self.T = x
    def __setN__(self,x):
        # Resizing N also re-allocates the result matrices.
        self.N = x
        self.lattice = zeros((x,x))
        self.phases = zeros((x,x))
    def setTau(self,x):
        self.tau = x
    def setr(self,x):
        self.r = x
    def setL(self,L):
        self.L = L
    def ring(self,lbda):
        """Field-enhancement response of the all-pass ring at wavelength lbda (um)."""
        k = self.wg.wl2kv(lbda)
        t = sqrt(1-self.r**2)
        tau = self.tau
        r = self.r
        return I*t/(1-tau*r*exp(I*k*self.L))
    def cavity_transmission(self,lbda):
        """Through-port transmission of the ring at wavelength lbda (um)."""
        t = sqrt(1-self.r**2)
        return self.r+I*t*self.ring(lbda)
    # Override these methods to add custom filters on signal and idler arm
    def filter_idler(self,lbda):
        return ones(lbda.size)
    def filter_signal(self,lbda):
        return ones(lbda.size)
    # If using two coupled rings
    def set_r2(self,r2 = 0.999):
        self.r2 = r2
    def CROW2(self,lbda):
        """Response of two coupled rings (CROW) — requires set_r2() first."""
        k = self.wg.wl2kv(lbda)
        r2 = self.r2
        t2 = sqrt(1-r2**2)
        r1 = self.r
        t1 = sqrt(1-r1**2)
        tau = self.tau
        L1 = self.L
        L2 = L1
        g1 = tau*exp(I*L1*k)
        g2 = tau*exp(I*L2*k)
        return I*t1*(r2-g2)/(1-r2*g2+r1*g1*(g2-r2))
    def applycavity(self,lbda):
        """Cavity response used by computeJS; defaults to the single ring."""
        return self.ring(lbda)
    # Returns the closest cavity resonance for a given lambda and the resonance number
    def getClosestCavityRes(self,lbda):
        """Return (lbda0, m): resonance wavelength nearest `lbda` and its order m.

        Iteratively walks lbda0 until the propagation constant matches
        m*2*pi/L to a relative error of 1e-7 (author notes scipy.optimize
        would be more robust).
        """
        m = round(self.wg.wl2kv(lbda)*self.L/(2*pi))
        kp0 = m*2*pi/self.L # target pump propagation constant
        # The problem is now to get lbda0 from kp0
        # We start approximating the neff of lbda0 using the one of lambda
        neff = self.wg.getneff(lbda)
        # Using a scipy optimize method could be more robust and faster than the following code
        lbda0 = 2*pi*neff/kp0
        print lbda0
        lbdastep = 1*10**(-7) * sign(lbda0-lbda)
        kp = self.wg.wl2kv(lbda0)
        err = (kp-kp0)/kp0
        while(abs(err)>0.0000001):
            lbda0 += lbdastep
            kp = self.wg.wl2kv(lbda0)
            newerr = (kp-kp0)/kp0
            if newerr**2>err**2:
                # overshot the resonance: reverse the walking direction
                lbdastep = lbdastep*(-1)
            err = newerr
        return lbda0,m
    # Centers the pump on the closest cavity resonance
    def setPumpToClosestRes(self,lbda):
        self.lbda_p,self.mpump = self.getClosestCavityRes(lbda)
        print "Pump is set at %.7f um" % self.lbda_p
    # Get the range to scan for signal for the nth resonance with respect to the pump
    # Rq : The pump should have been set such that mpump has a meaningful value
    def getSignalRange(self,n):
        """Return (min, max) wavelengths around the nth resonance from the pump.

        Window is 5 cavity FWHMs wide; the resonance position comes from the
        FSR estimate plus getClosestCavityRes.
        """
        FWHM = (1-self.r*self.tau)*self.lbda_p**2/(self.wg.getng(self.lbda_p)*sqrt(2)*pi*self.L)
        print "FWHM (um) : ",FWHM
        fullrange = 5*FWHM #
        wlFSR = self.lbda_p**2/(self.L*self.wg.getng(self.lbda_p)) # FSR in lambda
        print "FSR (um) : ",wlFSR
        lbda_s,m = self.getClosestCavityRes(self.lbda_p+n*wlFSR)
        print "Resonance (um) : ",lbda_s
        return lbda_s-fullrange/2,lbda_s+fullrange/2
    def plotcavityresponse(self,albda = arange(1.5477-0.01,1.5477+0.01,0.0000001)):
        """Plot cavity intensity response with pump, signal and idler markers."""
        cavity = self.applycavity(albda)*self.applycavity(albda).conjugate()
        pump = self.pumpenvelop(albda)**2
        lbda_i,m_i = self.getClosestCavityRes(1.548)
        lbda_s = 1./(2./self.lbda_p-1./lbda_i)
        signal_wl = funcpeak(albda,lbda_s)
        idler_wl = funcpeak(albda,lbda_i)
        plot([(albda,cavity,"-"),
            (albda,pump/pump.max()*cavity.max(),"-"),
            (albda,signal_wl/signal_wl.max()*cavity.max(),"r-"),
            (albda,idler_wl/idler_wl.max()*cavity.max(),"r-")
            ]) # Plot the pump normalised wrt the biggest field enhancement
    def setRangeJS(self,lbda_s_min,lbda_s_max,lbda_i_min,lbda_i_max):
        # NOTE: unlike the base class, this override does not set self.extent.
        self.lbda_s_min = lbda_s_min
        self.lbda_s_max = lbda_s_max
        self.lbda_i_min = lbda_i_min
        self.lbda_i_max = lbda_i_max
    def setRangeScanResonance(self,m):
        """Scan signal over resonance m (relative to the pump); idler follows energy conservation."""
        # Get the range for signal centered on the resonance
        lsm,lsM = self.getSignalRange(m)
        self.resonancenumber = m
        # Get the range for idler using rough energy conservation
        lp = self.lbda_p
        lim = 1./(2./lp - 1./lsM)
        liM = 1./(2./lp - 1./lsm)
        #print "%.2f %.2f ; %.2f %.2f (pm)" % (lsm*10**6,lsM*10**6,lim*10**6,liM*10**6)
        print lsm,lsM,lim,liM
        self.setRangeJS(lsm,lsM,lim,liM)
    def updatepumprange(self):
        """Like the base-class version, but clips the pump window to the cavity region.

        If the pump is much broader than the cavity resonance, the sampling
        grid is restricted so it stays fine enough across the resonance.
        """
        print "Get pump range ..."
        lbda_p = self.lbda_p
        print lbda_p
        lbda_step= 0.00000001 # step for finding the pump range
        P = 0.
        targetfraction = 0.95
        deltalbda = 0.5*10**(-6) # initialize deltalbda at 1pm
        while (P<targetfraction):
            deltalbda = 2*deltalbda
            lbdas = arange(lbda_p-deltalbda,lbda_p+deltalbda,lbda_step)
            #print P
            P=(self.pumpenvelop(lbdas)*self.pumpenvelop(lbdas).conjugate()).sum()*lbda_step
            print P
        print P
        N = 400
        # get cavity range
        # If the pump is broader than the cavity, then we should chop the pump to the cavity region such that the grid is fine enough in the cavity
        # If the pump is narrower than the cavity, then keep pump range
        lsm,lsM = self.getSignalRange(0)
        rl = lsM-lsm
        lsm = lsm-rl/2
        lsM = lsM+rl/2
        lbdamax = min(lbdas[-1],lsM)
        lbdamin = max(lbdas[0],lsm)
        step = (lbdamax-lbdamin)/N # Step for the returned pump range
        res = arange(lbdamin,lbdamax,step)
        #print "Size of pump lbdas" ,lbdas.size
        #print self.pumpenvelop(lbda_p)
        self.pumprange = res
        print "Pump range : (um)",lbdas[0],lbdas[-1]
        return res
    def getjointproba(self):
        return self.normlattice
    def getjointprobascaled(self):
        return self.normlattice/self.normlattice.max()
    def computeJS(self): # begin=1.545,end=1.555,step=0.0001
        """Compute the ring-resonator joint spectrum and pair statistics.

        Same integration scheme as FWM_Simu.computeJS but with the cavity
        response applied to both pumps, signal and idler, and the full
        phase-matching term enabled.  Also estimates pairs-per-pulse and
        compares against flat-pulse and CW analytical models; fills
        self.lattice / normlattice / phases / probapair / beta2_* / extent.
        """
        print self.wg.getng(self.lbda_p)
        P = self.pumppower
        L = self.L # Cavity length
        N = self.N
        lbda_s_min = self.lbda_s_min
        lbda_s_max = self.lbda_s_max
        lbda_i_min = self.lbda_i_min
        lbda_i_max = self.lbda_i_max
        step_i = (lbda_i_max-lbda_i_min)/N
        step_s = (lbda_s_max-lbda_s_min)/N
        a_lbda_i = arange(lbda_i_min,lbda_i_max,step_i)[0:N]
        a_lbda_s = arange(lbda_s_min,lbda_s_max,step_s)[0:N]
        Ni = a_lbda_i.size
        Ns = a_lbda_s.size
        print Ni, Ns
        Ni = N
        Ns = N
        self.step_i = step_i
        self.step_s = step_s
        rangepump = self.pumprange
        M = rangepump.size
        dlbda_pump = (rangepump.max()-rangepump.min())/M
        lattice = zeros((Ni,Ns))
        a_lbda_p1 = rangepump
        cav_resp_p1 = self.applycavity(a_lbda_p1)
        a_p1 = self.pumpenvelop(a_lbda_p1) # pump amplitude 1
        ng = self.wg.getng(self.lbda_p)
        print "Steps" ,step_i,step_s
        #dbgpm = 0.
        pumpmax = self.pumpenvelop(self.lbda_p)
        phases = zeros((Ni,Ns))
        for j in xrange(Ns):
            print j
            lbda_s = a_lbda_s[j] # lbda_s_min+j*step_s
            cav_resp_s = self.applycavity(lbda_s)
            for i in xrange(Ni):
                lbda_i = a_lbda_i[i] # lbda_i_min+i*step_i
                a_lbda_p2 = 1./(1./lbda_s+1./lbda_i-1./a_lbda_p1)
                a_p2 = self.pumpenvelop(a_lbda_p2) # pump amplitude 2
                # In order to save computation time we can take a_pm = 1. for small cavities
                a_dk = self.getdk(P*a_p1*conjugate(a_p1),P*a_p2*conjugate(a_p2),a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
                a_pm = sinc(L/2*a_dk/pi) # the L will be added later in the global constant
                #a_pm = 1.
                a_res = a_p1*a_p2*a_pm*cav_resp_p1*self.applycavity(a_lbda_p2)* self.applycavity(lbda_i)*cav_resp_s #
                a_res = a_res * a_lbda_p2/a_lbda_p1
                # Multiply by the dlambda;
                # The pump function is i um^(-1/2), dlbda_pump is in um
                a_res = a_res*dlbda_pump
                res = a_res.sum() # unitless
                #res = res
                # Multiply by the dlambda
                # Since the formula was derived for domega, we have to remember that domega = -2*pi*c/lbda**2 * dlbda
                lattice[i,Ns-1-j]= abs(res.real**2+res.imag**2)* (step_i/(lbda_i**2)) * (step_s/(lbda_s**2))
                #print angle(res)
                phases[i,Ns-1-j] = angle(res)
        # Check what should be the proper formula which keeps the joint spectral amplitude instead of joint spectral probability distribution
        # Apply custom filters:
        # m_filter_signal =zeros((Ni,Ns))
        # m_filter_idler =zeros((Ni,Ns))
        # for i in arange(Ni):
        #     m_filter_signal[i,:] = self.filter_signal(a_lbda_s)
        # for j in arange(Ns):
        #     m_filter_idler[:,j] = self.filter_idler(a_lbda_i)
        # lattice = lattice*m_filter_signal*m_filter_idler
        # Multiply by the appropriate missing constants
        lattice = lattice*(c*self.Epulse*self.gamma*(self.L))**2/(2*pi**2) #/ (2*pi*ng)
        Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
        self.normlattice = sqrt(abs(lattice/Z))
        self.lattice = lattice
        self.phases = phases
        xi = 2*lattice.sum()
        xi = tanh(sqrt(xi))**2 # Approximation valid in the case of two-mode squeezer
        self.probapair = xi * (1-xi)
        # Theory calculation for CW regime for comparison
        vg = c/self.wg.getng(self.lbda_p)
        print "Epulse (nJ) ", self.Epulse*10**9
        print "gamma W-1,m-1", self.gamma
        print "L (um)", L
        print "T (ps)", self.T*10**12
        print "vg %e" % vg
        print "r : %.4f" % self.r
        print "tau : %.4f" % self.tau
        print "Siglbda : %.5f" % (self.siglbda)
        #deltalbda = self.siglbda*sqrt(2*pi) # Such that the approx rectangular pulse results matches the gaussian def
        #beta2_pulsed = (self.Epulse*self.gamma*c)**2/(32*ng**4*pi**6)*self.lbda_p**4/(L**2*deltalbda**2)*(1-self.r**2)**4/(1-self.tau*self.r)**4
        xi = (self.Epulse*self.gamma*c)**2/(32*ng**4*pi**2)*self.lbda_p**4*pumpmax**4/(L**2)*(1-self.r**2)**4/(1-self.tau*self.r)**4
        xi = tanh(sqrt(xi))**2
        beta2_pulsed = xi * (1-xi)
        #beta2_pulsed = (self.Epulse*self.T*self.gamma/(L*10**(-6)))**2*vg**4/16.*(1-self.r**2)**4/(1-self.tau*self.r)**4
        xi = self.gamma**2*self.pumppower**2*(L*10**(-6))/8 * vg*self.T*(1-self.r**2)**4/(1-self.r*self.tau)**7
        xi = tanh(sqrt(xi))**2
        beta2_CW = xi * (1-xi)
        # We multiply the lattice by a factor of two since we only integrate over half of Phi(k1,k2) and we should account for the other symmetrical half
        print "Nb pairs per pulse:",self.probapair
        print "Flat pulse model:", beta2_pulsed
        print "CW model:", beta2_CW
        lbda_i0 = (lbda_i_max+lbda_i_min)/2
        lbda_s0 = (lbda_s_max+lbda_s_min)/2
        self.extent = list(array([lbda_i_min-lbda_i0,lbda_i_max-lbda_i0,lbda_s_min-lbda_s0,lbda_s_max-lbda_s0])*1000) # Check where should go i and s
        self.beta2_pulsed = beta2_pulsed
        self.beta2_CW = beta2_CW
    def getPhases(self):
        return self.phases
    def getAverageSpectra(self):
        """Return (idler, signal) marginal spectra of the normalised JSA."""
        return self.normlattice.sum(axis = 0),self.normlattice.sum(axis = 1)
    def save(self,directory="resonances_toshiba"):
        """Write parameters, derived figures and both raw matrices to a timestamped file.

        NOTE(review): paths are built with backslashes, so this is
        Windows-only as written.  Returns the file name.
        """
        timestamp = time.strftime("%m%d_%H%M",time.localtime(time.time()))
        # Create repository if it does not exist
        if not os.path.exists("data\\%s" % directory):
            os.makedirs("data\\%s" % directory)
        fname = "data\\%s\\simu_%s_r=%.3f_tau=%.3f_%.2fps_res=%d.txt" % (directory,timestamp,self.r,self.tau,self.T * 10**12,self.resonancenumber)
        # Header
        fw = open(fname,"w")
        fw.write("#Laser parameters\n")
        fw.write("%s : %.3f\n" % ("Pulse duration (ps)",self.T*10**12))
        fw.write("%s : %.4f\n" % ("Pump power avg (mW)",self.pumppower*1000))
        fw.write("%s : %.3f\n" % ("Repetition rate(MHz)",self.reprate/(10**6)))
        fw.write("%s : %.18e\n" % ("Energy per pulse (uJ)",self.Epulse*1000000))
        fw.write("%s : %.6f\n" % ("Pump wavelength (um)",self.lbda_p))
        fw.write("\n#Waveguide parameters\n")
        fw.write("%s : %.3f\n" % ("Width (nm)",self.wg.width))
        fw.write("%s : %.3f\n" % ("Height (nm)",self.wg.height))
        fw.write("%s : %.3f\n" % ("Aeff (um^2)",self.wg.Aeff))
        fw.write("%s : %.3f\n" % ("gamma (W-1 m-1)",self.gamma))
        fw.write("\n#Ring parameters\n")
        fw.write("%s : %.3f\n" % ("Cavity length (um)",self.L))
        fw.write("%s : %.5f\n" % ("Tau",self.tau))
        fw.write("%s : %.5f\n" % ("r",self.r))
        fw.write("\n#BiPhoton state properties\n")
        fw.write("%s : %.5f\n" % ("Nb pairs per pulse",self.probapair))
        fw.write("%s : %.5f\n" % ("Flat pulse model",self.beta2_pulsed))
        fw.write("%s : %.5f\n" % ("CW model",self.beta2_CW))
        self.computeHeraldedPhotonPurity()
        #self.computePcoincfrom2photons()
        #fw.write("%s : %.6f\n" % ("Visibility from two heralded sources",self.visibility))
        fw.write("%s : %.6f\n" % ("Schmidt number",abs(self.schn)))
        fw.write("%s : %.6f\n" % ("Purity",abs(1/self.schn)))
        # Theory calculation for CW regime for comparison
        vg = c/self.wg.getng(self.lbda_p)
        beta2 = self.gamma**2*(self.Epulse/self.T)**2*(self.L*10**(-6))/8 * vg*self.T*(1-self.r**2)**4/(1-self.r)**7
        fw.write("%s : %.5f\n" % ("Nb pairs(analytical CW)",beta2))
        fw.write("\n")
        fw.write("N=%d\n" % self.N)
        fw.write("Resonance number : %d\n" % self.resonancenumber)
        fw.write("\n#Scan range\n")
        fw.write("%s : %.6e - %.6e, %.6e\n" % ("idl min, idl max, step (um)",self.lbda_i_min,self.lbda_i_max,self.step_i))
        fw.write("%s : %.6e - %.6e, %.6e\n" % ("sig min, sig max, step (um)",self.lbda_s_min,self.lbda_s_max,self.step_s))
        fw.write("\n#Raw data Biphoton distribution\n")
        # Saves the joint spectrum
        for j in xrange(self.N):
            line = " ".join(("%.18e" % x) for x in self.lattice[:,self.N-1-j])
            fw.write(line+"\n")
        fw.write("\n#Raw data Biphoton phase distribution\n")
        # Saves the joint spectrum
        for j in xrange(self.N):
            line = " ".join(("%.18e" % x) for x in self.phases[:,self.N-1-j])
            fw.write(line+"\n")
        fw.close()
        return fname
    def load(self,fname):
        """Re-load a file produced by save(): parameters via self.setters, then both matrices.

        Re-derives self.normlattice from the loaded lattice.  Requires self.N
        to match the stored grid (the "N=" header sets it before the matrices
        are read).
        """
        print "Loading %s ..." % fname
        f = open(fname,"r")
        line = f.readline()
        while (len(line)>0):
            if line.startswith("#Scan range"):
                # Load the extent of the wavelength for signal and idler
                line = f.readline() # Readline for the idler
                self.lbda_i_min,self.lbda_i_max = parse_extent(line)
                line = f.readline() # Readline for the signal
                self.lbda_s_min,self.lbda_s_max = parse_extent(line)
                self.extent = [self.lbda_i_min,self.lbda_i_max,self.lbda_s_min,self.lbda_s_max] # Check where should go i and s
            if line.startswith("#Raw data Biphoton distribution"):
                # Load the biphoton distribution
                for j in xrange(self.N):
                    line = f.readline()
                    self.lattice[:,self.N-1-j] = parse_biphoton_data(line)
            if line.startswith("#Raw data Biphoton phase distribution"):
                # Load the biphoton phase distribution
                for j in xrange(self.N):
                    line = f.readline()
                    self.phases[:,self.N-1-j] = parse_biphoton_data(line)
            if line.find("#")>=0:
                l1 = line.split("#")[0]
            if line.find(":")>=0:
                line = line.replace("\n","")
                name,value = line.split(" : ")
                if name in self.setters.keys():
                    self.setters[name](float(value))
            elif line.startswith("N="):
                name,value = line.split("=")
                self.setters[name](int(value))
            line = f.readline()
        Z = self.lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
        self.normlattice = sqrt(abs(self.lattice/Z))
        f.close()
class CustomPump():
    """Measured pump pulse loaded from a CSV spectrum and fitted analytically.

    The fit is a Gaussian multiplied by a rational fraction (to capture
    distortion); getPulse(lbda) then evaluates the fitted envelope so it can
    replace FWM_Simu.pumpenvelop.
    """
    def __init__(self,fname="G2 Straight Transmission.csv"):
        self.rootname = "."
        self.__load__(fname)
        self.__fit__()
    def __load__(self,fname):
        """Read (wavelength nm, amplitude) pairs, skipping a 30-line header.

        Normalises the amplitudes to unit sum and records the peak wavelength
        as self.lbda_p.
        """
        path = os.path.join(self.rootname,fname)
        f = open(path)
        line = f.readline()
        lbdas = []
        amplitudes = []
        for i in arange(30):
            line = f.readline()
        while(len(line))>0:
            splitted = line.split(",")
            lbda,amplitude = splitted[0:2]
            line = f.readline()
            # NOTE(review): `lbda` is a string here, so this Py2 comparison is
            # effectively always True — confirm intent before porting to Py3.
            if lbda>0:
                lbdas.append(float(lbda)/1000) # nm -> um
                amplitudes.append(float(amplitude))
        self.lbdas = array(lbdas)
        self.amplitudes = array(amplitudes)
        self.amplitudes = self.amplitudes/self.amplitudes.sum() # Normalise
        self.lbda_p = self.lbdas[self.amplitudes.argmax()]
    def __fit__(self):
        """Least-squares fit of the measured spectrum (see __func__ for the model)."""
        # Gaussian multiplied by rational fraction to account for distorsion
        a = (10**3)
        b = (10**3)
        c = (10**3)**1.5
        d = 10
        e = 1
        f = 1
        sig = 1.0*10**(-3) # um
        p0 = [self.lbda_p,sig,a,b,c,d,e,f]
        plsq = leastsq(self.__residuals__, p0, args=(self.amplitudes, self.lbdas))
        self.p = plsq[0]
        print self.p
    # p : parameters
    # lbdas : wavelengths
    def __func__(self,p,lbdas):
        """Fit model: Gaussian times (linear + inverse-cubic) distortion term."""
        lbda0,sig,a,b,c,d,e,f = p
        dlbdas = lbdas-lbda0
        res = exp(-dlbdas**2/(2*sig**2))*(a*dlbdas+f/(b*dlbdas**3+c*dlbdas**2+d*dlbdas+e))
        return res
    def __residuals__(self,p,y, x):
        err = y-self.__func__(p,x)
        return err
    def getPulse(self,lbda):
        """Evaluate the fitted pump envelope at lbda (um)."""
        return self.__func__(self.p,lbda)
    def plotres(self):
        """Return plot tuples of the measured points and the fitted curve."""
        lbda1,lbda2 = min(self.lbdas),max(self.lbdas)
        x = arange(lbda1,lbda2,0.000001)
        #self.p = (A,r,tau)
        plots = [(self.lbdas,self.amplitudes,"ro"),(x,self.getPulse(x),"k-")] # (neff0 self.lbdas,self.Iouts,"ro"),
        #plot(plots)
        print self.lbda_p
        return plots
# Fit ring when seeded by a pulse laser from which we know the shape
class RingPulsed():
    """Fit a measured ring transmission when seeded by a known pulsed pump.

    The model is an all-pass ring transmission (parameters r, tau, neff)
    multiplied by the supplied pump spectrum `pumpfunc` and a rational
    distortion factor.  computeQ() derives the loaded quality factor from the
    fitted parameters.
    """
    def __init__(self,R,Lc,fname,pumpfunc):
        self.R = R # radius (um)
        self.Lc = Lc # coupling length (um)
        self.L = 2*(pi*R + Lc) # Total length
        #FSR = 1.5556-1.5477 # um
        self.neff0 = 4.14330 #4.143277 # Starting effective group index 4.1434
        self.pumpfunc = pumpfunc
        self.rootname = "."
        self.__load__(fname)
        self.__fit__()
    def __load__(self,fname):
        """Read the measured transmission CSV (same layout as CustomPump.__load__).

        The pump reference wavelength self.lbda_p is the transmission MINIMUM
        (a resonance dip), and neff0 is snapped to the nearest integer order.
        """
        path = os.path.join(self.rootname,fname)
        f = open(path)
        line = f.readline()
        lbdas = []
        amplitudes = []
        for i in arange(30):
            line = f.readline()
        while(len(line))>0:
            splitted = line.split(",")
            lbda,amplitude = splitted[0:2]
            line = f.readline()
            # NOTE(review): Py2 string/int comparison — effectively always True.
            if lbda>0:
                lbdas.append(float(lbda)/1000) # nm -> um
                amplitudes.append(float(amplitude))
        self.lbdas = array(lbdas)
        self.amplitudes = array(amplitudes)
        self.amplitudes = self.amplitudes/self.amplitudes.sum() # Normalise
        self.lbda_p = self.lbdas[self.amplitudes.argmin()]
        # adjust the neff0 guess
        m = int(self.neff0*self.L/self.lbda_p)
        self.neff0 = m*self.lbda_p/self.L
    def __fit__(self):
        """Least-squares fit of (A, r, tau, neff) plus 6 distortion coefficients."""
        a = b = c = d=e=f=0.000000000000001
        p0 = [max(self.amplitudes),0.9,0.9,self.neff0,a,b,c,d,e,f]
        plsq = leastsq(self.__residuals__, p0, args=(self.amplitudes, self.lbdas))
        self.p = plsq[0]
        print self.p
    # p : parameters
    # lbdas : wavelengths
    def __func__(self,p,lbdas):
        """Full model: ring transmission x pump spectrum x rational distortion."""
        A,r,tau,neff,a,b,c,d,e,f = p
        dlbdas = lbdas-self.lbda_p
        #neff = self.neff0
        L = self.L
        phi = 2*pi*L*neff/lbdas
        r2 = r**2
        tau2 = tau**2
        K = 2*r*tau*cos(phi)
        res = A*(r2+tau2-K)/(1+r2*tau2-K) * self.pumpfunc(lbdas) * (a+b*dlbdas+c*dlbdas**3)/(d+e*dlbdas+f*dlbdas**3)
        return res
    def ringResponse(self,p,lbdas):
        """Ring part of the model only (pump replaced by its peak value), for plotting."""
        A,r,tau,neff,a,b,c,d,e,f = p
        dlbdas = lbdas-self.lbda_p
        #neff = self.neff0
        L = self.L
        phi = 2*pi*L*neff/lbdas
        r2 = r**2
        tau2 = tau**2
        K = 2*r*tau*cos(phi)
        res = A*(r2+tau2-K)/(1+r2*tau2-K) * (a+b*dlbdas+c*dlbdas**3)/(d+e*dlbdas+f*dlbdas**3)*max(self.pumpfunc(lbdas))
        return res
    def __residuals__(self,p,y, x):
        err = y-self.__func__(p,x)
        return err
    def getIout(self,lbda):
        """Evaluate the fitted output intensity at lbda (um)."""
        return self.__func__(self.p,lbda)
    def plotres(self):
        """Return plot tuples: measured points, full fit, and bare ring response."""
        lbda1,lbda2 = min(self.lbdas),max(self.lbdas)
        x = arange(lbda1,lbda2,0.000001)
        plots = [(self.lbdas,self.amplitudes,"bo"),(x,self.getIout(x),"k-"),(x,self.ringResponse(self.p,x),"b--")] # (self.lbdas,self.Iouts,"ro"),
        #plot(plots)
        self.lbda_p = self.lbdas[self.amplitudes.argmin()]
        print self.lbda_p
        return plots
    # December 15, 2004 / Vol. 29, No. 24 / OPTICS LETTERS p 2861
    # Ultrahigh-quality-factor silicon-on-insulator microring resonator
    def computeQ(self):
        """Loaded Q factor from the fitted (r, tau, neff), per the Optics Letters reference above."""
        A,r,tau,neff=self.p[0:4]
        return (2*pi*neff/self.lbda_p)*self.L/(-2*log(r*tau))
def main():
    """Run the ring-resonator joint-spectrum simulation for one parameter set.

    The nested loops are kept so parameter sweeps (pulse length T, coupling
    r, round-trip tau, resonance number) can be re-enabled by editing the
    lists; each iteration computes and saves one joint spectrum under
    data/Ring_pumpscan plus a PNG colour map.
    """
    # Load the pulse
    #pump = CustomPump("G2 Straight Transmission.csv")
    #pump.plotres()
    #pumpfunc = pump.getPulse
    wg = Waveguide(450,220)
    T = 100.*10**(-12)
    #for T in [100.,50.,25.,10.,5.]:
    N = 100 # 200# N = 50 Provides accurate number for r = 0.98 rings with 100ps pulses
    #for T in [1000.,500.,200.,100.,50.,25.,10.]:
    r = 0.93
    tau = 1.-0.0198
    radius = 10.
    coupling_length = 5.
    lbda0= 1.55
    res_number = 1 # resonance number (pump resonance is 0).
    for res_number in [1]: #arange(0,1):# [1,2,3,4]:
        for T in [5.0] : # ,0.75,1.,1.5,2.0,0.5,1.,5.,,50.,100.,500.,1000.,2000. #arange(10.,1000,10.): # [60.,70.,80.,90.,110.,120.,130.,140.,150.,160.,170.,180.,190.,210.,220.,230.,240.,250.,260.,270.,280.,290.]: #arange(10.,100.,10.): # arange(5,55,5): #[25.,50.,100.,200.,500.]: [1.0,2.0,5.0,10.0,20.0,50.0,100.0,200.0,500.0,1000.0,]
            for r in [0.9]: # [0.95,0.96,0.97,0.98,0.99]: # 0.85,0.86,0.87,0.88,0.89,0.90,0.91,0.92,0.93,0.94,0.95,0.96
                for tau in [0.997]: # 0.76,0.96,0.98
                    #for r2 in [0.9998,0.9997,0.9996,0.9995,0.9994]: #[1.0,0.9999,0.999,0.99]:
                    mySim =FWM_RingSimu(wg,length = 2*(radius*pi+coupling_length),pulseduration = T*10**(-12),N = N,r = r,tau = tau,pumppower = 3.*10**-3,pumpwl = lbda0) # 500
                    #mySim.pumpenvelop = pumpfunc
                    mySim.setRangeScanResonance(+res_number)
                    mySim.plotcavityresponse()
                    mySim.updatepumprange()
                    mySim.computeJS()
                    fname = mySim.save("Ring_pumpscan")
                    mySim.plotBiphoton(fname[:-3]+"png")
# -----------------------------------------------------------------------------#
# MISC FUNCTIONS II: Specific FWM applications
# -----------------------------------------------------------------------------#
def plot1Dgain():
    """Plot the CW FWM gain spectrum for the configured waveguides and dump
    the last-computed spectrum to fwm_bandwidth_cw.csv.

    Fixes over the previous version: the CSV header is terminated with a
    newline (it previously fused with the first data row), and the output
    file is managed with a `with` block so it is closed even on error.
    """
    wgs = [
        #Waveguide(450,220),
        Waveguide(470,220)
        #Waveguide(500,220),
        #Waveguide(550,220),
        ]
    plots = []
    colors = ["r-","b-","g-"]
    lbda_s = arange(1.40,1.70,0.0001)
    for i, wg in enumerate(wgs):
        simu = FWM_Simu(wg = wg,length = 0.0058,pumpwl = 1.5479)
        res = simu.toplotCWGain(lbda_s)
        plots.append((lbda_s,res,colors[i]))
    # NOTE(review): only the spectrum of the LAST waveguide in `wgs` is
    # written to the CSV (matches previous behaviour).
    with open("fwm_bandwidth_cw.csv","w") as fw:
        fw.write("Wavelength (um), FWM gain (a.u)\n")  # header newline was missing before
        for i in arange(lbda_s.size):
            fw.write("%.5f,%.5f\n" % (lbda_s[i],res[i]))
    plot(plots)
def plotnbpairsScaling():
    """Plot the integrated CW FWM gain as a function of waveguide length."""
    lbda_min = 1.542
    lbda_max = 1.544
    wg = Waveguide(550,220)
    lbda_s = arange(1.5,1.6,0.0001)
    # Mask for a narrow integration band (currently unused; the full spectrum
    # is summed below, as in the commented-out alternative).
    tointegrate = (lbda_s>lbda_min) * (lbda_s<lbda_max)
    lengths = arange(0,0.01,0.0001)
    #lengths = arange(0,100.,0.1)
    totals = []
    for wg_length in lengths:
        sim = FWM_Simu(wg = wg,length = wg_length)
        gainperbandwidth = (wg_length/2)**2*sim.toplotCWGain(lbda_s = lbda_s)
        #totals.append(gainperbandwidth[tointegrate].sum())
        totals.append(gainperbandwidth.sum())
    plot([(lengths,totals,"r-")])
# Script entry point: runs the ring simulation; the commented lines are
# alternative experiments (pump fitting, scaling plots) toggled by hand.
if __name__ == "__main__":
    #pump = CustomPump("G2 Straight Transmission.csv")
    #pump.plotres()
    #ring = RingPulsed(20,5,"G2 Ring Transmission.csv",pump.getPulse)
    #plot(ring.plotres()+pump.plotres())
    main()
    #plotnbpairsScaling()
    #plot1Dgain()
|
[
"[email protected]"
] | |
237921523213c4beab3bebd982ca877c4dbba96d
|
2884f45d89a5cd378ac44d236fc7c11ff01f2d8b
|
/main.py
|
2ff64ce7e8ed859549778e58b852b52894611199
|
[] |
no_license
|
UCSD-CSE-SPIS-2021/practice-Jodi-R
|
32ea94de9d3c845403c183fc4d153a8e93f2572a
|
849fe54f9cbb7ceb11c570a87cd87e133e878256
|
refs/heads/master
| 2023-07-04T23:42:38.973082 | 2021-08-02T23:08:50 | 2021-08-02T23:08:50 | 392,114,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 50 |
py
|
#Jodi Roe for CSE SPIS 2021
# Starter exercise: print a greeting to standard output.
print('Hello, World!')
|
[
"[email protected]"
] | |
a17a3398d51c31dd07c12aac53b64feea4bb01a3
|
871e1e8e8e4a13d930ad25b24bdd9cd9fefa3c49
|
/accounts/views.py
|
f9e9c6428fed13fdd8754a44b995edc5383f24d9
|
[] |
no_license
|
namitgpta/Django-projects
|
6c9d7be5f8de358f7b7130716833ce0a31e6a54e
|
642438a8700809547b742e6427677fb410c67317
|
refs/heads/master
| 2023-04-16T15:57:35.024649 | 2021-05-02T17:40:38 | 2021-05-02T17:40:38 | 363,646,729 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,916 |
py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User, auth
# Create your views here.
def login(request):
    """Authenticate a user from posted credentials.

    GET renders the login form.  POST validates the credentials and either
    starts a session (redirect to the home page) or flashes an error
    message and returns to the login form.
    """
    if request.method != 'POST':
        return render(request, 'login.html')
    user = auth.authenticate(
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        messages.info(request, 'invalid credentials')
        return redirect('login')
    auth.login(request, user)
    return redirect('/')
def register(request):
    """Create a new user account.

    GET renders the registration form.  POST validates that the two
    passwords match and that neither the username nor the e-mail address
    is already taken; on success the user is created and redirected to the
    login page, otherwise a message is flashed and the form is shown again.
    """
    if request.method != 'POST':
        return render(request, 'register.html')
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    username = request.POST['username']
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    email = request.POST['email']
    if password1 != password2:
        messages.info(request, 'password not matching....')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.info(request, 'Username already taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.info(request, 'Email already taken')
        return redirect('register')
    user = User.objects.create_user(username=username, password=password1,
                                    email=email, first_name=first_name,
                                    last_name=last_name)
    user.save()
    print('User Created')
    return redirect('login')
def logout(request):
    """End the current session and return to the home page."""
    auth.logout(request)
    return redirect('/')
|
[
"[email protected]"
] | |
74889d0b7d3a2f0c76254b7c7f76fed2298d71da
|
39d9bc0bdf069700aa44c3cc8cb4127f5770431d
|
/2nd Place/geopose/models/heads.py
|
3ee102a6931f81050fd427f0dffeea2d68386d05
|
[
"MIT"
] |
permissive
|
personx000/overhead-geopose-challenge
|
5189d7dd6e385b4e9c0b0b238168f66dd0a60033
|
f32b5763ae01627519c9080224b8be97ebe7f3e2
|
refs/heads/main
| 2023-08-30T08:01:04.820481 | 2021-11-05T13:45:52 | 2021-11-05T13:45:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,143 |
py
|
from typing import List
import torch
import torch.nn.functional as F
from pytorch_toolbelt.modules import (
ACT_RELU,
instantiate_activation_block,
GlobalAvgPool2d,
FPNFuseSum,
ResidualDeconvolutionUpsample2d,
conv1x1,
)
from torch import nn
from ..dataset import (
OUTPUT_VFLOW_DIRECTION,
tensor_vector2angle,
OUTPUT_MAGNITUDE_MASK,
OUTPUT_VFLOW_ANGLE,
OUTPUT_AGL_MASK,
OUTPUT_VFLOW_SCALE,
)
__all__ = ["SimpleAGLHead", "AGLHead"]
class RegressionHeadWithGSD(nn.Module):
    """Dense regression head with learnable 2x upsampling that injects the
    per-image GSD (ground sample distance) scalar.

    The GSD is broadcast to a constant feature plane and concatenated after
    the first convolution, so the prediction can be conditioned on image
    resolution.
    """
    def __init__(self, in_channels, embedding_size, out_channels, activation=ACT_RELU):
        super().__init__()
        # Learnable 2x upsampling: 1x1 conv to a channel count divisible by
        # 4, then a residual deconvolution (output has in_channels // 4).
        self.up = nn.Sequential(
            conv1x1(in_channels, (in_channels // 4) * 4), ResidualDeconvolutionUpsample2d((in_channels // 4) * 4)
        )
        self.conv1 = nn.Conv2d(in_channels // 4, embedding_size, kernel_size=3, padding=1)
        self.act1 = instantiate_activation_block(activation, inplace=True)
        # +1 input channel for the broadcast GSD plane.
        self.conv2 = nn.Conv2d(embedding_size + 1, embedding_size, kernel_size=3, padding=1)
        self.act2 = instantiate_activation_block(activation, inplace=True)
        self.conv3 = nn.Conv2d(embedding_size, out_channels, kernel_size=3, padding=1)
        # Final activation is always ReLU so the regressed value is >= 0.
        self.act3 = instantiate_activation_block(ACT_RELU, inplace=True)
        for layer in [self.conv1, self.conv2, self.conv3]:
            torch.nn.init.normal_(layer.weight, mean=0.0, std=0.1)
            # torch.nn.init.zeros_(layer.bias)
            torch.nn.init.ones_(layer.bias)
    def forward(self, x, gsd):
        x = self.up(x)
        x = self.act1(self.conv1(x))
        # Broadcast the per-sample GSD scalar to a (B, 1, H, W) plane.
        gsd = gsd.reshape(gsd.size(0), 1, 1, 1).expand((-1, -1, x.size(2), x.size(3)))
        x = torch.cat([x, gsd], dim=1)
        x = self.act2(self.conv2(x))
        x = self.act3(self.conv3(x))
        return x
class RegressionHead(nn.Module):
    """Dense regression head with learnable 2x upsampling (no GSD input).

    Same structure as RegressionHeadWithGSD minus the GSD plane.
    """
    def __init__(self, in_channels, embedding_size, out_channels, activation=ACT_RELU):
        super().__init__()
        # self.up = nn.Sequential(conv1x1(in_channels, in_channels * 4), nn.PixelShuffle(upscale_factor=2))
        self.up = nn.Sequential(
            conv1x1(in_channels, (in_channels // 4) * 4), ResidualDeconvolutionUpsample2d((in_channels // 4) * 4)
        )
        self.conv1 = nn.Conv2d(in_channels // 4, embedding_size, kernel_size=3, padding=1)
        self.act1 = instantiate_activation_block(activation, inplace=True)
        self.conv2 = nn.Conv2d(embedding_size, embedding_size, kernel_size=3, padding=1)
        self.act2 = instantiate_activation_block(activation, inplace=True)
        self.conv3 = nn.Conv2d(embedding_size, out_channels, kernel_size=3, padding=1)
        # Final activation is always ReLU so the regressed value is >= 0.
        self.act3 = instantiate_activation_block(ACT_RELU, inplace=True)
        for layer in [self.conv1, self.conv2, self.conv3]:
            torch.nn.init.normal_(layer.weight, mean=0.0, std=0.1)
            # torch.nn.init.zeros_(layer.bias)
            torch.nn.init.ones_(layer.bias)
    def forward(self, x):
        x = self.up(x)
        x = self.act1(self.conv1(x))
        x = self.act2(self.conv2(x))
        x = self.act3(self.conv3(x))
        return x
class SimpleRegressionHead(nn.Module):
    """Lightweight conv/dropout/activation/conv regression head.

    No upsampling and no final activation -- the output is unbounded,
    unlike the other regression heads in this module.
    """
    def __init__(self, in_channels, embedding_size, out_channels, activation=ACT_RELU, dropout_rate=0.0):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, embedding_size, kernel_size=3, padding=1)
        self.drop = nn.Dropout2d(dropout_rate, inplace=False)
        self.act1 = instantiate_activation_block(activation, inplace=True)
        self.conv2 = nn.Conv2d(embedding_size, out_channels, kernel_size=3, padding=1)
    def forward(self, x):
        x = self.conv1(x)
        x = self.drop(x)
        x = self.act1(x)
        x = self.conv2(x)
        return x
class OnlyAGLHead(nn.Module):
    """Head that predicts only the above-ground-level (AGL) height map."""
    def __init__(
        self, encoder_channels: List[int], decoder_channels: List[int], embedding_size=64, dropout_rate=0.0, activation=ACT_RELU
    ):
        super().__init__()
        in_channels = decoder_channels[0]
        self.dropout = nn.Dropout2d(dropout_rate, inplace=False)
        # Height regression is conditioned on the ground sample distance.
        self.height = RegressionHeadWithGSD(in_channels, embedding_size, 1, activation=activation)
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd, orientation_outputs):
        # Take the finest feature map from the decoder
        x = decoder_feature_maps[0]
        x = self.dropout(x)
        height = self.height(x, gsd)
        # Upsample the prediction to the input image resolution.
        return {
            OUTPUT_AGL_MASK: F.interpolate(height, size=rgb.size()[2:], mode="bilinear", align_corners=True),
        }
class SimpleAGLHead(nn.Module):
    """Head predicting the AGL height map, the vflow magnitude map and the
    scalar vflow scale (least-squares fit of magnitude against height).
    """
    def __init__(
        self, encoder_channels: List[int], decoder_channels: List[int], embedding_size=64, dropout_rate=0.0, activation=ACT_RELU
    ):
        super().__init__()
        in_channels = decoder_channels[0]
        self.dropout = nn.Dropout2d(dropout_rate, inplace=False)
        # Height is conditioned on the GSD; magnitude is not.
        self.height = RegressionHeadWithGSD(in_channels, embedding_size, 1, activation=activation)
        self.magnitude = RegressionHead(in_channels, embedding_size, 1, activation=activation)
        self.scale = ScaleHead()
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd, orientation_outputs):
        # Take the finest feature map from the decoder
        x = decoder_feature_maps[0]
        x = self.dropout(x)
        height = self.height(x, gsd)
        mag = self.magnitude(x)
        scale = self.scale(mag, height)
        # Dense outputs are upsampled to the input image resolution.
        return {
            OUTPUT_AGL_MASK: F.interpolate(height, size=rgb.size()[2:], mode="bilinear", align_corners=True),
            OUTPUT_MAGNITUDE_MASK: F.interpolate(mag, size=rgb.size()[2:], mode="bilinear", align_corners=True),
            OUTPUT_VFLOW_SCALE: scale,
        }
class SquareRoot(nn.Module):
    """Element-wise square root, usable inside an ``nn.Sequential``."""
    def forward(self, x):
        return torch.sqrt(x)
class Exponent(nn.Module):
    """Element-wise exponential, usable inside an ``nn.Sequential``."""
    def forward(self, x):
        return torch.exp(x)
def instantiate_transformation(name):
    """Return the output-transformation module selected by *name*.

    Known names: "sqrt", "exp", "identity".  An unknown name raises
    ``KeyError(name)``.
    """
    if name == "identity":
        return nn.Identity()
    if name == "sqrt":
        return SquareRoot()
    if name == "exp":
        return Exponent()
    raise KeyError(name)
class AGLHead(nn.Module):
    """AGL head with a configurable stack of learnable 2x upsampling blocks.

    Predicts the AGL height map (conditioned on the GSD plane), the vflow
    magnitude map and the least-squares vflow scale.  Unlike SimpleAGLHead,
    outputs are produced at the upsampled feature resolution directly (no
    final interpolation to the RGB size in forward()).
    """
    def __init__(
        self,
        encoder_channels: List[int],
        decoder_channels: List[int],
        embedding_size=64,
        dropout_rate=0.0,
        activation=ACT_RELU,
        num_upsample_blocks=1,
        agl_activation=ACT_RELU,
        agl_transformation="identity",
    ):
        super().__init__()
        in_channels = decoder_channels[0]
        # NOTE(review): self.dropout is created but never applied in
        # forward() -- confirm whether it should wrap the decoder features.
        self.dropout = nn.Dropout2d(dropout_rate, inplace=False)
        self.scale = ScaleHead()
        upsample_blocks = []
        for i in range(num_upsample_blocks):
            # Each block: 1x1 conv to an even channel count, 2x residual
            # deconvolution (quarters the channels), then conv-BN-activation.
            input_channels = (in_channels // 2) * 2
            upsampled_channels = input_channels // 4
            upsample_blocks += [
                conv1x1(in_channels, input_channels),
                ResidualDeconvolutionUpsample2d(input_channels, scale_factor=2),
                nn.Conv2d(upsampled_channels, upsampled_channels, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(upsampled_channels),
                instantiate_activation_block(activation, inplace=True),
            ]
            in_channels = upsampled_channels
        # NOTE(review): num_upsample_blocks == 0 would leave
        # upsampled_channels undefined below; >= 1 is assumed.
        self.upsample = nn.Sequential(*upsample_blocks)
        self.height = nn.Sequential(
            # +1 input channel for the broadcast GSD plane.
            nn.Conv2d(upsampled_channels + 1, upsampled_channels, kernel_size=3, padding=1),
            instantiate_activation_block(activation, inplace=True),
            nn.Conv2d(upsampled_channels, 1, kernel_size=3, padding=1),
            instantiate_activation_block(agl_activation, inplace=True),
            instantiate_transformation(agl_transformation),
        )
        self.magnitude = nn.Sequential(
            nn.Conv2d(upsampled_channels, upsampled_channels, kernel_size=3, padding=1),
            instantiate_activation_block(activation, inplace=True),
            nn.Conv2d(upsampled_channels, 1, kernel_size=3, padding=1),
            instantiate_activation_block(agl_activation, inplace=True),
            instantiate_transformation(agl_transformation),
        )
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd, orientation_outputs):
        # Take the finest feature map from the decoder
        x = decoder_feature_maps[0]
        x = self.upsample(x)
        # Broadcast the per-sample GSD scalar to a constant plane for the
        # height branch.
        gsd = gsd.reshape(gsd.size(0), 1, 1, 1).expand((-1, -1, x.size(2), x.size(3)))
        height = self.height(torch.cat([x, gsd], dim=1))
        mag = self.magnitude(x)
        scale = self.scale(mag, height)
        return {
            OUTPUT_AGL_MASK: height,
            OUTPUT_MAGNITUDE_MASK: mag,
            OUTPUT_VFLOW_SCALE: scale,
        }
class HyperColumnAGLHead(nn.Module):
    """AGL head that regresses height and magnitude at *every* decoder level
    and fuses the per-level predictions by summation (after resizing).
    """
    def __init__(
        self, encoder_channels: List[int], decoder_channels: List[int], embedding_size=64, dropout_rate=0.0, activation=ACT_RELU
    ):
        super().__init__()
        # One height regressor per decoder level; +1 input channel for the
        # broadcast GSD plane.
        self.height = nn.ModuleList(
            [
                SimpleRegressionHead(
                    in_channels=in_channels + 1,
                    out_channels=1,
                    dropout_rate=dropout_rate,
                    embedding_size=embedding_size,
                    activation=activation,
                )
                for in_channels in decoder_channels
            ]
        )
        self.magnitude = nn.ModuleList(
            [
                SimpleRegressionHead(
                    in_channels=in_channels,
                    out_channels=1,
                    dropout_rate=dropout_rate,
                    embedding_size=embedding_size,
                    activation=activation,
                )
                for in_channels in decoder_channels
            ]
        )
        self.fuse = FPNFuseSum(mode="bilinear", align_corners=True)
        self.scale = ScaleHead()
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd, orientation_outputs):
        # Concatenate a broadcast GSD plane to every level before regression.
        heights = [
            height_layer(
                torch.cat(
                    [feature_map, gsd.reshape(gsd.size(0), 1, 1, 1).expand((-1, -1, feature_map.size(2), feature_map.size(3)))],
                    dim=1,
                )
            )
            for (feature_map, height_layer) in zip(decoder_feature_maps, self.height)
        ]
        magnitude = [mag_layer(feature_map) for (feature_map, mag_layer) in zip(decoder_feature_maps, self.magnitude)]
        # Sum-fuse the pyramid, then clamp to non-negative values (the
        # SimpleRegressionHead outputs are unbounded).
        height = F.relu(self.fuse(heights), inplace=True)
        mag = F.relu(self.fuse(magnitude), inplace=True)
        scale = self.scale(mag, height)
        return {
            OUTPUT_AGL_MASK: F.interpolate(height, size=rgb.size()[2:], mode="bilinear", align_corners=True),
            OUTPUT_MAGNITUDE_MASK: F.interpolate(mag, size=rgb.size()[2:], mode="bilinear", align_corners=True),
            OUTPUT_VFLOW_SCALE: scale,
        }
class BasicOrientationHead(nn.Module):
    """Predicts the global vflow direction (2-vector) and the derived angle
    from the coarsest encoder feature map via global average pooling.
    """
    def __init__(self, encoder_channels: List[int], decoder_channels: List[int], dropout_rate=0.0, activation=ACT_RELU):
        super().__init__()
        in_channels = encoder_channels[-1]
        self.pool = GlobalAvgPool2d(flatten=True)
        self.orientation = nn.Sequential(
            nn.Dropout(p=dropout_rate, inplace=True),
            nn.Linear(in_channels, in_channels),
            instantiate_activation_block(activation, inplace=True),
            nn.Linear(in_channels, 2),
        )
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd):
        features = self.pool(encoder_feature_maps[-1])
        direction = self.orientation(features)
        # Convert the 2-D direction vector to its angle representation.
        angle = tensor_vector2angle(direction)
        return {OUTPUT_VFLOW_DIRECTION: direction, OUTPUT_VFLOW_ANGLE: angle}
class BasicOrientationScaleHead(nn.Module):
    """Convolutional head predicting the global vflow direction, angle and
    scale from the coarsest encoder feature map.

    Spatial predictions are pooled to a single value per sample after
    per-position L2 normalization (direction) or ReLU (scale).  The final
    3x3 convolutions use no padding, so the spatial maps shrink by 2 px.
    """
    def __init__(self, encoder_channels: List[int], decoder_channels: List[int], dropout_rate=0.0, activation=ACT_RELU):
        super().__init__()
        in_channels = encoder_channels[-1]
        self.pool = GlobalAvgPool2d(flatten=True)
        self.orientation = nn.Sequential(
            nn.Dropout2d(p=dropout_rate, inplace=False),
            nn.Conv2d(in_channels, in_channels, kernel_size=1),
            instantiate_activation_block(activation, inplace=True),
            nn.Conv2d(in_channels, 2, kernel_size=3),
        )
        self.scale = nn.Sequential(
            nn.Dropout(p=dropout_rate, inplace=False),
            nn.Conv2d(in_channels, in_channels, kernel_size=1),
            instantiate_activation_block(activation, inplace=True),
            nn.Conv2d(in_channels, 1, kernel_size=3),
        )
        # Kaiming init tuned to the configured activation; small positive bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, a=0.01, mode="fan_out", nonlinearity="relu" if activation == ACT_RELU else "leaky_relu"
                )
                torch.nn.init.constant_(m.bias, 0.0001)
    def forward(self, rgb, encoder_feature_maps, decoder_feature_maps, gsd):
        features = encoder_feature_maps[-1]
        # Normalize direction vectors per position, then average-pool.
        direction = self.pool(F.normalize(self.orientation(features)))
        # Scale is constrained non-negative before pooling.
        scale = self.pool(F.relu(self.scale(features)))
        angle = tensor_vector2angle(direction)
        return {OUTPUT_VFLOW_DIRECTION: direction, OUTPUT_VFLOW_ANGLE: angle, OUTPUT_VFLOW_SCALE: scale}
class ScaleHead(nn.Module):
    """Per-sample least-squares estimate of the vflow scale.

    For each sample the returned scale is the closed-form minimizer of
    ``|| s * height - magnitude ||^2``, i.e.
    ``s = <height, magnitude> / (<height, height> + 0.01)``; the small
    constant keeps the division well-defined for all-zero height maps.
    Output shape is ``(batch, 1)``.
    """
    def __init__(self):
        super().__init__()
    @torch.cuda.amp.autocast(False)
    def forward(self, mag, height):
        # Work in float32 regardless of autocast, flattened per sample.
        m = torch.flatten(mag, start_dim=1).float()
        h = torch.flatten(height, start_dim=1).float()
        # <h, h> + eps for every sample in the batch.
        denom = (h * h).sum(dim=1) + 0.01
        # Row pseudo-inverse of h; <pinv, m> == <h, m> / (<h, h> + eps).
        pinv = h / denom.view(h.size(0), 1)
        return (pinv * m).sum(dim=1, keepdim=True)
|
[
"[email protected]"
] | |
c5ec95c66cfe71d29bb1d882b8fb66830103b339
|
8fb32802501815a06728c438d1485ded121e2fc8
|
/py/blender/TimerScript.py
|
8a0698dbdb5e731dd0c377ef28a2ee60ce3c8b87
|
[
"MIT"
] |
permissive
|
grimlock-/randomscripts
|
c5121d05a2dc80cffda410d9f3eb9245763c763d
|
b3ee95e0abdaf1320318d87b37f15b5ccee220f4
|
refs/heads/master
| 2022-02-28T10:47:51.617810 | 2019-08-07T05:22:02 | 2019-08-07T05:22:02 | 36,908,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,598 |
py
|
#This script is used to create a timer when making a video in the Blender VSE.
#When executed, the script grabs every marker on the timeline and groups them
#in pairs by name. Markers should be named in the pattern <sectionname>.Start
#and <sectionname>.End and there can be no more than two markers per section.
#Every section should have an associated text strip with the naming pattern
#<sectionname>.Timer
#WARNING: Each *.Start marker should be created before it's associated *.End
#marker. Otherwise they will appear to the script in reverse order and the
#timer for that section will not work.
import bpy
scene = bpy.data.scenes['Scene']
# Collected sections: (section prefix incl. trailing dot, start frame, end frame).
marks = []
st = -1   # start frame of the section currently being collected
nm = ''
for marker in scene.timeline_markers:
    i = marker.name.find('Start')
    if i != -1:
        # "<section>.Start" marker: remember where the section begins.
        st = marker.frame
        nm = marker.name
    else:
        i = marker.name.find('End')
        if i != -1:
            # "<section>.End" closes the pair; keep the "<section>." prefix.
            nm = marker.name[:i]
            marks.append((nm, st, marker.frame))
            st = 0
            nm = ''
        else:
            print('Unknown label: ' + marker.name)
for i in marks:
    print(i)
def frame_step(scene):
    # Per-frame handler: find the section containing the current frame and
    # write the elapsed seconds into its "<section>.Timer" text strip.
    for item in marks:
        if scene.frame_current >= item[1] and scene.frame_current < item[2]:
            obj = scene.sequence_editor.sequences_all[item[0] + 'Timer']
            fps = scene.render.fps / scene.render.fps_base # actual framerate
            cur_frame = scene.frame_current - item[1]
            obj.text = '{0:.3f}'.format(cur_frame/fps)
            break
# Update the timers both during viewport playback and while rendering.
bpy.app.handlers.frame_change_pre.append(frame_step)
bpy.app.handlers.render_pre.append(frame_step)
|
[
"[email protected]"
] | |
8a7df27444a69a45468f9ca79c391874c7e44342
|
f6fd01eaa74ace15ffc085065a51681565bf2740
|
/api_test/api/user.py
|
63fa52bd6a00572a8fbe028ee2e82684c1645a62
|
[] |
no_license
|
emmashen6786/api_test
|
d05c89a718b7441bb7f4d099792f873afa782056
|
51486a4c05548b410e360777c2e93d1c954bfa06
|
refs/heads/master
| 2020-05-15T03:10:40.762906 | 2019-04-19T02:25:08 | 2019-04-19T02:25:08 | 182,062,758 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,316 |
py
|
from rest_framework import parsers, renderers
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.views import APIView
from api_test.serializers import TokenSerializer
from api_test.common.api_response import JsonResponse
class ObtainAuthToken(APIView):
    """Login endpoint: exchanges username/password for an auth token."""
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    serializer_class = AuthTokenSerializer
    def post(self, request, *args, **kwargs):
        """
        User login.

        Validates the posted credentials and returns the user's existing
        token plus a static user-photo path.  NOTE(review): the token must
        already exist -- Token.objects.get() raises if it doesn't (see the
        commented-out get_or_create below).
        :param request:
        :param args:
        :param kwargs:
        :return:
        """
        serializer = self.serializer_class(data=request.data,
                                           context={"request": request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data["user"]
        # token, created = Token.objects.get_or_create(user=user)
        data = TokenSerializer(Token.objects.get(user=user)).data
        data["userphoto"] = '/file/userphoto.jpg'
        print("tongguo le ")  # NOTE(review): leftover debug print -- consider removing
        return JsonResponse(data=data, code="999999", msg="成功")
obtain_auth_token = ObtainAuthToken.as_view()
|
[
"[email protected]"
] | |
ab31f35f9772ff24371b4e4edd9c0ee518c224a7
|
3820d5b11619a2205426b0203f937495eea45611
|
/learnOne.py
|
68f21de50199a61a76112210598d18afb8fdde57
|
[] |
no_license
|
rwq9866/learnPython
|
1f490418ec51de2885a60b10f5d160d541712fb3
|
fbc9fc05bf47dccedfbe4b78ee130e81706f4ed2
|
refs/heads/master
| 2020-06-15T05:23:43.850012 | 2019-07-04T09:44:09 | 2019-07-04T09:44:09 | 195,214,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,531 |
py
|
# -*- coding: utf-8 -*-
# Beginner Python notes: printing, input, numbers, strings, lists, tuples,
# sets and dicts (comments translated from Chinese).
# --- Output ---
print("hello world!!!") # prints with a trailing newline by default
print("hello python!!!", end="这里面想写啥写啥 空格什么的 怎么喜欢怎么来 ") # end= replaces the newline, so no line break here
# --- Input ---
demo = input("请输入您的姓名,按enter键结束: ")
print(demo, ",欢迎来到Python的世界!")
# Numeric types (immutable):
# 1. int, e.g. 1
# 2. float, e.g. 1.1 or 1.6e9 (1.6 * 10**9)
# 3. bool: True (1), False (0)
# 4. complex, e.g. 1 + 1j
# Division differs from Java; ** is exponentiation, e.g. 2 ** 3 == 8
demo = 10 / 3 # true division, always yields a float
print(demo) # 3.3333333333333335
demo = 10 // 3 # floor division
print(demo) # 3
# Strings (immutable)
# 1. Concatenate with ',' (adds a space), '+' or formatting (see below).
# Note: with '+', numbers must first be converted, e.g. str(2) + "22"
demo = 'demo'
print(ord('p'), ord('y'), ord('t'), ord('h'), ord('o'), ord('n')) # 112 121 116 104 111 110 char -> code point
print(chr(112), chr(121), chr(116), chr(104), chr(111), chr(110)) # p y t h o n code point -> char
print(len(demo)) # 4, string length
print(str(True) + "555") # True555, concatenation
print(1, "de") # 1 de
print(demo * 2) # demodemo
print(demo[0:2]) # de, slicing
print(demo[1]) # e, index access
print(demo[1:]) # emo
print(r'demo\nceshi') # demo\nceshi -- the r'' prefix keeps escape sequences literal
# 2. String formatting
# %s (string) %d (int) %f (float) %x (hex int); %s alone usually suffices
print('我今天(%s)心情还好,但是这个天气(%s)却不怎么滴!' % ('2019-7-4', '阴天'))
print('%03d' % 1) # 001 -- zero-pad to a minimum width of 3
print('%.1f' % 6.84888888) # 6.8 -- one decimal place (rounded)
print('我今天({0})心情还好,但是这个天气({1})却不怎么滴!'.format('2019-7-4', '阴天')) # str.format alternative (more verbose)
# Lists
# 1. Slicing works like strings; '+' concatenates, '*' repeats
# 2. Ordered; elements mutable
# 3. [] is an empty list
demoList = [6, 6.6, True, 'demo', [6, 6.6, True]]
print(demoList[1:3]) # [6.6, True]
demoList.append('ceshi')
print(demoList) # append at the end: [6, 6.6, True, 'demo', [6, 6.6, True], 'ceshi']
demoList.insert(1, "demo")
print(demoList) # insert at index 1: [6, 'demo', 6.6, True, 'demo', [6, 6.6, True], 'ceshi']
demo = demoList.pop(2) # removes and returns the element; with no index removes the last one
print(demo) # 6.6
demoList[0] = 8
print(demoList) # item assignment
# Tuples -- mostly like lists, but immutable
# 1. Slicing works like strings; '+' concatenates, '*' repeats
# 2. Ordered; elements immutable
# 3. () is an empty tuple; a one-element tuple needs a comma: (1,)
demoTuple = (6, 6.6, True, 'demo', [6, 6.6, True])
# Sets
# 1. Create with literal braces demoSet = {...} or set(list)
# 2. An empty set must be written set()
# 3. Unordered, no duplicates
# 4. Behaves much like dict keys
# 5. Hence sets support union and intersection
demoSet = {6, 6.66, True, 'demo'}
print(demoSet) # {'demo', True, 6, 6.66}
demoSet = set([6, 6.666, True, 'demo'])
print(demoSet) # {'demo', True, 6, 6.666}
# demoSet = set('222') # rarely useful in this form
demoSet.add("ceshi") # add an element
print(demoSet) # {'demo', True, 6, 6.666, 'ceshi'}
demoSet.remove(6)
print(demoSet) # {'demo', True, 6.666, 'ceshi'}
demoSet1 = {6, 6.66, True, 'demo'}
print(demoSet & demoSet1) # intersection {True, 'demo'}
print(demoSet | demoSet1) # union {True, 'demo', 6.666, 6, 6.66, 'ceshi'}
# Dicts -- very similar to Java's Map
# 1. {} is an empty dict
# 2. Membership test: (key in demoDictionary) returns True/False
demoDictionary = {"name":"muyou", "age":26, 'gender':'男'}
print(demoDictionary["name"]) # value lookup by key
print(demoDictionary.keys()) # all keys: dict_keys(['name', 'age', 'gender'])
print(demoDictionary.values()) # all values: dict_values(['muyou', 26, '男'])
print(demoDictionary.pop("gender")) # removes the entry and returns its value
|
[
"[email protected]"
] | |
e7cc031b91f1dbc8e5774610579c7d6f5a97b643
|
e14b70a36e096ee24074b76253ac1334be66d903
|
/extranet/modules/oauthprovider/controllers/authorize.py
|
bf79ab060b0d595e21d02d8c7a86fecda8b7a4e4
|
[] |
no_license
|
lodi-g/extranet
|
e3df378222567f6831b4b8ee4545403bffb2462a
|
9252e13364574cbd0c6a9aa53de91a323115c776
|
refs/heads/master
| 2021-08-08T07:46:55.758088 | 2017-11-09T00:44:12 | 2017-11-09T01:16:34 | 110,174,987 | 0 | 0 | null | 2017-11-09T22:49:14 | 2017-11-09T22:49:14 | null |
UTF-8
|
Python
| false | false | 1,637 |
py
|
from flask import render_template, request, flash, session
from flask_login import login_required, current_user, login_fresh
from werkzeug.security import gen_salt
from extranet import usm
from extranet.modules.oauthprovider import bp
from extranet.connections.extranet import provider as extranet_provider
from extranet.connections.extranet import scopes as defined_scopes
from extranet.models.oauth import OauthApp, OauthToken
def render_authorize(*args, **kwargs):
    """Render the OAuth accept/deny dialog for the requesting client app.

    Stores a one-time token ("snitch") in the session so the subsequent
    POST can be validated, and passes the app record, the raw request and
    the known scopes through to the template.
    """
    client_id = kwargs.get('client_id')
    kwargs['app'] = OauthApp.query.filter_by(client_id=client_id).first()
    snitch = gen_salt(32)
    session['oauthprovider.snitch'] = snitch
    kwargs['snitch'] = snitch
    kwargs['request'] = request
    kwargs['defined_scopes'] = defined_scopes
    return render_template('authorize.html', **kwargs)
@bp.route('/authorize', methods=['GET', 'POST'])
@login_required
@extranet_provider.authorize_handler
def authorize(*args, **kwargs):
    """OAuth authorization endpoint.

    Returns True/False to the provider's authorize_handler to grant or
    deny the request, or an HTTP response (the accept/deny form or a
    login-refresh page).
    """
    # bypass accept/deny form if already accepted (has token)
    # NOTE(review): the lookup filters by user only, not by client app --
    # a token for *any* app auto-approves every app; confirm intended.
    if OauthToken.query.filter_by(user_id=current_user.id).first() is not None:
        return True
    # confirm login to access autorize/deny dialog
    if not login_fresh():
        return usm.needs_refresh()
    # render accept/deny if GET request
    if request.method == 'GET':
        return render_authorize(*args, **kwargs)
    # verify POST request legitimacy (one-time session token set by the form)
    if 'oauthprovider.snitch' not in session or session['oauthprovider.snitch'] != request.form.get('snitch'):
        flash('Something went wrong, please retry.')
        return render_authorize(*args, **kwargs)
    confirm = request.form.get('confirm', 'no')
    return confirm == 'yes'
|
[
"[email protected]"
] | |
31abc275139c5e9004c2b45bc340bbde3de00afd
|
e5d4ce431548bea004fad8db462467bbce210492
|
/accounts/urls.py
|
a7a46f5cd6ff1d7bb8d1f15d4cf6e96535790b5f
|
[] |
no_license
|
emirmaydemir/blogprojesiCepMarketim
|
6f95e307c8cb870b1601d43179420ecd53a5d1c7
|
47635e00ae72355c170deb5c5cab4c8939fafe07
|
refs/heads/master
| 2023-02-05T10:51:54.476227 | 2020-12-22T11:34:48 | 2020-12-22T11:34:48 | 323,609,292 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 331 |
py
|
from django.urls import path
from .views import *
from post.views import post_index, post_detail
from django.urls import re_path
# URL namespace used for reversing, e.g. "accounts:login".
app_name='accounts'
# Authentication routes; the view callables come from accounts.views.
urlpatterns = [
    re_path(r'login/$',login_view,name='login'),
    re_path(r'register/$', register_view, name='register'),
    re_path(r'logout/$', logout_view, name='logout'),
]
|
[
"[email protected]"
] | |
3169f03ad1a82380f124de333e6a15857ecf1ae8
|
4fc21c3f8dca563ce8fe0975b5d60f68d882768d
|
/GodwillOnyewuchi/Phase 1/Python Basic 2/day 12 task/task10.py
|
a4924e40fbc8159a266fbfd0579729acab934db6
|
[
"MIT"
] |
permissive
|
Uche-Clare/python-challenge-solutions
|
17e53dbedbff2f33e242cf8011696b3059cd96e9
|
49ede6204ee0a82d5507a19fbc7590a1ae10f058
|
refs/heads/master
| 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 |
MIT
| 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null |
UTF-8
|
Python
| false | false | 290 |
py
|
# Python program to get numbers divisible by fifteen from a list using an anonymous function
def divisibleby15(lists):
    """Return the elements of *lists* divisible by 15, in original order.

    Implemented with ``filter`` and a lambda to match the stated intent of
    using an anonymous function; the result is materialised so callers
    keep receiving a list.

    :param lists: iterable of numbers
    :return: list of the numbers evenly divisible by 15
    """
    return list(filter(lambda n: n % 15 == 0, lists))
print(divisibleby15([23, 56, 12, 15, 45, 23, 70, 678, 90]))
|
[
"[email protected]"
] | |
78a758b50b7c3ecb4bb6e5761d61565d2eb317a5
|
2c5b25d0b5d6ba66d013251f93ebf4c642fd787b
|
/wrong_answer_codes/Contiguous_Array/Contiguous Array_324757576.py
|
1c620fdc45f25037006caf70d00f3c54a4797b19
|
[] |
no_license
|
abhinay-b/Leetcode-Submissions
|
da8099ac54b5d36ae23db42580064d0f9d9bc63b
|
d034705813f3f908f555f1d1677b827af751bf42
|
refs/heads/master
| 2022-10-15T22:09:36.328967 | 2020-06-14T15:39:17 | 2020-06-14T15:39:17 | 259,984,100 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 787 |
py
|
class Solution:
    # LeetCode 525 "Contiguous Array": length of the longest subarray with
    # an equal number of 0s and 1s.  Sliding-window attempt.
    # NOTE(review): growing/shrinking a window by the current 0/1 imbalance
    # cannot in general recover the optimal subarray for this problem (the
    # standard approach is a prefix-sum hashmap); treat this code as suspect.
    def findMaxLength(self, nums: List[int]) -> int:
        count = [0]*2          # count[0]/count[1]: zeros/ones seen inside the window
        start = end = 0        # current window is nums[start..end]
        maxVal = 0
        for idx,num in enumerate(nums):
            count[num] += 1
            diff = abs(count[0] - count[1])
            # print(diff,start,end)
            if diff > 1:
                # window too unbalanced: drop one element from the left
                count[nums[start]] -= 1
                start += 1
            elif diff == 1 and start > 0 and (count[nums[start-1]] + 1 == count[1-nums[start
-1]]):
                # re-extend to the left if the previously dropped element
                # would balance the window again
                start -= 1
                count[nums[start]] += 1
                end = idx
                maxVal = max(maxVal, end - start+1)
            elif not diff:
                # balanced window: candidate answer
                end = idx
                maxVal = max(maxVal, end - start+1)
        return maxVal
|
[
"[email protected]"
] | |
26eb004d0f44e7ffb5285a806f4ef704d73d851b
|
861327f7758b7ee3447584041e4918145b72d97f
|
/lmvelisa2.py
|
89404e350f0262c57a9b38306e4e10b1eec935da
|
[] |
no_license
|
tsamartino/ELISA
|
3b8df19b2fd9c82ac6bb4c25447ac04b266aebf5
|
799b409e284bdb2387d5260ce2d86981652ca9ed
|
refs/heads/master
| 2021-01-01T18:11:29.877640 | 2015-06-19T21:58:53 | 2015-06-19T21:58:53 | 37,539,860 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,440 |
py
|
import statistics
import os
# ELISA plate-reader analysis for one plate; input is a tab-separated
# export with three header lines followed by 8 rows (A-H) x 12 columns.
source = "0633-1.txt"
#Open plate and print name
plate = open(source, 'r')
# Plate name = "15-" + the filename up to (not including) the extension.
plate_name = "15-"
for char in source:
    if char == ".":
        break
    else:
        plate_name += char
print ("Plate name:", plate_name)
# Skip the three header lines of the export.
plate.readline()
plate.readline()
plate.readline()
#Creates dictionary sorted by row
#To access A1 - row_dict['A'][0]
row_dict = {}
i = 0
for line in plate:
    if i < 8:
        # First character of each data line is the row letter (A-H).
        list_of_values = line.split('\t')
        row_dict[line[0]] = list_of_values[1:13]
    i += 1
plate.close()
#Creates dictionary sorted by column
#To access A1 - column_dict[1][0]
column_dict = {}
for number in range(1,13):
    column_list = []
    for item in sorted(row_dict):
        column_list.append(row_dict[item][number-1])
        column_dict[number] = row_dict[item][number-1]  # NOTE(review): dead assignment, overwritten just below
    column_dict[number] = [float(i) for i in column_list]
#Creates dictionary sorted by well number
#To access A1 - well_dict[1]
well_dict = {}
well_number = 1
for item in column_dict:
    for x in range(0,8):
        # Wells are numbered column-major: column 1 holds wells 1-8, etc.
        well_dict[well_number] = float(column_dict[item][x])
        well_number += 1
#Establishes where controls are in the plate
#Creates list for each type of control
control_column = 9
#If no Agdia control is loaded, this value should == 0
agdia_well = 64
# Control column layout: rows 1-3 negative, 4-6 positive, 7-8 buffer.
negative_list = column_dict[control_column][0:3]
positive_list = column_dict[control_column][3:6]
buffer_list = column_dict[control_column][6:8]
agdia_control = well_dict[agdia_well]
# Wells excluded from the positive-sample report (known positive controls).
positive_wells = [agdia_well, (((control_column-1)*8)+4), (((control_column-1)*8)+5), (((control_column-1)*8)+6)]
#Calculates mean and standard deviation for each control type
negative_mean = statistics.mean(negative_list)
positive_mean = statistics.mean(positive_list)
buffer_mean = statistics.mean(buffer_list)
negative_stdev = statistics.stdev(negative_list)
positive_stdev = statistics.stdev(positive_list)
buffer_stdev = statistics.stdev(buffer_list)
#Calculates the cut-off value and signal:noise from the control means
cutoff = negative_mean * 3
sig_to_noise = positive_mean/negative_mean
#Print statistics for the plate
print("""
The cut-off was set at %.4f
The signal-to-noise ratio is %.2f
""" % (cutoff, sig_to_noise))
#Determines if a well is negative or positive and prints positive wells with O.D. value
sample_size = 60
count = 0
for item in well_dict:
    if well_dict[item] >= cutoff:
        if item in positive_wells:
            continue
        else:
            count += 1
            print ("Well",item,"is POSITIVE with O.D. value of",well_dict[item])
print ("%s out of %s sub-samples tested positive for LMV" % (count, sample_size))
#Builds list with only relevant sample data and mean of the controls
sample = [['Sub-sample', 'O.D. reading', 'Cut-off']]
for x in range(sample_size):
    sample.append([str(x+1), well_dict[x+1], cutoff])
sample.append(['Agdia control', well_dict[agdia_well], cutoff])
sample.append(['Negative control mean', negative_mean, cutoff])
sample.append(['Positive control mean', positive_mean, cutoff])
sample.append(['Buffer control mean', buffer_mean, cutoff])
#Generates an html report using Google Visualization Combo Chart (bar for OD values, line for cutoff)
# The data rows (sample[1:] -- header excluded) and the plate name are
# interpolated into the template via %-formatting.
report = """
<html>
  <head>
    <script type="text/javascript" src="https://www.google.com/jsapi"></script>
    <script type="text/javascript">
      google.load("visualization", "1", {packages: ["corechart"]});
      google.setOnLoadCallback(drawVisualization);
      function drawVisualization() {
        // Plate data
        var dataTable = new google.visualization.DataTable();
        dataTable.addColumn({ type: 'string', id: 'Sub-sample'})
        dataTable.addColumn({ type: 'number', id: 'O.D. reading'})
        dataTable.addColumn({ type: 'number', id: 'Cut-off'})
        dataTable.addRows(%s)
        var options = {
          title : 'ELISA results for plate %s',
          vAxis: {title: "O.D. reading"},
          hAxis: {title: "Sub-sample"},
          seriesType: "bars",
          series: {1: {type: "line"}}
        };
        var chart = new google.visualization.ComboChart(document.getElementById('chart_div'));
        chart.draw(dataTable, options);
      }
    </script>
  </head>
  <body>
    <div id="chart_div" style="width: 900px; height: 500px;"></div>
  </body>
</html>
""" % (sample[1:], plate_name)
# Write the report next to the input file, with the same stem and .html.
output_filename = ".".join([source.split(".")[0], "html"])
with open(output_filename, "w") as handle:
    handle.write(report)
#os.system("open %s" % output_filename)
#fuck around space
import json
sorted_wells = [['Sub-sample', 'O.D. reading', 'Cut-off']]
sorted_wells += [[str(i), well_dict[i], cutoff] for i in sorted(well_dict.keys())]
encoded_wells = json.dumps(sorted_wells)
html_container = """
<!doctype html>
<html>
<head>
<title>Report</title>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script>
var data = %s;
google.load('visualization', '1.0', {'packages':['corechart']});
function doneDrawingChart() {
var table = new google.visualization.DataTable();
table.addColumn('string', 'Well', 'Cutoff');
table.addColumn('number', 'OD', 'Cutoff');
table.addRows(data);
var options = {'title':'Seeds and shit',
'width':800,
'height':700};
var chart = new google.visualization.ColumnChart(document.getElementById("chart"));
chart.draw(table, options);
console.log("Finished drawing chart.", data);
};
google.setOnLoadCallback(doneDrawingChart);
</script>
</head>
<body>
Report results
<div id="chart"></div>
<script></script>
</body>
</html>
""" % (sample)
|
[
"[email protected]"
] | |
5dfe38fc03c0375b3b51d023a6dd2aa1cca6b25d
|
ac42f1d918bdbd229968cea0954ed75250acd55c
|
/admin/dashboard/openstack_dashboard/dashboards/physical/hosts/compute/tests.py
|
47aa906803025be9db313abb19823b19ec492fcc
|
[
"Apache-2.0"
] |
permissive
|
naanal/product
|
016e18fd2f35608a0d8b8e5d2f75b653bac7111a
|
bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7
|
refs/heads/master
| 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,056 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class EvacuateHostViewTest(test.BaseAdminViewTests):
    """Tests for the 'evacuate host' view of the physical hosts panel.

    These tests use mox record/replay: api.nova calls are recorded as
    expectations in the exact order the view performs them, then verified
    after ReplayAll().
    """
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list')})
    def test_index(self):
        """GET renders the evacuate-host form template."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        # Only nova-compute services are candidates for evacuation targets.
        services = [service for service in self.services.list()
                    if service.binary == 'nova-compute']
        api.nova.service_list(IsA(http.HttpRequest),
                              binary='nova-compute').AndReturn(services)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:evacuate_host',
                      args=[hypervisor])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'physical/hosts/compute/evacuate_host.html')
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list',
                                   'evacuate_host')})
    def test_successful_post(self):
        """POST evacuates from services[1] to services[0] and redirects."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        services = [service for service in self.services.list()
                    if service.binary == 'nova-compute']
        api.nova.service_list(IsA(http.HttpRequest),
                              binary='nova-compute').AndReturn(services)
        api.nova.evacuate_host(IsA(http.HttpRequest),
                               services[1].host,
                               services[0].host,
                               False).AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:evacuate_host',
                      args=[hypervisor])
        form_data = {'current_host': services[1].host,
                     'target_host': services[0].host,
                     'on_shared_storage': False}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, dest_url)
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_list',
                                   'evacuate_host')})
    def test_failing_nova_call_post(self):
        """A nova failure during evacuation surfaces as an error message."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        services = [service for service in self.services.list()
                    if service.binary == 'nova-compute']
        api.nova.service_list(IsA(http.HttpRequest),
                              binary='nova-compute').AndReturn(services)
        api.nova.evacuate_host(IsA(http.HttpRequest),
                               services[1].host,
                               services[0].host,
                               False).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:evacuate_host',
                      args=[hypervisor])
        form_data = {'current_host': services[1].host,
                     'target_host': services[0].host,
                     'on_shared_storage': False}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, dest_url)
class MigrateHostViewTest(test.BaseAdminViewTests):
    """Tests for the 'migrate host' view (cold and live migration)."""
    def test_index(self):
        """GET renders the migrate-host form for a disabled compute service."""
        # Only disabled nova-compute services can be put into maintenance.
        disabled_services = [service for service in self.services.list()
                             if service.binary == 'nova-compute'
                             and service.status == 'disabled']
        disabled_service = disabled_services[0]
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:migrate_host',
                      args=[disabled_service.host])
        res = self.client.get(url)
        self.assertNoMessages()
        self.assertTemplateUsed(res,
                                'physical/hosts/compute/migrate_host.html')
    @test.create_stubs({api.nova: ('migrate_host',)})
    def test_maintenance_host_cold_migration_succeed(self):
        """Cold migration POST calls migrate_host with live_migrate=False."""
        disabled_services = [service for service in self.services.list()
                             if service.binary == 'nova-compute'
                             and service.status == 'disabled']
        disabled_service = disabled_services[0]
        api.nova.migrate_host(
            IsA(http.HttpRequest),
            disabled_service.host,
            live_migrate=False,
            disk_over_commit=False,
            block_migration=False
        ).AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:migrate_host',
                      args=[disabled_service.host])
        form_data = {'current_host': disabled_service.host,
                     'migrate_type': 'cold_migrate',
                     'disk_over_commit': False,
                     'block_migration': False}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, dest_url)
    @test.create_stubs({api.nova: ('migrate_host',)})
    def test_maintenance_host_live_migration_succeed(self):
        """Live migration POST calls migrate_host with live_migrate=True."""
        disabled_services = [service for service in self.services.list()
                             if service.binary == 'nova-compute'
                             and service.status == 'disabled']
        disabled_service = disabled_services[0]
        api.nova.migrate_host(
            IsA(http.HttpRequest),
            disabled_service.host,
            live_migrate=True,
            disk_over_commit=False,
            block_migration=True
        ).AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:migrate_host',
                      args=[disabled_service.host])
        form_data = {'current_host': disabled_service.host,
                     'migrate_type': 'live_migrate',
                     'disk_over_commit': False,
                     'block_migration': True}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, dest_url)
    @test.create_stubs({api.nova: ('migrate_host',)})
    def test_maintenance_host_migration_fails(self):
        """A nova failure during migration surfaces as an error message."""
        disabled_services = [service for service in self.services.list()
                             if service.binary == 'nova-compute'
                             and service.status == 'disabled']
        disabled_service = disabled_services[0]
        api.nova.migrate_host(
            IsA(http.HttpRequest),
            disabled_service.host,
            live_migrate=True,
            disk_over_commit=False,
            block_migration=True
        ).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:migrate_host',
                      args=[disabled_service.host])
        form_data = {'current_host': disabled_service.host,
                     'migrate_type': 'live_migrate',
                     'disk_over_commit': False,
                     'block_migration': True}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, dest_url)
class DisableServiceViewTest(test.BaseAdminViewTests):
    """Tests for the 'disable service' view."""
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats')})
    def test_index(self):
        """GET renders the disable-service form template."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:disable_service',
                      args=[hypervisor])
        res = self.client.get(url)
        template = 'physical/hosts/compute/disable_service.html'
        self.assertTemplateUsed(res, template)
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_disable')})
    def test_successful_post(self):
        """POST disables nova-compute on the host and redirects with success."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        services = [service for service in self.services.list()
                    if service.binary == 'nova-compute']
        api.nova.service_disable(IsA(http.HttpRequest),
                                 services[0].host,
                                 'nova-compute',
                                 reason='test disable').AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:disable_service',
                      args=[hypervisor])
        form_data = {'host': services[0].host,
                     'reason': 'test disable'}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, dest_url)
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats',
                                   'service_disable')})
    def test_failing_nova_call_post(self):
        """A nova failure while disabling surfaces as an error message."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        services = [service for service in self.services.list()
                    if service.binary == 'nova-compute']
        api.nova.service_disable(
            IsA(http.HttpRequest), services[0].host, 'nova-compute',
            reason='test disable').AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:physical:hosts:compute:disable_service',
                      args=[hypervisor])
        form_data = {'host': services[0].host,
                     'reason': 'test disable'}
        res = self.client.post(url, form_data)
        dest_url = reverse('horizon:physical:hosts:index')
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, dest_url)
|
[
"[email protected]"
] | |
a4bcbc3ea13c6d7161096668057371a82bc97ec8
|
e7ea544475ebfa70ebdf5d5949bde9e23edc60ba
|
/gbp/scripts/common/buildpackage.py
|
e1edfb29587dfad1895660c095e2fe13141cba7b
|
[] |
no_license
|
dcoshea/git-buildpackage
|
80cb7d890222488663a09e3d790fc5e985f791b9
|
f4aa76bfcda1ded4649cd071b123ef8d7bf2344d
|
refs/heads/master
| 2020-05-26T21:05:37.574986 | 2017-02-19T13:17:11 | 2017-02-19T13:17:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,061 |
py
|
# vim: set fileencoding=utf-8 :
#
# (C) 2006-2011, 2016 Guido Guenther <[email protected]>
# (C) 2012 Intel Corporation <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
#
"""Common functionality for Debian and RPM buildpackage scripts"""
import os
import os.path
import pipes
import tempfile
import shutil
from gbp.command_wrappers import (CatenateTarArchive, CatenateZipArchive)
from gbp.git import GitRepositoryError
from gbp.errors import GbpError
import gbp.log
# when we want to reference the index in a treeish context we call it:
index_name = "INDEX"
# when we want to reference the working copy in treeish context we call it:
wc_name = "WC"
def sanitize_prefix(prefix):
    """
    Sanitize the prefix used for generating source archives

    Guarantees exactly one trailing slash and no leading slash.

    >>> sanitize_prefix('')
    '/'
    >>> sanitize_prefix('foo/')
    'foo/'
    >>> sanitize_prefix('/foo/bar')
    'foo/bar/'
    """
    if not prefix:
        return '/'
    return "%s/" % prefix.strip('/')
def git_archive_submodules(repo, treeish, output, prefix, comp_type, comp_level,
                           comp_opts, format='tar'):
    """
    Create a source tree archive with submodules.

    Concatenates the archives generated by git-archive into one and compresses
    the end result.

    Exception handling is left to the caller.

    :param repo: git repository to archive
    :param treeish: treeish of the main module to archive
    :param output: path of the (possibly compressed) archive to create
    :param prefix: path prefix prepended to all archive entries (sanitized)
    :param comp_type: compressor program name (e.g. 'gzip'); falsy disables
        compression and the raw archive is moved to *output*
    :param comp_level: compression level passed as '-<n>'; None for default
    :param comp_opts: extra command line options for the compressor
    :param format: archive format, 'tar' or 'zip'
    """
    prefix = sanitize_prefix(prefix)
    tempdir = tempfile.mkdtemp()
    main_archive = os.path.join(tempdir, "main.%s" % format)
    submodule_archive = os.path.join(tempdir, "submodule.%s" % format)
    try:
        # generate main (tmp) archive
        repo.archive(format=format, prefix=prefix,
                     output=main_archive, treeish=treeish)
        # generate each submodule's archive and append it to the main archive
        for (subdir, commit) in repo.get_submodules(treeish):
            # strip a leading './' so the entry nests cleanly under prefix
            tarpath = [subdir, subdir[2:]][subdir.startswith("./")]
            gbp.log.debug("Processing submodule %s (%s)" % (subdir, commit[0:8]))
            repo.archive(format=format, prefix='%s%s/' % (prefix, tarpath),
                         output=submodule_archive, treeish=commit, cwd=subdir)
            if format == 'tar':
                CatenateTarArchive(main_archive)(submodule_archive)
            elif format == 'zip':
                CatenateZipArchive(main_archive)(submodule_archive)
        # compress the output
        if comp_type:
            # Redirect through stdout directly to the correct output file in
            # order to avoid determining the output filename of the compressor
            try:
                comp_level_opt = '-%d' % comp_level if comp_level is not None else ''
            except TypeError:
                raise GbpError("Invalid compression level '%s'" % comp_level)
            # NOTE(review): comp_type/comp_opts are interpolated into a shell
            # command line; values come from gbp configuration and are assumed
            # trusted here -- confirm before exposing to untrusted input.
            ret = os.system("%s --stdout %s %s %s > %s" %
                            (comp_type, comp_level_opt, comp_opts, main_archive,
                             output))
            if ret:
                raise GbpError("Error creating %s: %d" % (output, ret))
        else:
            shutil.move(main_archive, output)
    finally:
        shutil.rmtree(tempdir)
def git_archive_single(treeish, output, prefix, comp_type, comp_level, comp_opts, format='tar'):
    """
    Create an archive without submodules

    Runs git-archive and, if requested, pipes its output through a
    compressor into *output*. Exception handling is left to the caller.
    """
    full_prefix = sanitize_prefix(prefix)

    pipeline = pipes.Template()
    pipeline.prepend("git archive --format=%s --prefix=%s %s"
                     % (format, full_prefix, treeish), '.-')
    try:
        # '-<n>' compressor flag; empty string when no level was given
        level_flag = '-%d' % comp_level if comp_level is not None else ''
    except TypeError:
        raise GbpError("Invalid compression level '%s'" % comp_level)
    if comp_type:
        pipeline.append('%s -c %s %s' % (comp_type, level_flag, comp_opts), '--')
    status = pipeline.copy('', output)
    if status:
        raise GbpError("Error creating %s: %d" % (output, status))
# Functions to handle export-dir
def dump_tree(repo, export_dir, treeish, with_submodules, recursive=True):
    """Dump a tree to output_dir

    :param repo: git repository to export from
    :param export_dir: destination; its basename becomes the prefix of the
        extracted tree and its dirname the extraction directory
    :param treeish: treeish to export
    :param with_submodules: also export submodule content (recursive only)
    :param recursive: export the whole tree; if False only top-level blobs
    :return: True on success, False otherwise
    """
    output_dir = os.path.dirname(export_dir)
    prefix = sanitize_prefix(os.path.basename(export_dir))
    if recursive:
        paths = []
    else:
        paths = ["'%s'" % nam for _mod, typ, _sha, nam in
                 repo.list_tree(treeish) if typ == 'blob']
    pipe = pipes.Template()
    pipe.prepend('git archive --format=tar --prefix=%s %s -- %s' %
                 (prefix, treeish, ' '.join(paths)), '.-')
    pipe.append('tar -C %s -xf -' % output_dir, '-.')
    # remember the cwd since submodule handling chdirs around
    top = os.path.abspath(os.path.curdir)
    try:
        ret = pipe.copy('', '')
        if ret:
            raise GbpError("Error in dump_tree archive pipe")
        if recursive and with_submodules:
            if repo.has_submodules():
                repo.update_submodules()
            for (subdir, commit) in repo.get_submodules(treeish):
                gbp.log.info("Processing submodule %s (%s)" % (subdir, commit[0:8]))
                # strip a leading './' so the entry nests cleanly under prefix
                tarpath = [subdir, subdir[2:]][subdir.startswith("./")]
                os.chdir(subdir)
                pipe = pipes.Template()
                pipe.prepend('git archive --format=tar --prefix=%s%s/ %s' %
                             (prefix, tarpath, commit), '.-')
                pipe.append('tar -C %s -xf -' % output_dir, '-.')
                ret = pipe.copy('', '')
                os.chdir(top)
                if ret:
                    raise GbpError("Error in dump_tree archive pipe in submodule %s" % subdir)
    except OSError as err:
        # BUG FIX: was 'err[0]', which raises TypeError on Python 3 since
        # exceptions are no longer indexable; log the exception itself.
        gbp.log.err("Error dumping tree to %s: %s" % (output_dir, err))
        return False
    except (GitRepositoryError, GbpError) as err:
        gbp.log.err(err)
        return False
    except Exception as e:
        gbp.log.err("Error dumping tree to %s: %s" % (output_dir, e))
        return False
    finally:
        os.chdir(top)
    return True
def wc_index(repo):
    """Get path of the temporary index file used for exporting working copy"""
    # The throwaway index lives next to git's own metadata directory.
    git_dir = repo.git_dir
    return os.path.join(git_dir, "gbp_index")
def write_wc(repo, force=True):
    """write out the current working copy as a treeish object"""
    # Stage everything under repo.path into our private index, then turn
    # that index into a tree object and hand back its id.
    tmp_index = wc_index(repo)
    repo.add_files(repo.path, force=force, index_file=tmp_index)
    return repo.write_tree(index_file=tmp_index)
def drop_index(repo):
    """drop our custom index"""
    # Best-effort cleanup: only unlink if the temporary index was created.
    stale_index = wc_index(repo)
    if os.path.exists(stale_index):
        os.unlink(stale_index)
|
[
"[email protected]"
] | |
52b9f542a153e979406b22d48c38a86711d3273c
|
ee8ec2e94586e676440c85da0721c4ba7dcfc5bf
|
/fc_community/fcuser/views.py
|
e979ceb39a951fa5a594f6408e6afbeb381107cf
|
[] |
no_license
|
simbyungki/fc_django
|
acc97c48db082fd2d5a52833fa704bad241667dd
|
99332b0bfa2ec4d8bbf7e0c153926dbe07b4ae46
|
refs/heads/master
| 2023-01-02T15:47:42.352510 | 2020-11-02T10:02:46 | 2020-11-02T10:02:46 | 307,319,339 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,582 |
py
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth.hashers import make_password, check_password
from .models import Fcuser
from .forms import LoginForm
def index(request):
    """Render the landing page, attaching the logged-in Fcuser if any."""
    context = {}
    session_user = request.session.get('user')
    if session_user:
        # NOTE: the template key is 'user_id' but it holds the Fcuser object.
        context['user_id'] = Fcuser.objects.get(pk=session_user)
    return render(request, 'fcuser/index.html', context)
def logout(request):
    """Drop the session's user key (when truthy) and go back to the index."""
    session_user = request.session.get('user')
    if session_user:
        del request.session['user']
    return redirect('/')
def login(request):
    """Show the login form; on a valid POST, store the user id and redirect."""
    if request.method != 'POST':
        form = LoginForm()
    else:
        form = LoginForm(request.POST)
        if form.is_valid():
            # LoginForm.is_valid() sets form.user_id on success.
            request.session['user'] = form.user_id
            return redirect('/')
    # GET, or POST with validation errors: re-render with the bound form.
    return render(request, 'fcuser/login.html', {'form': form})
def register(request):
    """GET: show the signup form. POST: validate input and create an Fcuser."""
    if request.method == 'GET':
        return render(request, 'fcuser/register.html')
    elif request.method == 'POST':
        username = request.POST.get('username', None)
        useremail = request.POST.get('useremail', None)
        password = request.POST.get('password', None)
        re_password = request.POST.get('re-password', None)
        context = {}
        if not all([username, useremail, password, re_password]):
            context['error'] = '모든 값을 입력해주세요.'
        elif password != re_password:
            context['error'] = '비밀번호가 다릅니다.'
        else:
            # Store only the hashed password, never the plain text.
            Fcuser(
                username=username,
                useremail=useremail,
                password=make_password(password)
            ).save()
        # The register page is re-rendered in all cases (no redirect).
        return render(request, 'fcuser/register.html', context)
|
[
"[email protected]"
] | |
13f14dc7b1dd99b30978cce555abb5bec03d63be
|
e98064e3b51cbfdef409bca9642bea32d772bc77
|
/hw2/2.1/src/plot.py
|
d2f2fc35bc953624d39c9e51cde21463209c2669
|
[] |
no_license
|
KUAN-HSUN-LI/SDML
|
b59b26ec1f71689d0bb16d6c34c05b59f3fc3004
|
be84492fed6e4712d9a86dc1cef481954aefe22f
|
refs/heads/master
| 2021-06-23T07:56:34.050666 | 2019-11-18T05:50:29 | 2019-11-18T05:50:29 | 210,079,111 | 1 | 0 | null | 2021-04-06T01:33:18 | 2019-09-22T02:25:29 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 704 |
py
|
"""Plot training/validation loss and F1 curves from model/history.json.

Bug fixes vs. the original:
  * '%matplotlib inline' is an IPython magic command and a SyntaxError in a
    plain .py script -- removed (use a notebook for inline rendering).
  * 'json' was used without being imported.
"""
import json

import matplotlib.pyplot as plt

# Load the training history produced by the training script.
with open('model/history.json', 'r') as f:
    history = json.loads(f.read())

train_loss = [l['loss'] for l in history['train']]
valid_loss = [l['loss'] for l in history['valid']]
train_f1 = [l['acc'] for l in history['train']]
valid_f1 = [l['acc'] for l in history['valid']]

plt.figure(figsize=(7, 5))
plt.title('Loss')
plt.plot(train_loss, label='train')
plt.plot(valid_loss, label='valid')
plt.legend()
plt.show()

plt.figure(figsize=(7, 5))
plt.title('F1 Score')
plt.plot(train_f1, label='train')
plt.plot(valid_f1, label='valid')
plt.legend()
plt.show()

# Report the best validation F1 and the epoch index at which it occurred.
print('Best F1 score ', max([[l['acc'], idx] for idx, l in enumerate(history['valid'])]))
|
[
"b06209027.ntu.edu.tw"
] |
b06209027.ntu.edu.tw
|
b00f6542c6e1995eda6a3909f6de3a865280c095
|
16147da7fb23a54e3604bcf046b418755d92e9fe
|
/src/run_treemask.py
|
603d2c4aa870f8133b26b76e934608503c022dc4
|
[] |
no_license
|
tcc7496/veggie_code
|
28861e0274231f220b7c72fd678731590d45b8c6
|
5fb3fb0a7f6c32d3c56774183dea731601c9b972
|
refs/heads/master
| 2023-08-05T02:06:49.283707 | 2021-09-17T16:47:42 | 2021-09-17T16:47:42 | 352,734,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 697 |
py
|
'''
A script to extract the tree mask from the plant species map
'''
#######################################
from tree_mask import *
import click
@click.command()
@click.argument('file', type=click.Path(exists=True))
@click.argument('outfile', type=click.Path())
@click.option('-a', '--aoi', default=None, type=click.Path(),
              help='optional area of interest shapefile')
def main(file, outfile, aoi):
    """Extract the tree mask from the plant species map.

    FILE is the full filepath to the input plant species map.
    OUTFILE is the full filepath (including filename) of the output geotiff.
    """
    # BUG FIX: click.argument() does not accept a 'help' keyword -- it raises
    # TypeError at import time. Argument docs belong in the command docstring.
    tree_mask(file, outfile, aoi)
#######################################
if __name__ == "__main__":
    ''' Main block '''
    main()
|
[
"[email protected]"
] | |
5aae57ca5da47c5876a5d1998f846871038a0fa0
|
009cca46aed9599d633441b044987ae78b60685a
|
/scripts/submitJLabJob_sample.py
|
51c99e1ea1f8794af4f950771c0051f69c129d25
|
[] |
no_license
|
cipriangal/QweakG4DD
|
0de40f6c2693021db44916e03d8d55703aa37387
|
5c8f55be4ba0ec3a3898a40c4e1ff8eb42c550ad
|
refs/heads/master
| 2021-01-23T10:29:53.163237 | 2018-06-25T19:03:24 | 2018-06-25T19:03:24 | 29,534,503 | 0 | 4 | null | 2017-03-27T21:06:14 | 2015-01-20T14:45:37 |
C++
|
UTF-8
|
Python
| false | false | 5,539 |
py
|
#!/usr/bin/python
from subprocess import call
import sys,os,time
def main():
    # Configure and submit a batch of JLab farm simulation jobs:
    # one job directory + macro per index in [_nrStart, _nrStop), then a
    # single Auger XML describing all jobs. NOTE: Python 2 script (print
    # statements, integer division in the '%03dk' label).
    #center, x,y,z=0,335,560
    # Beam position (cm) and direction (deg) at the generation plane.
    _xP=0.
    _yP=335.0
    _zP=560.
    _Px=0.#deg
    _Py=0.
    _beamE=1160#MeV
    _email="[email protected]"
    # Source tree (binaries/macros copied from) and farm output area.
    _source="/lustre/expphy/work/hallc/qweak/ciprian/simCodeG410/QweakG4DD"
    _directory="/lustre/expphy/volatile/hallc/qweak/ciprian/farmoutput/g41001p01/sample/moustaches/withShower/md3"
    _tracking=2 #0=primary only | 1=prim + opt photon | 2=no optical ph and 10x faster than 3=full
    _stpSize=-0.02
    _nEv=100000
    _nrStop=100
    _nrStart=0
    _pol="V"
    modTrj=0 ## 0:standard G4 propagation(wght sims) 1:debug print == big NONO! 2: modifyTraj
    # submit=0 only prepares job dirs and XML without calling jsub.
    submit=0
    nDist=203
    sample=1
    #idRoot= _pol+'_sampled_%03dk'% (_nEv/1000)
    idRoot= _pol+'_sampled'+str(nDist)+'_%03dk'% (_nEv/1000)
    for nr in range(_nrStart,_nrStop): # repeat for nr jobs
        _idN= idRoot+'_%05d'% (nr)
        print _idN
        # Write the per-job Geant4 macro and log directory.
        createMacFile(_directory,_idN,_xP,_yP,_zP,_Px,_Py,_tracking,_beamE,_nEv,nr,modTrj,sample,_pol,_stpSize)
        ##create input files
        # if sample==1:
        #     if _pol=="V":
        #         call("root -l -q -b ../rootScripts/samplePrimaryDist.C\\("+str(_nEv)+",1,"+str(nDist)+"\\)",shell=True)
        #     else:
        #         call("root -l -q -b ../rootScripts/samplePrimaryDist.C\\("+str(_nEv)+",-1,"+str(nDist)+"\\)",shell=True)
        #     call(["mv","positionMomentum.in",_directory+"/"+_idN+"/positionMomentum.in"])
        #     call(["mv","polarization.in",_directory+"/"+_idN+"/polarization.in"])
        # Stage the simulation binary and base macro into the job directory.
        call(["cp",_source+"/build/QweakSimG4",_directory+"/"+_idN+"/QweakSimG4"])
        call(["cp",_source+"/myQweakCerenkovOnly.mac",_directory+"/"+_idN+"/myQweakCerenkovOnly.mac"])
    # One XML file describes the whole batch of jobs.
    createXMLfile(_source,_directory,idRoot,_nrStart,_nrStop,_email,sample)
    if submit==1:
        print "submitting position sampled with id",_idN," between ",_nrStart,_nrStop
        call(["jsub","-xml",_source+"/scripts/jobs/"+idRoot+".xml"])
    else:
        print "NOT submitting position sampled with id",_idN," between ",_nrStart,_nrStop
    print "I am all done"
def createMacFile(directory, idname,
                  xPos, yPos, zPos,
                  Px, Py, tracking,
                  beamE, nEv, nr, modTrj, sample, pol, stpSize):
    """Write <directory>/<idname>/myRun.mac and create its log directory.

    The macro replays myQweakCerenkovOnly.mac, then overrides beam
    position/direction, sampling mode, physics flags and the random seeds
    (seeds are derived from the wall clock and the job number nr).

    Returns 0 on success.
    """
    job_dir = directory + "/" + idname
    if not os.path.exists(job_dir + "/log"):
        os.makedirs(job_dir + "/log")

    mac_lines = [
        "/control/execute myQweakCerenkovOnly.mac\n",
        "/PrimaryEvent/SetBeamPositionX %s cm\n" % str(xPos),
        "/PrimaryEvent/SetBeamPositionY %s cm\n" % str(yPos),
        "/PrimaryEvent/SetBeamPositionZ %s cm\n" % str(zPos),
        "/PrimaryEvent/SetBeamDirectionX %s deg\n" % str(Px),
        "/PrimaryEvent/SetBeamDirectionY %s deg\n" % str(Py),
    ]
    if sample == 1:
        # Sampled mode: positions/polarizations come from input files.
        mac_lines.append("/PrimaryEvent/SetFixedPosMom false\n")
        mac_lines.append("/PrimaryEvent/SetPolarization f\n")
    else:
        mac_lines.append("/PrimaryEvent/SetFixedPosMom true\n")
        mac_lines.append("/PrimaryEvent/SetPolarization %s\n" % str(pol))
    mac_lines.append("/PhysicsProcesses/settingFlag %s\n" % str(modTrj))
    mac_lines.append("/EventGen/SetBeamEnergy %s MeV\n" % str(beamE))
    mac_lines.append("/TrackingAction/TrackingFlag %s\n" % str(tracking))
    mac_lines.append("/EventGen/SelectOctant 3\n")
    # Seeds mix the wall clock with the job number so jobs differ.
    seedA = int(time.time()/2000.) + 100*nr + nr
    seedB = int(time.time()/300.) + 10000*nr + nr
    mac_lines.append("/Cerenkov/SetPbStepSize %s mm\n" % str(stpSize))
    mac_lines.append("/HallC/GeometryUpdate\n")
    mac_lines.append("/random/setSeeds %s %s\n" % (str(seedA), str(seedB)))
    mac_lines.append("/run/beamOn %s\n" % str(nEv))

    with open(job_dir + "/myRun.mac", 'w') as macro:
        macro.writelines(mac_lines)
    return 0
def createXMLfile(source, writeDir, idRoot, nStart, nStop, email, sample):
    """Write the JLab Auger job-description XML for a batch of jobs.

    The file is written to <source>/scripts/jobs/<idRoot>.xml and lists one
    <Job> element per index in [nStart, nStop). When sample==1 the sampled
    position/polarization input files are also staged into each job.

    Returns 0 on success.
    """
    jobs_dir = source + "/scripts/jobs"
    if not os.path.exists(jobs_dir):
        os.makedirs(jobs_dir)

    parts = ["<Request>\n"]
    parts.append("  <Email email=\"" + email + "\" request=\"false\" job=\"true\"/>\n")
    parts.append("  <Project name=\"qweak\"/>\n")
    parts.append("  <Track name=\"simulation\"/>\n")
    parts.append("  <Name name=\"" + idRoot + "\"/>\n")
    parts.append("  <OS name=\"centos7\"/>\n")
    parts.append("  <Command><![CDATA[\n")
    parts.append("QweakSimG4 myRun.mac\n")
    parts.append("  ]]></Command>\n")
    parts.append("  <Memory space=\"2000\" unit=\"MB\"/>\n")
    for nr in range(nStart, nStop):  # one <Job> element per farm job
        idName = writeDir + "/" + idRoot + '_%05d' % (nr)
        parts.append("  <Job>\n")
        parts.append("    <Input src=\"" + idName + "/QweakSimG4\" dest=\"QweakSimG4\"/>\n")
        parts.append("    <Input src=\"" + idName + "/myQweakCerenkovOnly.mac\" dest=\"myQweakCerenkovOnly.mac\"/>\n")
        parts.append("    <Input src=\"" + idName + "/myRun.mac\" dest=\"myRun.mac\"/>\n")
        if sample == 1:
            parts.append("    <Input src=\"" + idName + "/positionMomentum.in\" dest=\"positionMomentum.in\"/>\n")
            parts.append("    <Input src=\"" + idName + "/polarization.in\" dest=\"polarization.in\"/>\n")
        parts.append("    <Output src=\"QwSim_0.root\" dest=\"" + idName + "/QwSim_0.root\"/>\n")
        parts.append("    <Output src=\"o_tuple.root\" dest=\"" + idName + "/o_tuple.root\"/>\n")
        parts.append("    <Stdout dest=\"" + idName + "/log/log.out\"/>\n")
        parts.append("    <Stderr dest=\"" + idName + "/log/log.err\"/>\n")
        parts.append("  </Job>\n\n")
    parts.append("</Request>\n")

    with open(jobs_dir + "/" + idRoot + ".xml", "w") as handle:
        handle.write("".join(parts))
    return 0
# Script entry point: build (and optionally submit) the farm job batch.
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f3a56eab63df2e25ca7185b2b359bdc948581b9a
|
f20f3ab827eab5ad6a3f97b35d10d7afe2f118d5
|
/__init__.py
|
e910486ed0e4b8b6f2fb6655c4441fbbf9959a91
|
[
"MIT"
] |
permissive
|
bradparks/Sprytile__blender_add_on_sprite_sheets_tile_maps
|
9adb618bbd0e1f4e9334b8f4e534cff6fa9cc9d7
|
421c7efe3ea9ebd7e0f8dca7fb797eca597964d2
|
refs/heads/master
| 2021-05-08T04:37:22.745456 | 2017-10-12T15:59:17 | 2017-10-12T15:59:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 22,891 |
py
|
# Add-on registration metadata read by Blender's add-on manager; this dict
# must be named bl_info and be a literal at module top level.
bl_info = {
    "name": "Sprytile Painter",
    "author": "Jeiel Aranal",
    "version": (0, 4, 24),
    "blender": (2, 7, 7),
    "description": "A utility for creating tile based low spec scenes with paint/map editor tools",
    "location": "View3D > UI panel > Sprytile",
    "wiki_url": "https://chemikhazi.github.io/Sprytile/",
    "tracker_url": "https://github.com/ChemiKhazi/Sprytile/issues",
    "category": "Paint"
}
# Put Sprytile directory is sys.path so modules can be loaded
import os
import sys
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
locals_list = locals()
if "bpy" in locals_list:
from importlib import reload
reload(addon_updater_ops)
reload(sprytile_gui)
reload(sprytile_modal)
reload(sprytile_panel)
reload(sprytile_utils)
reload(sprytile_uv)
reload(tool_build)
reload(tool_paint)
reload(tool_fill)
reload(tool_set_normal)
else:
from . import sprytile_gui, sprytile_modal, sprytile_panel, sprytile_utils, sprytile_uv
from sprytile_tools import *
import bpy
import bpy.utils.previews
from . import addon_updater_ops
from bpy.props import *
import rna_keymap_ui
class SprytileSceneSettings(bpy.types.PropertyGroup):
def set_normal(self, value):
if "lock_normal" not in self.keys():
self["lock_normal"] = False
if self["lock_normal"] is True:
return
if self["normal_mode"] == value:
self["lock_normal"] = not self["lock_normal"]
return
self["normal_mode"] = value
self["lock_normal"] = True
bpy.ops.sprytile.axis_update('INVOKE_REGION_WIN')
def get_normal(self):
if "normal_mode" not in self.keys():
self["normal_mode"] = 3
return self["normal_mode"]
normal_mode = EnumProperty(
items=[
("X", "X", "World X-Axis", 1),
("Y", "Y", "World Y-Axis", 2),
("Z", "Z", "World X-Axis", 3)
],
name="Normal Mode",
description="Normal to create the mesh on",
default='Z',
set=set_normal,
get=get_normal
)
lock_normal = BoolProperty(
name="Lock",
description="Lock normal used to create meshes",
default=False
)
snap_translate = BoolProperty(
name="Snap Translate",
description="Snap pixel translations to pixel grid",
default=True
)
paint_mode = EnumProperty(
items=[
("PAINT", "Paint", "Advanced UV paint tools", 1),
("MAKE_FACE", "Build", "Only create new faces", 3),
("SET_NORMAL", "Set Normal", "Select a normal to use for face creation", 2),
("FILL", "Fill", "Fill the work plane cursor", 4)
],
name="Sprytile Paint Mode",
description="Paint mode",
default='MAKE_FACE'
)
def set_show_tools(self, value):
keys = self.keys()
if "show_tools" not in keys:
self["show_tools"] = False
self["show_tools"] = value
if value is False:
if "paint_mode" not in keys:
self["paint_mode"] = 3
if self["paint_mode"] in {2, 4}:
self["paint_mode"] = 3
def get_show_tools(self):
if "show_tools" not in self.keys():
self["show_tools"] = False
return self["show_tools"]
show_tools = BoolProperty(
default=False,
set=set_show_tools,
get=get_show_tools
)
def set_dummy(self, value):
current_value = self.get_dummy_actual(True)
value = list(value)
for idx in range(len(value)):
if current_value[idx] and current_value[idx] & value[idx]:
value[idx] = False
mode_value_idx = [1, 3, 2, 4]
def get_mode_value(arr_value):
for i in range(len(arr_value)):
if arr_value[i]:
return mode_value_idx[i]
return -1
run_modal = True
paint_mode = get_mode_value(value)
if paint_mode > 0:
self["paint_mode"] = paint_mode
else:
run_modal = False
if "is_running" in self.keys():
if self["is_running"]:
self["is_running"] = False
else:
run_modal = True
if run_modal:
bpy.ops.sprytile.modal_tool('INVOKE_REGION_WIN')
def get_dummy_actual(self, force_real):
if "paint_mode" not in self.keys():
self["paint_mode"] = 3
out_value = [False, False, False, False]
if self["is_running"] or force_real:
index_value_lookup = 1, 3, 2, 4
set_idx = index_value_lookup.index(self["paint_mode"])
out_value[set_idx] = True
return out_value
def get_dummy(self):
if "is_running" not in self.keys():
self["is_running"] = False
is_running = self["is_running"]
return self.get_dummy_actual(is_running)
set_paint_mode = BoolVectorProperty(
name="Set Paint Mode",
description="Set Sprytile Tool Mode",
size=4,
set=set_dummy,
get=get_dummy
)
world_pixels = IntProperty(
name="World Pixel Density",
description="How many pixels are displayed in one world unit",
subtype='PIXEL',
default=32,
min=8,
max=2048
)
paint_normal_vector = FloatVectorProperty(
name="Srpytile Last Paint Normal",
description="Last saved painting normal used by Sprytile",
subtype='DIRECTION',
default=(0.0, 0.0, 1.0)
)
paint_up_vector = FloatVectorProperty(
name="Sprytile Last Paint Up Vector",
description="Last saved painting up vector used by Sprytile",
subtype='DIRECTION',
default=(0.0, 1.0, 0.0)
)
uv_flip_x = BoolProperty(
name="Flip X",
default=False
)
uv_flip_y = BoolProperty(
name="Flip Y",
default=False
)
mesh_rotate = FloatProperty(
name="Grid Rotation",
description="Rotation of mesh creation",
subtype='ANGLE',
unit='ROTATION',
step=9000,
precision=0,
min=-6.28319,
max=6.28319,
default=0.0
)
cursor_snap = EnumProperty(
items=[
('VERTEX', "Vertex", "Snap cursor to nearest vertex", "SNAP_GRID", 1),
('GRID', "Grid", "Snap cursor to grid", "SNAP_VERTEX", 2)
],
name="Cursor snap mode",
description="Sprytile cursor snap mode"
)
cursor_flow = BoolProperty(
name="Cursor Flow",
description="Cursor automatically follows mesh building",
default=False
)
paint_align = EnumProperty(
items=[
('TOP_LEFT', "Top Left", "", 1),
('TOP', "Top", "", 2),
('TOP_RIGHT', "Top Right", "", 3),
('LEFT', "Left", "", 4),
('CENTER', "Center", "", 5),
('RIGHT', "Right", "", 6),
('BOTTOM_LEFT', "Bottom Left", "", 7),
('BOTTOM', "Bottom", "", 8),
('BOTTOM_RIGHT', "Bottom Right", "", 9),
],
name="Paint Align",
description="Paint alignment mode",
default='CENTER'
)
def set_align_toggle(self, value, row):
    """Translate a press on one row of the 3x3 align toggles into the
    1-9 ``paint_align`` index.

    `value` is the row's three toggle states; only a toggle that newly
    flipped on (differs from the stored state) changes the selection.
    """
    # Reading the current state also lazily initializes paint_align.
    previous = self.get_align_toggle(row)
    row_offsets = {"top": 0, "middle": 3, "bottom": 6}
    if row not in row_offsets:
        return
    # Find the column whose toggle just turned on; ignore no-op presses.
    column = 0
    for i in range(3):
        if value[i] and previous[i] != value[i]:
            column = i + 1
            break
    if column == 0:
        return
    self["paint_align"] = row_offsets[row] + column

def set_align_top(self, value):
    self.set_align_toggle(value, "top")

def set_align_middle(self, value):
    self.set_align_toggle(value, "middle")

def set_align_bottom(self, value):
    self.set_align_toggle(value, "bottom")

def get_align_toggle(self, row):
    """Return the three toggle states for one row of the align grid."""
    if "paint_align" not in self.keys():
        self["paint_align"] = 5  # default: center
    align = self["paint_align"]
    base = {"top": 0, "middle": 3, "bottom": 6}.get(row)
    if base is None:
        return False, False, False
    return align == base + 1, align == base + 2, align == base + 3

def get_align_top(self):
    return self.get_align_toggle("top")

def get_align_middle(self):
    return self.get_align_toggle("middle")

def get_align_bottom(self):
    return self.get_align_toggle("bottom")
# Three toggle rows backed by the align getter/setter methods above.
paint_align_top = BoolVectorProperty(
    name="Align",
    size=3,
    set=set_align_top,
    get=get_align_top
)
paint_align_middle = BoolVectorProperty(
    name="Align",
    size=3,
    set=set_align_middle,
    get=get_align_middle
)
paint_align_bottom = BoolVectorProperty(
    name="Align",
    size=3,
    set=set_align_bottom,
    get=get_align_bottom
)
# Use the selected edge as the X axis when UV mapping.
paint_hinting = BoolProperty(
    name="Hinting",
    description="Selected edge is used as X axis for UV mapping."
)
paint_stretch_x = BoolProperty(
    name="Stretch X",
    description="Stretch face over X axis of tile"
)
paint_stretch_y = BoolProperty(
    name="Stretch Y",
    description="Stretch face over Y axis of tile"
)
paint_edge_snap = BoolProperty(
    name="Stretch Edge Snap",
    description="Snap UV vertices to edges of tile when stretching.",
    default=True
)
# How close (as a fraction of the tile) a UV must be to an edge to snap.
edge_threshold = FloatProperty(
    name="Threshold",
    description="Ratio of UV tile near to edge to apply snap",
    min=0.01,
    max=0.5,
    soft_min=0.01,
    soft_max=0.5,
    default=0.35
)
paint_uv_snap = BoolProperty(
    name="UV Snap",
    default=True,
    description="Snap UV vertices to texture pixels"
)
# --- Runtime flags for the modal tool ---
is_running = BoolProperty(
    name="Sprytile Running",
    description="Exit Sprytile tool"
)
is_snapping = BoolProperty(
    name="Is Cursor Snap",
    description="Is cursor snapping currently activated"
)
has_selection = BoolProperty(
    name="Has selection",
    description="Is there a mesh element selected"
)
is_grid_translate = BoolProperty(
    name="Is Grid Translate",
    description="Grid translate operator is running"
)
# --- UI display toggles ---
show_extra = BoolProperty(
    name="Extra UV Grid Settings",
    default=False
)
show_overlay = BoolProperty(
    name="Show Grid Overlay",
    default=True
)
auto_merge = BoolProperty(
    name="Auto Merge",
    description="Automatically merge vertices when creating faces",
    default=True
)
auto_join = BoolProperty(
    name="Join Multi",
    description="Join multi tile faces when possible",
    default=False
)
def set_reload(self, value):
    """Setter for auto_reload: store the flag and, when enabled, start the
    modal operator that periodically reloads images."""
    self["auto_reload"] = value
    if value is True:
        bpy.ops.sprytile.reload_auto('INVOKE_REGION_WIN')

def get_reload(self):
    """Getter for auto_reload, lazily defaulting to False on first access."""
    if "auto_reload" not in self.keys():
        self["auto_reload"] = False
    return self["auto_reload"]

# Auto-reload toggle backed by the getter/setter above.
auto_reload = BoolProperty(
    name="Auto",
    description="Automatically reload images every few seconds",
    default=False,
    set=set_reload,
    get=get_reload
)
# When filling, keep each face's existing rotation instead of realigning it.
fill_lock_transform = BoolProperty(
    name="Lock Transforms",
    description="Filled faces keep current rotations",
    default=False,
)
# When to draw the work-plane cursor: never, always, or only on view changes.
axis_plane_display = EnumProperty(
    items=[
        ('OFF', "Off", "Always Off", "RADIOBUT_OFF", 1),
        ('ON', "On", "Always On", "RADIOBUT_ON", 2),
        ('MIDDLE_MOUSE', "View", "Only when changing view", "CAMERA_DATA", 3)
    ],
    name="Work Plane Cursor",
    description="Display mode of Work Plane Cursor",
    default='MIDDLE_MOUSE'
)
# Expand/collapse the work-plane settings panel.
axis_plane_settings = BoolProperty(
    name="Axis Plane Settings",
    description="Show Work Plane Cursor settings",
    default=False
)
# Size of the drawn work-plane cursor, in grid cells.
axis_plane_size = IntVectorProperty(
    name="Plane Size",
    description="Size of the Work Plane Cursor",
    size=2,
    default=(2, 2),
    min=1,
    soft_min=1
)
axis_plane_color = FloatVectorProperty(
    name="Plane Color",
    description="Color Work Plane Cursor is drawn with",
    size=3,
    default=(0.7, 0.7, 0.7),
    subtype='COLOR'
)
class SprytileMaterialGridSettings(bpy.types.PropertyGroup):
    """Per-material tile grid: cell size, padding, margins, offset, rotation."""

    # Name of the material this grid references.
    mat_id = StringProperty(
        name="Material Id",
        description="Name of the material this grid references",
        default=""
    )
    # Unique id of this grid; -1 means unassigned.
    id = IntProperty(
        name="Grid ID",
        default=-1
    )
    name = StringProperty(
        name="Grid Name"
    )
    # Tile cell size in pixels.
    grid = IntVectorProperty(
        name="Size",
        description="Grid size, in pixels",
        min=1,
        size=2,
        subtype='XYZ',
        default=(32, 32)
    )

    def set_padding(self, value):
        """Setter for padding: shrink/grow the cell size so the overall tile
        footprint stays constant; rejects values that would make a cell
        smaller than one pixel."""
        current_padding = self.get_padding()
        if "grid" not in self.keys():
            self["grid"] = (32, 32)
        # Padding applies to both sides of a cell, hence the factor of 2.
        padding_delta = [ (value[0] - current_padding[0]) * 2, (value[1] - current_padding[1]) * 2]
        new_grid = [self["grid"][0] - padding_delta[0], self["grid"][1] - padding_delta[1]]
        if new_grid[0] < 1 or new_grid[1] < 1:
            return
        self["grid"] = (new_grid[0], new_grid[1])
        self["padding"] = value

    def get_padding(self):
        """Getter for padding, lazily defaulting to (0, 0)."""
        if "padding" not in self.keys():
            self["padding"] = (0, 0)
        return self["padding"]

    padding = IntVectorProperty(
        name="Padding",
        description="Cell padding, in pixels",
        min=0,
        size=2,
        subtype='XYZ',
        default=(0, 0),
        set=set_padding,
        get=get_padding
    )
    # Spacing between tiles (top, right, bottom, left), in pixels.
    margin = IntVectorProperty(
        name="Margin",
        description="Spacing between tiles (top, right, bottom, left)",
        min=0,
        size=4,
        subtype='XYZ',
        default=(0, 0, 0, 0)
    )
    offset = IntVectorProperty(
        name="Offset",
        description="Offset of the grid",
        subtype='TRANSLATION',
        size=2,
        default=(0, 0)
    )
    rotate = FloatProperty(
        name="UV Rotation",
        description="Rotation of UV grid",
        subtype='ANGLE',
        unit='ROTATION',
        default=0.0
    )
    # Selected tile rectangle: (x, y, width, height) in tile coordinates.
    tile_selection = IntVectorProperty(
        name="Tile Selection",
        size=4,
        default=(0, 0, 1, 1)
    )
class SprytileMaterialData(bpy.types.PropertyGroup):
    """A material entry in the scene list, holding its tile grids."""

    def expanded_default(self):
        # Lazily initialize the backing value on first access.
        if 'is_expanded' not in self.keys():
            self['is_expanded'] = True

    def get_expanded(self):
        self.expanded_default()
        return self['is_expanded']

    def set_expanded(self, value):
        """Setter for is_expanded; rebuilds the grid list UI when it changes."""
        self.expanded_default()
        do_rebuild = self['is_expanded'] is not value
        self['is_expanded'] = value
        if do_rebuild:
            bpy.ops.sprytile.build_grid_list()

    # Name of the material this entry references.
    mat_id = StringProperty(
        name="Material Id",
        description="Name of the material this grid references",
        default=""
    )
    # Whether this material's grids are expanded in the list UI.
    is_expanded = BoolProperty(
        default=True,
        get=get_expanded,
        set=set_expanded
    )
    # Tile grids defined for this material.
    grids = CollectionProperty(type=SprytileMaterialGridSettings)
class SprytileGridDisplay(bpy.types.PropertyGroup):
    """One row of the grid list UI: a material header or a grid entry."""
    mat_id = StringProperty(default="")
    # -1 marks a material header row rather than a grid row.
    grid_id = IntProperty(default=-1)

    def get_mat_name(self):
        """Getter: resolve mat_id to the material's current name ('' if gone)."""
        if self.mat_id == "":
            return ""
        data_idx = bpy.data.materials.find(self.mat_id)
        if data_idx < 0:
            return ""
        return bpy.data.materials[self.mat_id].name

    def set_mat_name(self, value):
        """Setter: rename the referenced material, then revalidate all grids."""
        if self.mat_id == "":
            return
        data_idx = bpy.data.materials.find(self.mat_id)
        if data_idx < 0:
            return
        bpy.data.materials[self.mat_id].name = value
        bpy.ops.sprytile.validate_grids()

    # Editable display name, proxied to the underlying material.
    mat_name = StringProperty(
        get=get_mat_name,
        set=set_mat_name
    )
class SprytileGridList(bpy.types.PropertyGroup):
    """The grid list shown in the panel, with selection-index handling."""

    def get_idx(self):
        # Lazily initialize the selection index.
        if "idx" not in self.keys():
            self["idx"] = 0
        return self["idx"]

    def set_idx(self, value):
        """Setter for idx: skip material header rows, clamp to list bounds,
        and sync the active object's grid id to the selected entry."""
        # If the selected index is a material entry
        # Move to next entry
        list_size = len(self.display)
        while value < (list_size - 1) and self.display[value].mat_id != "":
            value += 1
        value = max(0, min(len(self.display)-1, value))
        self["idx"] = value
        if value < 0 or value >= len(self.display):
            return
        # Set the object grid id to target grid
        target_entry = self.display[value]
        if target_entry.grid_id != -1:
            bpy.context.object.sprytile_gridid = target_entry.grid_id

    display = bpy.props.CollectionProperty(type=SprytileGridDisplay)
    idx = IntProperty(
        default=0,
        get=get_idx,
        set=set_idx
    )
def setup_props():
    """Attach Sprytile's property groups to the Scene and Object types."""
    bpy.types.Scene.sprytile_data = bpy.props.PointerProperty(type=SprytileSceneSettings)
    bpy.types.Scene.sprytile_mats = bpy.props.CollectionProperty(type=SprytileMaterialData)
    bpy.types.Scene.sprytile_list = bpy.props.PointerProperty(type=SprytileGridList)
    bpy.types.Scene.sprytile_ui = bpy.props.PointerProperty(type=sprytile_gui.SprytileGuiData)
    # Per-object grid selection; -1 means no grid assigned.
    bpy.types.Object.sprytile_gridid = IntProperty(
        name="Grid ID",
        description="Grid index used for object",
        default=-1
    )
def teardown_props():
    """Remove the properties added by setup_props (called on unregister)."""
    del bpy.types.Scene.sprytile_data
    del bpy.types.Scene.sprytile_mats
    del bpy.types.Scene.sprytile_list
    del bpy.types.Scene.sprytile_ui
    del bpy.types.Object.sprytile_gridid
class SprytileAddonPreferences(bpy.types.AddonPreferences):
    """Addon preferences: preview transparency and addon-updater intervals."""
    bl_idname = __package__

    # Alpha used when drawing the build preview cursor.
    preview_transparency = bpy.props.FloatProperty(
        name="Preview Alpha",
        description="Transparency level of build preview cursor",
        default=0.8,
        min=0,
        max=1
    )

    # addon updater preferences
    auto_check_update = bpy.props.BoolProperty(
        name="Auto-check for Update",
        description="If enabled, auto-check for updates using an interval",
        default=False,
    )
    # NOTE(review): the "intrval" misspelling is presumably expected by the
    # addon-updater module — confirm before renaming these properties.
    updater_intrval_months = bpy.props.IntProperty(
        name='Months',
        description="Number of months between checking for updates",
        default=0,
        min=0
    )
    updater_intrval_days = bpy.props.IntProperty(
        name='Days',
        description="Number of days between checking for updates",
        default=7,
        min=0,
    )
    updater_intrval_hours = bpy.props.IntProperty(
        name='Hours',
        description="Number of hours between checking for updates",
        default=0,
        min=0,
        max=23
    )
    updater_intrval_minutes = bpy.props.IntProperty(
        name='Minutes',
        description="Number of minutes between checking for updates",
        default=0,
        min=0,
        max=59
    )

    def draw(self, context):
        """Draw the preferences UI, including the Tile Mode shortcut editor."""
        layout = self.layout
        layout.prop(self, "preview_transparency")
        # Show the user-editable keymap entry for the modal tool, if present.
        kc = bpy.context.window_manager.keyconfigs.user
        km = kc.keymaps['Mesh']
        kmi_idx = km.keymap_items.find('sprytile.modal_tool')
        if kmi_idx >= 0:
            layout.label(text="Tile Mode Shortcut")
            col = layout.column()
            kmi = km.keymap_items[kmi_idx]
            km = km.active()
            col.context_pointer_set("keymap", km)
            rna_keymap_ui.draw_kmi([], kc, km, kmi, col, 0)
        addon_updater_ops.update_settings_ui(self, context)
def setup_keymap():
    """Register addon keymaps: the tool hotkey and the modal key table."""
    km_array = sprytile_modal.SprytileModalTool.keymaps
    win_mgr = bpy.context.window_manager
    key_config = win_mgr.keyconfigs.addon
    # Ctrl+Shift+Space in mesh edit mode starts the Sprytile modal tool.
    keymap = key_config.keymaps.new(name='Mesh', space_type='EMPTY')
    km_array[keymap] = [
        keymap.keymap_items.new("sprytile.modal_tool", 'SPACE', 'PRESS', ctrl=True, shift=True)
    ]
    # Keys handled while the paint modal operator is running.
    keymap = key_config.keymaps.new(name="Sprytile Paint Modal Map", space_type='EMPTY', region_type='WINDOW', modal=True)
    km_items = keymap.keymap_items
    km_array[keymap] = [
        km_items.new_modal('CANCEL', 'ESC', 'PRESS'),
        km_items.new_modal('SNAP', 'S', 'ANY'),
        km_items.new_modal('FOCUS', 'W', 'PRESS'),
        km_items.new_modal('ROTATE_LEFT', 'ONE', 'PRESS'),
        km_items.new_modal('ROTATE_RIGHT', 'TWO', 'PRESS'),
        km_items.new_modal('FLIP_X', 'THREE', 'PRESS'),
        km_items.new_modal('FLIP_Y', 'FOUR', 'PRESS')
    ]
    # Human-readable labels, index-aligned with the modal entries above.
    sprytile_modal.SprytileModalTool.modal_values = [
        'Cancel',
        'Cursor Snap',
        'Cursor Focus',
        'Rotate Left',
        'Rotate Right',
        'Flip X',
        'Flip Y'
    ]
def teardown_keymap():
    """Remove every keymap item registered by Sprytile, then forget the keymaps."""
    for keymap in sprytile_modal.SprytileModalTool.keymaps:
        # Bug fix: iterate over a snapshot — removing from keymap_items while
        # iterating it directly can skip entries and leave stale items behind.
        for keymap_item in list(keymap.keymap_items):
            keymap.keymap_items.remove(keymap_item)
    sprytile_modal.SprytileModalTool.keymaps.clear()
def register():
    """Register the Sprytile addon: updater, icons, classes, props, keymaps."""
    addon_updater_ops.register(bl_info)

    # Load the panel icons shipped in the addon's icons/ folder.
    sprytile_panel.icons = bpy.utils.previews.new()
    icon_dir = os.path.join(os.path.dirname(__file__), "icons")
    icon_files = (
        ('SPRYTILE_ICON_BUILD', 'icon-build.png'),
        ('SPRYTILE_ICON_PAINT', 'icon-paint.png'),
        ('SPRYTILE_ICON_FILL', 'icon-fill.png'),
        ('SPRYTILE_ICON_NORMAL', 'icon-setnormal.png'),
    )
    # zip-style pairs replace the old parallel-tuple index loop.
    for icon_name, icon_file in icon_files:
        sprytile_panel.icons.load(icon_name, os.path.join(icon_dir, icon_file), 'IMAGE')

    bpy.utils.register_class(sprytile_panel.SprytilePanel)
    bpy.utils.register_module(__name__)
    setup_props()
    setup_keymap()
def unregister():
    """Tear down keymaps, properties, classes and icons; inverse of register()."""
    teardown_keymap()
    teardown_props()
    bpy.utils.unregister_class(sprytile_panel.SprytilePanel)
    bpy.utils.unregister_module(__name__)
    bpy.utils.previews.remove(sprytile_panel.icons)
    # Unregister self from sys.path as well
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
    sys.path.remove(cmd_subfolder)
# Allow running the addon directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
[
"[email protected]"
] | |
edcb724454b921fe8dc091a316470e10f89459df
|
6cea6b8cfeef78b433e296c38ef11f4637609f20
|
/src/collectors/ipmisensor/test/testipmisensor.py
|
66a79164c5d9b0f45141583e0676c31a4b5b8902
|
[
"MIT"
] |
permissive
|
philipcristiano/Diamond
|
b659d577ec054c06ab99308d6c2ba3163de84e1a
|
577270ea820af597458aa5d3325367608cd37845
|
refs/heads/master
| 2021-01-18T10:04:59.057835 | 2012-08-02T04:08:02 | 2012-08-02T04:08:02 | 3,140,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,392 |
py
|
#!/usr/bin/python
################################################################################
from test import *
from diamond.collector import Collector
from ipmisensor import IPMISensorCollector
################################################################################
class TestIPMISensorCollector(CollectorTestCase):
    """Tests for IPMISensorCollector using a canned `ipmitool` fixture."""

    def setUp(self):
        # 'bin': 'true' points the collector at the no-op /bin/true binary;
        # the real ipmitool output is injected via the Popen patch below.
        config = get_collector_config('IPMISensorCollector', {
            'interval': 10,
            'bin' : 'true',
            'use_sudo' : False
        })
        self.collector = IPMISensorCollector(config, None)

    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # Feed captured ipmitool output and verify every parsed metric.
        with patch('subprocess.Popen.communicate', Mock(return_value =
        ( self.getFixture('ipmitool.out').getvalue() , '')
        )):
            self.collector.collect()
        self.assertPublishedMany(publish_mock, {
            'System.Temp' : 32.000000,
            'CPU1.Vcore' : 1.080000,
            'CPU2.Vcore' : 1.000000,
            'CPU1.VTT' : 1.120000,
            'CPU2.VTT' : 1.176000,
            'CPU1.DIMM' : 1.512000,
            'CPU2.DIMM' : 1.512000,
            '+1_5V' : 1.512000,
            '+1_8V' : 1.824000,
            '+5V' : 4.992000,
            '+12V' : 12.031000,
            '+1_1V' : 1.112000,
            '+3_3V' : 3.288000,
            '+3_3VSB' : 3.240000,
            'VBAT' : 3.240000,
            'Fan1' : 4185.000000,
            'Fan2' : 4185.000000,
            'Fan3' : 4185.000000,
            'Fan7' : 3915.000000,
            'Fan8' : 3915.000000,
            'Intrusion' : 0.000000,
            'PS.Status' : 0.000000,
            'P1-DIMM1A.Temp' : 41.000000,
            'P1-DIMM1B.Temp' : 39.000000,
            'P1-DIMM2A.Temp' : 38.000000,
            'P1-DIMM2B.Temp' : 40.000000,
            'P1-DIMM3A.Temp' : 37.000000,
            'P1-DIMM3B.Temp' : 38.000000,
            'P2-DIMM1A.Temp' : 39.000000,
            'P2-DIMM1B.Temp' : 38.000000,
            'P2-DIMM2A.Temp' : 39.000000,
            'P2-DIMM2B.Temp' : 39.000000,
            'P2-DIMM3A.Temp' : 39.000000,
            'P2-DIMM3B.Temp' : 40.000000,
        })
################################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
[
"[email protected]"
] | |
7975a24774fc1e6eb1d6b45c1bbbc4badcec3213
|
8d6bde798a6103fe43c4ba4e231d4f90411d68f0
|
/day-34/quizzler-app/ui.py
|
bd77df45aa224b737faf1a7fcfa8c5428ac36f9a
|
[] |
no_license
|
lhserafim/python-100-days-of-code-monorepo
|
66dd3545e264224af84a1d3b7694681c44e391f1
|
de03d0979bb4c1c770cd79b5f01dda72ae2bb375
|
refs/heads/master
| 2023-05-30T10:28:06.206198 | 2021-06-14T14:56:33 | 2021-06-14T14:56:33 | 363,091,910 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,570 |
py
|
from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
class QuizInterface:
    """Tkinter UI for the quiz: question canvas, score label, True/False buttons."""

    # TIP: the parameter carries a type annotation; this is optional but helps
    # autocompletion (similar to TypeScript).
    def __init__(self, quiz_brain: QuizBrain):
        self.quiz = quiz_brain
        self.window = Tk()
        self.window.title("Quizzler")
        self.window.config(padx=20, pady=20, bg=THEME_COLOR)
        self.score_label = Label(text="Score: 0", fg="white", bg=THEME_COLOR)
        self.score_label.grid(column=1, row=0)
        self.canvas = Canvas(width=300, height=250, bg="white", highlightthickness=0)
        self.question_text = self.canvas.create_text(150, 125,
        text="Amazon acquired Twitch in August 2014 for",
        fill=THEME_COLOR,
        width=280,  # width wraps the text
        font=("Arial", 20, "italic"))
        self.canvas.grid(column=0, row=1, columnspan=2, pady=50)
        self.img_true = PhotoImage(file="images/true.png")
        self.img_false = PhotoImage(file="images/false.png")
        self.button_true = Button(image=self.img_true, command=self.true_pressed)
        self.button_true.grid(column=0, row=2)
        self.button_false = Button(image=self.img_false, command=self.false_pressed)
        self.button_false.grid(column=1, row=2)
        self.get_next_question()
        # mainloop() blocks until the window is closed.
        self.window.mainloop()

    def get_next_question(self):
        """Show the next question, or disable the buttons when the quiz ends."""
        self.canvas.config(bg="white")
        if self.quiz.still_has_questions():
            self.score_label.config(text=f"Score: {self.quiz.score}")
            q_text = self.quiz.next_question()
            self.canvas.itemconfig(self.question_text, text=q_text)
        else:
            self.canvas.itemconfig(self.question_text, text="End of the Game")
            self.button_true.config(state="disabled")
            self.button_false.config(state="disabled")

    def true_pressed(self):
        self.give_feedback(self.quiz.check_answer("True"))

    def false_pressed(self):
        self.give_feedback(self.quiz.check_answer("False"))

    def give_feedback(self, is_right):
        """Flash the canvas green/red, then advance after one second."""
        if is_right:
            self.canvas.config(bg="green")
        else:
            self.canvas.config(bg="red")
        # With tkinter we cannot use a blocking timer because of mainloop();
        # schedule the next question on the event loop instead.
        self.window.after(1000, self.get_next_question)
|
[
"[email protected]"
] | |
5426dbbd4f55d07a5a4548122f515314529345e8
|
9a8fd017aa2ca45d423d09434a6574eb7d1fe734
|
/Natural_Language_Processing_544/Finite State Machine/french_count.py
|
cbb8eed0b6f051405f669c2ba1777bba8ffb7a9f
|
[] |
no_license
|
mzy-ray/AI_Programming
|
d29395f33c86f28afb366f3bb72b1966ef3d3a7f
|
9069e2e635815142f0379424f492e52cc370ab7e
|
refs/heads/master
| 2020-03-28T04:44:14.845900 | 2018-10-02T06:53:20 | 2018-10-02T06:53:20 | 147,734,010 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,553 |
py
|
import sys
from fst import FST
from fsmutils import composewords
kFRENCH_TRANS = {0: "zero", 1: "un", 2: "deux", 3: "trois", 4:
"quatre", 5: "cinq", 6: "six", 7: "sept", 8: "huit",
9: "neuf", 10: "dix", 11: "onze", 12: "douze", 13:
"treize", 14: "quatorze", 15: "quinze", 16: "seize",
20: "vingt", 30: "trente", 40: "quarante", 50:
"cinquante", 60: "soixante", 100: "cent"}
kFRENCH_AND = 'et'
def prepare_input(integer):
    """Return *integer* (0-999) as a list of exactly three digit characters."""
    assert isinstance(integer, int) and 0 <= integer < 1000, \
        "Integer out of bounds"
    # zfill left-pads with zeros to a fixed width of three digits.
    return list(str(integer).zfill(3))
def french_count():
    """Build an FST that transduces a three-digit string into French words.

    States follow the digit positions: 'hundreds' -> ('tens'|'tens-only') ->
    per-decade states -> 'end'. French irregularities (70s = soixante-dix,
    80s = quatre-vingts, 90s = quatre-vingt-dix) get dedicated states.
    NOTE: this module is Python 2 (xrange / print statements elsewhere).
    """
    f = FST('french')
    f.add_state('hundreds')
    f.add_state('tens')
    f.add_state('units')
    f.add_state('tens-only')
    f.add_state('units-only')
    f.add_state('10s')
    f.add_state('20s-60s')
    f.add_state('70s')
    f.add_state('80s')
    f.add_state('90s')
    f.add_state('end')
    f.initial_state = 'hundreds'
    f.set_final('end')
    # Hundreds digit: "cent" for 1, "<digit> cent" for 2-9, silent for 0.
    f.add_arc('hundreds', 'tens', [str(1)], [kFRENCH_TRANS[100]])
    f.add_arc('hundreds', 'tens-only', [str(0)], [])
    for ii in xrange(2, 10):
        f.add_arc('hundreds', 'tens', [str(ii)], [kFRENCH_TRANS[ii] + " " + kFRENCH_TRANS[100]])
    # Tens digit: route to the decade state; 'tens-only' mirrors 'tens' for
    # numbers without a hundreds word.
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('tens', 'units', [str(0)], [])
            f.add_arc('tens-only', 'units-only', [str(0)], [])
        elif ii == 1:
            f.add_arc('tens', '10s', [str(ii)], [])
            f.add_arc('tens-only', '10s', [str(ii)], [])
        elif ii <= 6:
            f.add_arc('tens', '20s-60s', [str(ii)], [kFRENCH_TRANS[ii * 10]])
            f.add_arc('tens-only', '20s-60s', [str(ii)], [kFRENCH_TRANS[ii * 10]])
        elif ii == 7:
            # 70-79 are built on "soixante" (60).
            f.add_arc('tens', '70s', [str(ii)], [kFRENCH_TRANS[60]])
            f.add_arc('tens-only', '70s', [str(ii)], [kFRENCH_TRANS[60]])
        elif ii == 8:
            # 80-89 are built on "quatre vingt" (4 x 20).
            f.add_arc('tens', '80s', [str(ii)], [kFRENCH_TRANS[4] + " " + kFRENCH_TRANS[20]])
            f.add_arc('tens-only', '80s', [str(ii)], [kFRENCH_TRANS[4] + " " + kFRENCH_TRANS[20]])
        else:
            # 90-99 are also built on "quatre vingt".
            f.add_arc('tens', '90s', [str(ii)], [kFRENCH_TRANS[4] + " " + kFRENCH_TRANS[20]])
            f.add_arc('tens-only', '90s', [str(ii)], [kFRENCH_TRANS[4] + " " + kFRENCH_TRANS[20]])
    # Units after a leading 1 in the tens place: 10-16 are irregular words,
    # 17-19 are "dix sept/huit/neuf".
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('10s', 'end', [str(ii)], [kFRENCH_TRANS[10]])
        elif ii <= 6:
            f.add_arc('10s', 'end', [str(ii)], [kFRENCH_TRANS[ii + 10]])
        else:
            f.add_arc('10s', 'end', [str(ii)], [kFRENCH_TRANS[10] + " " + kFRENCH_TRANS[ii]])
    # Units after 20-60: "et un" for 1, plain digit otherwise.
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('20s-60s', 'end', [str(ii)], [])
        elif ii == 1:
            f.add_arc('20s-60s', 'end', [str(ii)], [kFRENCH_AND + " " + kFRENCH_TRANS[1]])
        else:
            f.add_arc('20s-60s', 'end', [str(ii)], [kFRENCH_TRANS[ii]])
    # Units after 70: 70="soixante dix", 71="soixante et onze", 72-76 use
    # the teen words, 77-79 are "dix sept/huit/neuf".
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('70s', 'end', [str(ii)], [kFRENCH_TRANS[10]])
        elif ii == 1:
            f.add_arc('70s', 'end', [str(ii)], [kFRENCH_AND + " " + kFRENCH_TRANS[11]])
        elif ii <= 6:
            f.add_arc('70s', 'end', [str(ii)], [kFRENCH_TRANS[ii + 10]])
        else:
            f.add_arc('70s', 'end', [str(ii)], [kFRENCH_TRANS[10] + " " + kFRENCH_TRANS[ii]])
    # Units after 80: plain digit (no "et").
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('80s', 'end', [str(ii)], [])
        else:
            f.add_arc('80s', 'end', [str(ii)], [kFRENCH_TRANS[ii]])
    # Units after 90: mirrors the 70s pattern but without "et" at 91.
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('90s', 'end', [str(ii)], [kFRENCH_TRANS[10]])
        elif ii <= 6:
            f.add_arc('90s', 'end', [str(ii)], [kFRENCH_TRANS[ii + 10]])
        else:
            f.add_arc('90s', 'end', [str(ii)], [kFRENCH_TRANS[10] + " " + kFRENCH_TRANS[ii]])
    # Plain units: silent 0 after a tens word, spoken otherwise
    # ('units-only' covers bare 0-9, where "zero" must be emitted).
    for ii in xrange(0, 10):
        if ii == 0:
            f.add_arc('units', 'end', [str(ii)], [])
        else:
            f.add_arc('units', 'end', [str(ii)], [kFRENCH_TRANS[ii]])
    for ii in xrange(0, 10):
        f.add_arc('units-only', 'end', [str(ii)], [kFRENCH_TRANS[ii]])
    return f
# Python 2 entry point: read an integer, print its French spelling.
if __name__ == '__main__':
    string_input = raw_input()
    user_input = int(string_input)
    f = french_count()
    if string_input:
        print user_input, '-->',
        print " ".join(f.transduce(prepare_input(user_input)))
|
[
"[email protected]"
] | |
195a6562afd3acd4081df6c6e6f645bbeae2f17a
|
2171efa295b695a485d0125ebc91158122f46493
|
/django_intro/venv/bin/easy_install-3.8
|
a35674f13453445504009d021be0014136107cc1
|
[] |
no_license
|
bennami/django-intro
|
a4b0fa7488a7abeb687e178f56b8f4543a8c8f73
|
97deb170908823717dfe63388288e8836dd88c98
|
refs/heads/master
| 2022-11-16T12:44:37.625401 | 2020-07-07T12:33:57 | 2020-07-07T12:33:57 | 277,811,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
8
|
#!/home/imane/PycharmProjects/django_intro/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so argv[0] matches the
    # command name, then delegate to easy_install's main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | |
1781619b0a8abd1d3cb474a67ee1c1d84f0bd6c4
|
080688b23a9114a41594a4483b07a8896f106102
|
/app.py
|
3373c706cfc92661c50d138bb12a87ac3122f194
|
[
"Apache-2.0"
] |
permissive
|
ashishqm/sept
|
3160e9498190a8b60d93fc604394ab4d9c9e67ee
|
0772c20417bb3d1331f3960016e4e6d861acf8ec
|
refs/heads/master
| 2020-03-27T21:23:48.242448 | 2018-09-03T02:16:25 | 2018-09-03T02:16:25 | 147,140,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,418 |
py
|
#!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/static_reply', methods=['POST'])
def static_reply():
    """Webhook endpoint: parse the incoming JSON request, build the fulfillment
    payload, and return it with a JSON content type."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    res = makeWebhookResult(req)
    res = json.dumps(res, indent=4)
    print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def makeWebhookResult(req):
    """Build the fulfillment response for an 'interest' intent.

    Expects a Dialogflow-style request dict with result.action and
    result.parameters["bank-name"]. Returns {} for any other action.
    """
    if req.get("result").get("action") != "interest":
        return {}
    result = req.get("result")
    parameters = result.get("parameters")
    name = parameters.get("bank-name")
    # Static lookup table of fixed-deposit interest rates per bank.
    bank = {'Federal Bank':'6.70%','Andhra Bank':'6.85%', 'Allahabad Bank':'6.75%', 'Axis Bank':'6.5%', 'Bandhan bank':'7.15%', 'Bank of Maharashtra':'6.50%', 'Bank of Baroda':'6.90%', 'Bank of India':'6.60%', 'Bharatiya Mahila Bank':'7.00%', 'Canara Bank':'6.50%', 'Central Bank of India':'6.60%', 'City Union Bank':'7.10%', 'Corporation Bank':'6.75%', 'Citi Bank':'5.25%', 'DBS Bank':'6.30%', 'Dena Bank':'6.80%', 'Deutsche Bank':'6.00%', 'Dhanalakshmi Bank':'6.60%', 'DHFL Bank':'7.75%', 'HDFC Bank':'5.75% to 6.75%', 'Post Office':'7.10%', 'Indian Overseas Bank':'6.75%', 'ICICI Bank':'6.25% to 6.9%', 'IDBI Bank':'6.65%', 'Indian Bank':'4.75%', 'Indusind Bank':'6.85%', 'J&K Bank':'6.75%', 'Karnataka Bank':'6.50 to 6.90%', 'Karur Vysya Bank':'6.75%', 'Kotak Mahindra Bank':'6.6%', 'Lakshmi Vilas Bank':'7.00%', 'Nainital Bank':'7.90%', 'Oriental Bank of Commerce':'6.85%', 'Punjab National Bank':'6.75%', 'Punjab and Sind Bank':'6.4% to 6.80%', 'Saraswat bank':'6.8%', 'South Indian Bank':'6% to 6.75%', 'State Bank of India':'6.75%', 'Syndicate Bank':'6.50%', 'Tamilnad Mercantile Bank Ltd':'6.90%', 'UCO bank':'6.75%', 'United Bank Of India':'6%', 'Vijaya Bank':'6.50%', 'Yes Bank':'7.10%'}
    # Bug fix: this previously read `cost[name]`, an undefined variable, which
    # raised NameError on every request. The table is named `bank`.
    speech = "The interest rate of " + name + " is " + str(bank[name])
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        #"data": {},
        #"contextOut": [],
        "source": "BankInterestRates"
    }
if __name__ == '__main__':
    # Respect a platform-provided PORT (e.g. Heroku); default to 5000.
    port = int(os.getenv('PORT', 5000))
    print ("Starting app on port %d" %(port))
    app.run(debug=True, port=port, host='0.0.0.0')
|
[
"[email protected]"
] | |
a1e1f711eb8af4ca585330198a42446ff9f904ef
|
8afeb5e41d1efbce2b8feb8f557f263120db91d3
|
/api/search_rest.py
|
736b8df13c0c8c9d8ca68368185325e599ceb723
|
[] |
no_license
|
panos1995/msccs-t23
|
7bbe9ed4c9172f3f72c2ce47e14db639a95b054b
|
228aaa71640f186ceb8e85c295c13d9df36611a5
|
refs/heads/master
| 2020-04-03T15:23:35.299920 | 2018-11-05T15:44:07 | 2018-11-05T15:44:07 | 155,361,035 | 0 | 0 | null | 2018-10-30T09:48:21 | 2018-10-30T09:48:21 | null |
UTF-8
|
Python
| false | false | 1,162 |
py
|
import datetime
import time
import tweepy
import pymongo
import sys
import json
from bson import json_util, ObjectId
def query_search(query):
    """Search recent English tweets matching *query* for up to ~2 seconds.

    Returns a tuple (result_list, id_list) where result_list holds dicts of
    {"Postid", "Text"} and id_list holds the matching tweet id strings.
    """
    # SECURITY: credentials are hard-coded and committed to source control;
    # they should be loaded from environment variables or a config file and
    # the exposed keys rotated.
    access_token = "IHpSjYd5AuCdDRZTaGiMOwHUJ"
    access_token_secret = "FNUvxez9N9vBzY72HiZcukHQqVqO0ZiV498qyaYDxaV5nKFSgu"
    auth = tweepy.AppAuthHandler(access_token, access_token_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    if (not api):
        print("Can't Authenticate")
        sys.exit(-1)
    time_started = time.time()
    result_list = []
    id_list = []
    # You can change the count and the time limit of the search; a Stream
    # would be needed for a truly real-time feed.
    for tweet in tweepy.Cursor(api.search, q=query, lang="en", count=10).items():
        if time.time() > time_started + 2:
            return result_list, id_list
        result_list.append({"Postid": tweet._json["id_str"], "Text": tweet._json["text"]})
        id_list.append(tweet._json["id_str"])
    # Bug fix: previously the function fell off the end (returning None) when
    # the cursor was exhausted before the time limit; callers unpacking the
    # tuple would crash.
    return result_list, id_list
|
[
"[email protected]"
] | |
d1c7e21291310553bb972a82567ec40099308797
|
f777e7b52b4fe2839b1c487e1f1d901e67906705
|
/galeria/admin.py
|
2d9d0bb64375ca9d372a9be1e472be83545618e2
|
[] |
no_license
|
dxviidmg/LosArquetiposDeJung
|
4e438e08f1664532cf2dfb2c5d1dcda733e84ee6
|
ce8caf64464be20b64c9c7e5924028560552557b
|
refs/heads/master
| 2020-07-15T14:03:37.851172 | 2017-03-08T03:13:44 | 2017-03-08T03:13:44 | 66,895,997 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 81 |
py
|
from django.contrib import admin
from .models import *
# Expose the Foto model in the Django admin.
admin.site.register(Foto)
|
[
"david@david-Lenovo-U310"
] |
david@david-Lenovo-U310
|
3f259779a113f38727e5e331c041593a3830edfe
|
caaf56727714f8c03be38710bc7d0434c3ec5b11
|
/tests/components/telegram/test_notify.py
|
7488db49d9ea58db8f78e93cab0842fa686ee119
|
[
"Apache-2.0"
] |
permissive
|
tchellomello/home-assistant
|
c8db86880619d7467901fd145f27e0f2f1a79acc
|
ed4ab403deaed9e8c95e0db728477fcb012bf4fa
|
refs/heads/dev
| 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 |
Apache-2.0
| 2023-01-13T06:02:03 | 2016-07-06T04:13:49 |
Python
|
UTF-8
|
Python
| false | false | 1,598 |
py
|
"""The tests for the telegram.notify platform."""
from os import path
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.telegram import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_reload_notify(hass):
    """Verify we can reload the notify service."""
    # Patch the telegram_bot integration's setup so the notify platform can
    # load without a real bot token.
    with patch("homeassistant.components.telegram_bot.async_setup", return_value=True):
        assert await async_setup_component(
            hass,
            notify.DOMAIN,
            {
                notify.DOMAIN: [
                    {
                        "name": DOMAIN,
                        "platform": DOMAIN,
                        "chat_id": 1,
                    },
                ]
            },
        )
        await hass.async_block_till_done()

    assert hass.services.has_service(notify.DOMAIN, DOMAIN)

    # Swap in a fixture config that renames the service, then reload.
    yaml_path = path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "telegram/configuration.yaml",
    )
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_RELOAD,
            {},
            blocking=True,
        )
        await hass.async_block_till_done()

    # After reload the old service name is gone and the renamed one exists.
    assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
    assert hass.services.has_service(notify.DOMAIN, "telegram_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
[
"[email protected]"
] | |
02d83f4fd8430f4c51e99d05bd82592703152010
|
6e870a237de0a55f5b04f0ef4f2689e7b6084287
|
/administrator/forms.py
|
63c3f42b05a45d84ff4d0619e68354b6d39f9110
|
[
"MIT"
] |
permissive
|
Arose114/e-learning-with-django
|
0acaba610632a31688249ae2dbdaab629444ae07
|
bb463204e1d3c4a7bca5b7d4836adb06a85403fb
|
refs/heads/main
| 2023-06-27T17:24:32.188585 | 2021-07-20T08:36:02 | 2021-07-20T08:36:02 | 375,014,280 | 0 | 0 |
MIT
| 2021-06-08T13:13:13 | 2021-06-08T13:13:13 | null |
UTF-8
|
Python
| false | false | 1,829 |
py
|
from account.forms import FormSettings
from student.models import *
from staff.models import Staff
from django import forms
from .models import Settings
class AddCourseForm(FormSettings):
    """Model form exposing every field of Course."""
    class Meta:
        model = Course
        fields = "__all__"
class AddDepartmentForm(FormSettings):
    """Model form exposing every field of Department."""
    class Meta:
        model = Department
        fields = "__all__"
class AddSessionForm(FormSettings):
    """Model form exposing every field of Session."""
    class Meta:
        model = Session
        fields = "__all__"
class AddStudentForm(FormSettings):
    """Student creation form; restricts the department choices to
    non-general departments and renders dob with a date picker."""
    department_list = Department.objects.filter(is_general=False)
    department = forms.ModelChoiceField(
        label="Choose Department", queryset=department_list, required=True)

    class Meta:
        model = Student
        exclude = ['admin']
        # https://stackoverflow.com/questions/22846048/django-form-as-p-datefield-not-showing-input-type-as-date
        widgets = {
            'dob': forms.DateInput(attrs={'type': 'date'})
        }
class AddStaffForm(FormSettings):
    """Staff creation form; restricts the department choices to
    non-general departments."""
    department_list = Department.objects.filter(is_general=False)
    department = forms.ModelChoiceField(
        label="Choose Department", queryset=department_list, required=True)

    class Meta:
        model = Staff
        exclude = ['admin']
class SettingsForm(FormSettings):
    """Form for the site-wide Settings singleton; refuses to create a second
    Settings row."""
    def clean(self):
        # Then call the clean() method of the super class
        cleaned_data = super(SettingsForm, self).clean()
        # New instance (no pk) while a Settings row already exists -> reject.
        if not self.instance.pk and Settings.objects.exists():
            # if not self.pk and Settings.objects.exists():
            raise forms.ValidationError("Update Site Settings Instead")
        # ... do some cross-fields validation for the subclass
        # Finally, return the cleaned_data
        return cleaned_data

    class Meta:
        model = Settings
        fields = "__all__"
|
[
"[email protected]"
] | |
c4b2fcaa8f6499cdca69575ead3662b305b1ccd5
|
4ed33dba672aa6aaef42698ef8437c872b078d37
|
/backend/home/migrations/0001_load_initial_data.py
|
e78b5b69ad3761f691200103468335142fc62434
|
[] |
no_license
|
crowdbotics-apps/flat-heart-27928
|
aecb93c66e39e94e01cef7fe9506effe994cde18
|
ce209de8910b1e9f006814b58a05aed1eeada32d
|
refs/heads/master
| 2023-05-26T14:51:41.045373 | 2021-06-11T20:01:34 | 2021-06-11T20:01:34 | 376,130,678 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    """Data migration: point django.contrib.sites entry #1 at the app domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "flat-heart-27928.botics.co"
    site_params = {
        "name": "Flat Heart",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    # id=1 is Django's default SITE_ID; update it in place if it exists.
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Runs create_site after the sites framework's unique-domain migration."""
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"[email protected]"
] | |
ce6667dc95fdefc8be193b41ae44902d4600a89a
|
7a9c01f7029e74c697100e244d26c72d0e283d47
|
/models/amenity.py
|
9adbf8d9f5418e8b43eeb584cccd1acbde12617c
|
[] |
no_license
|
toyugo/holbertonschool-AirBnB_clone
|
63321296ecee98b1a0cda39c7b155cc2ea5ececb
|
5edaeafb6516130f2027b505fe8b168f6f9de174
|
refs/heads/main
| 2023-03-21T06:32:18.728878 | 2021-03-04T13:08:56 | 2021-03-04T13:08:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
#!/usr/bin/python3
""" Module Amenity """
from models.base_model import BaseModel
class Amenity(BaseModel):
    """Amenity model based on BaseModel."""
    # Amenity name; empty string by default.
    name = ""
|
[
"[email protected]"
] | |
6744d1aea0d953c74743d2d88e6ac66ed6aa086c
|
bfdaaee87c9383d3192dc308ba3e7ec1365dbd25
|
/mobiliseclient.py
|
254be5eaed56419b5fc044ab7ac20966d5d81421
|
[
"Apache-2.0"
] |
permissive
|
benwatson528/data-warehouse-client
|
622f2de12ad78236b66d7c21a08dd0d59dd8b6d0
|
927354e1e33dee80992b3dee7b555dd23d1bddec
|
refs/heads/master
| 2023-01-04T23:58:13.641844 | 2020-10-29T15:58:43 | 2020-10-29T15:58:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,533 |
py
|
#
# All client code in a single file for event data access
#
import json
import requests
#
# Subset of the e-SC datatypes necessary to fetch event data
#
class EscEvent:
    """A single event record mirroring the e-SC JSON wire format."""

    def __init__(self):
        # Defaults for an empty event.
        self.eventType = ""
        self.timestamp = 0
        self.metadata = {}
        self.data = {}

    def parseDict(self, dict):
        """Populate this event from a decoded JSON dictionary."""
        for field in ('eventType', 'timestamp', 'data', 'metadata'):
            setattr(self, field, dict[field])
# Study object
class EscStudyObject:
    """Base class for objects attached to a study (e.g. people).

    Mirrors the JSON representation used by the e-SC catalog API;
    parseDict/toDict convert between the wire format and attributes.
    """
    def __init__(self):
        self.id = 0
        self.studyId = 0
        self.folderId = ''
        self.externalId = ''
        self.name = ''
        self.additionalProperties = {}

    def parseDict(self, dict):
        """Populate this object from a decoded JSON dictionary."""
        self.id = dict['id']
        self.studyId = dict['studyId']
        self.folderId = dict['folderId']
        self.externalId = dict['externalId']
        self.name = dict['name']
        self.additionalProperties = dict['additionalProperties']

    def toDict(self):
        """Return the JSON-ready dictionary form of this object."""
        return {
            'id': self.id,
            'studyId': self.studyId,
            'folderId': self.folderId,
            # Fixed: the original read self.extrnalId (typo), so toDict()
            # always raised AttributeError.
            'externalId': self.externalId,
            'name': self.name,
            'additionalProperties': self.additionalProperties
        }
# Person in a study
class EscPerson(EscStudyObject):
    """A person enrolled in a study; tagged with its object type."""

    def __init__(self):
        super().__init__()
        self.objectType = "EscPerson"
# JWT Token object
class EscJWT:
    """A JWT access token, its refresh token, server id and expiry time."""

    def __init__(self):
        self.token = ''
        self.id = ''
        self.expiryTimestamp = 0
        self.refreshToken = ''

    def parseDict(self, dict):
        """Populate the token fields from a decoded JSON dictionary."""
        for field in ('expiryTimestamp', 'token', 'id', 'refreshToken'):
            setattr(self, field, dict[field])

    def toDict(self):
        """Build the JSON-ready dictionary form of this token."""
        payload = {}
        payload["expiryTimestamp"] = self.expiryTimestamp
        payload["token"] = self.token
        payload["id"] = self.id
        payload["refreshToken"] = self.refreshToken
        return payload
# Project / Study object
class EscProject:
    """A project (study) record mirroring the e-SC JSON wire format."""

    def __init__(self):
        self.id = ''
        self.name = ''
        self.description = ''
        self.workflowFolderId = ''
        self.dataFolderId = ''
        self.creatorId = ''
        self.externalId = ''
        self.projectType = 'HEIRARCHICAL'

    def parseDict(self, dict):
        """Copy every known field out of a decoded JSON dictionary."""
        for key in ('id', 'name', 'description', 'workflowFolderId',
                    'dataFolderId', 'creatorId', 'externalId', 'projectType'):
            setattr(self, key, dict[key])

    def toDict(self):
        """Build the JSON-ready dictionary form of this project."""
        return {key: getattr(self, key)
                for key in ('id', 'name', 'description', 'workflowFolderId',
                            'dataFolderId', 'creatorId', 'externalId',
                            'projectType')}
# Base class for ServerObjects
class EscObject:
    """Base class for server-side storage objects (folders, documents)."""
    def __init__(self):
        self.id = ''
        self.name = ''
        self.description = ''
        self.creatorId = ''
        self.projectId = ''
        self.containerId = ''
        self.internalClassName = ''
        self.creationTime = 0

    def parseDict(self, dict):
        """Populate this object from a decoded JSON dictionary."""
        self.id = dict['id']
        self.name = dict['name']
        self.description = dict['description']
        self.creatorId = dict['creatorId']
        self.projectId = dict['projectId']
        self.containerId = dict['containerId']
        self.internalClassName = dict['internalClassName']
        self.creationTime = dict['creationTime']

    def toDict(self, dict=None):
        """Return the JSON-ready dictionary form of this object.

        The ``dict`` parameter was unused in the original signature and is
        kept (ignored) only for backward compatibility with callers that
        still pass it.
        """
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "creatorId": self.creatorId,
            "projectId": self.projectId,
            # Fixed: the original key was "containerId:" (stray colon),
            # which corrupted the serialized form.
            "containerId": self.containerId,
            "internalClassName": self.internalClassName,
            "creationTime": self.creationTime
        }
# Folder object
class EscFolder(EscObject):
    """A storage folder: carries only the common EscObject fields."""

    def __init__(self):
        super().__init__()
# Document object
class EscDocument(EscObject):
    """A stored document: EscObject fields plus version/transfer metadata."""
    def __init__(self):
        EscObject.__init__(self)
        self.currentVersionSize = 0
        self.currentVersionNumber = 0
        self.currentVersionHash = ''
        self.downloadPath = ''
        self.uploadPath = ''

    def parseDict(self, dict):
        """Populate this document from a decoded JSON dictionary.

        Fixed: the original called ``self._EscObject__parseDict`` — a name
        that never existed (``parseDict`` is not name-mangled) — so this
        method always raised AttributeError.
        """
        EscObject.parseDict(self, dict)
        self.currentVersionSize = dict['currentVersionSize']
        self.currentVersionNumber = dict['currentVersionNumber']
        self.currentVersionHash = dict['currentVersionHash']
        self.downloadPath = dict['downloadPath']
        self.uploadPath = dict['uploadPath']

    def toDict(self):
        """Return the JSON-ready dictionary form of this document.

        Fixed: the original called a misspelled ``self.__EscObject_toDict``
        and also never returned the dictionary it built. The base fields are
        inlined here so this method does not depend on the parent's toDict
        signature.
        """
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "creatorId": self.creatorId,
            "projectId": self.projectId,
            "containerId": self.containerId,
            "internalClassName": self.internalClassName,
            "creationTime": self.creationTime,
            "currentVersionSize": self.currentVersionSize,
            "currentVersionNumber": self.currentVersionNumber,
            "currentVersionHash": self.currentVersionHash,
            "downloadPath": self.downloadPath,
            "uploadPath": self.uploadPath,
        }
#
# Combined client object
#
class EscClient:
    """Thin HTTP client for the e-SC REST API (tokens, storage, catalog).

    Call issueToken() first and assign the resulting JWT string to ``jwt``;
    every authenticated request sends it as a Bearer header.
    """
    def __init__(self, hostname, port, ssl):
        # jwt holds the raw token string; empty until the caller sets it.
        self.jwt = ""
        self.hostname = hostname
        self.port = port
        self.ssl = ssl
    # Create a full URL from a path, honouring the ssl flag.
    def __create_url(self, url):
        if self.ssl==True:
            return 'https://' + self.hostname + ':' + str(self.port) + url
        else:
            return 'http://' + self.hostname + ':' + str(self.port) + url
    # Create a form body that can be POSTed using a dict of name:value pairs.
    # NOTE(review): values are concatenated without URL-encoding — confirm
    # callers never pass characters like '&' or '=' in values.
    def __create_form_body(self, body_dict):
        count = 0;
        body = '';
        for key in body_dict:
            if count > 0:
                body = body + '&'
            body = body + key + '=' + body_dict[key]
            count=count+1;
        return body
    # Create the request headers (Bearer auth with the stored JWT).
    def __create_headers(self):
        return {
            'Authorization' : 'Bearer ' + self.jwt
        }
    # Send a form using the POST method and decode the JSON response.
    # send_auth selects whether the Bearer header is attached.
    def __post_form_retrieve_json(self, url, form_data, send_auth):
        if send_auth==True:
            r = requests.post(self.__create_url(url), form_data, headers=self.__create_headers())
            return r.json()
        else:
            r = requests.post(self.__create_url(url), form_data)
            return r.json()
    # Delete a resource (response body is ignored).
    def __delete_resource(self, url):
        requests.delete(self.__create_url(url), headers=self.__create_headers())
    # Post plain text and get the response body back as text.
    def __post_text_retrieve_text(self, url, text_data):
        headers = self.__create_headers()
        headers['content-type'] = 'text/plain'
        r = requests.post(self.__create_url(url), data=text_data.encode('utf-8'), headers=headers)
        return r.text
    # Post a JSON payload and decode the JSON response.
    def __post_json_retrieve_json(self, url, json_data):
        headers = self.__create_headers()
        headers['content-type'] = 'application/json'
        r = requests.post(self.__create_url(url), data=json_data, headers=headers)
        return r.json()
    # GET a resource and decode the JSON response.
    def __retrieve_json(self, url):
        headers = self.__create_headers()
        r = requests.get(self.__create_url(url), headers=headers)
        return r.json()
    # GET a resource and return the response body as text.
    def __retrieve_text(self, url):
        headers = self.__create_headers()
        r = requests.get(self.__create_url(url), headers=headers)
        return r.text
    # =======================================================================================
    #
    # Implementation of standard e-SC client methods.
    # (The self._EscClient__* calls below spell out the name-mangled form of
    # the private helpers above; they resolve to the same methods.)
    #
    # =======================================================================================
    #
    # Issue an access token using a username and password
    #
    def issueToken(self, username, password, label):
        auth_details = {
            "username": username,
            "password": password,
            "label": label
        }
        result = self._EscClient__post_form_retrieve_json("/api/public/rest/v1/tokens/issue", auth_details, False)
        jwt = EscJWT()
        jwt.parseDict(result)
        return jwt
    #
    # Release an access token, which prevents its subsequent use.
    # NOTE(review): `id` is used directly as the request path — confirm
    # callers pass a full resource path (e.g. EscJWT.id), not a bare id.
    #
    def releaseToken(self, id):
        self._EscClient__delete_resource(id)
    #
    # Check whether a JWT is still valid
    #
    def validateToken(self, token):
        return self._EscClient__post_text_retrieve_text("/api/public/rest/v1/tokens/validate", token)
    #
    # Returns a list of the projects that the authenticated user is permitted to view.
    # Results are returned as a dict keyed by position index.
    #
    def listProjects(self):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/storage/projects");
        results = {}
        for i in range(0, len(jsonData)):
            project = EscProject();
            project.parseDict(jsonData[i])
            results[i] = project
        return results
    #
    # Return a folder object given its database id
    #
    def getFolder(self, id):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/storage/folders/" + id)
        folder = EscFolder();
        folder.parseDict(jsonData)
        return folder
    #
    # Access a person using their externally visible ID. i.e. the PatientID
    #
    def getPersonByExternalId(self, externalId):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/catalog/peoplebyexternalid/" + externalId)
        person = EscPerson()
        person.parseDict(jsonData)
        return person
    #
    # Access a study given its externally visible ID
    #
    def getProjectByStudyCode(self, studyCode):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/catalog/studiesbyexternalcode/" + studyCode)
        project = EscProject()
        project.parseDict(jsonData)
        return project
    #
    # Return the number of event objects contained in a study
    #
    def getEventCount(self, studyCode):
        return int(self._EscClient__retrieve_text("/api/public/rest/v1/catalog/studiesbyid/" + studyCode + "/allevents/count"))
    #
    # Get a page of events from a study; each list entry arrives as a JSON
    # string and is decoded before being parsed into an EscEvent.
    #
    def queryEventsFromStudy(self, studyCode, startIndex, pageSize):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/catalog/studiesbyid/" + studyCode + "/allevents/" + str(startIndex) + "/" + str(pageSize))
        results = {}
        for i in range(0, len(jsonData)):
            evt = EscEvent()
            evt.parseDict(json.loads(jsonData[i]))
            results[i] = evt
        return results
    #
    # Get the number of people in a study
    #
    def getNumberOfPeopleInStudy(self, projectId):
        return int(self._EscClient__retrieve_text("/api/public/rest/v1/catalog/studiesbyid/" + str(projectId) + "/people/count"))
    #
    # Get a page of people from a study, keyed by position index
    #
    def getPeople(self, projectId, startIndex, count):
        jsonData = self._EscClient__retrieve_json("/api/public/rest/v1/catalog/studiesbyid/" + str(projectId) + "/people/list/" + str(startIndex) + "/" + str(count))
        results = {}
        for i in range(0, len(jsonData)):
            person = EscPerson()
            person.parseDict(jsonData[i])
            results[i] = person
        return results
|
[
"[email protected]"
] | |
d4904493099383546f82db50de128bdb52234ea6
|
6e6e27192e3fe888af95a0f11bec5aebe06a74d8
|
/ArrayAndString/L209_minimum-size-subarray-sum.py
|
c657e576fd24315abd9ce7f7772c46aa5390fdef
|
[] |
no_license
|
lihujun101/LeetCode
|
af51a7eeec4b64f9d04b439285e2d0444c7fdf7e
|
96e847591aa6ea7ea285dbcfc1c9bcfc32026de5
|
refs/heads/master
| 2021-07-22T11:00:59.387428 | 2019-01-01T12:40:43 | 2019-01-01T12:40:43 | 146,878,231 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 875 |
py
|
class Solution:
    def minSubArrayLen(self, s, nums):
        """
        :type s: int
        :type nums: List[int]
        :rtype: int

        Sliding-window scan in O(n): extend the window to the right, then
        shrink it from the left while its sum stays >= s, tracking the
        smallest window length seen. Returns 0 when no subarray reaches s.
        """
        if not nums:
            return 0
        best = float("inf")
        left = 0
        window_sum = 0
        for right, value in enumerate(nums):
            window_sum += value
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == float("inf") else best
if __name__ == '__main__':
    s = Solution()
    # Note: the local name `s` (the Solution instance) is unrelated to the
    # keyword argument s=11 below; the call works because s=11 is a kwarg.
    print(s.minSubArrayLen(s=11, nums=[1,2,3,4,5]))
|
[
"[email protected]"
] | |
496d7306eb09257e9b34d99372293f4febf58eda
|
ae912cfda71c89db5cb9d01e87ffc54c1dcb328d
|
/jobs/migrations/0002_project_project_tech.py
|
a98c0c0da537a40a8c78c8ff8d2ce677e0df106b
|
[] |
no_license
|
harshitksrivastava/portfolio-project
|
af6aa973d04a7be02c4984c657904ffb8302ba2e
|
ff36b186091e5382b2fb0567e90313aad71224fe
|
refs/heads/master
| 2022-12-12T19:10:49.182860 | 2020-12-17T12:07:49 | 2020-12-17T12:07:49 | 199,324,804 | 0 | 0 | null | 2022-11-22T06:36:29 | 2019-07-28T18:46:15 |
JavaScript
|
UTF-8
|
Python
| false | false | 477 |
py
|
# Generated by Django 3.0.8 on 2020-08-02 12:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add the project_tech FK from Project to Technology."""
    dependencies = [
        ('jobs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='project_tech',
            # NOTE(review): default=None on a non-nullable FK will fail on
            # existing rows — confirm the table was empty when this ran.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='jobs.Technology'),
        ),
    ]
|
[
"[email protected]"
] | |
6bc05f1c24acd83be18b9337a531c43c42f39d63
|
6e928e1651713f945c980bca6d6c02ac5dce249a
|
/task1/5.py
|
64b92c59d071daed1a062f5bbc9c61742d9564d9
|
[] |
no_license
|
Akzhan12/pp2
|
97334158b442383df32583ee6c0b9cab92a3ef45
|
56e33fd9119955ea8349172bf3f2cc5fbd814142
|
refs/heads/main
| 2023-06-28T08:30:11.068397 | 2021-07-29T08:34:43 | 2021-07-29T08:34:43 | 337,359,826 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 211 |
py
|
# Rotate a list of integers to the right by n positions and print the result.
a = list(map(int, input().split()))
# In Python, % with a positive modulus never yields a negative result, so the
# original `if n < 0` branch was dead code; the shift is already in
# [0, len(a)). (Assumes the first input line is non-empty — TODO confirm.)
n = int(input()) % len(a)
# Slicing also covers n == 0: a[-0:] is the whole list and a[:-0] -> a[:0]
# is empty, so the concatenation reproduces the original list.
print(*(a[-n:] + a[:-n]))
|
[
"[email protected]"
] | |
a46afda8041485109144a60243600a990bd2b7d1
|
c0d5b7f8e48a26c6ddc63c76c43ab5b397c00028
|
/tests/columns/test_array.py
|
731e15ff8b962d66534e989094fe5f8cbef23a93
|
[
"MIT"
] |
permissive
|
aminalaee/piccolo
|
f6c5e5e1c128568f7ccb9ad1dfb4746acedae262
|
af8d2d45294dcd84f4f9b6028752aa45b699ec15
|
refs/heads/master
| 2023-07-14T09:44:04.160116 | 2021-07-11T22:56:27 | 2021-07-11T22:56:27 | 386,398,401 | 0 | 0 |
MIT
| 2021-07-15T19:32:50 | 2021-07-15T19:08:17 | null |
UTF-8
|
Python
| false | false | 2,199 |
py
|
from unittest import TestCase
from piccolo.table import Table
from piccolo.columns.column_types import Array, Integer
from tests.base import postgres_only
class MyTable(Table):
    # Integer-array column under test.
    value = Array(base_column=Integer())
class TestArrayPostgres(TestCase):
    """
    Make sure an Array column can be created, stored and queried.
    """
    def setUp(self):
        # Fresh table for every test.
        MyTable.create_table().run_sync()
    def tearDown(self):
        # Drop the table so tests stay independent.
        MyTable.alter().drop_table().run_sync()
    def test_storage(self):
        """
        Make sure data can be stored and retrieved.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()
        row = MyTable.objects().first().run_sync()
        self.assertEqual(row.value, [1, 2, 3])
    @postgres_only
    def test_index(self):
        """
        Indexes should allow individual array elements to be queried.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()
        # value[0] selects the first array element (Postgres-only syntax).
        self.assertEqual(
            MyTable.select(MyTable.value[0]).first().run_sync(), {"value": 1}
        )
    @postgres_only
    def test_all(self):
        """
        Make sure rows can be retrieved where all items in an array match a
        given value.
        """
        MyTable(value=[1, 1, 1]).save().run_sync()
        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.all(1))
            .first()
            .run_sync(),
            {"value": [1, 1, 1]},
        )
        # No row should match when any element differs from the probe value.
        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.all(0))
            .first()
            .run_sync(),
            None,
        )
    def test_any(self):
        """
        Make sure rows can be retrieved where any items in an array match a
        given value.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()
        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.any(1))
            .first()
            .run_sync(),
            {"value": [1, 2, 3]},
        )
        # 0 is absent from the array, so no row matches.
        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.any(0))
            .first()
            .run_sync(),
            None,
        )
|
[
"[email protected]"
] | |
e224dafd3e35959feffd1ace01599c5833048a0b
|
627d40980db4fbcec59249831e5f7d04f6dcc767
|
/For_Lopp.py
|
e06ef50d7c5a5e7cf1248c5b317ca962559352f9
|
[] |
no_license
|
Shantha-Kumar-A/yuvarepo
|
33a151c7241c2b3bd6a1c1d3fac14b015e73172b
|
574811bad1b573bd19912a27001a47bca0832a60
|
refs/heads/master
| 2022-06-11T07:37:50.471032 | 2020-05-09T10:07:43 | 2020-05-09T10:07:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
x = ['Yuvaraj','hi',298]
for i in x:
print(i)
x = 'Yuvaraj'
for i in x:
print(i)
for i in range(10):
print(i)
for i in range(11,20):
print(i)
for i in range(20,10,-1):
print(i)
|
[
"[email protected]"
] | |
0c1a13795be34e0ee6d38ececfad4eb66f28e0c7
|
c37cb658a5c5fb3dc1aafa959c86866dc1ed9e39
|
/第八章/2-sock_server_client.py
|
5e8660e8bdc147a7b1892295f041d469c57d1598
|
[] |
no_license
|
Dragonet-D/python3-study
|
6fe7a7f049dd8067593c865e58d125e12b1de55a
|
b057d75a2421798d27e2d15e0eee35d9eb8dbf27
|
refs/heads/master
| 2021-10-10T21:18:30.319066 | 2021-10-01T07:52:27 | 2021-10-01T07:52:27 | 98,364,215 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
import socket

# Interactive client: send user-typed commands to a server on localhost:9999
# and print each reply.
client = socket.socket()
client.connect(('localhost', 9999))
while True:
    cmd = input('>>:').strip()
    if len(cmd) == 0:
        continue
    # Python 3 sockets carry bytes, not str: encode on send and decode on
    # receive (the original passed the str directly, raising TypeError).
    client.send(cmd.encode())
    cmd_res = client.recv(1024)
    print(cmd_res.decode())
# NOTE(review): unreachable — the loop above never breaks; kept from the
# original structure.
client.close()
|
[
"[email protected]"
] | |
fb3c0ac7bfa0dcea0555b2ff32f1485a6926b6e0
|
033fc1b8735f5631c712c639abccea610277a0d7
|
/Configuration.py
|
07a486d50b58e37f3c7e0a25870d5c836fc56e51
|
[] |
no_license
|
MolotovCherry/Yandere-Simulator-Skin-Switcher
|
a7e70fb85b124eb90b4637ccac834d797afd16ea
|
6b17eeb1aa0cd2b2f70ad0f7553d6a599e7576ff
|
refs/heads/master
| 2022-11-25T09:02:01.167000 | 2020-07-30T09:09:42 | 2020-07-30T09:09:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,033 |
py
|
import os
import sys
from configparser import ConfigParser
def singleton(cls):
    """Decorator: make *cls* produce one shared instance.

    The first call constructs the instance and caches it on the class as
    ``__instance`` (no name mangling applies here — this is a function, not
    a class body); every later call returns the cached object.
    """
    def inner(*args, **kwargs):
        if not hasattr(cls, '__instance'):
            cls.__instance = cls(*args, **kwargs)
        return cls.__instance
    return inner
@singleton
class Configuration(ConfigParser):
    """Singleton ConfigParser backed by a config.ini next to the program.

    On construction it loads the file, creating it with defaults when
    missing and repairing any invalid or missing entries when present.
    """
    def __init__(self):
        super().__init__()
        # When frozen (e.g. PyInstaller), the ini lives next to the
        # executable; otherwise next to this source file.
        if hasattr(sys, 'frozen'):
            configFile = os.path.dirname(os.path.abspath(sys.executable))
        else:
            configFile = os.path.dirname(os.path.abspath(__file__))
        self.configFile = os.path.join(configFile, 'config.ini')
        # we use this for writing a new file, or repairing damaged entries
        self.defaults = {
            'default': {
                'style': 'dark',
                'gameroot': '',
                'currentskin': 'default'
            }
        }
        # Allowed values per option: a list means an enumeration; a string
        # names the expected value type ('string', 'number', 'bool').
        self.validValues = {
            'default': {
                'style': ['light', 'dark'],
                'gameroot': 'string',
                'currentskin': 'string'
            }
        }
        # ConfigParser.read returns the list of files successfully parsed.
        read = self.read(self.configFile)
        if not read:
            # config file doesn't exist, write defaults
            self.writeDefaults()
        else:
            if self.repairConfig():
                # save the fixed config
                self.saveConfig()
    def writeDefaults(self):
        # Replace all sections with the defaults and persist them.
        for key, value in self.defaults.items():
            self[key] = value
        self.saveConfig()
    def saveConfig(self):
        # Persist the current in-memory state to config.ini.
        with open(self.configFile, 'w') as confile:
            self.write(confile)
    def repairConfig(self):
        """Validate loaded options against validValues, resetting bad ones.

        Returns True when anything was changed (caller should save).
        """
        # did we repair anything?
        dirtyConfig = False
        # verify integrity of options, and if not exist, then reset the option
        for k1, v1 in self.defaults.items():
            if k1 in self:
                for k2, v2 in self[k1].items():
                    if isinstance(self.validValues[k1][k2], list):
                        # make sure it is one of the allowed values
                        if self[k1][k2] not in self.validValues[k1][k2]:
                            self[k1][k2] = self.defaults[k1][k2]
                            dirtyConfig = True
                    elif isinstance(self.validValues[k1][k2], str):
                        # value can be anything, but it must be of a certain type
                        if self.validValues[k1][k2] == 'string':
                            # i don't care what the string is
                            if not isinstance(self[k1][k2], str):
                                self[k1][k2] = self.defaults[k1][k2]
                                dirtyConfig = True
                        elif self.validValues[k1][k2] == 'number':
                            # the number is represented in a string,
                            # so verify the string is essentially a number
                            try:
                                int(self[k1][k2])
                            except ValueError:
                                self[k1][k2] = self.defaults[k1][k2]
                                dirtyConfig = True
                        elif self.validValues[k1][k2] == 'bool':
                            # this is also a string
                            if self[k1][k2].lower() not in ('true', 'false'):
                                self[k1][k2] = self.defaults[k1][k2]
                                dirtyConfig = True
        # fill in any possible gaps with default values
        # in this way we'll never have a KeyError, no matter
        # if the real config file is damaged
        for k1, v1 in self.defaults.items():
            if k1 not in self:
                self[k1] = v1
                dirtyConfig = True
            else:
                for k2, v2 in v1.items():
                    if k2 not in self[k1]:
                        self[k1][k2] = v2
                        dirtyConfig = True
        return dirtyConfig
|
[
"[email protected]"
] | |
b1571f62c847a20ecf7624a5be9945287afced54
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/zlib/test/test_zlib.py
|
35fd7147de34051908c2d1acb58fc941e3703da9
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 |
NOASSERTION
| 2020-04-01T03:05:18 | 2018-06-04T20:45:17 |
Python
|
UTF-8
|
Python
| false | false | 10,362 |
py
|
"""
Tests for the zlib module.
"""
import sys
try:
import zlib
except ImportError:
import py; py.test.skip("no zlib module on this host Python")
try:
from pypy.module.zlib import interp_zlib
except ImportError:
import py; py.test.skip("no zlib C library on this machine")
def test_unsigned_to_signed_32bit():
    """Values < 2**31 map to themselves; higher ones wrap to negatives."""
    assert interp_zlib.unsigned_to_signed_32bit(123) == 123
    assert interp_zlib.unsigned_to_signed_32bit(2**31) == -2**31
    assert interp_zlib.unsigned_to_signed_32bit(2**32-1) == -1
    if sys.maxint > 2**32:
        # Only meaningful on 64-bit hosts, where r_uint exceeds 32 bits.
        from rpython.rlib.rarithmetic import r_uint
        assert interp_zlib.unsigned_to_signed_32bit(r_uint(sys.maxint)) == -1
        assert interp_zlib.unsigned_to_signed_32bit(r_uint(sys.maxint+1)) == 0
class AppTestZlib(object):
    # PyPy app-level test class (Python 2 syntax): the test space exposes
    # the zlib module built into the interpreter under test.
    spaceconfig = dict(usemodules=['zlib'])
    def setup_class(cls):
        """
        Create a space with the zlib module and import it for use by the tests.
        Also create some compressed data with the bootstrap zlib module so that
        compression and decompression tests have a little real data to assert
        against.
        """
        cls.w_zlib = cls.space.getbuiltinmodule('zlib')
        expanded = 'some bytes which will be compressed'
        cls.w_expanded = cls.space.wrap(expanded)
        cls.w_compressed = cls.space.wrap(zlib.compress(expanded))
    def test_error(self):
        """
        zlib.error should be an exception class.
        """
        assert issubclass(self.zlib.error, Exception)
    def test_crc32(self):
        """
        When called with a string, zlib.crc32 should compute its CRC32 and
        return it as a signed 32 bit integer. On 64-bit machines too
        (it is a bug in CPython < 2.6 to return unsigned values in this case).
        """
        assert self.zlib.crc32('') == 0
        assert self.zlib.crc32('\0') == -771559539
        assert self.zlib.crc32('hello, world.') == -936931198
    def test_crc32_start_value(self):
        """
        When called with a string and an integer, zlib.crc32 should compute the
        CRC32 of the string using the integer as the starting value.
        """
        assert self.zlib.crc32('', 42) == 42
        assert self.zlib.crc32('\0', 42) == 163128923
        assert self.zlib.crc32('hello, world.', 42) == 1090960721
        hello = 'hello, '
        hellocrc = self.zlib.crc32(hello)
        world = 'world.'
        helloworldcrc = self.zlib.crc32(world, hellocrc)
        assert helloworldcrc == self.zlib.crc32(hello + world)
    def test_crc32_negative_start(self):
        # A negative start value is passed through unchanged for empty input.
        v = self.zlib.crc32('', -1)
        assert v == -1
    def test_crc32_negative_long_start(self):
        # Arbitrarily large negative longs are truncated to 32 bits.
        v = self.zlib.crc32('', -1L)
        assert v == -1
        assert self.zlib.crc32('foo', -99999999999999999999999) == 1611238463
    def test_crc32_long_start(self):
        # Arbitrarily large positive longs are truncated to 32 bits.
        import sys
        v = self.zlib.crc32('', sys.maxint*2)
        assert v == -2
        assert self.zlib.crc32('foo', 99999999999999999999999) == 1635107045
    def test_adler32(self):
        """
        When called with a string, zlib.adler32() should compute its adler 32
        checksum and return it as a signed 32 bit integer.
        On 64-bit machines too
        (it is a bug in CPython < 2.6 to return unsigned values in this case).
        """
        assert self.zlib.adler32('') == 1
        assert self.zlib.adler32('\0') == 65537
        assert self.zlib.adler32('hello, world.') == 571147447
        assert self.zlib.adler32('x' * 23) == -2122904887
    def test_adler32_start_value(self):
        """
        When called with a string and an integer, zlib.adler32 should compute
        the adler 32 checksum of the string using the integer as the starting
        value.
        """
        assert self.zlib.adler32('', 42) == 42
        assert self.zlib.adler32('\0', 42) == 2752554
        assert self.zlib.adler32('hello, world.', 42) == 606078176
        assert self.zlib.adler32('x' * 23, 42) == -2061104398
        hello = 'hello, '
        hellosum = self.zlib.adler32(hello)
        world = 'world.'
        helloworldsum = self.zlib.adler32(world, hellosum)
        assert helloworldsum == self.zlib.adler32(hello + world)
        assert self.zlib.adler32('foo', -1) == 45547858
        assert self.zlib.adler32('foo', 99999999999999999999999) == -114818734
    def test_invalidLevel(self):
        """
        zlib.compressobj should raise ValueError when an out of bounds level is
        passed to it.
        """
        raises(ValueError, self.zlib.compressobj, -2)
        raises(ValueError, self.zlib.compressobj, 10)
    def test_compression(self):
        """
        zlib.compressobj should return an object which can be used to compress
        bytes.
        """
        compressor = self.zlib.compressobj()
        bytes = compressor.compress(self.expanded)
        raises(OverflowError, compressor.flush, 2**31)
        bytes += compressor.flush()
        assert bytes == self.compressed
    def test_decompression(self):
        """
        zlib.decompressobj should return an object which can be used to
        decompress bytes.
        """
        decompressor = self.zlib.decompressobj()
        bytes = decompressor.decompress(self.compressed)
        bytes += decompressor.flush()
        assert bytes == self.expanded
    def test_compress(self):
        """
        Test the zlib.compress() function.
        """
        bytes = self.zlib.compress(self.expanded)
        assert bytes == self.compressed
    def test_decompress(self):
        """
        Test the zlib.decompress() function.
        """
        bytes = self.zlib.decompress(self.compressed)
        assert bytes == self.expanded
    def test_decompress_invalid_input(self):
        """
        Try to feed garbage to zlib.decompress().
        """
        raises(self.zlib.error, self.zlib.decompress, self.compressed[:-2])
        raises(self.zlib.error, self.zlib.decompress, 'foobar')
    def test_bad_arguments(self):
        # Out-of-range and wrongly-typed arguments raise the right errors.
        import zlib
        raises(ValueError, zlib.decompressobj().flush, 0)
        raises(ValueError, zlib.decompressobj().flush, -1)
        raises(TypeError, zlib.decompressobj().flush, None)
        raises(ValueError, zlib.decompressobj().decompress, b'abc', -1)
        raises(TypeError, zlib.decompressobj().decompress, b'abc', None)
        raises(TypeError, self.zlib.decompress, self.compressed, None)
        raises(OverflowError, self.zlib.decompress, self.compressed, 2**31)
    def test_empty_flush(self):
        # A compressor flush always emits at least the zlib header; a fresh
        # decompressor flush emits nothing.
        import zlib
        co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
        assert co.flush()  # Returns a zlib header
        dco = zlib.decompressobj()
        assert dco.flush() == b""
    def test_decompress_incomplete_stream(self):
        import zlib
        # This is 'foo', deflated
        x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
        # For the record
        assert zlib.decompress(x) == b'foo'
        raises(zlib.error, zlib.decompress, x[:-5])
        # Omitting the stream end works with decompressor objects
        # (see issue #8672).
        dco = zlib.decompressobj()
        y = dco.decompress(x[:-5])
        y += dco.flush()
        assert y == b'foo'
    def test_unused_data(self):
        """
        Try to feed too much data to zlib.decompress().
        It should show up in the unused_data attribute.
        """
        d = self.zlib.decompressobj()
        s = d.decompress(self.compressed + 'extrastuff', 0)
        assert s == self.expanded
        assert d.unused_data == 'extrastuff'
        assert d.flush() == ''
        assert d.unused_data == 'extrastuff'
        # try again with several decompression steps
        d = self.zlib.decompressobj()
        s1 = d.decompress(self.compressed[:10])
        assert d.unused_data == ''
        s2 = d.decompress(self.compressed[10:-3])
        assert d.unused_data == ''
        s3 = d.decompress(self.compressed[-3:] + 'spam' * 100)
        assert d.unused_data == 'spam' * 100
        assert s1 + s2 + s3 == self.expanded
        s4 = d.decompress('egg' * 50)
        assert d.unused_data == ('spam' * 100) + ('egg' * 50)
        assert s4 == ''
    def test_max_length(self):
        """
        Test the max_length argument of the decompress() method
        and the corresponding unconsumed_tail attribute.
        """
        d = self.zlib.decompressobj()
        data = self.compressed
        for i in range(0, 100, 10):
            s1 = d.decompress(data, 10)
            assert s1 == self.expanded[i:i+10]
            data = d.unconsumed_tail
        assert not data
    def test_max_length_large(self):
        # max_length may be a 64-bit int on new enough CPythons.
        import sys
        if sys.version_info < (2, 7, 13):
            skip("passing a potentially 64-bit int as max_length is not "
                 "supported before 2.7.13")
        d = self.zlib.decompressobj()
        assert d.decompress(self.compressed, sys.maxsize) == self.expanded
    def test_buffer(self):
        """
        We should be able to pass buffer objects instead of strings.
        """
        assert self.zlib.crc32(buffer('hello, world.')) == -936931198
        assert self.zlib.adler32(buffer('hello, world.')) == 571147447
        compressor = self.zlib.compressobj()
        bytes = compressor.compress(buffer(self.expanded))
        bytes += compressor.flush()
        assert bytes == self.compressed
        decompressor = self.zlib.decompressobj()
        bytes = decompressor.decompress(buffer(self.compressed))
        bytes += decompressor.flush()
        assert bytes == self.expanded
        bytes = self.zlib.compress(buffer(self.expanded))
        assert bytes == self.compressed
        bytes = self.zlib.decompress(buffer(self.compressed))
        assert bytes == self.expanded
    def test_flush_with_freed_input(self):
        # Issue #16411: decompressor accesses input to last decompress() call
        # in flush(), even if this object has been freed in the meanwhile.
        input1 = b'abcdefghijklmnopqrstuvwxyz'
        input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM'
        data = self.zlib.compress(input1)
        dco = self.zlib.decompressobj()
        dco.decompress(data, 1)
        del data
        data = self.zlib.compress(input2)
        assert dco.flush(1) == input1[1:]
        assert dco.unused_data == b''
        assert dco.unconsumed_tail == b''
|
[
"[email protected]"
] | |
e418afb3c244e5d140f09bc3ff9dec3f541a546d
|
c864086ea38c3cfe4819fabcd71137db79a52f2f
|
/drugi.py
|
53ebf8955dcf91801d076323c4f94518bebc5a1a
|
[] |
no_license
|
kaczmarekmichal/nauka_pythona
|
acee5d99bd19e66d8d6fe19f71c5fd8b6d3b9b71
|
d9ac839fa5290d8f7b5cb7b31e6c66d199ae8b89
|
refs/heads/master
| 2020-04-10T12:18:59.731505 | 2019-01-19T10:47:40 | 2019-01-19T10:47:40 | 161,018,459 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
# Beginner exercise: string methods, concatenation and simple branching.
marka ='Peugeot'
marka2 = 'Audi'
ilosc_drzwi = 5
pojemnosc =1.3
# upper()/lower() return new strings; the original is unchanged.
marka_up = marka.upper()
marka_low = marka.lower()
print ("Samochod " + marka + " ma " + str(ilosc_drzwi) + " drzwi")
print (marka_low)
print("pojemnosc po zmianach " + str(pojemnosc*2))
# swapcase() flips the case of every letter.
marka_swap=marka.swapcase()
print(marka_swap)
# More than three doors counts as a "big" car here.
if ilosc_drzwi >3:
    print('duzy')
else:
    print('maly')
# startswith() tests the prefix of the brand name.
if marka.startswith('Pe'):
    print('uwaga peugeot')
else:
    print("na szczescie to nie francuz!")
|
[
"[email protected]"
] | |
08f80bf55016aa228f9cc5eb54a1e81918a05532
|
87645df4118bc0c7c63bb4cca5d38cc2967fa6ba
|
/assignment18.py
|
9842084f08b0fe1127e3b6f908fcfc5ddf2e51e6
|
[] |
no_license
|
manpreet01multani/acadview_assignments
|
e0559f140d95b293759b59b5284782b1950ed567
|
52fc9e97396147f50aea3b3025fa2975a75a1756
|
refs/heads/master
| 2020-03-19T11:22:13.006177 | 2018-06-24T11:15:09 | 2018-06-24T11:15:09 | 136,450,742 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 722 |
py
|
#1. Phone book: double-click a name in the listbox to display its number.
from tkinter import *

r = Tk()
# NOTE(review): the original literal listed "reet" twice (234, then 345);
# Python keeps only the last value, so the effective entry is 345.
d = {"Meet": 789, "jeet": 456, "heer": 786, "geet": 987, "geen": 675, "reet": 345}
# Two result labels: label1 shows the selected name, label2 its number.
# (The original created both widgets under the same name `label`, so the
# label1/label2 references in handleList raised NameError when triggered.)
label1 = Label(r, text="Name", font='size,10')
label1.pack(side="bottom")
label2 = Label(r, text="phone number")
label2.pack(side="top")

def handleList(event):
    # Look up the name under the cursor and show name + number.
    label3 = mn.get(ACTIVE)
    print(label3)
    ph = d.get(label3)
    label1.config(text=label3)
    label2.config(text=ph)

mn = Listbox(r)
mn.config(selectmode=EXTENDED)
scrollbar = Scrollbar(r)
scrollbar.config(command=mn.yview)
mn.config(yscrollcommand=scrollbar.set)
scrollbar.pack(side=RIGHT, fill=Y)
mn.pack(side=LEFT, expand=YES, fill=BOTH)
mn.bind('<Double-1>', handleList)
for k, v in d.items():
    mn.insert('end', k)
r.mainloop()
|
[
"[email protected]"
] | |
8902f9f1b1e2cecf529e9c659f9b3c92b3b8ed33
|
7a60d48c403a98e0065d701602a9ec0eae136c35
|
/algorithmic_toolbox/PA4_divide_and_conquer/3_sorting.py
|
f0204b96a893a06eb391e4e8444b05b078c56b4d
|
[] |
no_license
|
themillipede/data-structures-and-algorithms
|
691a20ec4239f43c3f3f597b8f803097dc691d30
|
4a64a10d8f5e02921a83d37c48fd3871dd91ced8
|
refs/heads/master
| 2020-03-13T05:20:24.990413 | 2019-12-28T20:13:02 | 2019-12-28T20:13:02 | 130,981,788 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,283 |
py
|
# Uses python3
"""
3. Improving quick sort
Introduction: The goal in this problem is to redesign the randomized quick sort algorithm so that it works
fast even on sequences containing many equal elements.
Task: Replace the 2-way partition with a 3-way partition to enable the quick sort algorithm to efficiently
process sequences with few unique elements. That is, your new partition procedure should partition the
array into three parts: < x part, = x part, and > x part.
Input: The first line contains an integer n. The next line contains a sequence of n integers a_0, a_1, ..., a_(n-1).
Constraints: 1 <= n <= 10^5; 1 <= a_i <= 10^9 for all 0 <= i < n.
Output: The sequence sorted in in non-decreasing order.
"""
import sys
import random
#################################
# Quick sort with 2-way partition
#################################
def partition2(a, l, r):
pivot = a[l]
p_idx = l
for i in range(l + 1, r + 1):
if a[i] <= pivot:
p_idx += 1
a[i], a[p_idx] = a[p_idx], a[i]
a[l], a[p_idx] = a[p_idx], a[l]
return p_idx
def randomized_quick_sort2(a, l, r):
    """Sort a[l..r] in place with quicksort using a 2-way partition
    around a uniformly random pivot."""
    if r <= l:
        return
    pivot_pos = random.randint(l, r)
    a[l], a[pivot_pos] = a[pivot_pos], a[l]  # move the random pivot to the front
    split = partition2(a, l, r)
    randomized_quick_sort2(a, l, split - 1)
    randomized_quick_sort2(a, split + 1, r)
#################################
# Quick sort with 3-way partition
#################################
def partition3(a, l, r):
    """3-way partition of a[l..r] around the pivot a[l].

    Returns (k, p_idx): every element of a[k+1..p_idx] equals the pivot
    and is already in its final sorted position, so the caller only needs
    to recurse on [l, k] and [p_idx + 1, r].
    """
    pivot = a[l]
    p_idx = l  # boundary of the "<= pivot" region
    k = l # Will become the index of the largest number smaller than the pivot.
    for i in range(l + 1, r + 1):
        if a[i] <= pivot:
            p_idx += 1
            a[i], a[p_idx] = a[p_idx], a[i]
            if a[p_idx] < pivot:
                # Keep strictly-smaller elements at the front of the region,
                # bunching the equal-to-pivot elements at its tail.
                k += 1
                a[k], a[p_idx] = a[p_idx], a[k]
    # Swap the pivot off the front into the region boundary.
    a[l], a[p_idx] = a[p_idx], a[l]
    return k, p_idx
def randomized_quick_sort3(a, l, r):
    """Sort a[l..r] in place: random pivot + 3-way partition, which stays
    fast on inputs with many duplicate keys."""
    if r <= l:
        return
    choice = random.randint(l, r)
    a[l], a[choice] = a[choice], a[l]  # random pivot to the front
    lt_end, eq_end = partition3(a, l, r)
    randomized_quick_sort3(a, l, lt_end)
    randomized_quick_sort3(a, eq_end + 1, r)
if __name__ == '__main__':
    # Read everything from stdin: first the count n, then the n elements.
    # NOTE: `input` shadows the builtin of the same name within this block.
    input = sys.stdin.read()
    n, *a = list(map(int, input.split()))
    randomized_quick_sort3(a, 0, n - 1)
    # Emit the sorted sequence on one line, space separated.
    for x in a:
        print(x, end=' ')
|
[
"[email protected]"
] | |
0697999540cfd3dabcd8c47abe2e699e561d6cca
|
396a7d442f1b9d4b6734f90ce73fb35cd1f2293b
|
/src/ashild_grotan_ex/ex01/comp_to_loop.py
|
432a11967f2578cc9738e0891bdaa2d763854df7
|
[] |
no_license
|
ashildgrotan/INF200-2019-Exersices
|
46d40d814f39ce6900bb057ce9ca7275f47a6d2c
|
70dbac6d92d29f78c0aac8c72c7a9d1f8b42b72a
|
refs/heads/master
| 2020-07-21T06:11:59.676983 | 2019-11-13T19:12:01 | 2019-11-13T19:12:01 | 206,767,955 | 0 | 0 | null | 2019-11-03T13:41:10 | 2019-09-06T10:10:53 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 348 |
py
|
"""
ex_01, task B.
"""
def squares_by_comp(n):
    """Return squares of every k in range(n) with k % 3 == 1 (k = 1, 4, 7, ...)."""
    return [value * value for value in range(1, n, 3)]
def squares_by_loop(n):
    """Return squares of every k in range(n) with k % 3 == 1, built with an
    explicit loop (reference implementation for squares_by_comp)."""
    result = []
    k = 0
    while k < n:
        if k % 3 == 1:
            result.append(k * k)
        k += 1
    return result
if __name__ == "__main__":
    # Sanity check: both implementations must agree on a sample input.
    if squares_by_comp(10) != squares_by_loop(10):
        print("ERROR!")
|
[
"[email protected]"
] | |
3a18512d569b9063b5ddbac6e2d4e18eff02ff34
|
dab13cee0dbdd13ee891e8f81378255e91e8cf34
|
/Blog/bin/django-admin.py
|
b6ae43ff66464d4bc2b6889378afad55c4348ed4
|
[] |
no_license
|
aaron-gl94/blog
|
1dd5f5664cca268cfb92e0b118d1ef4732224b1e
|
b955825c51782501745b7377a731d0c3de131d80
|
refs/heads/master
| 2021-01-20T18:36:04.147224 | 2016-07-29T15:35:02 | 2016-07-29T15:35:02 | 64,489,270 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 147 |
py
|
#!/home/syscorp/Backend/Blog/bin/python3
# django-admin entry point pinned to this virtualenv's interpreter.
from django.core import management
if __name__ == "__main__":
    # Forward the command-line arguments to Django's management framework.
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
30d22e648e82216e843989a09b25df3c9431291e
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/library/library_editor_supplier.py
|
d50451b7ea1eab1cf3f62f26950986f7861e6e54
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 72 |
py
|
/home/openerp/production/extra-addons/library/library_editor_supplier.py
|
[
"[email protected]"
] | |
42b7d6c70484231c6bb27005121f31b13a66df8b
|
5d87e9864168ef07f49693ebf94c83dedfdde4ed
|
/setup.py
|
829e7337c8b5f700daac0f6c36457913d1d6eb85
|
[
"BSD-3-Clause"
] |
permissive
|
cavestruz/spa
|
34b852b63932a296009b8ee58abd50efe1f3d35a
|
4f147694601e0b88f97794c8851ccdd615c73ea4
|
refs/heads/master
| 2022-11-05T23:04:52.604040 | 2020-06-19T16:47:22 | 2020-06-19T16:47:22 | 269,761,428 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
py
|
# Minimal setuptools packaging script for the `spa` package.
from setuptools import setup
setup(name='spa',
      version='0.1',
      description='Tests of spa measurements on 300 sims',
      url='http://github.com/cavestruz/spa',
      author='Camille Avestruz',
      author_email='[email protected]',
      license='BSD3',
      packages=['spa'],
      zip_safe=False)
|
[
"[email protected]"
] | |
70bba7946ac7f7477975d7e1e539ba54fb5208fd
|
918becd643a9d0fec941f89c0da3eb5295fa7104
|
/i3/i3-quickterm/i3-quickterm~
|
2d013f3485864f0f1bbce8e99794609154fb5c29
|
[] |
no_license
|
xircon/Scripts-dots
|
2790dc6f7825302ee66d8cc58dd768b0ddc394a7
|
ef27d11e4163ddb223a2a4cb9ed47af71974cd81
|
refs/heads/master
| 2021-01-20T03:09:26.086417 | 2018-10-13T14:26:15 | 2018-10-13T14:26:15 | 89,503,757 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,845 |
#!/usr/bin/env python3
import argparse
import copy
import fcntl
import json
import os
import shlex
import subprocess
import sys
from contextlib import contextmanager, suppress
from pathlib import Path
import i3ipc
# Regex matching any quickterm window mark, and the template used to mint one.
MARK_QT_PATTERN = 'quickterm_.*'
MARK_QT = 'quickterm_{}'
# Built-in defaults; overridden by the user's JSON config (see conf_path()).
# {$NAME} placeholders are expanded from the environment by expand_command().
DEFAULT_CONF = {
    'menu': 'rofi -dmenu -p "quickterm: " -no-custom -auto-select',
    'term': 'tilix -title "{title}"',
    'history': '{$HOME}/.cache/i3/i3-quickterm.order',
    'ratio': 0.40,
    'pos': 'top',
    'shells': {
        'haskell': 'ghci',
        'js': 'node',
        'python': 'python3 --no-banner',
        'shell': '{$SHELL}',
    }
}
def conf_path():
    """Return the path of the quickterm JSON config file.

    Honours $XDG_CONFIG_DIR when set, falling back to ~/.config.
    """
    fallback = '{}/.config'.format(os.environ['HOME'])
    base = os.environ.get('XDG_CONFIG_DIR', fallback)
    return base + '/i3/i3-quickterm.json'
def read_conf(fn):
    """Load the JSON config at *fn*; return {} when unreadable or invalid."""
    try:
        with open(fn, 'r') as handle:
            return json.load(handle)
    except Exception:
        return {}
@contextmanager
def get_history_file(conf):
    """Yield the shell-order history file, opened 'a+' and exclusively
    locked with fcntl.lockf (released on exit).  Yields None when the
    history feature is disabled (conf['history'] is None)."""
    if conf['history'] is None:
        yield None
        return
    p = Path(expand_command(conf['history'])[0])
    os.makedirs(str(p.parent), exist_ok=True)
    f = open(str(p), 'a+')
    fcntl.lockf(f, fcntl.LOCK_EX)
    try:
        # 'a+' positions at EOF; rewind so callers can read the history.
        f.seek(0)
        yield f
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
def expand_command(cmd, **rplc_map):
    """Expand {$ENV_VAR} and caller-supplied {placeholder} markers in *cmd*,
    then split it into an argv list with shlex."""
    substitutions = dict(rplc_map)
    for key, value in os.environ.items():
        # Explicit replacements win over environment variables; keys never
        # collide anyway since env keys carry the '$' prefix.
        substitutions.setdefault('$' + key, value)
    return shlex.split(cmd.format(**substitutions))
def move_back(conn, selector):
    """Send the window matched by *selector* back to the scratchpad."""
    command = '{} floating enable, move scratchpad'.format(selector)
    conn.command(command)
def pop_it(conn, mark_name, pos='top', ratio=0.40):
    """Resize the marked window to the full workspace width and *ratio* of
    its height, anchor it at the top (default) or bottom edge, and show it
    from the scratchpad."""
    ws, _ = get_current_workspace(conn)
    rect = ws['rect']
    width = rect['width']
    height = int(rect['height'] * ratio)
    posx = rect['x']
    if pos == 'bottom':
        margin = 6  # small gap above the bottom edge
        posy = rect['y'] + rect['height'] - height - margin
    else:  # 'top' (and any other value) anchors to the top edge
        posy = rect['y']
    conn.command('[con_mark={mark}],'
                 'resize set {width} px {height} px,'
                 'move absolute position {posx}px {posy}px,'
                 'move scratchpad,'
                 'scratchpad show'
                 ''.format(mark=mark_name, posx=posx, posy=posy,
                           width=width, height=height))
def get_current_workspace(conn):
    """Return (focused workspace reply, matching workspace node in the tree)."""
    candidates = conn.get_workspaces()
    focused = [w for w in candidates if w['focused']][0]
    matching = [node for node in conn.get_tree().descendents()
                if node.type == 'workspace' and node.name == focused['name']]
    return focused, matching[0]
def toggle_quickterm_select(conf, hist=None):
    """Hide a quickterm visible on current workspace or prompt
    the user for a shell type"""
    # NOTE(review): the `hist` parameter is never read -- it is immediately
    # rebound by the `with ... as hist` below; consider dropping it.
    conn = i3ipc.Connection()
    ws, ws_tree = get_current_workspace(conn)
    # is there a quickterm opened in the current workspace?
    qt = ws_tree.find_marked(MARK_QT_PATTERN)
    if qt:
        qt = qt[0]
        move_back(conn, '[con_id={}]'.format(qt.id))
        return
    with get_history_file(conf) as hist:
        # compute the list from conf + (maybe) history
        hist_list = None
        if hist is not None:
            with suppress(Exception):
                hist_list = json.load(hist)
                # invalidate if different set from the configured shells
                if set(hist_list) != set(conf['shells'].keys()):
                    hist_list = None
        shells = hist_list or sorted(conf['shells'].keys())
        # Pipe the shell names into the menu program (e.g. rofi) and read
        # the user's selection from its stdout.
        proc = subprocess.Popen(expand_command(conf['menu']),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        for r in shells:
            proc.stdin.write((r + '\n').encode())
        stdout, _ = proc.communicate()
        shell = stdout.decode().strip()
        if shell not in conf['shells']:
            return
        if hist is not None:
            # put the selected shell on top
            shells = [shell] + [s for s in shells if s != shell]
            hist.truncate(0)
            json.dump(shells, hist)
    toggle_quickterm(conf, shell)
def term_title(shell):
    """Window title used for the quickterm running *shell*."""
    return '%s - i3-quickterm' % shell
def toggle_quickterm(conf, shell):
    """Show/hide the quickterm for *shell*: spawn the terminal if it does
    not exist yet, otherwise hide it (or pull it onto this workspace)."""
    conn = i3ipc.Connection()
    tree = conn.get_tree()
    shell_mark = MARK_QT.format(shell)
    qt = tree.find_marked(shell_mark)
    # does it exist already?
    if len(qt) == 0:
        # Launch the terminal, re-running this script inside it with -i.
        subprocess.call(expand_command(conf['term'], title=term_title(shell)) +
                        ['-e', sys.argv[0], '-i', shell])
    else:
        qt = qt[0]
        ws, ws_tree = get_current_workspace(conn)
        move_back(conn, '[con_id={}]'.format(qt.id))
        # NOTE(review): `ws.name` uses attribute access while other call
        # sites use ws['name'] -- confirm the i3ipc reply type supports both.
        if qt.workspace().name != ws.name:
            pop_it(conn, shell_mark)
def launch_inplace(conf, shell):
    """Mark the current window as the quickterm for *shell*, position it,
    then run the configured shell program in the foreground (blocks until
    the program exits)."""
    conn = i3ipc.Connection()
    shell_mark = MARK_QT.format(shell)
    conn.command('mark {}'.format(shell_mark))
    move_back(conn, '[con_mark={}]'.format(shell_mark))
    pop_it(conn, shell_mark)
    prog_cmd = expand_command(conf['shells'][shell])
    subprocess.call(prog_cmd)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # -i is used internally when the script re-executes itself inside the
    # freshly spawned terminal window.
    parser.add_argument('-i', '--in-place', dest='in_place',
                        action='store_true')
    parser.add_argument('shell', metavar='SHELL', nargs='?')
    args = parser.parse_args()
    # Built-in defaults, overlaid with the user's JSON config.
    conf = copy.deepcopy(DEFAULT_CONF)
    conf.update(read_conf(conf_path()))
    if args.shell is None:
        # No shell given: toggle/choose interactively via the menu.
        toggle_quickterm_select(conf)
        sys.exit(0)
    if args.shell not in conf['shells']:
        print('unknown shell: {}'.format(args.shell), file=sys.stderr)
        sys.exit(1)
    if args.in_place:
        launch_inplace(conf, args.shell)
    else:
        toggle_quickterm(conf, args.shell)
|
[
"[email protected]"
] | ||
25116899b20f17da29b4c7bd6836e9e76144abd0
|
0ed5a3e86e3ed2a597a479db9f3dcd8208263393
|
/Project_Euler/P004_largest_palindromic_product.py
|
ead6e3325d6d350018a3029c85de25cb8a2600d5
|
[] |
no_license
|
justanotherguy-0/my_files
|
3eded569652e3eac963fad4fa971f400b682b1fc
|
9fb6960003224529b9433ea81eaddcc1bf37206a
|
refs/heads/master
| 2021-05-19T16:34:26.912930 | 2020-10-09T00:50:59 | 2020-10-09T00:50:59 | 252,029,729 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 622 |
py
|
# Largest palindrome that is a product of two 3-digit numbers (Project Euler 4).
n=999999
nn=999
def max_poldr(n):
    # Scan palindromic candidates downward from n; for each palindrome look
    # for an integer 3-digit divisor nn whose cofactor n/nn is also in range.
    while n>10000:
        if n==int(str(n)[::-1]):
            nn=999
            while nn>100:
                # NOTE(review): the cofactor lower bound is 101, not 100 --
                # confirm whether 100/101-boundary factors were meant to be
                # excluded.
                if n/nn <=999 and n/nn>=101 and n/nn==int(n/nn):
                    print('tugfa')  # leftover debug output
                    return(nn,' times ', n//nn,' is ', n )
                    n=10000  # unreachable: follows the return
                    break
                nn-=1
        # Step down to the next candidate: decrement the upper half of the
        # digit string and refill the lower half with 9s.
        n-=10**(int((len(str(n))-0.1)/2)+1)
        n=int(str(n)[:int((len(str(n))-0.1)/2)+1] + '9'*(len(str(n))//2))
        continue
        n-=1  # unreachable: follows the continue
print (max_poldr(999999))
|
[
"[email protected]"
] | |
73a7c6f6c221eea8f7d32240e74cb094dffad3f5
|
755e8c18b6877cfc750d49f65a9b31a1380c3862
|
/main.py
|
d9c43cf8ae9297d4dc9d3e2bbb75cb11efa38514
|
[] |
no_license
|
nope2jope/sooth_seer
|
36b9382c69a64837e2ae57689dba395c3f0191e7
|
7bda629c33c1b778041a24bc3a2df041efe434f4
|
refs/heads/master
| 2023-07-15T12:14:23.576191 | 2021-09-01T16:48:51 | 2021-09-01T16:48:51 | 387,584,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,554 |
py
|
# --- Flask app setup for the tarot-reading site ---
from sooth_sayer import croupier, deck_writer
from pprint import pprint
import os.path
from flask import Flask, render_template, redirect, url_for, request
from flask_bootstrap import Bootstrap
import os
app = Flask(__name__)
# The secret key must come from the environment (raises KeyError if unset).
app.config['SECRET_KEY'] = os.environ['ENV_SECRET_KEY']
Bootstrap(app)
# checks to see if csv exists
# if not, runs functions to scrape and format card info
if not os.path.exists('tarot_deck.csv'):
    deck_writer.write_deck()
# retrieves info from csv file
tarot_deck = deck_writer.fetch_deck()
@app.route('/', methods=['GET','POST'])
def home():
    """Landing page: render the index with no cards drawn yet."""
    return render_template("index.html", c=0, bool=False)
@app.route('/one-card')
def one_card():
    """Draw and render a one-card spread."""
    reading = croupier.fortune_teller(deck=tarot_deck, spread=1)
    return render_template(
        "index.html", c=1, f=reading, bool=True, p=['Fortune'])
@app.route('/three-card')
def three_card():
    """Draw and render a three-card (past/present/future) spread."""
    reading = croupier.fortune_teller(deck=tarot_deck, spread=3)
    return render_template(
        "index.html", c=3, f=reading, bool=True,
        p=['Past', 'Present', 'Future'])
@app.route('/four-card')
def four_card():
    """Draw and render a four-card spread (querent plus past/present/future)."""
    reading = croupier.fortune_teller(deck=tarot_deck, spread=4)
    return render_template(
        "index.html", c=4, f=reading, bool=True,
        p=['Querent', 'Past', 'Present', 'Future'])
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside a container.
    app.run(host='0.0.0.0', port=5000)
|
[
"[email protected]"
] | |
b9c5ca1798fcaffb1707909fd79abe2418769bda
|
04ac33f68827aeef7d5bc441d10979143828ef1a
|
/contactSpider.py
|
037682c5a672fc9a935a9454eaef442e24e5a338
|
[] |
no_license
|
samshultz/realtor_agent_spider
|
a06e99af15fc78902c5f44fcb91dd6d55490b14f
|
4550301a9e4733ad19bd6fd904e079037847bbf7
|
refs/heads/master
| 2021-07-05T04:28:17.703484 | 2017-09-30T02:22:34 | 2017-09-30T02:22:34 | 105,333,052 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,933 |
py
|
import scrapy
class ContactSpider(scrapy.Spider):
    """Scrapy spider that collects contact/address details for realtor.com
    agents in Los Angeles, CA."""
    # name of the spider
    name = "contacts"
    # the url to start scraping from
    start_urls = [
        "https://www.realtor.com/realestateagents/Los-Angeles_CA"
    ]
    def parse(self, response):
        # check the page for the name of the agent...
        for href in response.css("div[itemprop=name] a::attr(href)"):
            # ...click on it and call the parse_agent method on each one
            yield response.follow(href, self.parse_agent)
        # follow pagination links...
        # for href in response.css("a.next::attr(href)"):
        #     #...repeat this method (parse method) on each page
        #     yield response.follow(href, self.parse)
    def parse_agent(self, response):
        """Extract one agent's name, location and postal address parts."""
        # get the element containing the address info and extract the text
        address = response.css("#modalcontactInfo span[itemprop=streetAddress]::text").extract_first()
        # check if the address is available...
        if address is not None:
            # ... if it is, get the city, state and zipcode from it (this info
            # is contained in the last three info in the address)
            # NOTE(review): split(",") does not strip whitespace, so city/state/
            # zipcode keep leading spaces (e.g. " CA") -- strip if undesired.
            city, state, zipcode = address.split(",")[-3:]
            # separate the address
            addr = ''.join(address.split(",")[:-3])
        else:
            # if the address is not available
            # set the city, state, addr and zipcode to empty string
            city, state, zipcode = "", "", ""
            addr = ""
        # return a dictionary of the extracted info
        yield {
            "name": response.css("#modalcontactInfo p.modal-agent-name::text").extract_first().split(",")[0],
            "location": response.css("#modalcontactInfo p.modal-agent-location::text").extract_first().strip(),
            "address": addr,
            "city": city,
            "state": state,
            "zipcode": zipcode,
        }
|
[
"[email protected]"
] | |
da6bbc90bc3f5eb9cc318bf4a17e5c13343bd2a6
|
ed7e9c15fa0ddf5fa45e5c9392cd032dcc41c160
|
/miro_name_funktion.py
|
b5b52789bbb8f0dced89a7797deee3d25cf1b8b2
|
[] |
no_license
|
ZiggyStarProgrammer/kyh-practice
|
acefd043572a8d8ac03ea6e06544f485c32c2349
|
d253ddb1e39e99bf5528096541d4b978a7e7d289
|
refs/heads/master
| 2023-01-13T13:04:13.948295 | 2020-11-30T09:04:02 | 2020-11-30T09:04:02 | 291,671,537 | 0 | 0 | null | 2020-09-01T08:18:56 | 2020-08-31T09:25:50 |
Python
|
UTF-8
|
Python
| false | false | 148 |
py
|
import random
def hello(name):
    """Greet *name* with a random age between 1 and 150 (inclusive).

    Prints the greeting and also returns it.  The original returned None,
    so the caller's `print(result)` always printed "None"; it also said
    "Hell" instead of "Hello".
    """
    rnd_age = random.randint(1, 150)
    msg = f"Hello {name} age {rnd_age}"  # fixed typo: "Hell" -> "Hello"
    print(msg)
    return msg
result = hello("Olof")
# Echo the function's return value.
print(result)
|
[
"[email protected]"
] | |
3f54caa0e55508ce17876cc1465946e0635ab1b6
|
caf34f9e5b3f9ea60a4481331004a17a278bfd55
|
/django_scrapy/quote/apps.py
|
bbcbdc2376edb935c31606c84860113c35a5518f
|
[] |
no_license
|
ArielLahiany/django_scrapy
|
a827cfa737263bd4aa1d52f3c39ac2c1180b7d6c
|
fa2b0376b5e1aa0da306612a915d5193b194ad9e
|
refs/heads/master
| 2023-03-26T10:50:52.569171 | 2021-03-24T21:20:12 | 2021-03-24T21:20:12 | 351,224,986 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
# Django modules
from django.apps import AppConfig
class QuoteConfig(AppConfig):
    """Django application configuration for the quote app."""
    name = 'django_scrapy.quote'
    verbose_name = "Quote"
|
[
"[email protected]"
] | |
883646eb7ce3edd5796ff15d1b6dfccf9f746abe
|
8662ed271890dfc310c3854359d4663bfb051529
|
/Q7_b.py
|
49a34b843f33d45a87bb233727108f2531daed5b
|
[] |
no_license
|
zahraDehghanian97/Football_Player_Classification
|
daf10145fafbd8d542b3a17f0364b6eebb9d2651
|
7bc8f9685cc00b630c8c95b0a7c98daa494bee5c
|
refs/heads/master
| 2020-09-07T21:09:54.848689 | 2019-11-11T06:15:40 | 2019-11-11T06:15:40 | 220,913,642 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,103 |
py
|
# Fit a 2D Gaussian to each player's (x, y) positions from first-half logs
# and render a sampled position heatmap per player.
import csv
import matplotlib.pyplot as plt
import statistics
import numpy as np
data = []
with open('first_half_logs.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        if line_count == 0:
            # Skip the header row.
            line_count += 1
        else:
            data.append(row)
            line_count += 1
players = []
for i in range(16):
    players.append([])
# Column 1 is the player id; columns 2 and 3 are the x/y coordinates.
for d in data:
    temp = [float(d[2]), float(d[3])]
    players[int(d[1])].append(temp)
mean = []
cov = []
number =[]
for player in players :
    # Need at least two samples for a covariance estimate.
    if len(player) > 1:
        mean.append([statistics.mean(np.array(player)[:,0]),statistics.mean(np.array(player)[:,1])])
        number.append(player)  # NOTE(review): collected but never used below
        cov.append(np.cov(np.array(player)[:,0],np.array(player)[:,1]))
counter =0
N_bins = 100
for player in range(len(mean)) :
    # Sample 5000 points from the fitted Gaussian and histogram them.
    x, y = np.random.multivariate_normal(mean[player], cov[player], 5000).T
    # plt.figure(counter)
    plt.figure(figsize=(1, 2))
    # NOTE(review): `normed` was removed from hist2d in matplotlib >= 3.1;
    # use `density=False` on modern versions.
    plt.hist2d(x, y, bins=N_bins, normed=False, cmap='plasma')
    counter += 1
print(cov)
# Show the plot.
plt.show()
|
[
"[email protected]"
] | |
cc2703f83d745a4127e40e6b080d631591948f77
|
83aaf1e569e931cc2398301bb3238f834d8e7b67
|
/Teste/client.py
|
0fe88fcf9745b4b35514634f8754dda11516017c
|
[] |
no_license
|
Lucaspvo/CI061_RedesDeComputadoresII
|
e4e6f0e2d7988ff5e1a3864df14c653a45893d74
|
7e05c64f58128936af2bb5941449ea49e4327cf7
|
refs/heads/master
| 2021-01-12T04:11:33.957778 | 2016-12-28T23:21:06 | 2016-12-28T23:21:06 | 77,536,527 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
#!/usr/bin/python # This is client.py file
# -*- coding: utf-8 -*-
import socket # Import socket module
import sys
sys.path.append( 'Teste' )
from Transmition import *
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12345 # Reserve a port for your service.
s.connect((host, port))
data = "Vou me comunicar com o servidor agora!"
Transmition.Send(s, data)
#s.recv(1480)
data = "Ja devia ter parado"
Transmition.Send(s, data)
string = Transmition.Recv(s)
print string
s.close # Close the socket when done
|
[
"[email protected]"
] | |
43078cfccfee9f2bbde2f0af3de46006b564a128
|
0725ed7ab6be91dfc0b16fef12a8871c08917465
|
/tree/is_bst.py
|
26ed670c86a2703f7550da0fa62852b62ed81d7b
|
[] |
no_license
|
siddhism/leetcode
|
8cb194156893fd6e9681ef50c84f0355d09e9026
|
877933424e6d2c590d6ac53db18bee951a3d9de4
|
refs/heads/master
| 2023-03-28T08:14:12.927995 | 2021-03-24T10:46:20 | 2021-03-24T10:46:20 | 212,151,205 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 716 |
py
|
# A binary tree node
import sys
class Node:
    """A binary tree node holding a value and left/right child links."""
    # Constructor to create a new node
    def __init__(self, data):
        # Children start empty; callers attach subtrees by assignment.
        self.data = data
        self.left = None
        self.right = None
def is_bst(node, min_limit, max_limit):
    """Return True when the subtree rooted at *node* is a binary search tree
    whose keys all lie strictly inside (min_limit, max_limit).

    Duplicate keys are rejected because the bounds are exclusive.
    """
    if not node:
        return True
    if node.data <= min_limit or node.data >= max_limit:
        return False
    return (is_bst(node.left, min_limit, node.data) and
            is_bst(node.right, node.data, max_limit))
# Driver program to test above function
# Tree built below:      4
#                      /   \
#                     2     5
#                    / \
#                   1   3
root = Node(4)
root.left = Node(2)
root.right = Node(5)
root.left.left = Node(1)
root.left.right = Node(3)
# NOTE: Python 2 only (print statement, sys.maxint); will not run on Python 3.
if (is_bst(root, -sys.maxint, sys.maxint)):
    print "Is BST"
else:
    print "Not a BST"
|
[
"[email protected]"
] | |
e5ab44dc776222c231274dd703bcd5aebdb8b110
|
f207586e34b37b13ee6012ea08f174e302fa0078
|
/mimo/util/decorate.py
|
cf41979d6dfcac6b024ecd468df4e0901d8627e7
|
[
"MIT"
] |
permissive
|
pnickl/mimo
|
92b7858108e077ff43082f15f635d1205120b143
|
81c4bbd2594e2136445009eae752ab8a1602a1cf
|
refs/heads/master
| 2022-12-24T02:10:34.838878 | 2020-08-04T19:24:21 | 2020-08-04T19:24:21 | 302,394,694 | 2 | 0 |
MIT
| 2020-10-08T16:07:26 | 2020-10-08T16:07:25 | null |
UTF-8
|
Python
| false | false | 1,796 |
py
|
def pass_obs_arg(f):
    """Decorator: when *obs* is omitted, substitute the model's stored
    observations; otherwise normalize *obs* into a list."""
    def wrapper(self, obs=None, **kwargs):
        if obs is None:
            assert self.has_data()
            obs = [_obs for _obs in self.obs]
        elif not isinstance(obs, list):
            obs = [obs]
        return f(self, obs, **kwargs)
    return wrapper
def pass_obs_and_labels_arg(f):
    """Decorator: when either *obs* or *labels* is omitted, substitute the
    model's stored data; otherwise normalize the caller-supplied values."""
    def wrapper(self, obs=None, labels=None, **kwargs):
        if obs is None or labels is None:
            assert self.has_data()
            obs = [_obs for _obs in self.obs]
            labels = self.labels
        else:
            if not isinstance(obs, list):
                obs = [obs]
            if labels is None:  # unreachable here; kept for parity with original
                labels = [self.gating.likelihood.rvs(len(_obs)) for _obs in obs]
        return f(self, obs, labels, **kwargs)
    return wrapper
def pass_target_and_input_arg(f):
    """Decorator: when either target *y* or input *x* is omitted, substitute
    the model's stored data; otherwise normalize both into lists."""
    def wrapper(self, y=None, x=None, **kwargs):
        if y is None or x is None:
            assert self.has_data()
            y = [_y for _y in self.target]
            x = [_x for _x in self.input]
        else:
            if not isinstance(y, list):
                y = [y]
            if not isinstance(x, list):
                x = [x]
        return f(self, y, x, **kwargs)
    return wrapper
def pass_target_input_and_labels_arg(f):
    """Decorator: fill in missing targets/inputs/labels from the model;
    mirrors pass_target_and_input_arg but also resolves the labels *z*."""
    def wrapper(self, y=None, x=None, z=None, **kwargs):
        # NOTE(review): operator precedence makes this
        # `y is None or (x is None and z is None)`; if only x is missing the
        # else-branch runs with x still None.  Possibly intended to be
        # `y is None or x is None` as in pass_target_and_input_arg -- confirm.
        if y is None or x is None and z is None:
            assert self.has_data()
            y = [_y for _y in self.target]
            x = [_x for _x in self.input]
            z = self.labels
        else:
            y = y if isinstance(y, list) else [y]
            x = x if isinstance(x, list) else [x]
            # Draw labels from the gating likelihood when not supplied.
            z = [self.gating.likelihood.rvs(len(_y)) for _y in y]\
                if z is None else z
        return f(self, y, x, z, **kwargs)
    return wrapper
|
[
"[email protected]"
] | |
de2ccc054448a523050c34b2aa361df702f2bc2a
|
1bb13fc75aa56c00ad17a16d7c590060aa71b188
|
/src/test/test_orders.py
|
c26ae7182c118076d8e92b747ca0cc0bbcd37172
|
[
"MIT"
] |
permissive
|
TheEpicBigBoss/pytr
|
add4df656943b1179c7539d6b70ebd3be0255e2c
|
7673f049e7ea635af47d11d8450db18c27b43104
|
refs/heads/master
| 2023-01-24T13:29:49.839121 | 2020-12-04T15:08:51 | 2020-12-04T15:24:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
import asyncio
import pprint
import json
import logging
from py_tr import TradeRepublicApi
tr = TradeRepublicApi()
def save_to_file(file, response):
    """Serialize *response* as JSON into *file*, overwriting any contents."""
    with open(file, 'w') as sink:
        json.dump(response, sink)
async def find_best_warrants():
    """Subscribe to the portfolio, place a market sell order for 1 AAPL on
    LSX, then print every message received -- loops forever.

    NOTE(review): this submits a live order through the API; confirm
    credentials/paper-trading before running.
    """
    logging.info("async started")
    underlying_isin = "US0378331005" # AAPL
    # await tr.search_derivative(underlying_isin)
    # await tr.search("AAPL", asset_type="stock")
    # await tr.ticker("US0378331005", exchange="LSX")
    await tr.portfolio()
    # await tr.cash_available_for_order()
    # await tr.market_order('US0378331005', 'LSX', 'buy', 1, 'gfd', False) # BUY 1 AAPL
    await tr.market_order('US0378331005', 'LSX', 'sell', 1, 'gfd', False) # SELL 1 AAPL
    while True:
        subscription_id, subscription, response = await tr.recv()
        pprint.pprint(response)
        # save_to_file("portfolio.json", response)
        # with open('warrants_'+underlying_isin+'.json', 'w') as outfile:
        # json.dump(response, outfile)
asyncio.get_event_loop().run_until_complete(find_best_warrants())
|
[
"[email protected]"
] | |
ea71dcf4271de4375a1cd100421e6cb04179b2a8
|
ae1d96991a256b905ab8793ebc6063a9628cef02
|
/muddery/combat/normal_combat_handler.py
|
f572690ce4f9a5ce3b3ed3411737fa890fdf193b
|
[
"BSD-3-Clause"
] |
permissive
|
FWiner/muddery
|
bd2028e431dbeae16d6db9806cd2e9a7f4c5f22d
|
f6daa5fab6007e7c830e301718154fbc7b78b2bb
|
refs/heads/master
| 2020-07-31T23:02:54.165362 | 2019-09-04T13:29:59 | 2019-09-04T13:29:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,083 |
py
|
"""
Combat handler.
"""
from django.conf import settings
from muddery.utils import defines
from muddery.utils.builder import delete_object
from muddery.combat.base_combat_handler import BaseCombatHandler
class NormalCombatHandler(BaseCombatHandler):
    """
    This implements the normal combat handler.
    """
    def start_combat(self):
        """
        Start a combat, make all NPCs to cast skills automatically.
        """
        super(NormalCombatHandler, self).start_combat()
        for character in self.characters.values():
            # Characters without an account are NPCs.
            if not character.account:
                # Monsters auto cast skills
                character.start_auto_combat_skill()
    def at_server_shutdown(self):
        """
        This hook is called whenever the server is shutting down fully
        (i.e. not for a restart).
        """
        for character in self.characters.values():
            # Stop auto cast skills
            character.stop_auto_combat_skill()
        super(NormalCombatHandler, self).at_server_shutdown()
    def show_combat(self, character):
        """
        Show combat information to a character.
        Args:
            character: (object) character
        Returns:
            None
        """
        super(NormalCombatHandler, self).show_combat(character)
        # send messages in order
        character.msg({"combat_commands": character.get_combat_commands()})
    def finish(self):
        """
        Finish a combat. Send results to players, and kill all failed characters.
        """
        for character in self.characters.values():
            # Stop auto cast skills
            character.stop_auto_combat_skill()
        super(NormalCombatHandler, self).finish()
    def set_combat_results(self, winners, losers):
        """
        Called when the character wins the combat.
        Args:
            winners: (List) all combat winners.
            losers: (List) all combat losers.
        Returns:
            None
        """
        super(NormalCombatHandler, self).set_combat_results(winners, losers)
        # add exp to winners
        # get total exp
        exp = 0
        for loser in losers:
            # NOTE(review): provide_exp receives the loser itself as the
            # recipient argument -- confirm whether the winner was intended.
            exp += loser.provide_exp(loser)
        if exp:
            # give experience to the winner
            for character in winners:
                character.add_exp(exp, combat=True)
        for character in winners:
            # Only player characters receive loot and quest credit.
            if character.is_typeclass(settings.BASE_PLAYER_CHARACTER_TYPECLASS):
                # get object list
                loots = None
                for loser in losers:
                    obj_list = loser.loot_handler.get_obj_list(character)
                    if obj_list:
                        if not loots:
                            loots = obj_list
                        else:
                            loots.extend(obj_list)
                # give objects to winner
                if loots:
                    character.receive_objects(loots, combat=True)
                # call quest handler
                for loser in losers:
                    character.quest_handler.at_objective(defines.OBJECTIVE_KILL, loser.get_data_key())
        # losers are killed.
        for character in losers:
            character.die(winners)
    def _cleanup_character(self, character):
        """
        Remove character from handler and clean
        it of the back-reference and cmdset
        """
        super(NormalCombatHandler, self)._cleanup_character(character)
        if not character.is_typeclass(settings.BASE_PLAYER_CHARACTER_TYPECLASS):
            if character.is_temp:
                # notify its location
                location = character.location
                delete_object(character.dbref)
                if location:
                    for content in location.contents:
                        if content.has_account:
                            content.show_location()
            else:
                if character.is_alive():
                    # Recover all hp.
                    character.db.hp = character.max_hp
|
[
"[email protected]"
] | |
7c09a10f61384d6fed01557b413b08956ae5ed19
|
e0a2774cebdb4974e5a6c1363f3bc4e01cbe7915
|
/Chondokotha/apps.py
|
ded1f15d4575e672b78748aeb548e54a83e58f74
|
[] |
no_license
|
Julfikar-Haidar/Django-Vue-Apicall-advanced-search
|
cbc91970557848e41bbca127c12615a316ed5fe0
|
e5ff7180e43a7b9ef37847298395ebce5cee3a06
|
refs/heads/master
| 2021-09-27T08:01:08.041435 | 2020-03-18T06:22:08 | 2020-03-18T06:22:08 | 247,681,078 | 1 | 0 | null | 2021-09-22T18:45:47 | 2020-03-16T11:07:22 |
Python
|
UTF-8
|
Python
| false | false | 97 |
py
|
from django.apps import AppConfig
class ChondokothaConfig(AppConfig):
    """Django application configuration for the Chondokotha app."""
    name = 'Chondokotha'
|
[
"[email protected]"
] | |
59294bd41c7d4e0de58d33c5fd3f37a16597be89
|
34a95dd5bf5c1339d6a1ba55a5c8085e62152bac
|
/quantumlounge/http/api/users/users.py
|
e1620b646be3d09e44a8100b03250618a8a39e63
|
[] |
no_license
|
mrtopf/QuantumLounge
|
fa082fd34bad54a9ed3dfdcd358a57590d43bfcc
|
ce9854dc47e7a07c3b59a165c10ee8da61d05db4
|
refs/heads/master
| 2021-01-19T14:32:50.048076 | 2011-09-29T10:27:10 | 2011-09-29T10:27:10 | 838,102 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,397 |
py
|
from quantumlounge.framework import Handler, json, html, RESTfulHandler
import werkzeug
import simplejson
import quantumlounge.usermanager.errors as errors
class Users(RESTfulHandler):
    """handle a collection of Users
    **Allowed Methods**: ALL
    TODO: More documentation in Sphinx about it
    """
    # TODO: Do this via the content type?
    collection_name = "usermanager"
    @json() # we assume JSON for now
    def get(self, format='json'):
        """List users, honouring so/sd (sort), l (limit) and o (offset)
        query parameters; returns a list of JSON-serializable items."""
        um = self.settings[self.collection_name]
        # NOTE(review): `ct` is assigned but never used in this method.
        ct = self.settings['content1']['tweet']
        so = self.request.values.get("so","date") # sort order
        sd = self.request.values.get("sd","down") # sort direction
        try:
            l = int(self.request.values.get("l","10")) # limit
        except:
            return self.error("wrong value for 'l'")
        try:
            o = int(self.request.values.get("o","0")) #offset
        except:
            return self.error("wrong value for 'o'")
        content = um.index(
            sort_on = so,
            sort_order = sd,
            limit = l,
            offset = o
        )
        content = [c.json for c in content]
        return content
    @json()
    def post(self):
        """Create a new item"""
        # NOTE(review): creation is not implemented yet -- the form data is
        # only printed (Python 2 print statement) and "ok" returned.
        f = self.request.form
        print "POSTING", f
        return "ok"
class User(RESTfulHandler):
    """single item -- no methods defined yet; inherits RESTfulHandler behavior."""
|
[
"[email protected]"
] | |
0110676b8f2a531de2bd3d355df08326b97bf65d
|
3619115e1d476864885ca8d78bc17f19cba490f7
|
/Precis/Precis/Data/precis_formula.py
|
588e68dfeae38b9aad521418e8cfa28af2b866ae
|
[] |
no_license
|
ssaha6/Precis
|
bc9a1c975a5f024304770fc600a134e9f9890453
|
c2c59b1f126876a38f41036ecb2b0912e009dc89
|
refs/heads/master
| 2020-09-25T08:44:25.223012 | 2019-12-04T22:01:14 | 2019-12-04T22:01:14 | 225,965,202 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,288 |
py
|
from z3 import *
import re
class PrecisFormula:
# formulaZ3: Z3ExprRef; variable of Z3 version ---> more precisely should be BoolRef
formulaZ3 = None
# formula is string representation of formula
formula = ""
# string rep of formula
    def __init__(self, varZ3):
        """Wrap a Z3 boolean expression, caching its single-line string form."""
        self.formulaZ3 = varZ3
        # Z3eprx in string format add a newline after every conjunct
        self.formula = str(varZ3).replace("\n", "")
    # returns string
    def toInfix(self):
        """Convert the cached prefix-style Z3 string (And/Or/Not calls) into
        a C#-compatible infix expression string.

        Repeatedly applies self.replace() until no rewrite happens, then
        swaps the ` / ~ parenthesis placeholders back to ( / ) and lowercases
        the boolean literals.
        NOTE(review): several replace() pairs below appear to have identical
        arguments; the original likely used non-ASCII spaces that were
        normalized -- verify against the upstream file.
        """
        s = self.formula
        while True:
            s, flag = self.replace(s)
            if not flag:
                # replace("&& ","&& ") is to deal with spacing added in z3 expr when toString
                # symbols ~ and ) are used placed holders for left and right parenthesis.
                # We need these place holders because our regex looks for left and right paren
                replacePlacedHolderFormula = s.replace("`", "(").replace("~", ")") \
                    .replace("&& ", "&& ").replace("&& ", "&& ") \
                    .replace("&& ", "&& ") \
                    .replace("&& ", "&& ") \
                    .replace("&& ", "&& ").replace("&& ", "&& ") \
                    .replace("|| ", "|| ").replace("|| ", "|| ") \
                    .replace("|| ", "|| ").replace("|| ", "|| ").replace("|| ", "|| ")\
                    .replace("== ","== ")\
                    .replace("!= ","!= ")
                cSharpCompatibleFormula = replacePlacedHolderFormula.replace(
                    "False", "false").replace("True", "true")
                return cSharpCompatibleFormula
    # Acknowledgement: Neil Zhao
    def replace(self, s):
        """One rewrite pass: convert each innermost And/Or/Not(...) call in
        *s* to infix form, using ` and ~ as parenthesis placeholders.
        Returns (new_s, changed) where changed is True if any rewrite fired."""
        pattern = re.compile(r'((And|Or|Not)(\(([^,()]+(,[^,()]+)*)\)))')
        res = pattern.findall(s)
        for r in res:
            if r[1] == 'And':
                conjunct = r[2][1:-1]
                replacement = conjunct.replace(', ', ' && ')
                s = s.replace(r[0], '`{}~'.format(replacement))
            elif r[1] == 'Or':
                disjunct = r[2][1:-1]
                replacement = disjunct.replace(', ', ' || ')
                s = s.replace(r[0], '`{}~'.format(replacement))
            elif r[1] == 'Not':
                negation = r[2][1:-1]
                # NOTE(review): `replacement` is computed but unused here --
                # the substitution below uses the raw `negation` text.
                replacement = negation.replace(', ', ' && ')
                s = s.replace(r[0], '`!`{}~~'.format(negation))
            else:
                assert(False) # why this case
        return s, len(res) > 0
def precisAnd(self, other):
# check other is of type z3eprx
return PrecisFormula(And(self.formulaZ3. other))
def precisOr(self, other):
# check other is of type z3eprx
return PrecisFormula(Or(self.formulaZ3. other))
def precisSimplify(self):
postcondition = self.formulaZ3
set_option(max_args=10000000, max_lines=1000000,
max_depth=10000000, max_visited=1000000)
set_option(html_mode=False)
set_fpa_pretty(flag=False)
#intVars = [ Int(var) for var in intVariables]
#boolVars = [ Bool(var) for var in boolVariables]
#declareInts = "\n".join([ "(declare-const " + var + " Int)" for var in intVariables ])
#declareBools = "\n".join([ "(declare-const " + var + " Bool)" for var in boolVariables ])
#stringToParse = "\n".join([declareInts, declareBools, "( assert " + precondition + ")"])
#logger = logging.getLogger("Framework.z3Simplify")
# logger.info("############ z3 program")
# logger.info("############ " + stringToParse)
#expr = parse_smt2_string(strinagToParse)
g = Goal()
g.add(postcondition)
works = Repeat(Then(
OrElse(Tactic('ctx-solver-simplify'), Tactic('skip')),
OrElse(Tactic('unit-subsume-simplify'),Tactic('skip')),
# OrElse(Tactic('propagate-ineqs'),Tactic('skip')),
# OrElse(Tactic('purify-arith'),Tactic('skip')),
#OrElse(Tactic('ctx-simplify'),Tactic('skip')),
#OrElse(Tactic('dom-simplify'),Tactic('skip')),
#OrElse(Tactic('propagate-values'),Tactic('skip')),
OrElse(Tactic('simplify'), Tactic('skip')),
# OrElse(Tactic('aig'),Tactic('skip')),
# OrElse(Tactic('degree-shift'),Tactic('skip')),
# OrElse(Tactic('factor'),Tactic('skip')),
# OrElse(Tactic('lia2pb'),Tactic('skip')),
# OrElse(Tactic('recover-01'),Tactic('skip')),
# must to remove ite
#OrElse(Tactic('elim-term-ite'), Tactic('skip')),
#OrElse(Tactic('smt'), Tactic('skip')),
# OrElse(Tactic('injectivity'),Tactic('skip')),
# OrElse(Tactic('snf'),Tactic('skip')),
# OrElse(Tactic('reduce-args'),Tactic('skip')),
# OrElse(Tactic('elim-and'),Tactic('skip')),
# OrElse(Tactic('symmetry-reduce'),Tactic('skip')),
# OrElse(Tactic('macro-finder'),Tactic('skip')),
# OrElse(Tactic('quasi-macros'),Tactic('skip')),
Repeat(OrElse(Tactic('cofactor-term-ite'), Tactic('skip'))),
Repeat(OrElse(Tactic('split-clause'), Tactic('skip'))),
))
#works1 = Tactic('simplify')
result = works(g)
#result = works1(g)
# split_all =
# print str(result)
# result = [[ "d1", "d2", "d3"], #= conjunct && conjunct
# [ "d4", "d5", "d6"]]
# remove empty subgoals and check if resultant list is empty.
result = filter(None, result)
if not result:
return "true"
# return result
result = list(result)
completeConjunct = []
for i in range(0,len(result)):
conjunction = result[i]
completeDisjunct = []
for literal in conjunction:
#if i >= 1 and literal in result[i-1]:
# continue
completeDisjunct.append(literal)
completeConjunct.append(And(completeDisjunct))
simplifiedPrecondition = Or(completeConjunct)
return simplifiedPrecondition
# g1 = Goal()
# tac = Repeat(Then(
# OrElse(Tactic('tseitin-cnf'),Tactic('skip')),
# OrElse(Tactic('cofactor-term-ite'), Tactic('skip')),
# OrElse(Tactic('ctx-simplify'),Tactic('skip')),
# OrElse(Tactic('dom-simplify'),Tactic('skip')),
# OrElse(Tactic('factor'),Tactic('skip')),
# OrElse(Tactic('elim-term-ite'), Tactic('skip')),
# ))
# g1.add(simplifiedPrecondition)
# post = tac(g1)
# newConju = And(list(post[0]))
# print(PrecisFormula(simplify(newConju)).toInfix())
# print(list(post))
#print(PrecisFormula(post).toInfix())
#simplifiedPrecondition = simplifiedPrecondition.replace("Not", " ! ")
#simplifiedPrecondition = simplifiedPrecondition.replace("False", " false ")
#simplifiedPrecondition = simplifiedPrecondition.replace("True", " true ")
#simplifiedPrecondition = simplifiedPrecondition.replace("\n", " ")
|
[
"[email protected]"
] | |
3987405f70f48d91c8ac18c9912585cb8b9c44d3
|
5ba345bc16519d892fb533451eeface7c76a7d48
|
/Classification/Logistic-Regression/LogisticRegression.py
|
33ac92c5f131dde88d715d277e16cca84ae2164e
|
[] |
no_license
|
sayands/machine-learning-projects
|
337fd2aeb63814b6c47c9b2597bfe1ce4399a1f1
|
8e516c0ac3a96a4058d063b86559ded9be654c35
|
refs/heads/master
| 2021-05-06T16:26:37.008873 | 2018-08-02T20:27:20 | 2018-08-02T20:27:20 | 113,749,745 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,653 |
py
|
#Logistic Regression on the Social Network Ads dataset:
#predict whether a user purchased (column 4) from Age and Estimated Salary
#(columns 2-3), then visualise the decision boundary.
#Importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, 2:4].values
Y = dataset.iloc[:, 4].values
#Splitting the dataset into the Training Set and Test set (75/25, fixed seed)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state = 0)
#Feature Scaling: fit on the training set only, reuse the same transform on
#the test set to avoid leaking test statistics into training.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
#Fitting Logistic Regression To The Training Set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train,Y_train)
#Predicting The Test Set Results
y_pred = classifier.predict(X_test)
#Making The Confusion Matrix (rows = actual, columns = predicted)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
#Visualising The Training Set Results: classify a dense 0.01-step mesh over
#the (scaled) feature plane to paint the decision regions, then scatter the
#actual points coloured by class on top.
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, Y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
#Visualising The Test Set Results (same mesh technique, test points)
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, Y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
fc5d1edb3647e18a663c8c43b897809c51abbf89
|
4c2a391f2f4d7361f2c7111b6d63edf67056f327
|
/model/oauth.py
|
4c650a7683108b8d5c4e420c7b90b52c00c2172a
|
[] |
no_license
|
niyoufa/tnd_server
|
6d69db32ceb5a6a14417b3e8b0f021fdc0e7e79c
|
59c9ac6769773573685be215b4674d77545fe127
|
refs/heads/master
| 2020-06-23T15:43:28.891619 | 2016-08-26T03:44:01 | 2016-08-26T03:44:01 | 66,613,944 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
# -*- coding: utf-8 -*-
"""
author : youfaNi
date : 2016-07-13
"""
from bson.son import SON
import renren.model.model as model
import renren.libs.mongolib as mongo
import renren.consts as consts
import renren.libs.utils as utils
class OauthModel(model.BaseModel,model.Singleton):
    """Singleton data-access model bound to the oauth_clients collection."""
    # Backing collection name; the double underscore name-mangles this to
    # _OauthModel__name, so it is only reachable through this class.
    __name = "renren.oauth_clients"
    def __init__(self):
        # Delegate to BaseModel with the fixed collection name.
        model.BaseModel.__init__(self,OauthModel.__name)
|
[
"[email protected]"
] | |
b77cbf626196e076ca7cd12c221754afd3fca90e
|
ff8847da0e311e097fb2662535e2235f090fb6c8
|
/examples/test_rotcube.py
|
cbba7d2ddd564fb217203105e88ee22bc80be276
|
[
"Apache-2.0"
] |
permissive
|
donghaozhang/SWC2VTK
|
222eda519fed7fcc9f668382db2ca4ccf812a136
|
285e4981a3f1f54b8f819f3490f2e451569ce541
|
refs/heads/master
| 2020-07-07T04:33:58.381671 | 2019-08-20T14:56:38 | 2019-08-20T14:56:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 719 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 11 14:49:53 2016
@author: nebula
"""
import numpy as np
from swc2vtk.vtkgenerator import VtkGenerator
if __name__ == '__main__':
    filename = 'rot_cube.vtk'
    vtkgen = VtkGenerator()
    pi = 3.141592  # unused; kept from the original experiment

    # Chain of 3-D points; each consecutive pair becomes one cuboid.
    pos = np.array([[0, 0, 0], [0, 2, 0], [4, 2, 0], [5, 5, 5], [8, 8, 8], [6, 6, 6], [4, 6, 6], [4, 4, 6]])

    for i in range(pos.shape[0] - 1):
        # Bug fix: Python 2 `print str(...)` statement replaced with a
        # parenthesized call that is valid on both Python 2 and 3.
        print(str(pos[i]) + ' to ' + str(pos[i + 1]))
        # Cuboid from point i to i+1; start/end widths grow linearly with i.
        vtkgen.add_cuboid_p2p(pos[i], pos[i + 1], 0.2 * i + 0.3, 0.2 * i)
    vtkgen.write_vtk(filename)
|
[
"[email protected]"
] | |
0c28c8c4901757f77ea916c729c5f15e0fa6aaee
|
a9205fccfcb73dc1d113f7ed5650ac625082b593
|
/Repositorio.py
|
fa08d70fc2f808521f4e0f9ab0899e0733ec40ec
|
[] |
no_license
|
Fradyzz/CORPCITI
|
2b198e9bd9c0b6523832003d8557865e37d5c43c
|
0c937c39d5006694e9ad3c42a3018e29d3a9495b
|
refs/heads/master
| 2021-04-05T19:53:00.734812 | 2020-04-01T00:01:59 | 2020-04-01T00:01:59 | 248,594,743 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,426 |
py
|
import pandas as pd;
from pandas import ExcelWriter;
from openpyxl.writer.excel import ExcelWriter
class Repos():
    """In-memory repository of events, users and admins with Excel export.

    Records are matched by their code attribute: `codEvent` for events,
    `codigo` for users and admins. Event objects additionally expose
    nomEvent/descrEvent/lugar/fecha/hora; users expose
    cedula/nombre/apellido/fNacim/nick/telf/email/contrase; admins the same
    minus fNacim.
    """

    def __init__(self, eventos=None, usuarios=None, admins=None):
        # None sentinels instead of mutable defaults: the original `=[]`
        # defaults were shared between every instance built without args.
        self.eventos = eventos if eventos is not None else []
        self.usuarios = usuarios if usuarios is not None else []
        self.admins = admins if admins is not None else []

    def loguinUser(self, user, contra):
        """True iff some registered user matches nick and password.

        Bug fix: the original returned False right after inspecting only
        the first user in the list.
        """
        for u in self.usuarios:
            if u.nick == user and u.contrase == contra:
                return True
        return False

    def loguinAdmi(self, user, contra):
        """True iff some registered admin matches nick and password."""
        for a in self.admins:
            if a.nick == user and a.contrase == contra:
                return True
        return False

    def insertaEvent(self, ob):
        self.eventos.append(ob)

    def insertaUser(self, ob):
        self.usuarios.append(ob)

    def insertaAdm(self, ob):
        self.admins.append(ob)

    def countEvent(self):
        return len(self.eventos)

    def countUser(self):
        return len(self.usuarios)

    def countAdm(self):
        return len(self.admins)

    def listataEvent(self, i):
        return self.eventos[i]

    def listataUser(self, i):
        return self.usuarios[i]

    def listataAdm(self, i):
        return self.admins[i]

    def editaEvent(self, cod, ob):
        """Replace every event whose codEvent equals `cod` with `ob`."""
        for idx, ev in enumerate(self.eventos):
            if ev.codEvent == cod:
                self.eventos[idx] = ob

    def editaUser(self, cod, ob):
        """Replace every user whose codigo equals `cod` with `ob`."""
        for idx, us in enumerate(self.usuarios):
            if us.codigo == cod:
                self.usuarios[idx] = ob

    def editaAdm(self, cod, ob):
        """Replace every admin whose codigo equals `cod` with `ob`."""
        for idx, ad in enumerate(self.admins):
            if ad.codigo == cod:
                self.admins[idx] = ob

    def buscaEvent(self, cod, iter):
        """If any event matches `cod`, return the element at index `iter`.

        NOTE(review): this preserves the original (odd) contract — it
        returns the element at position `iter`, not the matching element.
        """
        for ev in self.eventos:
            if ev.codEvent == cod:
                return self.eventos[iter]

    def buscaUser(self, cod, iter):
        """Same contract as buscaEvent, over the user list."""
        for us in self.usuarios:
            if us.codigo == cod:
                return self.usuarios[iter]

    def buscaAdm(self, cod, iter):
        """Same contract as buscaEvent, over the admin list."""
        for ad in self.admins:
            if ad.codigo == cod:
                return self.admins[iter]

    def borraEvent(self, cod):
        """Remove every event with the given code (in place).

        Bug fix: the original removed elements from the list while
        iterating it, which skips the element after each removal.
        """
        self.eventos[:] = [ev for ev in self.eventos if ev.codEvent != cod]

    def borraUser(self, cod):
        """Remove every user with the given code (in place)."""
        self.usuarios[:] = [us for us in self.usuarios if us.codigo != cod]

    def borraAdm(self, cod):
        """Remove every admin with the given code (in place)."""
        self.admins[:] = [ad for ad in self.admins if ad.codigo != cod]

    def SaveExcelEvent(self):
        """Dump all events to RegistroEventos.xlsx, one row per event."""
        cols = ['Codigo', 'Nombre', 'Descripción', 'Lugar', 'Fecha', 'Hora']
        data = {'Codigo': [e.codEvent for e in self.eventos],
                'Nombre': [e.nomEvent for e in self.eventos],
                'Descripción': [e.descrEvent for e in self.eventos],
                'Lugar': [e.lugar for e in self.eventos],
                'Fecha': [e.fecha for e in self.eventos],
                'Hora': [e.hora for e in self.eventos]}
        # Write via DataFrame.to_excel directly: the module-level import of
        # openpyxl's ExcelWriter shadowed pandas' ExcelWriter and broke the
        # old ExcelWriter(filename) calls.
        pd.DataFrame(data, columns=cols).to_excel('RegistroEventos.xlsx', index=False)

    def SaveExcelUser(self):
        """Dump all users to RegistroUsuarios.xlsx.

        Bug fix: dict keys now match the column list exactly — the
        original's 'Fecha de Naicimiento' typo and trailing ':' in
        'Contraseña:' produced all-NaN columns in the output.
        """
        cols = ['Codigo', 'Cédula', 'Nombre', 'Apellido', 'Fecha de Nacimiento',
                'Nick', 'Teléfono', 'Email', 'Contraseña']
        data = {'Codigo': [u.codigo for u in self.usuarios],
                'Cédula': [u.cedula for u in self.usuarios],
                'Nombre': [u.nombre for u in self.usuarios],
                'Apellido': [u.apellido for u in self.usuarios],
                'Fecha de Nacimiento': [u.fNacim for u in self.usuarios],
                'Nick': [u.nick for u in self.usuarios],
                'Teléfono': [u.telf for u in self.usuarios],
                'Email': [u.email for u in self.usuarios],
                'Contraseña': [u.contrase for u in self.usuarios]}
        pd.DataFrame(data, columns=cols).to_excel('RegistroUsuarios.xlsx', index=False)

    def SaveExcelAdmin(self):
        """Dump all admins to RegistroAdmins.xlsx.

        Bug fix: dropped the 'Fecha de Nacimiento' column (admins carry no
        birth date) and aligned the password key with its column name.
        """
        cols = ['Codigo', 'Cédula', 'Nombre', 'Apellido', 'Nick',
                'Teléfono', 'Email', 'Contraseña']
        data = {'Codigo': [a.codigo for a in self.admins],
                'Cédula': [a.cedula for a in self.admins],
                'Nombre': [a.nombre for a in self.admins],
                'Apellido': [a.apellido for a in self.admins],
                'Nick': [a.nick for a in self.admins],
                'Teléfono': [a.telf for a in self.admins],
                'Email': [a.email for a in self.admins],
                'Contraseña': [a.contrase for a in self.admins]}
        pd.DataFrame(data, columns=cols).to_excel('RegistroAdmins.xlsx', index=False)
|
[
"[email protected]"
] | |
d9917661d0f20a96f2d7b647b9a499c5ee7da8b6
|
fbd06dea5dc8b0e538ff7d5bda3429d2de6b461c
|
/api/handler.py
|
8e5eb097671cae3936314635042d2ea5af9bde75
|
[] |
no_license
|
LessioGuilherme/pa004_health_insurance_cross_sell
|
713a78a71768bc505a77eb9672b5e3111a132fec
|
91ace7fffe89b2e785f10f23f160b63d523e18ec
|
refs/heads/main
| 2023-03-27T21:28:35.754093 | 2021-03-29T22:25:03 | 2021-03-29T22:25:03 | 328,284,147 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,212 |
py
|
import os
import pickle
import pandas as pd
from flask import Flask, request, Response
from insurancevehicle.Insurancevehicle import Insurancevehicle

# NOTE(review): absolute Windows-specific path — the service only starts on
# this machine; consider an env var or a path relative to the project root.
model = pickle.load( open('C:/Users/Guilherme/Repos/pa004_health_insurance_cross_sell/model/model_lgbm.pkl', 'rb'))

app = Flask (__name__)

@app.route( '/insurancevehicle/ranking', methods =['POST'] )
def insurance_vehicle_ranking():
    """Score the posted JSON record(s) and return them ranked by the model.

    Accepts a single JSON object or a list of objects; an empty body yields
    an empty JSON 200 response.
    """
    test_json = request.get_json()
    if test_json: # there is data
        if isinstance(test_json, dict): # unique example
            test_raw = pd.DataFrame(test_json, index=[0])
        else: # multiple example
            test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())
        data = test_raw.copy()
        # Re-apply the same preprocessing pipeline used at training time,
        # then rank the raw rows by the model's predictions.
        pipeline = Insurancevehicle()
        data = pipeline.data_cleaning(data)
        data = pipeline.feature_engineering(data)
        data = pipeline.data_preparation(data)
        df_response = pipeline.get_ranking(model, test_raw, data)
        return df_response
    else:
        return Response('{}', status=200, mimetype='application/json')

if __name__ == '__main__':
    # The PORT environment variable wins over the default 5000.
    port = os.environ.get('PORT', 5000)
    app.run(host='127.0.0.1', port=port)
|
[
"[email protected]"
] | |
b1a2cc010714c4d2270279ddfa921fa64a88c943
|
54beda9490ef3f8f1e2c89fc995e1840385bc527
|
/app/models.py
|
c730a00fa9e78a9f2ea7755c3ada601bc6729217
|
[] |
no_license
|
strengthnotes/web
|
f2c7302a112222c8882b0804daef1c35ab59d2c6
|
ac3e1c1bc4b81417bc85b5c5419d93dc84885137
|
refs/heads/master
| 2021-12-28T06:25:15.104421 | 2017-03-12T21:44:44 | 2017-03-12T21:44:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,915 |
py
|
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask_login import UserMixin
from . import db, login_manager
class Role(db.Model):
    """Authorization role; one-to-many with User via users.role_id."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Dynamic relationship: Role.users is a query, not a loaded list.
    users = db.relationship('User', backref='role', lazy='dynamic')
    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Account model with password hashing and email-confirmation tokens."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)

    @property
    def password(self):
        """Write-only: reading the plaintext password is disallowed."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Only the salted hash is ever stored.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True iff `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed token embedding this user's id.

        The token expires after `expiration` seconds (default one hour).
        """
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate `token` and mark the account confirmed on success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; itsdangerous' BadSignature/SignatureExpired
        # subclass Exception, so this is sufficient.
        except Exception:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def __repr__(self):
        return '<User %r>' % self.username
return '<User %r>' % self.username
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user by id (passed in as a string)."""
    return User.query.get(int(user_id))
|
[
"[email protected]"
] | |
d1bbae8eca99d9390cad48e10dfc1865ab044bb6
|
3f51c53b0e4c07b09cf73971c269376d8d544c89
|
/Monthly_Expenditure.py
|
ce4410db348f4636f833baa36706e2319671bcfe
|
[] |
no_license
|
Veena-Wanjari/Monthly_Expenditure_Track
|
131f394ac03033e9e6f7b9e297a2e8fbb487c01e
|
6487096b47378c0947f8361084a81581468f45dd
|
refs/heads/master
| 2022-12-21T21:37:56.681610 | 2020-09-26T11:11:05 | 2020-09-26T11:11:05 | 298,792,370 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,800 |
py
|
import tkinter as tk
from tkinter import ttk
from tkcalendar import Calendar
from csv import DictWriter
import os

# Fixed-size main window for the expense tracker.
window = tk.Tk()
window.title("Monthly Expenditure")
window.geometry("450x550")
window.resizable(width = False, height = False)
#create Items Name label
name_label = ttk.Label(window, text = "Purchased items name:")
name_label.grid(row=0, column=0,padx = 14, pady = 14, sticky = tk.W)
#create Name Entry box
name_var = tk.StringVar()
name_entrybox = ttk.Entry(window, width = 16, textvariable = name_var)
name_entrybox.grid(row = 0, column = 1, padx = 14, pady = 14)
name_entrybox.focus()
#create Price Name label
price_label = ttk.Label(window, text = "Enter price:")
price_label.grid(row = 1, column = 0,padx = 14, pady = 14,sticky = tk.W)
#create Price Entry box
price_var = tk.DoubleVar()
price_entrybox = ttk.Entry(window, width = 16, textvariable = price_var)
price_entrybox.grid(row = 1, column = 1, padx = 14, pady = 14)
# NOTE(review): this second focus() immediately steals focus from the name
# entry above — probably only one of the two calls was intended.
price_entrybox.focus()
#create Shop Name label
shop_name = ttk.Label(window, text = "Select Shop name:")
shop_name.grid(row = 3, column = 0,padx = 14, pady = 14,sticky = tk.W)
#create Combobox (read-only: user must pick one of the preset shops)
shop_var = tk.StringVar()
shop_combobox = ttk.Combobox(window, width = 14, textvariable = shop_var, state = 'readonly')
shop_combobox['values'] = ('Udaya Store', 'Woolworth', 'Big Apple', 'Sunrise Fresh')
shop_combobox.current(0)
shop_combobox.grid(row = 3, column = 1, padx = 14, pady = 14)
#create Calendar Label
calendar_label = ttk.Label(window, text = "Choose Date:")
calendar_label.grid(row = 4, column = 0,padx = 14, pady = 14,sticky = tk.W)
#Creating Calendar, pre-selected to 27 Aug 2020
cal = Calendar(window, selectmode = "day", year = 2020, month = 8, day = 27)
cal.grid(row = 5, column = 1)
def Exit():
    """Close the application window."""
    window.destroy()
def action():
    """Append the current form values as one CSV row and clear the form."""
    item_name = name_var.get()
    # NOTE(review): price_var is a DoubleVar — .get() raises tkinter.TclError
    # when the field does not contain a number; consider validating first.
    item_price = price_var.get()
    shop_details = shop_var.get()
    calc_date = cal.get_date()
    #write to csv, appending so earlier rows are kept
    with open("Monthly_details.csv", 'a', newline="") as f:
        dict_writer = DictWriter(f, fieldnames = ['Item Name', 'Item Price', 'Shop Name', 'Date'])
        # Write the header only when the file is brand new / empty.
        if os.stat('Monthly_details.csv'). st_size == 0:
            dict_writer.writeheader()
        dict_writer.writerow({
            'Item Name' : item_name,
            'Item Price' : item_price,
            'Shop Name': shop_details,
            'Date' : calc_date,
        })
    # Reset the text fields for the next entry.
    name_entrybox.delete(0, tk.END)
    price_entrybox.delete(0, tk.END)
#creating Submit Button
submit_button = ttk.Button(window, text = 'Submit', command = action)
submit_button.grid(row = 7, column = 1, padx = 14, pady = 30)
exit_button = ttk.Button(window, text = 'EXIT', command = Exit)
exit_button.grid(row = 8, column = 1, padx = 14, pady = 10)
window.mainloop()
|
[
"[email protected]"
] | |
515c690329c1f5626a96c8abcf86ef28a3639bdb
|
1a04f833bcc5598f78d0c9febb32cbf7e636dbf3
|
/console/django_scantron/results/urls.py
|
d2d6e9bfe1a743ff2c4087e4d06291153c7fb176
|
[
"Apache-2.0"
] |
permissive
|
opsdisk/scantron
|
be56c09912763a9dfd62121c686d781e5cd45713
|
aad5b0a8b5c863294703baf22cfb2a5b8f8619af
|
refs/heads/master
| 2022-02-04T20:01:44.494099 | 2022-01-17T23:14:42 | 2022-01-17T23:14:42 | 395,031,583 | 138 | 22 |
Apache-2.0
| 2022-01-17T23:27:32 | 2021-08-11T15:14:27 |
Python
|
UTF-8
|
Python
| false | false | 176 |
py
|
from django.conf.urls import url
from django_scantron.results import views
# Route results/<numeric id> to the scan-file download view.
urlpatterns = [url(r"^results/(?P<id>\d+)$", views.retrieve_scan_file, name="retrieve_scan_file")]
|
[
"[email protected]"
] | |
fb95a962370d7b4bb6c6d781611394a5ad69f45a
|
e3fe234510d19c120d56f9a2876b7d508d306212
|
/17tensorflow/5_lm/ngram/ngram.py
|
6146628f947c8ebec2603563c38c067b7d61b32d
|
[
"Apache-2.0"
] |
permissive
|
KEVINYZY/python-tutorial
|
78b348fb2fa2eb1c8c55d016affb6a9534332997
|
ae43536908eb8af56c34865f52a6e8644edc4fa3
|
refs/heads/master
| 2020-03-30T02:11:03.394073 | 2019-12-03T00:52:10 | 2019-12-03T00:52:10 | 150,617,875 | 0 | 0 |
Apache-2.0
| 2018-09-27T16:39:29 | 2018-09-27T16:39:28 | null |
UTF-8
|
Python
| false | false | 3,057 |
py
|
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Data: 17/11/29
# Brief:
"""读取语料 生成 n-gram 模型"""
from collections import Counter, defaultdict
from pprint import pprint
from random import random
import jieba
N = 2 # N元模型
START = '$$' # 句首的 token
BREAK = '。!?' # 作为句子结束的符号
IGNORE = '\n “”"《》〈〉()*' # 忽略不计的符号
def process_segs(segments):
    """Normalize an iterable of segments into a flat list for n-gram counting.

    Characters listed in IGNORE are dropped; N-1 START tokens are placed at
    the very beginning and re-inserted after every sentence-ending symbol
    in BREAK.
    """
    padding = [START] * (N - 1)
    results = list(padding)
    for token in segments:
        if token in IGNORE:
            continue
        results.append(token)
        if token in BREAK:
            results.extend(padding)
    return results
def count_ngram(segments):
    """Count, for every (N-1)-token context, how often each word follows it.

    Returns a defaultdict mapping context tuple -> Counter of next words.
    """
    counts = defaultdict(Counter)
    for i, word in enumerate(segments[N - 1:], start=N - 1):
        context = tuple(segments[i - N + 1:i])
        counts[context][word] += 1
    return counts
def to_prob(dct):
    """Convert a count dictionary into a probability dictionary.

    Bug fix: the original used dict.copy() (a shallow copy) and then
    divided the *shared* Counter objects in place, silently corrupting the
    caller's count dictionary. This version leaves `dct` untouched and
    returns fresh Counters.
    """
    prob_dct = dct.copy()  # preserves the input's (default)dict type
    for context, count in dct.items():
        total = sum(count.values())
        prob_dct[context] = Counter({word: c / total for word, c in count.items()})
    return prob_dct
def generate_word(prob_dct, context):
    """Sample a word from the conditional distribution prob_dct[context].

    Inverse-CDF sampling: draw r in [0, 1) and return the first word whose
    cumulative probability exceeds r.

    Bug fix: when floating-point rounding left the cumulative sum just
    below r, the original fell off the loop and implicitly returned None,
    which crashed the caller on string concatenation. We now fall back to
    the last word seen. Still returns None when the context has no
    recorded successors.
    """
    r = random()
    cumulative = 0
    word = None
    for word, prob in prob_dct[context].items():
        cumulative += prob
        if cumulative > r:
            return word
    return word
def generate_sentences(m, prob_dct):
    """Generate `m` sentences by sampling words until a BREAK symbol.

    The context is reset to N-1 START tokens at the beginning and after
    each completed sentence.
    """
    start_context = tuple(START for _ in range(N - 1))
    sentences = []
    text = ''
    context = start_context
    finished = 0
    while finished < m:
        word = generate_word(prob_dct, context)
        text += word
        # Slide the context window one token to the right.
        context = tuple((list(context) + [word])[1:])
        if word in BREAK:
            sentences.append(text)
            text = ''
            context = start_context
            finished += 1
    return sentences
def main():
    """Train and sample character n-gram models for N = 2..5."""
    # Bug fix: N is the module-level constant consumed by process_segs,
    # count_ngram and generate_sentences; without `global N` the loop bound
    # a function-local N and every iteration silently ran with the default.
    global N
    import pickle

    for N in range(2, 6):
        print('\n*** reading corpus ***')
        with open('../../../data/tianlongbabu.txt', encoding="utf8") as f:
            corpus = f.read()
        print('*** cutting corpus ***')
        raw_segments = jieba.cut(corpus)
        print('*** processing segments ***')
        segments = process_segs(raw_segments)
        print('*** generating {}-gram count dict ***'.format(N))
        dct = count_ngram(segments)
        print('*** generating {}-gram probability dict ***'.format(N))
        prob_dct = to_prob(dct)
        # Bug fix: pickle.dump() requires a file object; the original call
        # omitted it and raised TypeError.
        with open('prob_dct_{}gram.pkl'.format(N), 'wb') as pf:
            pickle.dump(prob_dct, pf)
        print('*** generating sentences ***')
        with open('generated_{}gram.txt'.format(N), 'w', encoding="utf8") as f:
            f.write('\n'.join(generate_sentences(20, prob_dct)))
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
9aefb0ae5bd605c4dae7ca200d14f1508eb9fb11
|
f0755c0ca52a0a278d75b76ee5d9b547d9668c0e
|
/atcoder.jp/abc084/abc084_d/Main.py
|
672f72253da43a227e962b8055a0caa9001017ec
|
[] |
no_license
|
nasama/procon
|
7b70c9a67732d7d92775c40535fd54c0a5e91e25
|
cd012065162650b8a5250a30a7acb1c853955b90
|
refs/heads/master
| 2022-07-28T12:37:21.113636 | 2020-05-19T14:11:30 | 2020-05-19T14:11:30 | 263,695,345 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 567 |
py
|
def primes(n):
    """Sieve of Eratosthenes: return a 0/1 list where index i is 1 iff i is
    prime, for 0 <= i <= n (n must be >= 1).
    """
    is_prime = [1] * (n + 1)
    is_prime[0] = is_prime[1] = 0
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Knock out every multiple of p in one slice assignment.
            is_prime[p * p::p] = [0] * len(range(p * p, n + 1, p))
    return is_prime
# Sieve bound: queries address odd numbers up to 100000.
max = 100001  # NOTE(review): shadows the builtin max(); kept for compatibility
prime = primes(max)
# a[i] = 1 when i is odd and both i and (i+1)//2 are prime
# (the "2017-like numbers" of ABC084 problem D).
a = [0]*max
for i in range(max):
    if i % 2 == 0:
        continue
    if prime[i] and prime[(i+1)//2]:
        a[i] = 1
# Prefix sums: s[i] = number of qualifying values below i, so each query
# is answered in O(1).
s = [0]*(max+1)
for i in range(max):
    s[i+1] = s[i] + a[i]
Q = int(input())
for i in range(Q):
    l,r = map(int, input().split())
    # Count over the inclusive range [l, r].
    print(s[r+1]-s[l])
|
[
"[email protected]"
] | |
5630da04cc30441eabf72f420f1a24217fbaba01
|
e2423781704811bf0a0ecc07f9cb29d0a044ac48
|
/tensorflow_datasets/image/bccd/dummy_data_generation.py
|
bddde3b24d939e2a794def3d52ba9eee64bd8de6
|
[
"Apache-2.0"
] |
permissive
|
mbbessa/datasets
|
af2506a8cf5c46c33143d6e0266ba50d8b4c3fcc
|
2a7e8e793197637948ea0e0be4aa02a6aa2f7f55
|
refs/heads/master
| 2021-11-30T22:28:55.825453 | 2021-11-19T20:49:49 | 2021-11-19T20:52:42 | 171,528,015 | 0 | 0 |
Apache-2.0
| 2019-02-19T18:34:26 | 2019-02-19T18:34:26 | null |
UTF-8
|
Python
| false | false | 5,387 |
py
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate bccd data.
"""
import os
import random
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.image.bccd import bccd
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.testing import fake_data_utils
# In TF 2.0, eager execution is enabled by default
tf.compat.v1.disable_eager_execution()
flags.DEFINE_string("tfds_dir", py_utils.tfds_dir(),
"Path to tensorflow_datasets directory")
FLAGS = flags.FLAGS
MIN_OBJECT_HEIGHT_WIDTH = 100
MAX_OBJECT_HEIGHT_WIDTH = 400
MIN_NUM_OBJECTS = 1
MAX_NUM_OBJECTS = 3
def _output_dir():
  """Directory where the fake BCCD dummy data is written."""
  return os.path.join(FLAGS.tfds_dir, "image", "bccd", "dummy_data")
def _write_text_file(filepath, content):
  """Write a text file given its content, creating parent dirs as needed."""
  dirname = os.path.dirname(filepath)
  if not tf.io.gfile.exists(dirname):
    tf.io.gfile.makedirs(dirname)
  with tf.io.gfile.GFile(filepath, "w") as f:
    f.write(content)
def _generate_jpeg(example_id, height, width):
  """Generate a fake jpeg image for the given example id."""
  jpeg = fake_data_utils.get_random_jpeg(height=height, width=width)
  # Files follow the BCCD naming scheme BloodImage_00000.jpg etc.
  filepath = os.path.join(
      _output_dir(),
      "BCCD_Dataset-1.0/BCCD/JPEGImages/BloodImage_{:05d}.jpg".format(
          example_id))
  dirname = os.path.dirname(filepath)
  if not tf.io.gfile.exists(dirname):
    tf.io.gfile.makedirs(dirname)
  tf.io.gfile.copy(jpeg, filepath, overwrite=True)
def _generate_annotation(example_id, height, width):
  """Generate a fake Pascal-VOC-style annotation XML for `example_id`.

  Emits 1 to 3 random objects with random class labels and bounding boxes
  that fit inside (height, width); the first object is always marked
  non-difficult.
  """
  # pylint: disable=protected-access
  label_names = tfds.features.ClassLabel(names=bccd._CLASS_LABELS).names  # pytype: disable=module-attr
  # pylint: enable=protected-access
  annotation = "<annotation>\n"
  annotation += "<folder>JPEGImages</folder>\n"
  annotation += "<filename>%d.jpg</filename>\n" % example_id
  # Bug fix: the </path> and </source> lines were missing their trailing
  # newlines, fusing three XML elements onto a single output line.
  annotation += "<path>/home/pi/detection_dataset/JPEGImages/%d.jpg</path>\n" % example_id
  annotation += "<source>\n"
  annotation += "<database>Unknown</database>\n"
  annotation += "</source>\n"
  annotation += "<size>\n"
  annotation += "<width>%d</width>\n" % width
  annotation += "<height>%d</height>\n" % height
  annotation += "</size>\n"
  for i in range(random.randint(MIN_NUM_OBJECTS, MAX_NUM_OBJECTS)):
    annotation += "<object>\n"
    annotation += "  <name>%s</name>\n" % random.choice(label_names)
    annotation += "  <pose>Unspecified</pose>\n"
    annotation += "  <truncated>0</truncated>\n"
    if i > 0:
      annotation += "  <difficult>%s</difficult>\n" % random.randint(0, 1)
    else:
      annotation += "  <difficult>0</difficult>\n"
    # Random box, clamped so it always lies inside the image.
    obj_w = random.randint(MIN_OBJECT_HEIGHT_WIDTH, MAX_OBJECT_HEIGHT_WIDTH)
    obj_h = random.randint(MIN_OBJECT_HEIGHT_WIDTH, MAX_OBJECT_HEIGHT_WIDTH)
    obj_x = random.randint(0, width - obj_w)
    obj_y = random.randint(0, height - obj_h)
    annotation += "  <bndbox>\n"
    annotation += "    <xmin>%d</xmin>\n" % obj_x
    annotation += "    <ymin>%d</ymin>\n" % obj_y
    annotation += "    <xmax>%d</xmax>\n" % (obj_x + obj_w - 1)
    annotation += "    <ymax>%d</ymax>\n" % (obj_y + obj_h - 1)
    annotation += "  </bndbox>\n"
    annotation += "</object>\n"
  annotation += "</annotation>\n"
  # Add annotation XML to the tar file.
  filepath = os.path.join(
      _output_dir(),
      "BCCD_Dataset-1.0/BCCD/Annotations/BloodImage_{:05d}.xml".format(
          example_id))
  _write_text_file(filepath, annotation)
def _generate_data_for_set(set_name, example_start, num_examples):
  """Generate different data examples for the train, validation or test sets."""
  # Generate JPEG and XML files of each example (fixed 480x640, like BCCD).
  for example_id in range(example_start, example_start + num_examples):
    _generate_jpeg(example_id, 480, 640)
    _generate_annotation(example_id, 480, 640)
  # Add all example ids to the TXT file with all examples in the set.
  filepath = os.path.join(
      _output_dir(), "BCCD_Dataset-1.0/BCCD/ImageSets/Main/%s.txt" % set_name)
  _write_text_file(
      filepath, "".join([
          "BloodImage_{:05d}\n".format(example_id)
          for example_id in range(example_start, example_start + num_examples)
      ]))
def _generate_trainval_archive():
  """Generate train/val archive (2 train + 1 val example, disjoint ids)."""
  _generate_data_for_set("train", example_start=0, num_examples=2)
  _generate_data_for_set("val", example_start=2, num_examples=1)
def _generate_test_archive():
  """Generate test archive (2 examples, ids continuing after train/val)."""
  _generate_data_for_set("test", example_start=3, num_examples=2)
def main(argv):
  # The script takes no positional command-line arguments.
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  _generate_trainval_archive()
  _generate_test_archive()
if __name__ == "__main__":
  app.run(main)
|
[
"[email protected]"
] | |
4e5784eb3aa014b7f5494320c32dd4b3d0a53444
|
2997a8d56f74e0cc4e949fb20ff7d17be75b2599
|
/百度无人驾驶比赛模型/scnn/scnn.py
|
62fcd8de6c3881491f31d3120f7f99a61881cd53
|
[] |
no_license
|
GuangyanZhang/Paddle-Paddle_SCNN-Deeplabv3-bisenet-icnet
|
039b3856ed67e177d351cbe83e73f352a4a97696
|
326439194bb24ec112c74d9de2b6196add5a76c0
|
refs/heads/master
| 2020-04-30T09:17:23.010395 | 2019-04-07T07:13:33 | 2019-04-07T07:13:33 | 176,742,644 | 18 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,219 |
py
|
import random
import cv2
import numpy as np
import paddle
import PIL.Image
import paddle.fluid as fluid
import time
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
def resize_image(img, target_size1, target_size2):
    """Resize `img` to height target_size1 x width target_size2.

    cv2.resize takes (width, height), hence the swapped argument order
    below. Nearest-neighbour interpolation keeps label maps free of
    interpolated (invalid) class ids.
    """
    # img = img.resize((target_size1, target_size2), PIL.Image.LANCZOS)
    img = cv2.resize(img, (target_size2, target_size1),interpolation=cv2.INTER_NEAREST)
    return img
def batch_normalization(x, relu = False, name = ''):
    """Batch-normalize `x`, optionally fusing a ReLU into the op."""
    activation = 'relu' if relu else None
    return fluid.layers.batch_norm(x, act = activation, name = name)
def get_wav_list(filename):
    """Read a two-column listing file of samples and their labels.

    Each non-empty line has the form "<file-id> <label>", separated by a
    single space. Returns two parallel lists: file ids and labels.
    (The original docstring described the result as a dictionary; both
    return values are plain lists.)
    """
    file_ids = []
    labels = []
    # Context manager guarantees the handle is closed even on error; the
    # original closed it manually and carried dead code (`j = 0`) along.
    with open(filename, 'r') as txt_obj:
        for line in txt_obj.read().split('\n'):
            if line != '':
                parts = line.split(' ')
                file_ids.append(parts[0])
                labels.append(parts[1])
    return file_ids, labels
def get_data(Height, Width,train_image,label_image):
    """Load one (image, label) pair and resize both to (Height, Width).

    `train_image` / `label_image` are paths appended to the hard-coded
    dataset root below. Returns the resized image (BGR, as cv2 loads it)
    and channel 0 of the resized label image — presumably the class-id
    plane; TODO confirm against the label format.
    """
    # train_image = dic_filelist[num]
    # train_image = list_wavmark[num]
    # NOTE(review): dataset root is a machine-specific absolute path.
    train_data = cv2.imread('/home/zhngqn/zgy_RUIMING/baidu_city'+train_image)
    # train_data = PIL.Image.open(train_image)
    image = resize_image(train_data, Height, Width)
    # print('image:', image.shape)
    # train_data = fluid.layers.resize_bilinear(train_data, out_shape=[1024,512])
    label_image = cv2.imread('/home/zhngqn/zgy_RUIMING/baidu_city'+label_image)
    # print('label_image_before:', label_image.shape)
    label_image = resize_image(label_image, Height, Width)
    # print('label_image_after:', label_image.shape)
    # label_image = PIL.Image.open(label_image)
    return image, label_image[:, :, 0]  # keep a single channel
"""
批量读取图片
"""
def get_random():
    """Pick a random sample index that exists in the file list.

    NOTE(review): `dic_filelist` is a module global; in this file it is
    built as a *list* (see get_wav_list), and lists have no .keys(), so
    calling this raises AttributeError. The two branches also return
    inconsistent types (int vs ([-1], [-1])) — verify intent before use.
    """
    num = random.randint(1, 21913)
    if (num in dic_filelist.keys()):
        return num
    else:
        return [-1], [-1]
def data_generator(Height, Width, batch_size,dic_filelist,list_wavmark):
    """Assemble one random batch of (image, label) training pairs.

    Returns X of shape (batch_size, Height, Width, 3) float32 and Y of
    shape (batch_size, Height, Width) int64.
    """
    X = np.zeros((batch_size, Height, Width, 3), dtype=np.float32)  # batch_size train data
    Y = np.zeros((batch_size, Height, Width), dtype=np.int64)  # batch_size label data
    for i in range(batch_size):
        num = random.randint(1, 21913)
        # NOTE(review): `num in dic_filelist` tests membership of the
        # *value* in a list, not index validity, and the single re-roll
        # below does not guarantee a usable index either — verify intent.
        if num in dic_filelist:
            pass
        else:
            num = random.randint(1, 21913)
        #ran_num = random.randint(0, 3999)  # generate a random number
        train_data, label_image = get_data(Height, Width,dic_filelist[num],list_wavmark[num])
        # print('train_data:', train_data.shape)
        # print('label_image:', label_image.shape)
        X[i, 0:len(train_data)] = train_data
        Y[i, 0:len(label_image)] = label_image
    # X = X[:, :, :, ::-1].astype(np.float32) / (255.0 / 2) - 1
    return X, Y
"""
conv2d + BN(TRUE OR FALSE) + RELU+pool
"""
def conv_layers(layers_name, data, num_filters, num_filter_size=3, stride=2, pool_stride=2, padding=1, bias_attr=True, act="relu", Use_BN=True):
    """Conv2D -> optional BatchNorm -> ReLU -> 2x2 max pooling.

    :param layers_name: prefix used to name the sub-layers
    :param data: input variable
    :param num_filters: number of convolution filters
    :param num_filter_size: convolution kernel size
    :param stride: convolution stride
    :param pool_stride: max-pool stride
    :param padding: convolution padding
    :param bias_attr: bias attribute forwarded to conv2d
    :param act: unused here; activation is applied explicitly via relu
    :param Use_BN: apply batch normalization between conv and ReLU
    :return: the pooled output variable
    """
    conv = fluid.layers.conv2d(
        input=data,
        num_filters=num_filters,
        filter_size=num_filter_size,
        stride=stride,
        padding=padding,
        bias_attr=bias_attr,
        act=None,
        name=layers_name + '_conv2d')
    normed = fluid.layers.batch_norm(input=conv, name=layers_name + '_BN') if Use_BN else conv
    activated = fluid.layers.relu(normed, name=layers_name + '_relu')
    return fluid.layers.pool2d(
        input=activated,
        pool_size=2,
        pool_type='max',
        pool_stride=pool_stride,
        global_pooling=False)
#return out_put
class ResNet():
    """
    Truncated ResNet backbone used as the SCNN feature extractor.

    Original design notes (kept from the author):
    2X
    4X
    8X
    16X
    32X
    output16 = 16X
    output32 = 32X
    layers = 22 (2+4*5)
    """
    def __init__(self, is_test=False):
        # is_test is forwarded to batch_norm so inference uses running stats.
        self.is_test = is_test
    def net(self, input):
        """Build the backbone; returns the feature map after three
        bottleneck stages (1/8 of the input resolution)."""
        # if layers == 22:
        #     depth = [1, 1, 1, 1]
        conv = self.conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=1, act='relu', trainable=False)
        conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=1, pool_padding=1, pool_type='max')
        # conv2 = self.bottleneck_block(input=conv, num_filters=64, stride=2, trainable=False)#2 X
        ## output channels = num_filters * 2
        conv2 = self.bottleneck_block(input=conv, num_filters=64, stride=2, trainable=False)  # 42 X
        conv4 = self.bottleneck_block(input=conv2, num_filters=128, stride=2, trainable=False)  # 8 X
        conv8 = self.bottleneck_block(input=conv4, num_filters=128, stride=2, trainable=False)  # 16 X ,
        return conv8
    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      trainable=True):
        """conv2d followed by batch_norm; ``act`` is applied by the BN op."""
        param_attr = fluid.ParamAttr(trainable=trainable)
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,  # possibly problematic; padding may need a redesign ???
            groups=groups,
            act=None,
            bias_attr=True,
            param_attr=param_attr)
        return fluid.layers.batch_norm(input=conv, act=act, is_test=self.is_test, param_attr=param_attr)
    def shortcut(self, input, ch_out, stride, trainable=True):
        """Identity shortcut, or a 1x1 projection when shape changes."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride=stride, trainable=trainable)
        else:
            return input
    def bottleneck_block(self, input, num_filters, stride, trainable=True):
        """1x1 -> 3x3 -> 1x1 bottleneck with residual add (ReLU fused)."""
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            act='relu',
            trainable=trainable)
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            trainable=trainable)
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 2,
            filter_size=1,
            stride=1,
            act=None,
            trainable=trainable)
        short = self.shortcut(input, num_filters * 2, stride, trainable=trainable)
        # print('element_2:', [short, conv2])
        return fluid.layers.elementwise_add(x=conv2, y=short, act='relu')
"""每片之间的卷积"""
def SCNN(layers_name, data, num_filters, filter_size, padding, stride=1, bias_attr=True, act="relu"):
    """Single conv2d used for slice-to-slice message passing in SCNN.

    :param layers_name: prefix for the conv layer's name
    :param data: input slice variable
    :return: the convolved slice
    """
    slice_conv = fluid.layers.conv2d(input=data,
                                     num_filters=num_filters,
                                     filter_size=filter_size,
                                     stride=stride,
                                     padding=padding,
                                     bias_attr=bias_attr,
                                     act=act,
                                     name=layers_name + '_conv2d')
    return slice_conv
# out_put = fluid.layers.relu(conv2d,name = layers_name + '_relu')
"""
由上到下,下到上的卷积
"""
def SCNN_D_U(input_data, C_size,H_size, W_size):  # H_size is the number of slices along the height axis
    """Top-down then bottom-up SCNN message passing along the H axis.

    Transposes NCHW -> NHCW, sweeps the H slices downward (each slice
    receives a convolved message from the previous one), then sweeps
    upward over the downward results, and concatenates the slices back
    on axis 1.

    NOTE(review): the upward pass appends slices from index H_size-1 down
    to 0, so the final concat is in reversed spatial order relative to the
    input -- confirm this is intentional.
    """
    # print('input_data',input_data)
    x_transposed = fluid.layers.transpose(input_data, perm=[0, 2, 1, 3])  ## NCHW --> NHCW
    # print('x_transposed', x_transposed)
    axes1 = [1]
    layers_concat = list()
    layers_result_concat = list()
    """SCNN_D"""
    # lenth = range(0,H_size)
    for i in range(0, H_size):
        result = fluid.layers.slice(input=x_transposed, axes=axes1, starts=[i], ends=[i + 1])  # take slice i
        # convolution step
        if i == 0:  # the first slice is passed through unchanged
            layers_concat.append(result)  # keep the processed slice
            # scnn_covn2d_last = result
            # scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3],
            # padding=[0,1],stride=1,bias_attr = True,act = "relu")
        else:
            """convolution between adjacent slices"""
            scnn_covn2d = SCNN('scnn_covn_D', layers_concat[-1], num_filters=1, filter_size=[3, W_size],
                               padding=[1, 0], stride=1, bias_attr=True, act="relu")
            #print('scnn_covn2d_before:',scnn_covn2d)
            # scnn_covn2d = fluid.layers.reshape(scnn_covn2d,[0,32])  # collapse to two dims
            #print('scnn_covn2d',scnn_covn2d)
            scnn_covn2d = fluid.layers.reshape(scnn_covn2d, [0, C_size],)  # collapse to (N, C)
            #print('scnn_covn2d_after:',scnn_covn2d)
            # print('result_before:', result)
            result = fluid.layers.transpose(result, perm=[0, 2, 1, 3])
            # print('result_after:', result)
            # print("result_after:", result)
            # print("scnn_covn2d:", scnn_covn2d)
            scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d, axis=0,act='relu')  # broadcast add over the trailing dims
            # scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d)
            scnn_covn2d = fluid.layers.transpose(scnn_covn2d, perm=[0, 2, 1, 3])
            # print('scnn_covn2d:', scnn_covn2d)
            # scnn_covn2d_last = scnn_covn2d  # remember the previous slice
            layers_concat.append(scnn_covn2d)  # keep the processed slice
            # layers_concat.append(scnn_covn2d)
    """SCNN_U"""
    for i in range(H_size - 1, -1, -1):
        # print(i)
        result = layers_concat[i]  # take slice i from the downward pass
        # convolution step
        if i == H_size - 1:  # the last slice is passed through unchanged
            # print(i)
            layers_result_concat.append(result)  # keep the processed slice
            # scnn_covn2d_last = result
            # scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3],
            # padding=[0,1],stride=1,bias_attr = True,act = "relu")
        else:
            """convolution between adjacent slices"""
            # print(i)
            scnn_covn2d = SCNN('scnn_covn_U', layers_result_concat[-1], num_filters=1, filter_size=[3, W_size],
                               padding=[1, 0], stride=1, bias_attr=True, act="relu")
            # print('scnn_covn2d1',scnn_covn2d)
            scnn_covn2d = fluid.layers.reshape(scnn_covn2d, [0, C_size])  # collapse to (N, C)
            result = fluid.layers.transpose(result, perm=[0, 2, 1, 3])
            scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d, axis=0,act='relu')  # broadcast add over the trailing dims
            # scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d)
            scnn_covn2d = fluid.layers.transpose(scnn_covn2d, perm=[0, 2, 1, 3])
            # scnn_covn2d_last = scnn_covn2d  # remember the previous slice
            layers_result_concat.append(scnn_covn2d)  # keep the processed slice
    return fluid.layers.concat(input=layers_result_concat, axis=1)
    # print('layers_concat11',out)
    # scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3], padding=[0,1],stride=1,bias_attr = True,act = "relu")
    # print('scnn_covn2d',scnn_covn2d)
# print('layers_concat11',out)
# scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3], padding=[0,1],stride=1,bias_attr = True,act = "relu")
# print('scnn_covn2d',scnn_covn2d)
"""
由左到右,右到左的卷积
"""
def SCNN_R_L(input_data, C_size,H_size, W_size):  # W_size is the number of slices along the width axis
    """Left-to-right then right-to-left SCNN message passing along W.

    Transposes NCHW -> NWCH, sweeps the W slices rightward, then sweeps
    leftward over the rightward results, and concatenates on axis 1.

    NOTE(review): like SCNN_D_U, the second pass appends slices in
    reversed order before the concat -- confirm intended."""
    x_transposed = fluid.layers.transpose(input_data, perm=[0, 3, 2, 1])  ## NCHW -->NWCH
    axes1 = [1]
    layers_concat = list()
    layers_result_concat = list()
    """SCNN_R"""
    # lenth = range(0,H_size)
    for i in range(0, W_size):
        result = fluid.layers.slice(input=x_transposed, axes=axes1, starts=[i], ends=[i + 1])  # take slice i
        # convolution step
        if i == 0:  # the first slice is passed through unchanged
            layers_concat.append(result)  # keep the processed slice
            # scnn_covn2d_last = result
            # scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3],
            # padding=[0,1],stride=1,bias_attr = True,act = "relu")
        else:
            """convolution between adjacent slices"""
            # if i == 2:
            #     print('SCNN_R:', layers_concat[-1])
            # else:
            #     pass
            scnn_covn2d = SCNN('scnn_covn_R', layers_concat[-1], num_filters=1, filter_size=[3, H_size],
                               padding=[1, 0], stride=1, bias_attr=True, act="relu")
            scnn_covn2d = fluid.layers.reshape(scnn_covn2d, [0, C_size])
            result = fluid.layers.transpose(result, perm=[0, 2, 1, 3])
            # print("result_after:", result)
            # print("scnn_covn2d:", scnn_covn2d)
            scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d, axis=0,act='relu')  # broadcast add over the trailing dims
            # scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d)
            scnn_covn2d = fluid.layers.transpose(scnn_covn2d, perm=[0, 2, 1, 3])
            # scnn_covn2d_last = scnn_covn2d  # remember the previous slice
            layers_concat.append(scnn_covn2d)  # keep the processed slice
    """SCNN_L"""
    for i in range(W_size - 1, -1, -1):
        # print(i)
        result = layers_concat[i]  # take slice i from the rightward pass
        # convolution step
        if i == W_size - 1:  # the last slice is passed through unchanged
            # print(i)
            layers_result_concat.append(result)  # keep the processed slice
            # scnn_covn2d_last = result
            # scnn_covn2d = SCNN_D('scnn_covn2d',result,num_filters =1,filter_size = [32,3],
            # padding=[0,1],stride=1,bias_attr = True,act = "relu")
        else:
            """convolution between adjacent slices"""
            # print(i)
            scnn_covn2d = SCNN('scnn_covn_L', layers_result_concat[-1], num_filters=1, filter_size=[3, H_size],
                               padding=[1, 0], stride=1, bias_attr=True, act="relu")
            # print("scnn_covn2d_before:",scnn_covn2d)
            scnn_covn2d = fluid.layers.reshape(scnn_covn2d, [0, C_size])
            # print("scnn_covn2d_after:",scnn_covn2d)
            # print("result_before:",result)
            result = fluid.layers.transpose(result, perm=[0, 2, 1, 3])
            # print("result_after:", result)
            # print("scnn_covn2d:", scnn_covn2d)
            scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d, axis=0,act='relu')  # broadcast add over the trailing dims
            # scnn_covn2d = fluid.layers.elementwise_add(result, scnn_covn2d)
            scnn_covn2d = fluid.layers.transpose(scnn_covn2d, perm=[0, 2, 1, 3])
            # print("scnn_covn2d:",scnn_covn2d)
            # scnn_covn2d_last = scnn_covn2d  # remember the previous slice
            layers_result_concat.append(scnn_covn2d)  # keep the processed slice
    return fluid.layers.concat(input=layers_result_concat, axis=1)
def SCNN_D_U_R_L(input_data, C_size, H_size, W_size):
    """Run vertical (down/up) then horizontal (right/left) SCNN message
    passing and return the result transposed to NHWC (perm [0, 2, 3, 1])."""
    vertical = SCNN_D_U(input_data, C_size, H_size, W_size)
    horizontal = SCNN_R_L(vertical, C_size, H_size, W_size)
    return fluid.layers.transpose(horizontal, perm=[0, 2, 3, 1])
#print(out_transposed)
def output_layers(input_data,C_size, H_size,W_size,num_classes):
    """Full network head: ResNet backbone -> SCNN passes -> upsampling
    decoder ending in a 1x1 conv with ``num_classes`` channels.

    NOTE(review): SCNN_D_U_R_L returns NHWC (it transposes with
    perm [0, 2, 3, 1]) while conv2d_transpose below is fed directly with
    that output -- confirm the intended data layout.
    """
    model = ResNet(is_test=False)
    # spatial_net = model.bottleneck_block1(inputs)
    end_points_8 = model.net(input_data)
    output_dat = SCNN_D_U_R_L(end_points_8,C_size, H_size,W_size )
    net = batch_normalization(output_dat, relu=True, name='conv2d_transpose_bn1')
    net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])
    net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')
    net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])
    net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')
    net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')
    # net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')
    net = fluid.layers.conv2d(net, num_classes, 1)
    # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])
    # net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')
    # print('net',net)
    #net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')
    return net
def save_model(exe, save_dir):
    """Persist all parameters of the default main program to ``save_dir``.

    Bug fix: the original only saved when the directory already existed
    and otherwise silently did nothing, so checkpoints could be lost on a
    fresh machine. We now create the directory when it is missing.

    :param exe: fluid.Executor used for the save
    :param save_dir: target directory for the parameter files
    """
    os.makedirs(save_dir, exist_ok=True)
    fluid.io.save_params(exe, dirname=save_dir, main_program=fluid.default_main_program())
# global configure
learning_rate = 5e-4
batch_size = 1
num_classes = 9
Image_Height = 512
Image_Width = 1024
num_pass = 100
C_size = 256
checkpoint_dir = '/home/zhngqn/zgy_RUIMING/code/ASR_v0.6_8k/version0906/SCNN/scnn_checkpoints'
# inference_dir = '/home/zhngqn/zgy_RUIMING/code/ASR_v0.6_8k/version0906/scnn_inference_model'
save_dir= '/home/zhngqn/zgy_RUIMING/code/ASR_v0.6_8k/version0906/SCNN/savepath'
# data layer
inputs = fluid.layers.data(name='img', shape=[3, Image_Height, Image_Width], dtype='float32')
# inputs = fluid.layers.image_resize(img, out_shape=[512,256], resample='BILINEAR')  ## ???
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# infer
predict = output_layers(inputs, C_size, Image_Height // 8, Image_Width // 8,num_classes)
print(predict)
## reshape logits into [batch_size*H*W, num_classes]
predict = fluid.layers.transpose(predict, [0, 2, 3, 1])  ## NCHW --> NHWC
predict_reshape = fluid.layers.reshape(x=predict, shape=[-1, num_classes])
predict_reshape = fluid.layers.softmax(predict_reshape)
print('predict_reshape:', predict_reshape)
# loss function: per-pixel cross entropy over the flattened logits
print('predict_reshape', predict_reshape)
print('label', label)
cost = fluid.layers.cross_entropy(predict_reshape, label,
                                  soft_label=False)  ## same as tf.sparse_softmax_cross_entropy_with_logits()
avg_cost = fluid.layers.mean(cost)
print('avg_cost:', avg_cost)
# accuracy
weight_decay = 0.00004
# acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc = fluid.layers.accuracy(input=predict_reshape, label=label, k=1)
print('acc:', acc)
# get test program
test_program = fluid.default_main_program().clone(for_test=True)
# optimizer: SGD with momentum + L2 weight decay
optimizer = fluid.optimizer.Momentum(
    learning_rate,
    momentum=0.9,
    regularization=fluid.regularizer.L2DecayRegularizer(
        regularization_coeff=weight_decay), )
opts = optimizer.minimize(avg_cost)
# get test program
# test_program = fluid.default_main_program().clone(for_test=True)
# run in CPUPlace
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# define a executor
exe = fluid.Executor(place)
# parameters initialize globally
exe.run(fluid.default_startup_program())
# startup_prog = fluid.default_startup_program()
# exe.run(startup_prog)
# fluid.io.load_persistables(exe, checkpoint_dir, startup_prog)
prog = fluid.default_main_program()
# fluid.io.load_persistables(executor=exe, dirname=checkpoint_dir,
# main_program=None)
# resume from the latest checkpoint when one exists
if os.path.exists(checkpoint_dir + '/persistabels_model'):
    fluid.io.load_persistables(exe, checkpoint_dir + '/persistabels_model', main_program=prog)
else:
    pass
# define input data
feeder = fluid.DataFeeder(place=place, feed_list=[inputs, label])
# [inference_program, feed_target_names, fetch_targets]=fluid.io.load_inference_model(dirname=checkpoint_dir+'/', executor=exe)
filename = '/home/zhngqn/zgy_RUIMING/baidu_city/train.txt'
dic_filelist, list_wavmark = get_wav_list(filename)
# begin to train
for pass_id in range(num_pass):
    for batch_id in range(20000):
        start = time.time()
        # training process
        #num = random.randint(0, 21914)
        #print(dic_filelist[num],list_wavmark[num])
        train_data, train_label_data = data_generator(Image_Height, Image_Width, batch_size, dic_filelist,list_wavmark)
        end = time.time()
        # train_data transpose into NCHW
        train_data = np.transpose(train_data, (0, 3, 1, 2))
        # train_label_data = np.transpose(train_label_data, axes=[0, ])
        train_label_data = np.reshape(train_label_data, (-1, 1))
        # print('train_data, train_label:', [train_data.shape, train_label_data.shape])
        # train_data = np.random.uniform(0, 1, (batch_size, 3, Image_Height, Image_Width)).astype(np.float32)
        # train_label_data = np.zeros((5786640 * batch_size, 1)).astype(np.int64).reshape(5786640 * batch_size, 1)
        train_cost, train_acc = exe.run(program=fluid.default_main_program(),
                                        feed={'img': train_data,
                                              'label': train_label_data},
                                        fetch_list=[avg_cost.name, acc.name])
        # end = time.time()
        # np.size(train_label_data[train_label_data ==0])/np.size(train_label_data)
        # base_acc = fraction of pixels labeled 0; used to rescale raw
        # accuracy into accuracy-above-the-constant-baseline below.
        base_acc = np.size(train_label_data[train_label_data ==0])/np.size(train_label_data)
        if batch_id % 10 == 0:
            print("Pass: %d, Batch: %d, Train_Cost: %0.5f, Train_Accuracy: %0.5f,time: %0.5f" %
                  (pass_id, batch_id, train_cost[0],(train_acc[0]-base_acc)/(1.0-base_acc) ,end-start))
        # save checkpoints
        # if pass_id % 10 == 0 and batch_id == 0:
        if batch_id % 50 == 0:
            fluid.io.save_persistables(executor=exe,
                                       dirname=checkpoint_dir + '/persistabels_model',
                                       main_program=fluid.default_main_program()
                                       )
            # fluid.io.save_inference_model(dirname=inference_dir,
            #                               feeded_var_names=['img'],
            #                               target_vars=[predict],
            #                               executor=exe,
            #                               main_program=None,
            #                               model_filename=None,
            #                               params_filename=None,
            #                               export_for_deployment=True)
    save_model(exe, save_dir)
|
[
"[email protected]"
] | |
a919599073e4f2964287ff10984860b610212766
|
e1d1d2f008e8284e109d851a87826d943c095dd0
|
/TicTacToe.py
|
9b33a0f59f733fa06cbcbeafffd3d06e46b488ae
|
[
"MIT"
] |
permissive
|
mayamau/Tic-Tac-Toe
|
2f8e82a20429e595225566180171a19898614361
|
f0488d0d82382f6202d77f8c6fc0c53e89bb89f3
|
refs/heads/master
| 2022-07-09T10:54:13.762065 | 2021-09-29T19:16:00 | 2021-09-29T19:16:00 | 123,813,512 | 0 | 0 |
MIT
| 2022-06-22T04:25:10 | 2018-03-04T17:50:17 |
Python
|
UTF-8
|
Python
| false | false | 13,935 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 23:52:33 2018
@author: maya
"""
# import
from tkinter import *
import tkinter.messagebox
from random import randint
import time
import numpy as np
# functions
def restartGame(button):
    """Start a fresh round: X moves first, the board array is cleared,
    and every cell button becomes clickable and blank again."""
    global currentPlayer
    global x
    global TTTArray
    currentPlayer = x
    TTTArray = np.zeros(shape=[3, 3])
    # Cells live at indices 1..9; index 0 is an unused placeholder.
    for cell in button[1:10]:
        cell.config(state=NORMAL, text=' ')
def popupfunc(button, resultString):
    """Show a result popup window and disable the whole board.

    :param button: list of the nine cell Buttons (indices 1..9 used)
    :param resultString: message to display ("X wins!", etc.)
    """
    pop = Toplevel(root)
    pop.title("Result")
    # NOTE(review): this local frame0 shadows the module-level frame0.
    frame0 = Frame(pop, bg=c_black, bd=0, relief=FLAT)
    frame0.pack()
    resultTxt = Label(frame0, text=resultString, foreground=c_white,
                      background=c_black, font=('Agency FB', 20))
    resultTxt.grid(row=0, column=0, padx=20, pady=20)
    okButton = Button(frame0, command=pop.destroy, text="OK", width=10, relief=FLAT, bd=0,
                      background=c_black, foreground=c_aqua1, activebackground=c_black, activeforeground=c_black, font=('Agency FB', 20))
    okButton.grid(row=1, column=0, padx=10, pady=0)
    # Grey out every cell so no further moves can be made after game end.
    for s in range(1, 10):
        button[s].config(state=DISABLED, disabledforeground=c_aqua3)
#
def displaySelection(button, buttonNumber, numberOfPlayers):
    """Handle a click on cell ``buttonNumber``: place the current mark,
    check for a result, and in singleplayer mode let the AI reply.

    :param button: list of the nine cell Buttons (indices 1..9 used)
    :param buttonNumber: 1..9 index of the clicked cell
    :param numberOfPlayers: 1 (vs. minimax AI) or 2 (hot-seat)
    """
    global currentPlayer
    global o
    global x
    global TTTArray
    # Map button index 1..9 to [row, col] on the 3x3 board.
    positionMapping = {1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [
        1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1], 9: [2, 2]}
    currentR, currentC = positionMapping[buttonNumber]
    # Place the human's mark and flip the turn (1 = X, -1 = O).
    if currentPlayer == 1:
        button[buttonNumber].config(
            state=DISABLED, disabledforeground=c_aqua1, text="X")
        TTTArray[currentR, currentC] = currentPlayer
        currentPlayer = -1
    else:
        button[buttonNumber].config(
            state=DISABLED, disabledforeground=c_aqua1, text="O")
        TTTArray[currentR, currentC] = currentPlayer
        currentPlayer = 1
    resultScore, resultString = checkForWinner(
        TTTArray.copy(), positionMapping[buttonNumber].copy())
    if resultString != 'Continue':
        popupfunc(button, resultString)
    else:
        if numberOfPlayers == 1:
            # AI turn: full alpha-beta minimax over the remaining cells.
            alpha, beta = -np.inf, np.inf
            bestMove, resultScore = minimax(TTTArray.copy(), currentPlayer,
                                            positionMapping[buttonNumber], alpha, beta)
            currentR, currentC = bestMove
            # Reverse lookup: board position -> button index.
            buttonNumber = [button for button, move in positionMapping.items() if move == [currentR, currentC]][0]
            # Place the AI's mark and flip the turn back.
            if currentPlayer == 1:
                button[buttonNumber].config(
                    state=DISABLED, disabledforeground=c_aqua1, text="X")
                TTTArray[currentR, currentC] = currentPlayer
                currentPlayer = -1
            else:
                button[buttonNumber].config(
                    state=DISABLED, disabledforeground=c_aqua1, text="O")
                TTTArray[currentR, currentC] = currentPlayer
                currentPlayer = 1
            resultScore, resultString = checkForWinner(
                TTTArray.copy(), positionMapping[buttonNumber].copy())
            if resultString != 'Continue':
                popupfunc(button, resultString)
def checkForWinner(board, move):
    """Evaluate the board state right after ``move`` = [row, col].

    Only the moved row, the moved column and both diagonals can have
    changed, so those four line sums are enough to decide the game.

    :param board: 3x3 numpy array with 1 for X, -1 for O, 0 for empty
    :param move: [row, col] of the most recent move
    :return: (score, message) -- (3, "X wins!"), (-3, "O wins!"),
        (0, "It's a draw!") or (999, "Continue")
    """
    row, col = move
    line_sums = [
        board[row, :].sum(),
        board[:, col].sum(),
        np.diag(board).sum(),
        np.diag(np.fliplr(board)).sum(),
    ]
    if max(line_sums) == 3:
        return 3, "X wins!"
    if min(line_sums) == -3:
        return -3, "O wins!"
    if np.where(board == 0, 1, 0).sum() == 0:
        return 0, "It's a draw!"
    return 999, "Continue"
def minimax(board, currentPlayer, move, alpha, beta):
    """Alpha-beta minimax over the remaining empty cells.

    X (1) maximizes the score, O (-1) minimizes it.

    :param board: 3x3 numpy board (mutated in place and restored)
    :param currentPlayer: 1 or -1, whose move it is
    :param move: the move that led to ``board`` (used for the terminal check)
    :param alpha: best score the maximizer is guaranteed so far
    :param beta: best score the minimizer is guaranteed so far
    :return: (best move, best score); terminal positions return the
        sentinel move [-1, -1] (as a list, unpacked identically by callers)
    """
    if currentPlayer == 1:
        bestScore = -np.inf
        bestMove = [-1, -1]
    else:
        bestScore = +np.inf
        bestMove = [-1, -1]
    resultScore, resultString = checkForWinner(board, move)
    # Terminal position: win, loss or draw -- no move to return.
    if resultScore in [3, -3, 0]:
        return [[-1, -1], resultScore]
    emptyCells = np.argwhere(board == 0)
    for emptyCell in emptyCells:
        # Try the move, recurse for the opponent, then undo it.
        board[emptyCell[0], emptyCell[1]] = currentPlayer
        move, score = minimax(board, -1 * currentPlayer,
                              emptyCell, alpha, beta)
        board[emptyCell[0], emptyCell[1]] = 0
        if currentPlayer == 1:
            if score > bestScore:
                bestScore = score
                bestMove = emptyCell
            alpha = max(alpha, bestScore)
            if beta <= alpha:
                break  # prune: minimizer will never allow this line
        else:
            if score < bestScore:
                bestScore = score
                bestMove = emptyCell
            beta = min(beta, bestScore)
            if beta <= alpha:
                break  # prune: maximizer will never allow this line
    return bestMove, bestScore
def showXOGrid(numberOfPlayers, frameToHide, headerTxt):
    """Hide the menu, update the header, and build the 3x3 board plus the
    QUIT/RESTART controls.

    Refactor: the nine near-identical Button creations are collapsed into
    one loop (the original repeated the same call 9 times).

    :param numberOfPlayers: 1 for singleplayer (AI opponent), 2 for multiplayer
    :param frameToHide: menu frame removed from the layout
    :param headerTxt: header Label whose text reflects the chosen mode
    """
    frameToHide.pack_forget()
    if numberOfPlayers == 1:
        headerTxt.config(text="SINGLEPLAYER GAME")
    elif numberOfPlayers == 2:
        headerTxt.config(text="MULTIPLAYER GAME")
    b = [0 for x in range(0, 10)]
    for n in range(1, 10):
        # Cell n occupies grid row 1..3, column 0..2.
        row, col = (n - 1) // 3 + 1, (n - 1) % 3
        # The original hand-written grid used 2px x-spacing on cells 2 and 8
        # and 2px y-spacing on cell 6; preserved exactly.
        padx = 2 if n in (2, 8) else 0
        pady = 2 if n == 6 else 0
        # n=n binds the cell number at definition time; a plain lambda
        # would late-bind and make every button report n == 9.
        b[n] = Button(frame2, command=lambda n=n: displaySelection(b, n, numberOfPlayers),
                      background=c_black, relief=FLAT, bd=0, font=('Agency FB', 45),
                      foreground=c_aqua1, width=4, text=" ", activebackground=c_black)
        b[n].grid(row=row, column=col, padx=padx, pady=pady)
    quitButton = Button(frame3, command=root.destroy, text="QUIT", width=10, relief=FLAT, bd=0, background=c_black,
                        foreground=c_aqua1, activebackground=c_black, activeforeground=c_black, font=('Agency FB', 20))
    quitButton.grid(row=0, column=0, padx=10, pady=0)
    restartButton = Button(frame3, command=lambda: restartGame(b), text="RESTART", width=10, relief=FLAT, bd=0,
                           background=c_black, foreground=c_aqua1, activebackground=c_black, activeforeground=c_black, font=('Agency FB', 20))
    restartButton.grid(row=0, column=1, padx=10, pady=0)
# create root window
root = Tk()
# UI params
# --game window title
gameWindowTitle = "Tic Tac Toe"
# --hex colors
c_black = "#030305"
c_white = "#FFFFFF"
c_aqua1 = "#91FCFF"
c_aqua2 = "#00E7ED"
c_aqua3 = "#007b80"  # greyed out c_aqua1
# modify root window
root.title(gameWindowTitle)
root.configure(background=c_black)
# frames: frame0 = header, frame1 = mode menu, frame2 = board grid,
# frame3 = quit/restart controls
frame0 = Frame(root, bg=c_black, bd=0, relief=FLAT)
frame0.pack(padx=0, pady=0)
frame1 = Frame(root, bg=c_aqua2, bd=0, relief=FLAT)
frame1.pack(padx=0, pady=0)
frame2 = Frame(root, bg=c_aqua2, bd=0, relief=FLAT)
frame2.pack(padx=70, pady=0)
frame3 = Frame(root, bg=c_black, bd=0, relief=FLAT)
frame3.pack(padx=0, pady=30)
# header text (updated by showXOGrid once a mode is picked)
headerTxt = Label(frame0,
                  text='SELECT GAME TYPE:',
                  foreground=c_white,
                  background=c_black,
                  font=('Agency FB', 25))
headerTxt.grid(row=0,
               column=0,
               columnspan=3,
               padx=20,
               pady=20)
# Mode-selection menu: two real buttons flanked by disabled spacers.
b = [0] * 5
# empty button for spacing
b[0] = Button(frame1,
              text='\n',
              background=c_black,
              bd=0,
              font=('Agency FB', 15),
              foreground=c_aqua1,
              width=5,
              activebackground=c_black)
b[0].grid(row=1, column=0, padx=0, pady=0)
# SINGLEPLAYER button
b[1] = Button(frame1,
              text='SINGLEPLAYER\nGAME',
              command=lambda: showXOGrid(1, frame1, headerTxt),
              background=c_black,
              bd=0,
              font=('Agency FB', 15),
              foreground=c_aqua1,
              width=30,
              activebackground=c_aqua1,
              activeforeground=c_black,
              cursor="hand2")
b[1].grid(row=1, column=1, padx=0, pady=0)
# empty button for spacing
b[2] = Button(frame1,
              text='\n',
              background=c_black,
              bd=0,
              font=('Agency FB', 15),
              foreground=c_aqua1,
              width=5,
              activebackground=c_black)
b[2].grid(row=1, column=2, padx=0, pady=0)
# MULTIPLAYER button
b[3] = Button(frame1,
              text='MULTIPLAYER\nGAME',
              command=lambda: showXOGrid(2, frame1, headerTxt),
              background=c_black,
              bd=0,
              font=('Agency FB', 15),
              foreground=c_aqua1,
              width=30,
              activebackground=c_aqua1,
              activeforeground=c_black,
              cursor="hand2")
b[3].grid(row=1, column=3, padx=0, pady=0)
# empty button for spacing
b[4] = Button(frame1,
              text='\n',
              background=c_black,
              bd=0,
              font=('Agency FB', 15),
              foreground=c_aqua1,
              width=5,
              activebackground=c_black)
b[4].grid(row=1, column=4, padx=0, pady=0)
# text
# welcomeTxt = Label(frame1,text='MULTIPLAYER GAME',foreground=c_white,background=c_black,font=('Agency FB',25))
# welcomeTxt.grid(row=0,column=0,columnspan=3,padx=20,pady=20)
# #buttons
# b=[0 for x in range(0,10)]
# b[1] = Button(frame2,command=lambda: displaySelection(1),background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[1].grid(row=1,column=0,padx=0,pady=0)
# b[2] = Button(frame2,command=lambda: displaySelection(2), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[2].grid(row=1,column=1,padx=2,pady=0)
# b[3] = Button(frame2,command=lambda: displaySelection(3), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[3].grid(row=1,column=2,padx=0,pady=0)
# b[4] = Button(frame2,command=lambda: displaySelection(4), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[4].grid(row=2,column=0,padx=0,pady=0)
# b[5] = Button(frame2,command=lambda: displaySelection(5), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[5].grid(row=2,column=1,padx=0,pady=0)
# b[6] = Button(frame2,command=lambda: displaySelection(6), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[6].grid(row=2,column=2,padx=0,pady=2)
# b[7] = Button(frame2,command=lambda: displaySelection(7), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[7].grid(row=3,column=0,padx=0,pady=0)
# b[8] = Button(frame2,command=lambda: displaySelection(8), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[8].grid(row=3,column=1,padx=2,pady=0)
# b[9] = Button(frame2,command=lambda: displaySelection(9), background=c_black,relief=FLAT,bd=0,font=('Agency FB',45),foreground=c_aqua1,width=4,text=" ",activebackground=c_black)
# b[9].grid(row=3,column=2,padx=0,pady=0)
# quitButton = Button(frame3,command=root.destroy,text="QUIT",width=10,relief=FLAT,bd=0,background=c_black, foreground=c_aqua1,activebackground=c_black,activeforeground=c_black,font=('Agency FB',20))
# quitButton.grid(row=0,column=0,padx=10,pady=0)
# restartButton = Button(frame3,command=restartGame,text="RESTART",width=10,relief=FLAT,bd=0,background=c_black, foreground=c_aqua1,activebackground=c_black,activeforeground=c_black,font=('Agency FB',20))
# restartButton.grid(row=0,column=1,padx=10,pady=0)
# variables: board marks are 1 for X, -1 for O, 0 for empty; X starts.
x = 1
o = -1
currentPlayer = x
# 3x3 game state mirrored by the button grid.
TTTArray = np.zeros(shape=[3, 3])
root.mainloop()
|
[
"[email protected]"
] | |
ef279fb67bd4928a2129abf2c49b78475fe31e9e
|
8803c894db96609c05dcc3cdabc68a702124f07d
|
/djangochat/urls.py
|
91e43f4f8b902dbafc15f0c8e17aa37a17d7b851
|
[] |
no_license
|
JonathaCnB/django-essential
|
4531d449100f6a7c3fd11bbb0920e7e33db88300
|
182e973a70497702f630ce5acd6352be30fb421f
|
refs/heads/main
| 2023-08-01T07:41:29.683967 | 2021-09-25T01:58:00 | 2021-09-25T01:58:00 | 398,795,534 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
from django.urls import path
from .views import check_view, get_messages_view, send_view, to_enter, view_room
urlpatterns = [
    path("", to_enter, name="to_enter"),
    path("check-room/", check_view, name="checkview"),
    path("send/", send_view, name="send"),
    path("get_messages/<str:room>/", get_messages_view, name="get_messages"),
    # Catch-all room route: must stay last, or it would shadow the
    # literal routes above (Django matches patterns in order).
    path("<str:room>/", view_room, name="room"),
]
|
[
"[email protected]"
] | |
bbf11f6525ae7e8077064cc40b92fa787c203531
|
5cc74872fc45943f27578f6bd647f77d1bd03f0b
|
/ppf/wsgi.py
|
1b26faa1eed919e95fb8543ab4cab990f745e940
|
[] |
no_license
|
johnnyliu1992/midterm-project
|
7aa00012612b00c2baea1f28431924c5b2d0ee9e
|
516ccceb415d120ea9f138ba7828e0556cafe964
|
refs/heads/master
| 2021-07-22T14:48:17.702329 | 2017-11-05T20:46:25 | 2017-11-05T20:46:25 | 109,615,230 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
"""
WSGI config for ppf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Must be set before get_wsgi_application() imports the settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ppf.settings")
# WSGI servers (gunicorn, uWSGI, ...) look up this module-level callable.
application = get_wsgi_application()
|
[
"[email protected]"
] | |
6c5c42ff2a8236b5cf4cb027d980ffb05aed63ad
|
fa258ee7abb5186120ff6162fbeba82d687799b5
|
/token/crowdsale.py
|
34d098e65637d28e10a87f9c9de830aa4b86bdda
|
[] |
no_license
|
frenchtoasters/Project-Ultimatum
|
31b8b0baa44bc7237e7f322fbe4d4fc7028963d3
|
bc9b9ee57a4a363a44ff944616885aa030580db6
|
refs/heads/master
| 2021-10-10T19:18:50.464234 | 2018-06-01T01:26:00 | 2018-06-01T01:26:00 | 133,738,022 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,677 |
py
|
from boa.interop.Neo.Blockchain import GetHeight
from boa.interop.Neo.Runtime import CheckWitness
from boa.interop.Neo.Action import RegisterAction
from boa.interop.Neo.Storage import Get, Put
from boa.builtins import concat
from fuckstick.token import *
from nex.txio import get_asset_attachments
# OnInvalidKYCAddress = RegisterAction('invalid_registration', 'address')
# Smart-contract notification events consumed by off-chain listeners.
OnKYCRegister = RegisterAction('kyc_registration', 'address')
OnTransfer = RegisterAction('transfer', 'addr_from', 'addr_to', 'amount')
OnRefund = RegisterAction('refund', 'addr_to', 'amount')
def kyc_register(ctx, args):
    """
    Whitelist (KYC-register) a batch of addresses; owner only.

    :param ctx: storage context
    :param args:list addresses (20-byte script hashes) to register
    :return:
        int: how many addresses were registered (0 when the caller is
        not the token owner)
    """
    registered = 0
    if CheckWitness(TOKEN_OWNER):
        for addr in args:
            # Only well-formed 20-byte script hashes are accepted.
            if len(addr) == 20:
                storage_key = concat(KYC_KEY, addr)
                Put(ctx, storage_key, True)
                OnKYCRegister(addr)
                registered = registered + 1
    return registered
def kyc_status(ctx, args):
    """
    Look up whether an address has passed KYC.

    :param ctx: storage context
    :param args:list first element is the address to check
    :return:
        the stored KYC flag for the address, or False when no address
        was supplied
    """
    if len(args) > 0:
        address = args[0]
        storage_key = concat(KYC_KEY, address)
        return Get(ctx, storage_key)
    else:
        return False
def perform_exchange(ctx):
    """Mint tokens for the NEO attached to the current invocation.

    :param ctx: storage context
    :return: True when the exchange succeeded; False when the contribution
        was rejected (a refund event is raised for any attached NEO)
    """
    # attachment layout: [receiver, sender, neo, gas]
    attachments = get_asset_attachments()
    if not can_exchange(ctx, attachments, False):
        # A TX can pass verification yet fail here when many contributions
        # land on the final block before the cap is hit: the circulating
        # total cannot be updated during the verification phase, so these
        # contributions must be refunded (via an out-of-band process).
        if attachments[2] > 0:
            OnRefund(attachments[1], attachments[2])
        # a gas-accepting variant would refund attachments[3] here instead
        return False
    sender = attachments[1]
    # tokens earned for the attached NEO (gas variant: TOKENS_PER_GAS)
    minted = attachments[2] * TOKENS_PER_NEO / 100000000
    # credit the sender's running balance
    Put(ctx, sender, Get(ctx, sender) + minted)
    # account for the newly minted tokens in the circulating supply
    add_to_circulation(ctx, minted)
    # dispatch the transfer event (from contract address to sender)
    OnTransfer(attachments[0], sender, minted)
    return True
def can_exchange(ctx, attachments, verify_only):
    """Decide whether this invocation qualifies for the ICO exchange.

    Callable from both the Verification and Application triggers of the
    contract; rejecting unqualified TX during Verification greatly reduces
    the need for manual NEO/GAS refunds.

    :param ctx: storage context
    :param attachments: [receiver, sender, neo, gas] attachment info
    :param verify_only: True when running read-only (Verification trigger)
    :return: True when the attached assets may be exchanged for tokens
    """
    # nothing attached, nothing to exchange
    # (a gas-accepting variant would test attachments[3] instead)
    if attachments[2] == 0:
        return False
    sender = attachments[1]
    # KYC gate: the sender must have been registered with the contract
    # (this lookup is optional for contract operation in general)
    if not get_kyc_status(ctx, sender):
        return False
    # calculate the amount of tokens requested for the attached NEO
    requested = attachments[2] * TOKENS_PER_NEO / 100000000
    return calculate_can_exchange(ctx, requested, sender, verify_only)
def get_kyc_status(ctx, address):
    """Return the stored KYC registration status for *address*.

    :param ctx: storage context
    :param address: address (bytearray) to look up
    :return: truthy when the address has been KYC-registered
    """
    return Get(ctx, concat(KYC_KEY, address))
def calculate_can_exchange(ctx, amount, address, verify_only):
    """Apply the token-sale rules to a requested exchange amount.

    :param ctx: storage context
    :param amount: number of tokens the contribution would mint
    :param address: contributing address
    :param verify_only: when True, perform no storage writes (the method is
        also reached from the read-only Verification trigger, where
        Neo.Storage.Put is unavailable in the StateReader)
    :return: True when *address* may exchange *amount* tokens
    """
    height = GetHeight()
    circulating = Get(ctx, TOKEN_CIRC_KEY)
    # never mint past the total supply
    if circulating + amount > TOKEN_TOTAL_SUPPLY:
        return False
    # sale has not opened yet
    if height < BLOCK_SALE_START:
        return False
    # free round: any amount goes
    if height > LIMITED_ROUND_END:
        return True
    # limited round: cap per-TX amount ...
    if amount > MAX_EXCHANGE_LIMITED_ROUND:
        return False
    # ... and allow only one exchange per address
    round_key = concat(address, LIMITED_ROUND_KEY)
    if Get(ctx, round_key):
        return False
    # record the address's limited-round exchange, except when invoked
    # read-only (see verify_only note above)
    if not verify_only:
        Put(ctx, round_key, True)
    return True
|
[
"[email protected]"
] | |
a1cc5cf11e5624b2b3f89755554f97571fd1a25b
|
f759188e90610e08b4d85358abeaf27f2796964e
|
/tinyos-main/apps/PIR_Sensor/util/Listener.py
|
464d97ddd4475819140e31d39a6f13222a0dc46e
|
[] |
no_license
|
SoftwareDefinedBuildings/KetiMotes
|
5555626231edb1cb76cb96bb4134a52d1d88bbb1
|
b6dfea4b7d3dd384dd78a91ce62e7990cd337009
|
refs/heads/master
| 2020-04-06T23:55:42.151717 | 2014-09-11T18:25:17 | 2014-09-11T18:25:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,595 |
py
|
import socket
import UdpReport
import re
import sys
import time
import threading
# UDP port this listener binds to for incoming sensor reports.
port = 7000
# Per-sender statistics, keyed by source address:
# addr -> (packets_received, last_seen_timestamp, first_seqno, latest_seqno)
stats = {}
class PrintStats(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
while True:
self.print_stats()
time.sleep(3)
def print_stats(self):
global stats
print "-" * 40
for k, v in stats.iteritems():
print "%s: %i/%i (%0.2f ago) (%0.2f%%)" % (k,
v[0],
v[3] - v[2] + 1,
time.time() - v[1],
100 * float(v[0]) /
(v[3] - v[2] + 1))
print "%i total" % len(stats)
print "-" * 40
if __name__ == '__main__':
    # Bind an IPv6 UDP socket on all interfaces and start the background
    # statistics printer before entering the receive loop.
    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    s.bind(('', port))
    ps = PrintStats()
    ps.start()
    while True:
        data, addr = s.recvfrom(1024)
        if (len(data) > 0):
            # Decode the datagram as a UdpReport and dump its fields.
            rpt = UdpReport.UdpReport(data=data, data_length=len(data))
            print rpt.get_seqno()
            print rpt.get_interval()
            print rpt.get_readings()
            print addr[0]
            # First packet from this sender: seed its stats entry with the
            # current seqno as both first and latest.
            if not addr[0] in stats:
                stats[addr[0]] = (0, time.time(), rpt.get_seqno(), rpt.get_seqno())
            # Update: bump the received count, refresh the last-seen time,
            # keep the first seqno, record the latest seqno.
            cur = stats[addr[0]]
            stats[addr[0]] = (cur[0] + 1, time.time(), cur[2], rpt.get_seqno())
|
[
"[email protected]"
] | |
4e37bfb94d6849a43ef3c3a4db4716b780471c11
|
2db757415502dc57376ba2f652069f55d5537c00
|
/ipython_memory_usage/perf_process.py
|
3c825c335c93bc30a747be33faa79e4b451bc2eb
|
[
"BSD-2-Clause"
] |
permissive
|
jni/numpy-skimage-tutorial
|
9391fbc9757e56e302e98709b641978f826d6a08
|
609350a8693ae3350f49ed98734d500657bb8205
|
refs/heads/master
| 2020-05-17T00:10:55.969572 | 2014-11-26T14:02:20 | 2014-11-26T14:02:20 | 27,123,958 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,823 |
py
|
""""""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import subprocess
import unittest
# Sample perf-stat output lines and the values the parser should extract.
FIXTURE0 = """ 0.100167119 3,183 cache-misses """
ANSWER0 = 3183
# Includes the "# time counts events" header line, which must be skipped.
FIXTURE1 = """# time counts events\n 0.100167119 3,183 cache-misses \n 0.200354348 4,045 cache-misses \n """
ANSWER1 = [3183, 4045]
FIXTURE2 = """ 3.501390851 471,219,787 stalled-cycles-frontend\n 14.005319456 2,249,115 stalled-cycles-frontend """
ANSWER2 = [471219787, 2249115]
# perf event names this module knows how to parse.
EVENT_TYPE_CM = "cache-misses"
EVENT_TYPE_SCF = "stalled-cycles-frontend"
EVENT_TYPE_I = "instructions"
EVENT_TYPES = set([EVENT_TYPE_CM, EVENT_TYPE_SCF, EVENT_TYPE_I])
# The event actually monitored by run_capture_perf().
EVENT_TYPE = EVENT_TYPE_CM
def process_line(line):
    """Extract the counter value from a single perf-stat output line.

    A sample line looks like " 0.100167119  3,183  cache-misses"; the
    counter is the second whitespace-separated column, printed with
    thousands separators. Header/help lines ("# time counts events"),
    blank lines, and anything else without a numeric second column
    produce None.

    :param line: one line of perf-stat stderr output
    :return: the counter as a float, or None when the line carries no value
    """
    line_bits = line.split()
    try:
        return float(line_bits[1].replace(',', ''))
    except ValueError:
        # non-numeric second column: a header/help line, not a sample.
        # (The original code indexed line_bits[2] here, which raised an
        # uncaught IndexError for short non-numeric lines such as "x y".)
        return None
    except IndexError:
        # blank or truncated line: fewer than two columns
        return None
def process_lines(lines):
    """Parse a multi-line perf-stat output string into counter values.

    :param lines: perf-stat output with '\\n' line endings
    :return: list of float counter values, in order of appearance
    """
    values = []
    for line in lines.split('\n'):
        value = process_line(line)
        # compare against None explicitly: the original truthiness test
        # silently dropped legitimate zero counts (e.g. "0 cache-misses")
        if value is not None:
            values.append(value)
    return values
class Test(unittest.TestCase):
    """Unit tests for the perf-stat output parsers."""

    def test1(self):
        # a single well-formed sample line yields its counter value
        self.assertEqual(process_line(FIXTURE0), ANSWER0)

    def test_process_lines(self):
        self.assertEqual(process_lines(FIXTURE0), [ANSWER0])

    def test_process_lines2(self):
        # cache-misses samples parse, the header line is skipped
        self.assertEqual(process_lines(FIXTURE1), ANSWER1)
        # repeated header lines do not disturb value extraction
        self.assertEqual(process_lines(FIXTURE1 + FIXTURE1), ANSWER1 + ANSWER1)

    def test_process_lines3(self):
        # stalled-cycles-frontend samples parse as well
        self.assertEqual(process_lines(FIXTURE2), ANSWER2)
def run_capture_perf(pid):
    """Attach perf-stat to *pid*, sampling the configured event every 100 ms.

    :param pid: id of the process to monitor
    :return: the running subprocess.Popen handle, with stderr piped
        (perf-stat writes its interval output to stderr)
    """
    cmd = "perf stat --pid {pid} --event {event_type} -I 100".format(
        pid=pid, event_type=EVENT_TYPE)
    return subprocess.Popen(cmd.split(), stderr=subprocess.PIPE)
def finish_perf(proc):
    """Stop a perf-stat capture and return the values it produced.

    :param proc: Popen handle returned by run_capture_perf
    :return: list of counter values parsed from perf's stderr output
    """
    # stop the recording first ...
    proc.kill()
    # ... then block to drain the output pipes.
    # stderr holds lines like:
    #   # time counts events
    #   0.100173796  2,761  cache-misses
    #   0.200387519  4,232  cache-misses
    stdoutdata, stderrdata = proc.communicate()
    return process_lines(stderrdata)
if __name__ == "__main__":
    # simple smoke test: monitor a hardcoded pid for 0.5 seconds
    # NOTE(review): pid 4583 must belong to a live process — confirm before running
    pid = 4583
    print("Using pid:", pid)
    proc = run_capture_perf(pid)
    import time
    time.sleep(0.5)
    values = finish_perf(proc)
    print(values)
|
[
"[email protected]"
] | |
1e016af907e1c4eb4ab696ffded7dc703b9d20bf
|
6b359431d1fe4141b3ecdb52a23a99ede37b8044
|
/workouts/migrations/0001_initial.py
|
b279d5eaa1d12eaef436115f081f51bcdfae1b84
|
[] |
no_license
|
sergiga/fitness-backend
|
f6cdc48bd55a23da8eb671f7ffe84ac9b92c5e3e
|
a328c6c0b34a0c63e4e223ad873d1a61cf54fdeb
|
refs/heads/master
| 2022-12-24T18:03:52.293142 | 2019-12-29T14:05:23 | 2019-12-29T14:05:23 | 227,165,726 | 0 | 0 | null | 2022-12-08T03:17:25 | 2019-12-10T16:24:36 |
Python
|
UTF-8
|
Python
| false | false | 4,598 |
py
|
# Generated by Django 3.0 on 2019-12-08 17:19
import core.utils
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the workouts app.

    Creates Workout, WorkoutSet, ExerciseInSet, Training and
    TrainingExercise; all foreign keys cascade on delete. Every model
    carries created/updated timestamps plus a nullable device_updated
    field (presumably for client-side sync — TODO confirm).
    """

    # First migration of the app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        # One exercise entry inside a workout set, with rep count/unit
        # and its position (group, group_order) within the set.
        migrations.CreateModel(
            name='ExerciseInSet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('device_updated', models.DateTimeField(null=True)),
                ('reps', models.IntegerField()),
                ('rep_unit', models.IntegerField(choices=[(1, 'REPS'), (2, 'SECONDS')], default=core.utils.RepUnit['REPS'])),
                ('group', models.IntegerField()),
                ('group_order', models.IntegerField()),
            ],
            options={
                'abstract': False,
            },
        ),
        # A named workout plan with a difficulty level.
        migrations.CreateModel(
            name='Workout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('device_updated', models.DateTimeField(null=True)),
                ('name', models.CharField(max_length=100)),
                ('level', models.IntegerField(choices=[(1, 'BEGINNER'), (2, 'INTERMEDIATE'), (3, 'ADVANCED')], default=core.utils.Level['INTERMEDIATE'])),
            ],
            options={
                'abstract': False,
            },
        ),
        # An ordered group of sets belonging to a workout.
        migrations.CreateModel(
            name='WorkoutSet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('device_updated', models.DateTimeField(null=True)),
                ('sets', models.IntegerField()),
                ('order', models.IntegerField()),
                ('workout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='workout_sets', to='workouts.Workout')),
            ],
            options={
                'abstract': False,
            },
        ),
        # The reps actually performed for one exercise during a workout.
        migrations.CreateModel(
            name='TrainingExercise',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('device_updated', models.DateTimeField(null=True)),
                ('reps', models.IntegerField()),
                ('exercise_in_set', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='training_exercises', to='workouts.ExerciseInSet')),
                ('workout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='training_exercises', to='workouts.Workout')),
            ],
            options={
                'abstract': False,
            },
        ),
        # A training session: when a workout was performed.
        migrations.CreateModel(
            name='Training',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('device_updated', models.DateTimeField(null=True)),
                ('date', models.DateField()),
                ('start_time', models.DateTimeField(null=True)),
                ('end_time', models.DateTimeField(null=True)),
                ('exercise_in_set', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='trainings', to='workouts.ExerciseInSet')),
                ('workout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='trainings', to='workouts.Workout')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Added last because WorkoutSet is created after ExerciseInSet.
        migrations.AddField(
            model_name='exerciseinset',
            name='workout_set',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exercises_in_set', to='workouts.WorkoutSet'),
        ),
    ]
|
[
"[email protected]"
] | |
b8c4e05691d3a6bcb98ea81c533c3e2d9b3ec506
|
638bfb68541bf88fe7ed09a89b67d7e7f4f2b13f
|
/informacion/migrations/0003_comentario_autor.py
|
bb5c6f8cb4c20c69ca64fe623b6a28ef8c0bd612
|
[] |
no_license
|
LuOfLuck/perfil-sholl
|
748420734050acdda18afc4402852f6b0a4df00d
|
58ae5c649b9590d58f5fed238b548efb12180bdb
|
refs/heads/master
| 2023-04-16T21:10:06.545946 | 2021-05-03T19:52:51 | 2021-05-03T19:52:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 599 |
py
|
# Generated by Django 2.2.3 on 2020-12-30 01:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional 'autor' foreign key (to the auth user) on Comentario."""

    dependencies = [
        # the user model is swappable, so depend on whatever it resolves to
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('informacion', '0002_comentario'),
    ]

    operations = [
        migrations.AddField(
            model_name='comentario',
            name='autor',
            # blank/null so pre-existing comments without an author stay valid
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"[email protected]"
] | |
61ac459e07e5e8d0716e7ab4fe58cee3e73a3f15
|
0ae7422b9cf684d04fd8581c46008207c57a0ead
|
/portfolio/resume/views.py
|
97d788e80203c051ac6dc1039606d0fda7596f9c
|
[] |
no_license
|
santom11/Portfolio
|
7fb394aa6c7f2cebb0f47c041d373c30e13aa538
|
b58ec8d5f2e119f0e6c9584d693b9d9931043eeb
|
refs/heads/master
| 2022-11-23T19:03:34.399415 | 2020-07-18T13:42:15 | 2020-07-18T13:42:15 | 280,661,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 355 |
py
|
from django.shortcuts import render
from django.shortcuts import get_object_or_404

from .models import Project
def all_projects(request):
    """Render the project list page with every Project in the database."""
    context = {'projects': Project.objects.all()}
    return render(request, 'resume/all_projects.html', context)
def project_detail(request, pk):
    """Render the detail page for a single Project.

    Uses get_object_or_404 so an unknown *pk* produces an HTTP 404 instead
    of an unhandled Project.DoesNotExist (which rendered as a 500).
    """
    project = get_object_or_404(Project, pk=pk)
    return render(request, 'resume/detail.html', {'project': project})
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.