Dataset schema (one pipe-separated record per row below):
blob_id string(40) | directory_id string(40) | path string(5-283) | content_id string(40) | detected_licenses sequence(0-41) | license_type 2 classes | repo_name string(7-96) | snapshot_id string(40) | revision_id string(40) | branch_name 58 classes | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64(12.7k-662M, nullable) | star_events_count int64(0-35.5k) | fork_events_count int64(0-20.6k) | gha_license_id 11 classes | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language 43 classes | src_encoding 9 classes | language 1 class | is_vendor bool | is_generated bool | length_bytes int64(7-5.88M) | extension 30 classes | content string(7-5.88M) | authors sequence(1) | author string(0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8529c4d89d04af95f46471657da82dd417d6a547 | 14cfa53b1c8cc66aafdb98cac75a36d5722c154d | /macropy/macros2/linq_test.py | 17f6a87f2bdb9a635fc195baa1000aa3d50cde68 | [
"MIT"
] | permissive | pcn/macropy | c77a8060c4ac0304595fce895c5fac8f986db660 | 50f4a446137009d509647918a4aaef797cf86fde | refs/heads/master | 2021-01-23T22:58:06.214611 | 2013-05-17T16:39:48 | 2013-05-17T16:39:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,803 | py | import unittest
import ast
from sqlalchemy import *
from macropy.macros2.linq import macros, sql, generate_schema, expand_let_bindings  # expand_let_bindings is used in test_expand_lets below; assumed to be exported by the linq module
from macropy.core.lift import macros, q
from macropy.core import unparse_ast
engine = create_engine("sqlite://")
for line in open("macropy/macros2/world.sql").read().split(";"):
engine.execute(line.strip())
db = generate_schema(engine)
def compare_queries(query1, query2, post_process=lambda x: x):
res1 = engine.execute(query1).fetchall()
res2 = engine.execute(query2).fetchall()
try:
assert post_process(res1) == post_process(res2)
except Exception, e:
print "FAILURE"
print e
print query1
print "\n".join(map(str, post_process(res1)))
print query2
print "\n".join(map(str, post_process(res2)))
raise e
class Tests(unittest.TestCase):
def test_expand_lets(self):
"""
This tests the sorta knotty logic involved in making the for-
comprehension variable available *outside* of the comprehension
when used in PINQ
"""
tree = q%(lambda x: x + (lambda y: y + 1)(3))(5)
goal = q%(lambda x: (lambda y: (x + (y + 1)))(3))(5)
new_tree = expand_let_bindings.recurse(tree)
assert ast.dump(new_tree) == ast.dump(goal)
tree = q%(lambda x: x + (lambda y: y + 1)(3) + (lambda z: z + 2)(4))(5)
goal = q%(lambda x: (lambda z: (lambda y: ((x + (y + 1)) + (z + 2)))(3))(4))(5)
new_tree = expand_let_bindings.recurse(tree)
assert ast.dump(new_tree) == ast.dump(goal)
tree = q%(lambda x: (x, lambda w: (lambda y: y + 1)(3) + (lambda z: z + 2)(4)))(5)
goal = q%(lambda x: (x, (lambda w: (lambda z: (lambda y: ((y + 1) + (z + 2)))(3))(4))))(5)
new_tree = expand_let_bindings.recurse(tree)
assert ast.dump(new_tree) == ast.dump(goal)
"""
Most examples taken from
http://sqlzoo.net/wiki/Main_Page
"""
def test_basic(self):
# all countries in europe
compare_queries(
"SELECT name FROM country WHERE continent = 'Europe'",
sql%(x.name for x in db.country if x.continent == 'Europe')
)
# countries whose area is bigger than 10000000
compare_queries(
"SELECT name, surface_area FROM country WHERE surface_area > 10000000",
sql%((x.name, x.surface_area) for x in db.country if x.surface_area > 10000000)
)
def test_nested(self):
# countries on the same continent as India or Iran
compare_queries(
"""
SELECT name, continent FROM country
WHERE continent IN (
SELECT continent FROM country
WHERE name IN ('India', 'Iran')
)
""",
sql%(
(x.name, x.continent) for x in db.country
if x.continent in (
y.continent for y in db.country
if y.name in ['India', 'Iran']
)
)
)
# countries in the same continent as Belize or Belgium
compare_queries(
"""
SELECT w.name, w.continent
FROM country w
WHERE w.continent in (
SELECT z.continent
FROM country z
WHERE z.name = 'Belize' OR z.name = 'Belgium'
)
""",
sql%(
(c.name, c.continent) for c in db.country
if c.continent in (
x.continent for x in db.country
if (x.name == 'Belize') | (x.name == 'Belgium')
)
)
)
def test_operators(self):
# countries in europe with a DNP per capita larger than the UK
compare_queries(
"""
SELECT name FROM country
WHERE gnp/population > (
SELECT gnp/population FROM country
WHERE name = 'United Kingdom'
)
AND continent = 'Europe'
""",
sql%(
x.name for x in db.country
if x.gnp / x.population > (
y.gnp / y.population for y in db.country
if y.name == 'United Kingdom'
)
if (x.continent == 'Europe')
)
)
def test_aggregate(self):
# the population of the world
compare_queries(
"SELECT SUM(population) FROM country",
sql%(func.sum(x.population) for x in db.country)
)
# number of countries whose area is at least 1000000
compare_queries(
"select count(*) from country where surface_area >= 1000000",
sql%(func.count(x.name) for x in db.country if x.surface_area >= 1000000)
)
def test_aliased(self):
# continents whose total population is greater than 100000000
compare_queries(
"""
SELECT DISTINCT(x.continent)
FROM country x
WHERE 100000000 < (
SELECT SUM(w.population)
from country w
WHERE w.continent = x.continent
)
""",
sql%(
func.distinct(x.continent) for x in db.country
if (
func.sum(w.population) for w in db.country
if w.continent == x.continent
).as_scalar() > 100000000
)
)
def test_query_macro(self):
query = sql%(
func.distinct(x.continent) for x in db.country
if (
func.sum(w.population) for w in db.country
if w.continent == x.continent
) > 100000000
)
sql_results = engine.execute(query).fetchall()
query_macro_results = query%(
func.distinct(x.continent) for x in db.country
if (
func.sum(w.population) for w in db.country
if w.continent == x.continent
) > 100000000
)
assert sql_results == query_macro_results
def test_join(self):
# number of cities in Asia
compare_queries(
"""
SELECT COUNT(t.name)
FROM country c
JOIN city t
ON (t.country_code = c.code)
WHERE c.continent = 'Asia'
""",
sql%(
func.count(t.name)
for c in db.country
for t in db.city
if t.country_code == c.code
if c.continent == 'Asia'
)
)
# name and population for each country and city where the city's
# population is more than half the country's
compare_queries(
"""
SELECT t.name, t.population, c.name, c.population
FROM country c
JOIN city t
ON t.country_code = c.code
WHERE t.population > c.population / 2
""",
sql%(
(t.name, t.population, c.name, c.population)
for c in db.country
for t in db.city
if t.country_code == c.code
if t.population > c.population / 2
),
lambda x: sorted(map(str, x))
)
def test_join_complicated(self):
compare_queries(
"""
SELECT t.name, t.population, c.name, c.population
FROM country c
JOIN city t
ON t.country_code = c.code
AND t.population * 1.0 / c.population = (
SELECT MAX(tt.population * 1.0 / c.population)
FROM city tt
WHERE tt.country_code = t.country_code
)
""",
sql%(
(t.name, t.population, c.name, c.population)
for c in db.country
for t in db.city
if t.country_code == c.code
if t.population * 1.0 / c.population == (
func.max(tt.population * 1.0 / c.population)
for tt in db.city
if tt.country_code == t.country_code
)
),
lambda x: sorted(map(str, x))
)
def test_order_group(self):
# the name of every country sorted in order
compare_queries(
"SELECT c.name FROM country c ORDER BY c.population",
sql%(c.name for c in db.country).order_by(c.population)
)
# sum up the population of every country using GROUP BY instead of a JOIN
compare_queries(
"""
SELECT t.country_code, sum(t.population)
FROM city t GROUP BY t.country_code
ORDER BY sum(t.population)
""",
sql%(
(t.country_code, func.sum(t.population)) for t in db.city
).group_by(t.country_code)
.order_by(func.sum(t.population))
)
def test_limit_offset(self):
# bottom 10 countries by population
compare_queries(
"SELECT c.name FROM country c ORDER BY c.population LIMIT 10",
sql%(c.name for c in db.country).order_by(c.population).limit(10)
)
# bottom 100 to 110 countries by population
compare_queries(
"SELECT c.name FROM country c ORDER BY c.population LIMIT 10 OFFSET 100",
sql%(c.name for c in db.country).order_by(c.population).limit(10).offset(100)
)
# top 10 countries by population
compare_queries(
"SELECT c.name FROM country c ORDER BY c.population DESC LIMIT 10",
sql%(c.name for c in db.country).order_by(c.population.desc()).limit(10)
)
| [
"[email protected]"
] | |
aa2536a426997227f7cf8edd5029dff9147bf8c3 | 3dc0a83108f679693680b2206fbc02794bfd60db | /my_orm/connection.py | db5478a803fca6d0e4db93f4c63e5c7bd87701f2 | [] | no_license | konstantink/my_orm | f05707c246174544851ce1a307dbcb39ab56b20a | 129517e31d6e0ab2047a81e6d41a84b1148443bb | refs/heads/master | 2021-01-25T04:52:51.420941 | 2014-05-23T14:02:50 | 2014-05-23T14:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | # coding=UTF-8
__author__ = 'Konstantin Kolesnikov'
__all__ = [
    'Connect',
'Connection'
]
import MySQLdb
from threading import Lock
from collections import OrderedDict
import exc
from utils import url
from dialect import MySQLDialect
from schema import Table
def Connect(url_or_connection_str):
return Database(url_or_connection_str)
class Connection(object):
_instance_lock = Lock()
@staticmethod
def instance(url_or_conn_str=None):
if url_or_conn_str is None and not hasattr(Connection, '_instance'):
raise exc.ArgumentError('Connection should be first established.')
if not hasattr(Connection, '_instance'):
with Connection._instance_lock:
if not hasattr(Connection, '_instance'):
Connection._instance = Connection(url_or_conn_str)
return Connection._instance
def __init__(self, url_or_connection_str, read_schema_from_db=True):
if isinstance(url_or_connection_str, str):
self.url = url.make_url(url_or_connection_str)
elif isinstance(url_or_connection_str, url.URL):
self.url = url_or_connection_str
self.connection = MySQLdb.connect(**self.url.to_connection_args())
self.cursor = self.connection.cursor()
self.dialect = MySQLDialect()
# self.tables_collection = dict()
# self._explore_database(self.url.db)
def explore_database(self, schema=None):
tables_collection = dict()
if schema is None:
schema = self.url.db
tables = self.get_table_names(schema)
for table in tables:
if table in tables_collection:
raise exc.ArgumentError('Table "%s" is already defined'
' in this database' % table)
tables_collection[table] = Table(table, None)
for column in self.get_columns(table, schema):
tables_collection[table].add_column(column)
return tables_collection
# self.get_foreign_keys(schema, tables[0])
def execute(self, sql):
try:
self.cursor.execute(sql)
return self.cursor
except Exception as e:
print(e)
return None
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def get_table_names(self, schema):
return self.dialect.get_table_names(self, schema)
def get_foreign_keys(self, table, schema):
return self.dialect.get_foreign_keys(self, table, schema)
def get_columns(self, table, schema):
return self.dialect.get_columns(self, table, schema)
class Database(object):
def __init__(self, url_or_connection_str):
self.connection = Connection.instance(url_or_connection_str)
# self.tables_collection = dict()
self.tables_collection = self.connection.explore_database()
def __getattr__(self, item):
try:
return self.tables_collection[item]
except KeyError:
raise AttributeError('Table "%s" is not defined in database "%s'
% (item, self.connection.url.db))
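# --- Usage sketch (editor's addition) ---
# Minimal illustration of the intended entry point. The DSN below is a
# made-up placeholder, and a reachable MySQL server is required, so the
# call sits behind a __main__ guard instead of running on import.
if __name__ == '__main__':
    db = Connect('mysql://user:password@localhost/sampledb')  # hypothetical DSN
    print(db.connection.url.db)
    # Tables discovered by explore_database() are attributes, e.g. db.users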
| [
"[email protected]"
] | |
ee3b7bc569657a8761ba1f5af621babb8858a748 | d315013ea0319acc99c61739ef088280af985cce | /pybullet_commander/seed/motion_controller/action_client/trajectory_client.py | 6cef1fd0b5a813b16a36542740be161bfaa7ffa2 | [] | no_license | keithczq/GenerAL | 4225b5b55cae05e1aed7162c36de5234112e05a1 | b1f6edda3730aff1ccf4c700ad1b6aa8aae28561 | refs/heads/master | 2020-08-22T01:57:37.360044 | 2019-10-05T20:26:31 | 2019-10-05T20:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,865 | py | #!/usr/bin/env python
import roslib
roslib.load_manifest('my_seed_hand')
import rospy
import actionlib
from std_msgs.msg import Float64
import trajectory_msgs.msg
import control_msgs.msg
from trajectory_msgs.msg import JointTrajectoryPoint
from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal, FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from sensor_msgs.msg import JointState
class Joint:
def __init__(self, motor_name):
#arm_name should be b_arm or f_arm
rospy.init_node('trajectory_client', anonymous=True)
self.name = motor_name
self.jta = actionlib.SimpleActionClient('/'+self.name+'/follow_joint_trajectory', FollowJointTrajectoryAction)
rospy.loginfo('Waiting for joint trajectory action')
self.jta.wait_for_server()
rospy.loginfo('Found joint trajectory action!')
rospy.Subscriber("/joint_states", JointState, self.callback)
rospy.spin()
def move_joint(self, angles):
goal = FollowJointTrajectoryGoal()
char = self.name[0] #either 'f' or 'b'
goal.trajectory.joint_names = ['wrist_rotation', 'wrist_adduction', 'wrist_flexion', 'thumb_adduction', 'thumb_flexion', 'index_flexion', 'middle_flexion', 'ring_and_pinky_flexion']
# ['claw_1f'+char, 'traction_1f'+char,'joint_1f'+char]
point = JointTrajectoryPoint()
point.positions = angles
# point.time_from_start = rospy.Duration(2)
goal.trajectory.points.append(point)
self.jta.send_goal_and_wait(goal)
# rospy.loginfo('moving...')
rospy.loginfo(angles)
def callback(self, message):
joint_names = ['wrist_rotation', 'wrist_adduction', 'wrist_flexion', 'thumb_adduction', 'thumb_flexion', 'index_flexion', 'middle_flexion', 'ring_and_pinky_flexion']
raw_joint_names = ['forearm__base', 'palm_axis__forearm', 'palm__palm_axis', 'palm__thumb_base', 'Tproximal__thumb_base', 'Iproximal__palm', 'Mproximal__palm', 'Rproximal__palm']
joint_angles = []
for joint_name in raw_joint_names:
joint_angle = message.position[message.name.index(joint_name)]
if joint_name == "forearm__base":
joint_angle += 1.57
joint_angle /= 2*1.57
joint_angle *= 6.28
elif joint_name == "palm_axis__forearm":
joint_angle += 0.79
joint_angle /= 2*0.79
joint_angle *= 6.28
elif joint_name == "palm__palm_axis":
joint_angle += 0.79
joint_angle /= 2*0.79
joint_angle *= 6.28
elif joint_name == "Rproximal__palm":
joint_angle += 0
joint_angle /= 1.48
joint_angle *= 6.28
elif joint_name == "Mproximal__palm":
joint_angle += 0
joint_angle /= 1.48
joint_angle *= 6.28
elif joint_name == "palm__thumb_base":
joint_angle += 0
joint_angle /= 1.57
joint_angle *= 6.28
elif joint_name == "Tproximal__thumb_base":
joint_angle += 0
joint_angle /= 1.57
joint_angle *= 6.28
elif joint_name == "Iproximal__palm":
joint_angle += 0
joint_angle /= 1.48
joint_angle *= 6.28
joint_angle = min(joint_angle, 5.0)
joint_angle = max(0.1, joint_angle)
joint_angles.append(joint_angle)
self.move_joint(joint_angles)
def main():
arm = Joint('f_arm_controller')
arm.move_joint([0.0,5.0,0.0,0.0,0.0,0.0,0.0,0.0])
# arm.move_joint([6.28,3.14,6.28])
if __name__ == '__main__':
hand = Joint('f_arm_controller')
| [
"[email protected]"
] | |
df906205242e3af906038e8471ab76f050b4287d | f32b6eae17f934dfa63afc7313b391b17f2ef341 | /yablist/urls.py | cdc1c835f2726ec54b3b620aac4d21f7ba50f145 | [] | no_license | kecheon/yablist | a787afd00764d877915dfbc8ea3641c220cd0db0 | 87793b1f1f793218d7a1a52ad3e1faf540e4713b | refs/heads/master | 2021-01-19T23:52:52.620201 | 2017-04-22T11:01:36 | 2017-04-22T11:01:36 | 88,383,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
admin.autodiscover()
urlpatterns = [
url(r'^sitemap\.xml$', sitemap,
{'sitemaps': {'cmspages': CMSSitemap}}),
]
urlpatterns += i18n_patterns(
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^search/', include('haystack.urls')),
url(r'^categories/', include('categories.urls')),
# following app is integrated into cms
url(r'^bidders/', include('bidders.urls', app_name='bidders', namespace='bidders')),
url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
urlpatterns = [
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
] + staticfiles_urlpatterns() + urlpatterns
| [
"[email protected]"
] | |
3d2c8cc7522515264d580dead675cb73e30e403c | c847cccec0b2c76a43410560171f3c04342230ea | /Assignment_8/Codes/Assignemnt_8.py | a71db774b75a72639347b1c304ef65bd1765093f | [] | no_license | harshal9876/AI5002 | f036c2d5792c68e46b0d2aeaa0c220a09ca91d17 | c80dbe3ab529eb9e1de5c840c2302ac6c8588378 | refs/heads/main | 2023-05-27T16:32:13.197913 | 2021-06-11T08:19:24 | 2021-06-11T08:19:24 | 330,556,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | #Importing Libraries
from itertools import product
import matplotlib.pyplot as plt
import random
import numpy as np
#define the input lists and the target sum
A = [2,3,4,5]
B = [11,12,13,14,15]
sum_of_numbers = 16
#print the defined lists and the sum
print("Set A : ",A)
print("Set B : ",B)
print("The sum of numbers to be equal to : ", sum_of_numbers)
#Creating a list of possible outcomes
numbers_choosen = list(product(A,B))
#function to count occurrences of an element in a list
def count(list,a):
count_val = 0
for i in range(len(list)):
if list[i]== a:
count_val = count_val + 1
return count_val
#number of Monte Carlo iterations
iterations = 100000
#Main sampling loop
sums = []
for i in range(iterations):
    sums.append(list(numbers_choosen[random.randint(0,len(numbers_choosen)-1)])[1] + list(numbers_choosen[random.randint(0,len(numbers_choosen)-1)])[0])
#Calculating the probability
probability = count(sums, sum_of_numbers)/iterations
print('\n The probability of having the sum of numbers to be equal to 16 is : ',round(probability,4))
#Plotting theoretical vs. empirical outcomes
labels = ['Sum = 16']
Theoretical = [0.2]
Calculated = [probability ]
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, Theoretical, width, label='Theoretical')
rects2 = ax.bar(x + width/2, Calculated, width, label='Practical')
ax.set_ylabel('Probability')
ax.set_title('GATE 7 : \n Given A = [2,3,4,5] and B = [11,12,13,14,15] \n Result : Sum of two numbers chosen at random is equal to 16 ')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
#ax.bar_label(rects1, padding=3)
#ax.bar_label(rects2, padding=3)
fig.tight_layout()
plt.show()
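# --- Analytic cross-check (editor's addition) ---
# The exact probability follows by enumerating the same itertools.product
# list used above, which removes the sampling noise entirely.
favourable = [pair for pair in numbers_choosen if pair[0] + pair[1] == sum_of_numbers]
print('Exact probability by enumeration : ', len(favourable)/len(numbers_choosen))  # 4/20 = 0.2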
| [
"[email protected]"
] | |
fa611b722e584ef32b46a909fb2174b6a2f44f20 | ddc7eddc26ed0a3cdb7211f6433bfe640520d5c4 | /Logs_V2.py | 8d484e8aa81e3b7b089719aabb523177dd12988d | [] | no_license | jalonsomoya/FSCLogs | cd0d9ec87958fa20055b57ef9da94af57bc0384a | 322d59363d0c4955a86764162f1b243c979b91e7 | refs/heads/master | 2023-05-15T07:39:52.550841 | 2021-06-14T15:56:38 | 2021-06-14T15:56:38 | 224,914,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,515 | py | # import re
import os
date_regex = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d{3}"
print("Select the log file date")
dia = input()
print(f"Selecting the LOG for day {dia}")
# Select the file, normalize line breaks, and add | after the timestamp
with open(f"FSC_{dia}.log", mode="r") as file, \
open(f"FSC_{dia}_saltopag.txt", mode="w") as wFile:
for f in file:
if "---" in f:
sp = f[23:].split("---")
for s in sp:
wFile.write(f[:23] + "| reci -" + s.rstrip() + "---\n")
else:
sp = f[23:].split("---")
for s in sp:
wFile.write(f[:23] + "|" + s.rstrip() + "--\n")
print("Page breaks handled and timestamp separator added")
# Check the telegram type; prepend "reci -" where needed
with open(f"FSC_{dia}_saltopag.txt", mode="r") as file, open(f"FSC_{dia}_tel.txt", mode="w") as wFile:
for lines in file:
tel = lines[24:lines.find("|", 24)]
if ("rec" in tel) or ("ods" in lines):
wFile.write(lines[:24] + lines[lines.find("|", 24) - 2:])
with open(f"FSC_{dia}_tel.txt", mode="r") as file, open(f"FSC_{dia}_clean.txt", mode="w") as wFile:
for lines in file:
if "ve|" not in lines:
wFile.write(lines)
print("Clean file generated")
# Remove the auxiliary files
if os.path.exists(f"FSC_{dia}_saltopag.txt"):
os.remove(f"FSC_{dia}_saltopag.txt")
else:
print("The file does not exist")
if os.path.exists(f"FSC_{dia}_tel.txt"):
os.remove(f"FSC_{dia}_tel.txt")
#os.remove(f"FSC_{dia}.log")
else:
print("The file does not exist")
print("Auxiliary files removed")
# Categorize telegrams into separate files
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_Sat.csv", "w") as wFile:
    cabecera = "TMS,TIPO,EVENTO,SALIDA\n"
wFile.write(cabecera)
for lines in file:
if ("|30|3001" in lines) or ("|30|3002" in lines):
            wFile.write(lines.replace("|", ","))
print("Saturation events file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_Paros.csv", "w") as wFile:
    cabecera = "TMS,TIPO,EVENTO,TRAMO\n"
wFile.write(cabecera)
for lines in file:
if ("|30|3021" in lines) or ("|30|3024" in lines):
            wFile.write(lines.replace("|", ","))
print("Stop events file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_PLC.csv", "w") as wFile:
    cabecera = "tms|tipo|pic|largo|scanned|entrance_point|entrance_state|5|6|7|8|9|10|11|12|13|14|15\n"
wFile.write(cabecera)
for lines in file:
if "|14|" in lines:
wFile.write(lines)
print("PLC file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_10.csv", "w") as wFile:
cabecera = ("TS_FSC,TIPO_FSC,PIC_ID,LARGO,PARCEL_SCANNER_DATA,C1,"
"C2,C3,UPDATE_STATE,C4,PARCEL_ENTRANCE_POINT,"
"PARCEL_ENTRANCE_STATE,PARCEL_EXIT_POINT,PARCEL_EXIT_STATE\n")
wFile.write(cabecera)
for lines in file:
if "|10|" in lines:
wFile.write(lines.replace("|", ","))
print("Insertions and reads file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC__20.csv", "w") as wFile:
cabecera = "TS_FSC,TIPO_FSC,PIC_ID,LARGO,ODES,VDES,ADES,PARCEL_SCANNER_DATA,C1,C2,C3,UPDATE_STATE,C4," \
"PARCEL_ENTRANCE_POINT,PARCEL_ENTRANCE_STATE,PARCEL_EXIT_POINT,PARCEL_EXIT_STATE\n"
wFile.write(cabecera)
for lines in file:
if "|20|" in lines:
wFile.write(lines.replace("|", ","))
print("Classification file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_27.csv", "w") as wFile:
    cabecera = "TS_FSC,TIPO_FSC,PIC_ID,LARGO,PARCEL_SCANNER_DATA,C1,C2,C3,UPDATE_STATE,C4,PARCEL_ENTRANCE_POINT," \
               "PARCEL_ENTRANCE_STATE,PARCEL_EXIT_POINT,PARCEL_EXIT_STATE\n"
wFile.write(cabecera)
for lines in file:
if "|27|" in lines:
wFile.write(lines.replace("|", ","))
print("Collector-arc read file generated")
with open(f"FSC_{dia}_clean.txt", "r") as file, open(f"FSC_13.csv", "w") as wFile:
cabecera = "TS_FSC,TIPO_FSC,PIC_ID,LARGO,PARCEL_SCANNER_DATA,C1,C2,C3,UPDATE_STATE,C4,PARCEL_ENTRANCE_POINT," \
"PARCEL_ENTRANCE_STATE,PARCEL_EXIT_POINT,PARCEL_EXIT_STATE\n"
wFile.write(cabecera)
for lines in file:
if "|13|" in lines:
wFile.write(lines.replace("|", ","))
print("Recovery-arc read file generated")
print("End of execution")
# print("run saturations? (Y/N)")
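# --- Refactor sketch (editor's addition, kept commented so nothing runs twice) ---
# Every telegram block above repeats the same open/filter/write pattern; a
# table-driven loop such as this (headers abbreviated) expresses it once:
# TELEGRAM_FILES = [
#     ("|10|", "FSC_10.csv", "TS_FSC,TIPO_FSC,..."),
#     ("|20|", "FSC__20.csv", "TS_FSC,TIPO_FSC,..."),
#     ("|27|", "FSC_27.csv", "TS_FSC,TIPO_FSC,..."),
#     ("|13|", "FSC_13.csv", "TS_FSC,TIPO_FSC,..."),
# ]
# for tag, out_name, header in TELEGRAM_FILES:
#     with open(f"FSC_{dia}_clean.txt", "r") as src, open(out_name, "w") as dst:
#         dst.write(header + "\n")
#         for line in src:
#             if tag in line:
#                 dst.write(line.replace("|", ","))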
| [
"[email protected]"
] | |
3104940f614f3c56b589278168265e32531b6f4e | 077f0c03539713eeafa0ada37c5da4e751043bc2 | /2018_11_14_if2.py | ce95c79cd740c7326182b449bbe0149a41420f8e | [] | no_license | LucyLu66/Python | d9906e17a124cceb4eed1cac577e4d53b5744907 | 87cdeb7dc41a0c192067978d62a113b22b72e09e | refs/heads/master | 2020-04-08T18:15:01.210099 | 2018-12-06T03:29:22 | 2018-12-06T03:29:22 | 159,600,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | print('x的坐标是:')
x = int(input())
print('y的坐标是:')
y = int(input())
if x >= 0:
if y >= 0:
print('1')
else:
print('4')
else:
if y >= 0:
print('2')
else:
print('3')
| [
"[email protected]"
] | |
fa05df649d1afd51f85a10abf64dbe5a2752cbd2 | 6e4ea04d30fec41f96736c9df34712c9407c7c08 | /src/load_result.py | dc524252263958359649726a3ed59ceaf2e3ee2c | [] | no_license | sohj94/dl_study | b4e4ee44437487943fc6b83f05ec7f8cbd669091 | 46be7c4424ae0c79f92cc8375be7f39645ba34c8 | refs/heads/master | 2023-05-30T09:42:29.577469 | 2021-05-12T13:55:52 | 2021-05-12T13:55:52 | 366,718,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import sys, os
sys.path.append(os.pardir)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
result_prefix = "../data/result/hw1_result_"
dataset = "cifar-10"
result = pd.read_csv(result_prefix + dataset + ".csv")
x = [i for i in range(3,16)]
y = [i for i in range(10,151,10)]
Z = np.array([list(result[str(i)]) for i in range(len(x))]).transpose()
X, Y = np.meshgrid(x, y)
fig = plt.figure(figsize=(9,9))
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, Z, cmap='viridis', edgecolor='black', alpha=0.5)
# ax.plot_wireframe(X, Y, Z, color='black')
ax.set_title(dataset, size=20)
ax.set_xlim([3,15])
ax.set_ylim([10,150])
ax.set_xlabel('# of layers')
ax.set_ylabel('width')
M = np.max(Z)
ax.set_zlim([int(10*M)/10, int(10*M+1)/10])
idx_x = np.argmax(Z)//Z.shape[1]
idx_y = np.argmax(Z)%Z.shape[1]
ax.scatter(x[idx_y], y[idx_x], Z[idx_x, idx_y], color='red', s=100)
ax.text(x[idx_y], y[idx_x], Z[idx_x, idx_y], " max point: width {} depth {}".format(y[idx_x], x[idx_y]))
plt.savefig(result_prefix + dataset + ".png", dpi=300)
plt.show() | [
"[email protected]"
] | |
3834d2f63b37fe0144610139d1c1926b6c9f87fe | 6fd98e4c558bcf54778259443a383395cb80b7ec | /spam_detector2.py | 5c73c41f0ff89b8f79db3cd31a100095c10ad24e | [] | no_license | rexroy73/Data_Science_Portfolio | 06c78791480df8f84e8766763b33984b31e5d257 | 1194349972c1c5f7db7e84302b36361ec49e7f3c | refs/heads/master | 2020-03-20T08:58:24.391461 | 2018-07-15T17:34:08 | 2018-07-15T17:34:08 | 137,324,408 | 0 | 0 | null | 2018-06-28T12:56:38 | 2018-06-14T07:47:28 | Python | UTF-8 | Python | false | false | 1,866 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 17:58:51 2018
@author: Prashita
"""
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from wordcloud import WordCloud
from matplotlib import pyplot as plt
#avoiding unknown text error
df = pd.read_csv('spam.csv' , encoding='ISO-8859-1')
#drop columns not needed
df = df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1)
#rename columns
df.columns = ['labels', 'data']
df['b_labels'] = df['labels'].map({'ham':0 , 'spam':1})
# defining output
Y = df['b_labels'].as_matrix()
#tfidf = TfidfVectorizer(decode_error = 'ignore')
#X = tfidf.fit_transform(df['data'])
count_vect = CountVectorizer(decode_error= 'ignore')
X = count_vect.fit_transform(df['data'])
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size= 0.33)
model = MultinomialNB()
model.fit(Xtrain, Ytrain)
print('train score: ', model.score(Xtrain, Ytrain))
print('test score: ', model.score(Xtest, Ytest))
#visualize the data
def visualize(label):
words = ''
for msg in df[df['labels'] == label]['data']:
msg = msg.lower()
words += msg + ' '
wordcloud = WordCloud(width = 600, height=400).generate(words)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
visualize('spam')
visualize('ham')
#analysis of the model's shortcomings
df['predictions'] = model.predict(X)
sneaked_spam = df[(df['predictions'] == 0) & (df['b_labels'] == 1)]['data']
#spam messages that slipped through (predicted ham, labelled spam)
for msg in sneaked_spam:
print(msg)
not_actual_spam = df[(df['predictions'] == 1) & (df['b_labels'] == 0)]['data']
#ham messages wrongly flagged as spam (predicted spam, labelled ham)
for msg1 in not_actual_spam:
print(msg1)
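# --- Scoring a new message (editor's sketch) ---
# New text must go through the already-fitted CountVectorizer; the sample
# message below is made up, not taken from the dataset.
sample = ["WINNER!! Claim your free prize now"]  # hypothetical message
print('predicted label (1 = spam):', model.predict(count_vect.transform(sample))[0])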
| [
"[email protected]"
] | |
79fef6f5b8dff739ef7ca47053d1fc5c9aaf55e6 | 7c63a96fad4257f4959ffeba0868059fc96566fb | /py/m_lutz-learning_py-5_ed/code/part_04-functions_and_generators/ch_19-advanced_function_topics/02-function_objects_attributes_and_annotations/main.py | 0bca586be2664c40e3c2988e354046e63bcffc58 | [
"MIT"
] | permissive | ordinary-developer/education | b426148f5690f48e0ed4853adfc3740bd038b72c | 526e5cf86f90eab68063bb7c75744226f2c54b8d | refs/heads/master | 2023-08-31T14:42:37.237690 | 2023-08-30T18:15:18 | 2023-08-30T18:15:18 | 91,232,306 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | def example1():
def echo(message):
print(message)
echo('Direct call')
x = echo
x('Indirect call')
def example2():
def echo(message):
print(message)
def indirect(func, arg):
func(arg)
indirect(echo, 'Argument call!')
def example3():
def echo(message):
print(message)
schedule = [ (echo, 'Spam!'), (echo, 'Ham!') ]
for (func, arg) in schedule:
func(arg)
def example4():
def make(label):
def echo(message):
print(label + ':' + message)
return echo
F = make('Spam')
F('Ham!')
F('Eggs!')
def example5():
def func(a):
b = 'spam'
return b * a
print(func(8))
def example6():
def func(a):
b = 'spam'
return b * a
print(func.__name__)
print(dir(func))
print(func.__code__)
print(dir(func.__code__))
def example7():
def func():
pass
func.count = 0
func.count += 1
print(func.count)
func.handles = 'Button-Press'
func.handles
print(dir(func))
def example8():
def func(): pass
print(dir(func))
print(len(dir(func)))
print([x for x in dir(func) if not x.startswith('__')])
def example9():
def func(a: 'spam', b: (1, 10), c: float) -> int:
return a + b + c
print(func(1, 2, 3))
print(func.__annotations__)
for arg in func.__annotations__:
print(arg, '=>', func.__annotations__[arg])
def example10():
def func(a: 'spam' = 4, b: (1, 10) = 5, c: float = 6) -> int:
return a + b + c
print(func(1, 2, 3))
    print(func())
print(func(1, c = 10))
print(func.__annotations__)
if __name__ == '__main__':
funcs = [ example1, example2, example3, example4, example5,
example6, example7, example8, example9, example10 ]
for func in funcs:
func()
| [
"[email protected]"
] | |
914ab4d119e7873fa52ac0ebec095a05c277d6d5 | 2b8d346e473e8b7907e513e8f854d8809345f770 | /testqt.py | c297eccddf4ffd4380e3a915e84d1ccf7f23bf95 | [] | no_license | hasikill/MyTools | 612fdd49eced56b33e3c6515645fe079f3781886 | 166c4486211e7ec8849980eda659d62cc4f0b777 | refs/heads/master | 2021-01-09T14:57:55.854870 | 2020-02-23T01:51:06 | 2020-02-23T01:51:06 | 242,346,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,878 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1148, 641)
Form.setMinimumSize(QtCore.QSize(1148, 641))
Form.setMaximumSize(QtCore.QSize(1148, 641))
self.widget = QtWidgets.QWidget(Form)
self.widget.setGeometry(QtCore.QRect(2, 1, 1145, 638))
self.widget.setObjectName("widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.toolBox = QtWidgets.QToolBox(self.widget)
self.toolBox.setStyleSheet("QToolBox{\n"
"border: 1px solid;\n"
"}\n"
"")
self.toolBox.setObjectName("toolBox")
self.page_2 = QtWidgets.QWidget()
self.page_2.setGeometry(QtCore.QRect(0, 0, 280, 570))
self.page_2.setObjectName("page_2")
self.toolBox.addItem(self.page_2, "")
self.verticalLayout.addWidget(self.toolBox)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lb_speed = QtWidgets.QLabel(self.widget)
self.lb_speed.setObjectName("lb_speed")
self.horizontalLayout.addWidget(self.lb_speed)
self.progressBar = QtWidgets.QProgressBar(self.widget)
self.progressBar.setProperty("value", 0)
self.progressBar.setAlignment(QtCore.Qt.AlignCenter)
self.progressBar.setTextVisible(True)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout.addWidget(self.progressBar)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.tableWidget = QtWidgets.QTableWidget(self.widget)
self.tableWidget.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.tableWidget.setStyleSheet("QTableWidget::item::hover{\n"
" color: rgb(255, 0, 0);\n"
" text-decoration:underline;\n"
"}")
self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectItems)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.horizontalLayout_2.addWidget(self.tableWidget)
self.horizontalLayout_2.setStretch(0, 2)
self.horizontalLayout_2.setStretch(1, 6)
self.retranslateUi(Form)
self.toolBox.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Kanxue Online Toolkit CR32"))
        self.lb_speed.setText(_translate("Form", "Download progress:"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("Form", "Program name"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("Form", "Author"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("Form", "Description"))
| [
"[email protected]"
] | |
21973496d5c2032f23e1b49f0db9e3c502244292 | 7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0 | /src/igem/neutronics/air/container/borosilicate-glass-backfill/5cm/25wt/plot_all.in.one_cask.thickness_dose.rate_t4045_surface.py | 68b7c9364abc28d207af46f51a75a3f08c2773ac | [] | no_license | TheDoctorRAB/plot | dd3b5134c91c8fa7032fcc077c5427b26a80e49d | ed6746d511222c03e79f93548fe3ecd4286bf7b1 | refs/heads/master | 2021-07-11T10:21:19.347531 | 2020-07-16T17:13:15 | 2020-07-16T17:13:15 | 20,462,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | ########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine
# All in one file, with no separate control input, lib files
# Plot data is contained in a separate data file, read on command line
# Set up for a secondary y axis if needed
#
########################################################################
#
#
#
#######
#
# imports
#
# plot
#
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
#
#######
#
# command line
#
from sys import argv
script,plot_datafile=argv #column 0 is the x values then odd columns contain dose/flux
#
#######
#
# screen resolution
#
import Tkinter
root=Tkinter.Tk()
#
########################################################################
#
#
#
#######
#
# screen resolution
#
###
#
# pixels
#
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
#
###
#
# mm
#
width_mm=root.winfo_screenmmwidth()
height_mm=root.winfo_screenmmheight()
#
###
#
# in
#
width_in=width_mm/25.4
height_in=height_mm/25.4
#
###
#
# dpi
#
width_dpi=width/width_in
height_dpi=height/height_in
#
dpi_values=(96,120,144,168,192)
current_dpi=width_dpi
minimum=1000
#
for dval in dpi_values:
difference=abs(dval-width_dpi)
if difference<minimum:
minimum=difference
current_dpi=dval
#
#######
#
# output to screen
#
print('width: %i px, height: %i px'%(width,height))
print('width: %i mm, height: %i mm'%(width_mm,height_mm))
print('width: %0.f in, height: %0.f in'%(width_in,height_in))
print('width: %0.f dpi, height: %0.f dpi'%(width_dpi,height_dpi))
print('size is %0.f %0.f'%(width,height))
print('current DPI is %0.f' % (current_dpi))
#
#######
#
# open the plot data file(s)
# add plot_dataN for each plot_datafileN
#
plot_data=numpy.loadtxt(plot_datafile,dtype=float)
#
#######
#
# graph parameters
#
###
#
# font sizes
#
matplotlib.rcParams.update({'font.size': 48}) #axis numbers
#
title_fontsize=54 #plot title
axis_fontsize=48 #axis labels
annotate_fontsize=48 #annotation
#
###
#
# set up for two y axis
#
fig,left_axis=plot.subplots()
# right_axis=left_axis.twinx()
#
###
#
# plot text
#
title='Dose rate - Outer surface'
xtitle='Wall thickness [cm]'
ytitle='Dose rate [$\mu$Sv/h]'
#
###
#
# legend
# add linecolorN for each plot_dataN
# add curve_textN for each plot_dataN
#
line_color0='blue' #color
line_color1='orange' #color
line_color2='red' #color
line_color3='green' #color
line_color4='cyan' #color
#
curve_text0='10 wt% $B_4C$' #legend text
curve_text1='30 wt% $B_4C$' #legend text
curve_text2='50 wt% $B_4C$' #legend text
curve_text3='70 wt% $B_4C$' #legend text
curve_text4='90 wt% $B_4C$' #legend text
#
legend_location='lower left' #location of legend on grid
legend_font=42
#
###
#
# annotate
# position of the annotation dependent on axis domain and range
#
annotate_title='T-4045'
annotate_x=23
annotate_y=4000
#
annotate_title2='Air-Glass backfill'
annotate_x2=23
annotate_y2=2000
#
annotate_title3='25 wt% $^{10}B$'
annotate_x3=23
annotate_y3=800
#
annotate_title4='5cm thick concrete'
annotate_x4=23
annotate_y4=0.2
#
###
#
# axis domain and range
#
xmin=1
xmax=31
#
ymin=0.1
ymax=6000
#
###
#
# axis ticks
#
xmajortick=5
ymajortick=5000
#
xminortick=1
yminortick=1000
#
###
#
# grid linewidth
#
major_grid_linewidth=2.5
minor_grid_linewidth=2.1
#
major_grid_tick_length=7
minor_grid_tick_length=5
#
###
#
# curve linewidth
#
curve_linewidth=4.0
#
#######
#
# set plot diagnostics
#
###
#
# titles
#
plot.title(title,fontsize=title_fontsize)
left_axis.set_xlabel(xtitle,fontsize=axis_fontsize)
left_axis.set_ylabel(ytitle,fontsize=axis_fontsize)
# right_axis.set_ylabel()
#
###
#
# grid
#
left_axis.grid(which='major',axis='both',linewidth=major_grid_linewidth)
left_axis.grid(which='minor',axis='both',linewidth=minor_grid_linewidth)
#
left_axis.tick_params(axis='both',which='major',direction='inout',length=major_grid_tick_length)
left_axis.tick_params(axis='both',which='minor',direction='inout',length=minor_grid_tick_length)
#
###
#
# axis domain and range
#
plot.xlim(xmin,xmax)
left_axis.axis(ymin=ymin,ymax=ymax)
###
#
# axis ticks
#
left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
#
###
#
# log scale option
# xmin,ymin !=0 for log scale
#
#left_axis.set_xscale('log')
left_axis.set_yscale('log')
#
###
#
# annotation
# comment out if not needed
#
left_axis.annotate(annotate_title,xy=(annotate_x,annotate_y),xytext=(annotate_x,annotate_y),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title2,xy=(annotate_x2,annotate_y2),xytext=(annotate_x2,annotate_y2),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title3,xy=(annotate_x3,annotate_y3),xytext=(annotate_x3,annotate_y3),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title4,xy=(annotate_x4,annotate_y4),xytext=(annotate_x4,annotate_y4),fontsize=annotate_fontsize)
#
#######
#
# plot data
#
left_axis.plot(plot_data[:,0],plot_data[:,1],marker='o',color=line_color0,label=curve_text0,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,3],marker='o',color=line_color1,label=curve_text1,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,5],marker='o',color=line_color2,label=curve_text2,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,7],marker='o',color=line_color3,label=curve_text3,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,9],marker='o',color=line_color4,label=curve_text4,linewidth=curve_linewidth,markersize=20)
left_axis.legend(loc=legend_location,fontsize=legend_font) #legend needs to be after all the plot data
plot.get_current_fig_manager().resize(width,height)
plot.gcf().set_size_inches((0.01*width),(0.01*height))
#
#######
#
# save
#
plot.savefig(title,dpi=current_dpi)
#
#######
#
# plot to screen
#
# # plot.show()
#
########################################################################
#
# EOF
#
########################################################################
| [
"[email protected]"
] | |
bebc6754c64f50b46ea8c84eecd1c17543a374a9 | 9e4fc18f267f49e1efeb69f2e95e4c4a00ce8ce6 | /3_FirstMissingInt.py | f2412817e3b1e609ce1a1904bf693211225e701b | [] | no_license | cookiewho/IPS_Workshop_2020 | 1de50e80abbedafeb48f7907ffca8461d2d2bd4a | 4dda2febb7dfa6645042e8e7a6d1138465ad12cf | refs/heads/master | 2022-12-25T02:59:16.436013 | 2020-10-09T17:31:31 | 2020-10-09T17:31:31 | 284,307,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | def first_missing_positive_integer(integers):
integers.sort()
count = 1
for y in integers:
if y == count:
count += 1
return count | [
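# --- Quick sanity checks (editor's addition) ---
# Worked examples: duplicates and negatives are skipped because the counter
# only advances on an exact match with the current value.
if __name__ == '__main__':
    assert first_missing_positive_integer([3, 4, -1, 1]) == 2
    assert first_missing_positive_integer([1, 2, 0]) == 3
    assert first_missing_positive_integer([]) == 1
    print('all checks passed')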
"[email protected]"
] | |
6fadfdde9b58209e5e269e51592bc9d834e2bad9 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/app/firewall_rules/list.py | ff81b7ae1ab5630533a0f34061862bc99572334e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 1,320 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for listing all firewall rules."""
from googlecloudsdk.api_lib.app.api import appengine_firewall_api_client as api_client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.app import firewall_rules_util
class List(base.ListCommand):
"""Lists the firewall rules."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To list all App Engine firewall rules, run:
$ {command}
""",
}
@staticmethod
def Args(parser):
parser.display_info.AddFormat(firewall_rules_util.LIST_FORMAT)
def Run(self, args):
client = api_client.GetApiClientForTrack(self.ReleaseTrack())
return client.List()
| [
"[email protected]"
] | |
c6ea1a2aa909321e07d092139bc6d5316b1fb494 | 91d45a7ff2a178be2c9e44e34a7994a26b54ac40 | /main/api/v1/views.py | e1a0ffa5157ec83955ac002facf737b032e3dc12 | [] | no_license | Dima070585/Project1 | 32f50d6b4afda60729abab0cbc13ba18897d23bf | d3d714f124d13a871c19d6489ded1ff0c912ae3c | refs/heads/master | 2023-09-05T05:53:34.567882 | 2021-10-19T07:31:54 | 2021-10-19T07:31:54 | 418,823,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | from rest_framework import generics
from main.models import Project, Post
from .serializers import ProjectsListSerializers, ProjectDetailSerializer, PostListSerializers
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import OrderingFilter
class ProjectList(generics.ListAPIView):
queryset = Project.objects.all()
serializer_class = ProjectsListSerializers
class ProjectDetail(generics.RetrieveAPIView):
queryset = Project.objects.all()
serializer_class = ProjectDetailSerializer
class PostList(generics.ListAPIView):
queryset = Post.objects.all()
serializer_class = PostListSerializers
filter_backends = [DjangoFilterBackend, OrderingFilter]
ordering_fields = ['prise', 'created_at']
filterset_fields = ['category'] | [
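# --- Example requests (editor's note) ---
# With DjangoFilterBackend and OrderingFilter configured as above, the post
# list endpoint accepts query parameters such as (URL prefix assumed):
#   GET /posts/?category=3                 # filter via filterset_fields
#   GET /posts/?ordering=-created_at       # newest first via ordering_fields
#   GET /posts/?category=3&ordering=prise  # combined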
"[email protected]"
] | |
d6463a63f7d0c66597f783150a1f4296ad3d44ff | cdac2e950dd226a30f176ba430d8a8bb2d42cb3d | /Open cv project/Opencv2/Opencv Tutorial2.py | f0ea677c4b3612862ee9751bf5cdb0801a182ef1 | [] | no_license | Chidalu567/computervision | 27d9b2f60f70e831793c7402c6a77a3279762d8e | c9a044faafa9a1bf96dcc436acf730deb8f802fb | refs/heads/master | 2023-04-16T13:23:15.907989 | 2021-04-29T06:16:24 | 2021-04-29T06:16:24 | 362,702,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import cv2
# #===>Image reading in cv2
# img=cv2.imread('chidalu.jpg',cv2.IMREAD_COLOR); #image read in normal color
# #===>Showing image in cv2
# cv2.namedWindow('image',cv2.WINDOW_NORMAL); #create a normal named window
# cv2.imshow('image',img); #show image in window
# cv2.waitKey(0); #wait for key
# cv2.destroyWindow('image'); #destroy window 'image';
#=====>A full project
img=cv2.imread('chidalu.jpg',cv2.IMREAD_GRAYSCALE); #read image in gray scale
cv2.namedWindow('image',cv2.WINDOW_NORMAL); #create a namedwindow
cv2.imshow('image',img); #show image in window
k=cv2.waitKey(0) & 0xFF; #wait for a key
if k ==27:
cv2.destroyWindow('image'); #destroy window image
elif k == ord('s'): #if key == s
    cv2.imwrite('chidalu_gray.png',img); #save the grayscale image to a new file
    cv2.destroyWindow('image'); #destroy window image
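# --- Editor's sketch: the imread flag changes the returned array shape. ---
# (Assumes 'chidalu.jpg' exists; imread returns None when the file is missing.)
color = cv2.imread('chidalu.jpg', cv2.IMREAD_COLOR)      # shape (h, w, 3), BGR order
gray = cv2.imread('chidalu.jpg', cv2.IMREAD_GRAYSCALE)   # shape (h, w)
print('color shape:', color.shape, '| gray shape:', gray.shape)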
| [
"[email protected]"
] | |
d327409777969d2f94af518e7c12543f7e21b2f8 | bb9d905d607b933da98157508980eb9a763e30d4 | /06Deep Learning/01Introduction to Neural Networks/03Backpropagation in Neural Networks/09Updating the Weights and Biases - V/temp.py | f80261da9ef80ae26bec0da70f820e9db7c4be6b | [
"Apache-2.0"
] | permissive | talk2sunil83/UpgradLearning | 92243af00b838c31143eed3fb17d4abd3b331ec0 | 70c4f993c68ce5030e9df0edd15004bbb9fc71e7 | refs/heads/main | 2023-06-16T05:02:13.194572 | 2021-07-08T07:52:08 | 2021-07-08T07:52:08 | 330,149,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # %%
from scipy.special import expit as sigmoid
# %%
z2 = [2, 1, 3, -1]
# %%
h2 = sigmoid(z2)
h2
# %%
sp = h2*(1-h2)
sp
# %%
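# Editor's check: sigma'(z) = sigma(z) * (1 - sigma(z)). Compare the analytic
# derivative above with a central finite difference (eps chosen arbitrarily).
import numpy as np
eps = 1e-6
z = np.asarray(z2, dtype=float)
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
assert np.allclose(numeric, sp)
numeric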
| [
"[email protected]"
] | |
97f0d0f83671adfdd78f491940ac6a031eca6beb | 3286ba182095b43248d4c61f6d0b73267b845b99 | /hashes/sdbm.py | f80941306a743d126221c3f76363b41263a16dac | [
"MIT"
] | permissive | arnabdas95/Python | 8e045607bd6f1c8d85497a487bce08a15949198b | 8062d282451f8cce8162a2915a69f245a598c279 | refs/heads/master | 2022-11-05T11:48:10.214443 | 2020-06-16T06:00:06 | 2020-06-16T06:00:06 | 272,621,970 | 0 | 0 | null | 2020-06-16T05:57:02 | 2020-06-16T05:57:01 | null | UTF-8 | Python | false | false | 1,322 | py | """
This algorithm was created for sdbm (a public-domain reimplementation of ndbm) database library.
It was found to do well in scrambling bits, causing better distribution of the keys and fewer splits.
It also happens to be a good general hashing function with good distribution.
The actual function (pseudo code) is:
for i in i..len(str):
hash(i) = hash(i - 1) * 65599 + str[i];
What is included below is the faster version used in gawk. [there is even a faster, duff-device version]
The magic constant 65599 was picked out of thin air while experimenting with different constants.
It turns out to be a prime.
This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere.
source: http://www.cse.yorku.ca/~oz/hash.html
"""
def sdbm(plain_text: str) -> str:
"""
    Implements the sdbm hash; simple to use and good at scrambling bits.
    It iterates over each character of the given string and folds it into the hash.
>>> sdbm('Algorithms')
1462174910723540325254304520539387479031000036
>>> sdbm('scramble bits')
730247649148944819640658295400555317318720608290373040936089
"""
hash = 0
for plain_chr in plain_text:
hash = ord(plain_chr) + (hash << 6) + (hash << 16) - hash
return hash
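# --- Editor's demonstration ---
# The shift expression is a fast multiply-accumulate: for any integer h,
# (h << 6) + (h << 16) - h == h * 65599, matching the pseudo code above.
if __name__ == "__main__":
    h = 123456789
    assert (h << 6) + (h << 16) - h == h * 65599
    print(sdbm("Algorithms"))  # 1462174910723540325254304520539387479031000036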
| [
"[email protected]"
] | |
dfa0fb7a21b30452a69930bf0078f16429b27b99 | e04ee246bc9912b617747a17df055e8f28d63c80 | /antiphishme/tests/functional/server/server_test.py | 0ade4c572dacf900709c65127869cd46ba4150ba | [
"MIT"
] | permissive | wang-zifu/AntiPhishMe-backend | 1c94c102b68054c61028cfb2ee8a7e319f12ea93 | bde8a6fe381142ccd0481ab33a1fce5a92c6bfbc | refs/heads/master | 2022-09-24T07:36:56.071541 | 2020-05-28T15:42:04 | 2020-05-28T15:42:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py | import json
import connexion
import pytest
import allure
from antiphishme.src.config import (
connexion_app,
BASE_PATH,
SWAGGER_DIR,
arguments,
AUTH_API_KEY,
SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_TRACK_MODIFICATIONS
)
from antiphishme.tests.test_helpers import (
data_to_json,
info,
assert_equal,
assert_dict_contains_key
)
from flask_sqlalchemy import SQLAlchemy
@allure.epic("Server")
@allure.parent_suite("Functional")
@allure.suite("Server")
#@allure.sub_suite("sub suite name")
class Tests:
@allure.description("""
Test endpoint "/server/health"
""")
def test_server_health(self, client_with_db):
client = client_with_db[0]
endpoint = '/server/create_db'
headers = {
"X-API-Key": AUTH_API_KEY
}
info("GET {}".format(endpoint))
response = client.get(BASE_PATH + endpoint, headers=headers)
assert_equal(response.status_code, 200, "Check status code")
endpoint = '/server/health'
info("GET {}".format(endpoint))
response = client.get(BASE_PATH + endpoint)
assert_equal(response.status_code, 200, "Check status code")
j = data_to_json(response.data)
field = "db_status"
expected_value = "OK"
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
field = "server_status"
expected_value = "OK"
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
@allure.description("""
Test endpoint "/server/create_db" with basic auth
""")
def test_database_creation(self, client_with_db):
client = client_with_db[0]
endpoint = '/server/create_db'
headers = {
"X-API-Key": AUTH_API_KEY
}
info("GET {}".format(endpoint))
response = client.get(BASE_PATH + endpoint, headers=headers)
assert_equal(response.status_code, 200, "Check status code")
j = data_to_json(response.data)
field = "message"
expected_value = "Database created."
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
@allure.description("""
Test endpoint "/server/add_keyword" with basic auth
""")
def test_add_keyword(self, client_with_db):
client = client_with_db[0]
db = client_with_db[1]
endpoint = '/server/create_db'
headers = {
"X-API-Key": AUTH_API_KEY
}
info("GET {}".format(endpoint))
response = client.get(BASE_PATH + endpoint, headers=headers)
assert_equal(response.status_code, 200, "Check status code")
endpoint = '/server/add_keyword'
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = {
'keyword': 'keyword1234'
}
info("POST {}".format(endpoint))
response = client.post(BASE_PATH + endpoint, headers=headers, data=data)
j = data_to_json(response.data)
assert_equal(response.status_code, 200, "Check status code")
field = "status"
expected_value = "OK"
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
@allure.description("""
Test endpoint "/server/add_keyword" with basic auth
Send wrong data and expect error.
""")
def test_add_keyword_too_short(self, client_with_db):
client = client_with_db[0]
db = client_with_db[1]
endpoint = '/server/create_db'
headers = {
"X-API-Key": AUTH_API_KEY
}
info("GET {}".format(endpoint))
response = client.get(BASE_PATH + endpoint, headers=headers)
assert_equal(response.status_code, 200, "Check status code")
endpoint = '/server/add_keyword'
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = {
'keyword': 'key'
}
info("POST {}".format(endpoint))
response = client.post(BASE_PATH + endpoint, headers=headers, data=data)
j = data_to_json(response.data)
assert_equal(response.status_code, 400, "Check status code")
field = "detail"
expected_value = "Keyword too short - min 4 signs"
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
field = "status"
expected_value = 400
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value))
field = "title"
expected_value = "Bad Request"
assert_dict_contains_key(j, field, "Check if dict contains given key - \"{}\"".format(field))
assert_equal(j[field], expected_value, "Check if item \"{}\" is equal to \"{}\"".format(field, expected_value)) | [
"[email protected]"
] | |
6ffdcc18cb1bf282c53853d5f0eb94e16793dca8 | 08ce1ea8fd6933a9aa560c22019e8ef515a18a4e | /main.py | 803627607dd1d1f344ca8b48a42a215247b60e88 | [] | no_license | GarrettCrippen/simple_weight_tracker | f6acfbe20230bc4d22923c02febfdcbbbd84bb1a | 58bae3e633a111a58d0ea7ae2617b9f9985192d4 | refs/heads/master | 2023-07-15T12:57:02.946203 | 2021-08-17T20:16:55 | 2021-08-17T20:16:55 | 397,079,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import tkinter as tk
from chester import *
from tkinter import ttk
from tkinter import *
# this is the function called when the button is clicked
def btnClickFunction():
weight= getInputBoxValue()
name = getListboxValue()
try:
entryToCell(name[0],weight.strip())
except:
pass
# this is a function to get the user input from the text input box
def getInputBoxValue():
userInput = tInput.get()
return userInput
# this is a function to get the selected list box value
def getListboxValue():
itemSelected = listTwo.curselection()
return itemSelected
root = Tk()
# This is the section of code which creates the main window
root.geometry('278x206')
root.configure(background='#F0F8FF')
root.title('Weight Tracker')
# This is the section of code which creates a button
Button(root, text='submit to google sheets', bg='#F0F8FF', font=('arial', 12, 'normal'), command=btnClickFunction).place(x=43, y=131)
# This is the section of code which creates a text input box
tInput=Entry(root)
tInput.place(x=76, y=101)
# This is the section of code which creates a label
Label(root, text='lbs.', bg='#F0F8FF', font=('arial', 12, 'normal')).place(x=213, y=105)
# This is the section of code which creates a listbox
listTwo=Listbox(root, bg='#F0F8FF', font=('arial', 12, 'normal'), width=0, height=0)
listTwo.insert('0', 'Garrett')
listTwo.insert('1', 'Chester')
listTwo.place(x=112, y=42)
root.mainloop()
| [
"[email protected]"
] | |
29d8b2ee8e8f1f47332de7af743588638a015d9e | d3df22e32e7c85f47568fe7c939649225c9ff039 | /zdl/utils/env/__init__.py | ba0815b4beb33c26be708725d2bd7e98ff2b173f | [] | no_license | ZDL-Git/PyZDL | 6b8465eac25676dd59e36ba875b1bc3750ae18e2 | c45cc92b9bb282e90505cdfefb07630dd5ba6c39 | refs/heads/master | 2023-07-10T11:54:09.352014 | 2021-08-26T08:26:46 | 2021-08-26T08:26:46 | 297,308,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | __all__ = ['colab', 'gpu', 'installer', 'require', 'terminal']
| [
"[email protected]"
] | |
d28ab348d995c645b1a5372bfdc608e1e20db8f9 | 3921913ed746e6043cd8c43eb94d65445ed6c196 | /apps/category/migrations/0003_remove_category_posts.py | be6104f0d455058be275f2b3eb492b494457d0b9 | [] | no_license | IvesCostaBr/api-cerberus-blog | 84063d90a302cabbe99ffc76fbd2f644f22d39e0 | e9b29aec157970096dd730b19d61c276f7f4fcf4 | refs/heads/master | 2023-05-31T16:40:53.177068 | 2021-07-11T23:22:04 | 2021-07-11T23:22:04 | 384,473,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 3.2.5 on 2021-07-10 22:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('category', '0002_alter_category_posts'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='posts',
),
]
# === junshoong/django-tdd-tutorial :: /lists/views.py ===
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import redirect, render
from lists.forms import ItemForm, EMPTY_LIST_ERROR, ExistingListItemForm
from lists.models import Item, List
def home_page(request):
    return render(request, 'home.html', {'form': ItemForm()})
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ExistingListItemForm(for_list=list_)
    if request.method == 'POST':
form = ExistingListItemForm(for_list=list_, data=request.POST)
if form.is_valid():
form.save()
return redirect(list_)
return render(request, 'list.html', {'list': list_, "form": form})
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
form.save(for_list=list_)
return redirect(list_)
else:
return render(request, 'home.html', {"form": form})
# === mohak29/LSH-community-detection-modified :: /src/python/run_email_data.py ===
"""
A script to run the end-to-end process for the public email data. Included for
reproducibility only - it is not meant to be executed end-to-end in production.
"""
from generate_hashes import generate_hashes
import pandas as pd
from LSH import build_LSH_table
from run_experiments import run_email_experiments
if __name__ == '__main__':
x_path = '../../resources/email-Eu-core.txt'
y_path = '../../resources/labels.txt'
sig_path = '../../local_resources/email_signatures.txt'
lsh_path = '../../local_resources/email_hash_table.pkl'
out_folder = '../../results'
generate_hashes(x_path, y_path, sig_path, num_hashes=100)
data = pd.read_csv(sig_path, index_col=0)
signatures = data.values
build_LSH_table(signatures, lsh_path, n_bands=50)
run_email_experiments(sig_path, out_folder, lsh_path)
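    # Pipeline recap: generate_hashes() writes minhash signatures for the graph,
    # build_LSH_table() banks them into an LSH table (50 bands here), and
    # run_email_experiments() consumes both artifacts to produce the results.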
# === Ascend/ModelZoo-PyTorch :: /PyTorch/contrib/cv/detection/FCOS/configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py ===
_base_ = ['./fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py']
model = dict(
pretrained='open-mmlab://detectron/resnet101_caffe',
backbone=dict(depth=101))
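# Note: `_base_` makes this config inherit everything from the ResNet-50 FCOS
# config it names; only `pretrained` and the backbone depth are overridden here,
# so the schedule, dataset and head settings all come from the base file.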
# === Daman98/Faculty-Portal :: /Faculty_Portal/Faculty_Portal/faculty/migrations/0021_auto_20171116_1550.py ===
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-16 15:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('faculty', '0020_honor'),
]
operations = [
migrations.AlterField(
model_name='honor',
name='honor_type',
field=models.CharField(choices=[('0', 'Award'), ('1', 'Achievement'), ('2', 'Others')], max_length=20),
),
]
# === DanielPepper2/grid-tactics :: /Terrain.py ===
class Terrain(object):
def __init__(self):
pass
#------------------------------
class Tree(Terrain):
def __init__(self):
self.bonus_def = 1
self.obstruct = False
#-------------------------------
class Water(Terrain):
def __init__(self):
self.bonus_def = 0
self.obstruct = True
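# A minimal usage sketch (hypothetical; not part of the original module).
# A movement routine would check `obstruct` before entering a tile and add
# `bonus_def` to a defender standing on it:
if __name__ == "__main__":
    tile = Tree()
    print(tile.bonus_def, tile.obstruct)  # -> 1 False
    print(Water().obstruct)               # -> True: impassable tile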
# === zoripong/flask-service-template :: /tests/infra/sample/repository_test.py ===
import uuid
from app.domain.sample.model import Sample
from app.orm import Session
from app.usecases.sample.service import SampleRepository
def test_get_samples(
fx_session: Session,
fx_sample_repository: SampleRepository,
):
repository = fx_sample_repository
result = repository.get_samples()
assert result == []
samples = [Sample(name='1'), Sample(name='2')]
fx_session.add_all(samples)
fx_session.commit()
result = repository.get_samples()
assert result == samples
def test_find_sample(
fx_session: Session,
fx_sample_repository: SampleRepository,
):
target_id = uuid.uuid4()
sample = Sample(id=target_id, name='sample')
fx_session.add(sample)
fx_session.commit()
repository = fx_sample_repository
result = repository.find_sample(uuid.uuid4())
assert result is None
result = repository.find_sample(target_id)
assert result == sample
assert result.name == 'sample'
def test_add_sample(
fx_session: Session,
fx_sample_repository: SampleRepository,
):
repository = fx_sample_repository
assert fx_session.query(Sample).count() == 0
repository.add_sample(Sample(name='sample'))
fx_session.commit()
assert fx_session.query(Sample).count() == 1
sample = fx_session.query(Sample).filter(Sample.name == 'sample').one()
assert sample is not None
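# The `fx_session` and `fx_sample_repository` arguments are pytest fixtures
# assumed to live in a conftest.py elsewhere in the repository; a minimal
# sketch of what these tests rely on (names are assumptions, not from this file):
#
#     @pytest.fixture
#     def fx_sample_repository(fx_session) -> SampleRepository:
#         return SampleRepository(fx_session)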
# === Campachino1/Practica2 :: /ej5.py ===
texto = input('ingrese una frase: ').lower()
palabra_clave = input('ingrese una palabra: ').lower()
texto_md = texto.split(' ')
cant = 0
for palabra in texto_md:
if (palabra.startswith(palabra_clave))or(palabra.endswith(palabra_clave)):
cant = cant + 1
print(f'la cantidad de veces que se repite la palabra: {palabra_clave} en el texto son: {cant}')
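# Worked example (hypothetical input): for the phrase "hola holanda ola" and the
# keyword "hola", both "hola" and "holanda" match via startswith(), so the
# script reports a count of 2.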
# === dljgs1/autoFGO :: /getpic.py (MIT) ===
import os
import cv2
import threading
from numpy import *
from tkinter import *
from PIL import Image, ImageTk
tk = Tk()
canvas = Canvas(width=200,height=100)
canvas.pack()
def pull_screenshot():
os.system('adb shell screencap -p /sdcard/buffer.bmp')
os.system('adb pull /sdcard/buffer.bmp .')
def tobinary(image):
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
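# NOTE: cv2.threshold returns a (retval, image) pair, which is why callers
# below unpack the result as `ret, img = tobinary(...)`; THRESH_BINARY_INV
# inverts the usual mapping, so dark pixels become 255 after thresholding.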
class pic:
def __init__(self,fname,x=200,y=200,w=100):
self.fname = fname
self.img = cv2.imread(fname)
        # cv2.imread returns None on failure; `is None` avoids the ambiguous
        # element-wise comparison `==` performs on an ndarray
        if self.img is None:
            print("error file,", fname)
            self.img = cv2.imread(fname)
self.x = int(x)
self.y = int(y)
self.w = int(w)
def get_cut(self):
w = self.w/2
x1 = int(self.x-w)
x2 = int(self.x+w)
y1 = int(self.y-w)
y2 = int(self.y+w)
b,g,r = cv2.split(self.img[y1:y2,x1:x2])
img = cv2.merge((r,g,b))
im = Image.fromarray(img)
imgtk = ImageTk.PhotoImage(image=im)
return imgtk
def transxy(self):
x = self.img.shape[1]-self.x
y = self.y
return x,y
def save_xy(self):
print("input pos name")
s = input()
f = open("save.txt","a")
s+=" %d %d\n"%(self.transxy())
f.write(s)
f.close()
def modi_w(self):
print("input w")
w = input()
self.w = int(w)
def flush_pic(self):
pull_screenshot()
self.img = cv2.imread(self.fname)
def compare(self,pname="buffer.bmp"):
img = pic(pname,self.x,self.y,self.w)
x,y = img.transxy()
w = self.w/2
x1 = int(x-w)
x2 = int(x+w)
y1 = int(y-w)
y2 = int(y+w)
print(x1,x2,y1,y2)
img1 = img.img[y1:y2,x1:x2]
img2 = self.img
        # guard against unreadable images; again use `is None` instead of `==`
        if img1 is None or img2 is None:
            self.img = cv2.imread(self.fname)
            return self.compare(pname)
        # the stored template was saved compressed, so binarize both images before comparing
ret,img1 = tobinary(img1)
ret,img2 = tobinary(img2)
#print(img1.shape,img2.shape)
xor = cv2.bitwise_xor(img1,img2)
#cv2.imshow('w',xor)
#cv2.waitKey(0)
#cv2.imshow('w1',img1)
#cv2.imshow('w2',img2)
#cv2.waitKey(0)
        ct = count_nonzero(xor)
        total = (x2 - x1) * (y2 - y1)  # renamed from `all`, which shadowed the builtin
        return (total - ct) / total
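    # compare() returns the fraction of pixels that agree after binarization:
    # 1.0 means the fresh screenshot region matches the saved template exactly.
    # A caller would typically threshold it, e.g. `orgpic.compare() > 0.9`
    # (the 0.9 cut-off is illustrative, not from this project).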
def save_pic(self):
w = self.w/2
print("input pic name")
name = input()
x1 = int(self.x-w)
x2 = int(self.x+w)
y1 = int(self.y-w)
y2 = int(self.y+w)
img = self.img[y1:y2,x1:x2]
cv2.imwrite(name,img)
orgpic = pic("buffer.bmp",200,480,30)
label = None
def push_pic():
global tk
global orgpic
global label
imgtk = orgpic.get_cut()
label.configure(image=imgtk)
    # Tk has no draw() method; update() forces the window to repaint
    tk.update()
def move_pic(t):
if t=="s":
orgpic.save_xy()
if t=="w":
orgpic.modi_w()
if t=="p":
orgpic.save_pic()
if t=="f":
orgpic.flush_pic()
if t=="Up":
orgpic.y -= 10
if t=="Right":
orgpic.x += 10
if t=="Left":
orgpic.x -= 10
if t=="Down":
orgpic.y += 10
push_pic()
def echo_event(evt):
if evt.type == "2":
print("键盘:%s" % evt.keysym)
if evt.keysym!="m":
move_pic(evt.keysym)
print(evt.type)
def init():
    # keyboard events
canvas.bind_all("<KeyPress>",echo_event)
    # to bind a specific key, either "<Key>" or "<KeyPress>" works; append an underscore and the key name, e.g. to bind lowercase "t" or the Left key:
canvas.bind_all("<KeyPress-t>",echo_event)
canvas.bind_all("<KeyPress-Left>",echo_event)
    # mouse events
canvas.bind_all("<Double-Button-1>",echo_event)
canvas.bind_all("<Button-1>",echo_event)
canvas.bind_all("<Button-2>",echo_event)
canvas.bind_all("<Button-3>",echo_event)
global label,orgpic
imgtk = orgpic.get_cut()
label = Label(tk, image = imgtk)
label.bm = imgtk
label.pack()
if __name__ == "__main__":
init()
tk.mainloop()
# pull_screenshot()
# img = cv2.imread('autojump.bmp')
# cv2.imshow('w',img)
# cv2.waitKey(0)
b8435c3dcba554c90e0b1a8928dd03061ed1c4af | 0dd204c00398c2b30ba9b9799cc659b82467f0d8 | /data_structs.py | 86ea807d78ee79bcc03065fc71f9157fd447db89 | [] | no_license | language-allocation-enpc/prod_back_end | 5c462cd18e94fbf31dc3266ef1507d0fd6740e37 | aef1d976a787911c59751b2f9eb2f75e50c8b02e | refs/heads/master | 2020-05-27T23:17:15.966556 | 2019-05-27T12:32:30 | 2019-05-27T12:32:30 | 188,817,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | import random as rd
class course():
def __init__(self, id=-1, name='', language='', creneaux =[], min_students=0, max_students=0):
self.min_students = min_students
self.max_students = max_students
self.id = id
self.language = language
self.name = name
self.creneaux = creneaux
def __eq__(self, other):
return self.id == other.id
def to_dict(self):
dic = {}
dic['id'] = self.id
dic['min_students'] = self.min_students
dic['language'] = self.language
dic['creneaux'] = self.creneaux
dic['name'] = self.name
dic['max_students'] = self.max_students
return dic
def from_dict(self, dic):
self.id = dic['id']
self.min_students = dic['min_students']
self.language = dic['language']
self.creneaux = dic['creneaux']
self.name = dic['name']
self.max_students = dic['max_students']
class vow():
def __init__(self):
self.courses = []
self.weight = 1
def __eq__(self, other):
if len(self.courses) == len(other.courses):
other_courses_ids = [c.id for c in other.courses]
for i_course in range(len(self.courses)):
if self.courses[i_course].id in other_courses_ids:
return False
return True
return False
def __hash__(self):
return hash(c.id for c in self.courses)
def to_dict(self):
dic={}
dic["courses"] = self.courses
dic["weight"] = self.weight
return dic
def from_dict(self, dic):
for course_in_dic in dic["list"]:
current_course = course()
current_course.from_dict(course_in_dic)
self.courses.append(current_course)
self.weight = dic["weight"]
class student():
def __init__(self, id = 0):
self.id = id
self.name = ''
self.vows = []
self.courses = []
| [
"[email protected]"
] | |
ceb7331d835a00793f1292ae071acf4826f5e8f2 | 3236e67432b531fca18bba5bf83c8633f78353c9 | /News-Summarizer/drivertemp.py | 13ccadc1f5b2f9b38b82c817dd557925f4dc9e66 | [
"MIT"
] | permissive | deeplearningcomplete/BadriNarayanan_BE_Finalyear_Project | 4335185b120183455d8b70512d0f9f1b3cdc15c5 | af61279912d8c95816d7e85ab983daa3f6891ee8 | refs/heads/master | 2023-02-15T12:06:31.671121 | 2021-01-14T06:43:21 | 2021-01-14T06:43:21 | 329,494,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # driver.py
# Luke Reichold - CSCI 4930
import colors
import sys
import os
from summarize import Summarizer
argentina_articles = ["argentina/argentina-guardian.txt", "argentina/argentina-nyt.txt"]
china_articles = ["china/china-cnn.txt", "china/china-nyt.txt"]
climate_articles = ["climate/climate-npr.txt", "climate/climate-nyt.txt"]
VW_articles = ["VW/VW-ars.txt", "VW/VW-nyt.txt"]
other_articles = ["otherdomain/otherdomain_one.txt", "otherdomain/otherdomain_two.txt"]
print()
with colors.pretty_output(colors.BOLD, colors.FG_RED) as out:
out.write("THE AVAILABLE OPTIONS ARE")
print()
out.write("ARGENTINA CHINA CLIMATE-CHANGE VOLKS-WAGEN ")
with colors.pretty_output(colors.BOLD, colors.FG_GREEN) as out:
print()
out.write("Press 1 for ARGENTINA ARTICLES")
print()
out.write("Press 2 for CHINA ARTICLES")
print()
out.write("Press 3 for CLIMATE CHANGE ARTICLES")
print()
out.write("Press 4 for VOLKS WAGEN CAR EMISSION STARNDARDS ARTICLES")
print()
out.write("Press 5 for OTHER ARTICLES")
print()
out.write("Press 6 for EXIT")
print()
yourinput = input()
if yourinput == "1":
magic = Summarizer(argentina_articles)
if yourinput == "2":
magic = Summarizer(china_articles)
if yourinput == "3":
magic = Summarizer(climate_articles)
if yourinput == "4":
magic = Summarizer(VW_articles)
if yourinput == "5":
magic = Summarizer(other_articles)
if yourinput == "6":
sys.exit()
#magic = Summarizer(yourvariable)
#print(magic.generate_summaries())
f = open("demofile2.txt", "a")
fp = open("demofile2.pdf", "a")
f.write(magic.generate_summaries())
f.close()
fp.close()
os.system('python scriptone.py')
| [
"[email protected]"
] | |
97da1f6f1c92e18786ea36593372ef77064b724b | 40e7c8036a426902bb3ee6032759fa3302d8f410 | /tp-3/experimental.py | 0da3909d56d34b7ed7dc065bec91d3e27ea27bac | [] | no_license | mbfaria/sistemas-nebulosos | 7e50be0000109be6093da468f594a059abc2a78b | 96582b90ca482c10df1bf64d30f8b6481bfc9b3f | refs/heads/master | 2023-08-06T19:38:35.198691 | 2021-09-10T00:32:36 | 2021-09-10T00:32:36 | 404,906,521 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,032 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ANFIS in torch: some simple functions to supply data and plot results.
@author: James Power <[email protected]> Apr 12 18:13:10 2019
"""
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
dtype = torch.float
class TwoLayerNet(torch.nn.Module):
'''
From the pytorch examples, a simjple 2-layer neural net.
https://pytorch.org/tutorials/beginner/pytorch_with_examples.html
'''
def __init__(self, d_in, hidden_size, d_out):
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(d_in, hidden_size)
self.linear2 = torch.nn.Linear(hidden_size, d_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
def linear_model(x, y, epochs=200, hidden_size=10):
'''
Predict y from x using a simple linear model with one hidden layer.
https://pytorch.org/tutorials/beginner/pytorch_with_examples.html
'''
assert x.shape[0] == y.shape[0], 'x and y have different batch sizes'
d_in = x.shape[1]
d_out = y.shape[1]
model = TwoLayerNet(d_in, hidden_size, d_out)
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
errors = []
for t in range(epochs):
y_pred = model(x)
tot_loss = criterion(y_pred, y)
perc_loss = 100. * torch.sqrt(tot_loss).item() / y.sum()
errors.append(perc_loss)
if t % 10 == 0 or epochs < 20:
print('epoch {:4d}: {:.5f} {:.2f}%'.format(t, tot_loss, perc_loss))
optimizer.zero_grad()
tot_loss.backward()
optimizer.step()
return model, errors
def plotErrors(errors):
'''
Plot the given list of error rates against no. of epochs
'''
plt.plot(range(len(errors)), errors, '-ro', label='errors')
plt.ylabel('Percentage error')
plt.xlabel('Epoch')
plt.show()
def plotResults(y_actual, y_predicted):
'''
Plot the actual and predicted y values (in different colours).
'''
plt.plot(range(len(y_predicted)), y_predicted.detach().numpy(),
'r', label='trained')
plt.plot(range(len(y_actual)), y_actual.numpy(), 'b', label='original')
plt.legend(loc='upper left')
plt.show()
def _plot_mfs(var_name, fv, x):
'''
A simple utility function to plot the MFs for a variable.
Supply the variable name, MFs and a set of x values to plot.
'''
# Sort x so we only plot each x-value once:
xsort, _ = x.sort()
for mfname, yvals in fv.fuzzify(xsort):
plt.plot(xsort.tolist(), yvals.tolist(), label=mfname)
plt.xlabel('Values for variable {} ({} MFs)'.format(var_name, fv.num_mfs))
plt.ylabel('Membership')
plt.legend(bbox_to_anchor=(1., 0.95))
plt.show()
def plot_all_mfs(model, x):
for i, (var_name, fv) in enumerate(model.layer.fuzzify.varmfs.items()):
_plot_mfs(var_name, fv, x[:, i])
def calc_error(y_pred, y_actual):
with torch.no_grad():
tot_loss = F.mse_loss(y_pred, y_actual)
rmse = torch.sqrt(tot_loss).item()
perc_loss = torch.mean(100. * torch.abs((y_pred - y_actual)
/ y_actual))
return(tot_loss, rmse, perc_loss)
def test_anfis(model, data, show_plots=False):
'''
Do a single forward pass with x and compare with y_actual.
'''
x, y_actual = data.dataset.tensors
if show_plots:
plot_all_mfs(model, x)
print('### Testing for {} cases'.format(x.shape[0]))
y_pred = model(x)
mse, rmse, perc_loss = calc_error(y_pred, y_actual)
print('MS error={:.5f}, RMS error={:.5f}, percentage={:.2f}%'
.format(mse, rmse, perc_loss))
if show_plots:
plotResults(y_actual, y_pred)
def train_anfis_with(model, data, optimizer, criterion,
epochs=500, show_plots=False):
'''
Train the given model using the given (x,y) data.
'''
errors = [] # Keep a list of these for plotting afterwards
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
print('### Training for {} epochs, training size = {} cases'.
format(epochs, data.dataset.tensors[0].shape[0]))
for t in range(epochs):
# Process each mini-batch in turn:
for x, y_actual in data:
y_pred = model(x)
# Compute and print loss
loss = criterion(y_pred, y_actual)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Epoch ending, so now fit the coefficients based on all data:
x, y_actual = data.dataset.tensors
with torch.no_grad():
model.fit_coeff(x, y_actual)
# Get the error rate for the whole batch:
y_pred = model(x)
mse, rmse, perc_loss = calc_error(y_pred, y_actual)
errors.append(perc_loss)
# Print some progress information as the net is trained:
if epochs < 30 or t % 10 == 0:
print('epoch {:4d}: MSE={:.5f}, RMSE={:.5f} ={:.2f}%'
.format(t, mse, rmse, perc_loss))
# End of training, so graph the results:
if show_plots:
plotErrors(errors)
y_actual = data.dataset.tensors[1]
y_pred = model(data.dataset.tensors[0])
plotResults(y_actual, y_pred)
def train_anfis(model, data, epochs=500, show_plots=False):
'''
Train the given model using the given (x,y) data.
'''
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.99)
criterion = torch.nn.MSELoss(reduction='sum')
train_anfis_with(model, data, optimizer, criterion, epochs, show_plots)
if __name__ == '__main__':
x = torch.arange(1, 100, dtype=dtype).unsqueeze(1)
y = torch.pow(x, 3)
model, errors = linear_model(x, y, 100)
plotErrors(errors)
plotResults(y, model(x))
78012c88511b9e73dce08afc9a5ffa17d68f4a4b | 9fb15a998915669f0503a85ce2c73e4a4d15e9f7 | /billing/integrations/pay_pal_integration.py | 959801e47da27beeb7c373a949cbf1774d5525aa | [
"BSD-3-Clause"
] | permissive | jimpurbrick/merchant | 5c563c96d2bc80665ce0ab446035135eb3cc0beb | b28936b2c7ea15c8347e55da625d4ba82695dac3 | refs/heads/master | 2021-01-17T05:58:10.225947 | 2012-03-26T20:44:50 | 2012-03-27T10:52:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py | from billing import Integration
from django.conf import settings
from paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
from django.conf.urls.defaults import patterns, include
from paypal.standard.ipn.signals import payment_was_flagged, payment_was_successful
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
class PayPalIntegration(Integration):
def __init__(self):
# Required Fields. Just a template for the user
self.fields = {"business": settings.PAYPAL_RECEIVER_EMAIL,
"item_name": "",
"invoice": "",
"notify_url": "",
"return_url": "",
"cancel_return": "",
"amount": 0,
}
@property
def service_url(self):
if self.test_mode:
return SANDBOX_POSTBACK_ENDPOINT
return POSTBACK_ENDPOINT
def get_urls(self):
urlpatterns = patterns('',
(r'^', include('paypal.standard.ipn.urls')),
)
return urlpatterns
def unsuccessful_txn_handler(sender, **kwargs):
transaction_was_unsuccessful.send(sender=sender.__class__,
type="purchase",
response=sender)
def successful_txn_handler(sender, **kwargs):
transaction_was_successful.send(sender=sender.__class__,
type="purchase",
response=sender)
payment_was_flagged.connect(unsuccessful_txn_handler)
payment_was_successful.connect(successful_txn_handler)
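# A hedged usage sketch (the field values below are illustrative placeholders,
# not taken from this module):
#
#     integration = PayPalIntegration()
#     integration.fields.update({
#         "item_name": "Monthly subscription",
#         "invoice": "0001",
#         "amount": 10,
#     })
#     # integration.service_url now points at the live or sandbox endpoint,
#     # depending on integration.test_mode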
# === 18rahul/Notebook :: /notebook_app/views.py ===
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as auth_logout
from django.shortcuts import render, redirect
from notebook_app.forms import CreateNoteForm, CreateTodoForm, EditTodoForm, EditNoteForm, UserRegistrationForm
from notebook_app.models import Todo, Note
def create_note(request):
if request.user.is_authenticated:
if request.method == "POST":
form = CreateNoteForm(request.POST)
if form.is_valid():
note = Note(user=request.user, heading=form.cleaned_data['heading'],
content=form.cleaned_data['content'])
note.save()
print('note created')
return redirect('notes-page')
else:
form = CreateNoteForm()
context = {'form': form}
return render(request, 'create_notes.html', context)
else:
return redirect('login')
def notes_page(request):
if request.user.is_authenticated:
notes = Note.objects.filter(user=request.user)
context = {
'notes': notes
}
return render(request, 'notes.html', context)
else:
return redirect('login')
def note_detail(request, pk):
if request.user.is_authenticated:
note = Note.objects.get(id=pk)
context = {'note': note}
return render(request, 'note_detail.html', context)
else:
return redirect('login')
def delete_note(request, pk):
if request.user.is_authenticated:
note = Note.objects.get(id=pk)
note.delete()
return redirect('notes-page')
else:
return redirect('login')
def update_note(request, pk):
if request.user.is_authenticated:
note = Note.objects.get(id=pk)
if request.method == "POST":
form = EditNoteForm(request.POST, instance=note)
if form.is_valid():
note.user = request.user
note.heading = form.cleaned_data['heading']
note.content = form.cleaned_data['content']
note.save()
return redirect('notes-page')
else:
form = EditNoteForm(instance=note)
context = {"form": form}
return render(request, 'update_note.html', context)
else:
return redirect('login')
def todos_page(request):
if request.user.is_authenticated:
todos = Todo.objects.filter(user=request.user)
context = {
'todos': todos,
}
return render(request, 'todos.html', context)
else:
return redirect('login')
def create_todo(request):
if request.user.is_authenticated:
if request.method == "POST":
form = CreateTodoForm(request.POST)
if form.is_valid():
todo = Todo(user=request.user, content=form.cleaned_data['content'])
todo.save()
print('todo created')
return redirect('/todos/')
else:
form = CreateTodoForm()
context = {'form': form}
return render(request, 'create_notes.html', context)
else:
return redirect('login')
def update_todo(request, pk):
if request.user.is_authenticated:
todo = Todo.objects.get(id=pk)
if request.method == 'POST':
form = EditTodoForm(request.POST, instance=todo)
if form.is_valid():
todo.user = request.user
todo.content = form.cleaned_data['content']
todo.save()
return redirect('/todos/')
else:
form = EditTodoForm(instance=todo)
context = {'form': form}
else:
return redirect('login')
return render(request, 'update_todo.html', context)
def delete_todo(request, pk):
if request.user.is_authenticated:
todo = Todo.objects.get(id=pk)
todo.delete()
return redirect('/todos/')
else:
return redirect('login')
def registration(request):
if request.method == "POST":
form = UserRegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect('login')
else:
form = UserRegistrationForm()
context = {
'form': form
}
return render(request, 'registration_form.html', context)
def logout(request):
    # use Django's auth logout; the original called this view recursively
    auth_logout(request)
    return redirect('login')
# === language-allocation-enpc/prod_back_end :: /data_structs.py ===
import random as rd
numero = int(input("Introduza um numero inteiro: "))
if numero%3 == 0 and numero%5 == 0:
print("{} e simultaneamente divisivel por 3 e 5".format(numero))
else:
print("{} nao e simultaneamente divisivel por 3 e 5".format(numero)) | [
"[email protected]"
] | |
d12162bc15d57743ceaa186b06598df673f1ed5a | dc3d32ed1b3131d35d6ec715ae1091a682a4d623 | /Data-Driven-Microservices/main/ClientRead/linereader.py | 192984a650e62f1d3551eab59c9e9805e72c569a | [] | no_license | Michael-O-Keeffe/Public-Code | cc1d2b365212a1fc5c5e3dfc5f71e64d8cc5997f | f3935237ebb461f10f0f5b5a31afd753e9266b6b | refs/heads/master | 2021-09-23T07:28:50.085185 | 2021-09-22T12:20:04 | 2021-09-22T12:20:04 | 208,252,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | from random import randint
import time
import codecs
def get_lines():
# data = open("ClientRead/ToTheLighthouse.txt")
data_input_stream = codecs.open("ClientRead/ToTheLighthouse.txt", "r", encoding='utf-8')
file_lines = data_input_stream.readlines()
size_of_data = len(file_lines)
for i in range(size_of_data//20):
chunk = file_lines[0:20]
file_lines = file_lines[20:]
message = ""
for j in range(len(chunk)):
# Take chunks out of the lines
line = chunk[j]
#print(line)
# Construct the message string
message += line + " @ "
yield message
time.sleep(120)
data_input_stream.close()
#get_lines() | [
"[email protected]"
] | |
696444d70297bd368cc8acfa944e74db0258efd5 | 567c54ba9176581a5d5e1ae65212a6e87a604f0b | /wsgi/pico/pico_blog/cms_app.py | 1ca91c9a1bd5f04d8311a3c1f68506c5058b467a | [] | no_license | andrewidya/pico | e0641433e1e63ab865fe65924c32c687c75b8d83 | 4a0e8ff885601004aa92ba05d204e3fe6bd90731 | refs/heads/master | 2021-01-10T13:46:20.543152 | 2015-12-08T13:14:40 | 2015-12-08T13:14:40 | 45,040,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
from pico_blog.menu import CategoryMenu
class PicoBlogApp(CMSApp):
name = _("Blog Application")
urls = ['pico_blog.urls']
app_name = 'pico_blog'
menus = [CategoryMenu]
apphook_pool.register(PicoBlogApp) | [
"[email protected]"
] | |
25b2cee59912a6f76e74a5851cb644a1a4cf3a69 | bcff9ef2f71fc18175cce492763b0e54fcf2943a | /Testing/Exercises/vehicle/test/test_vehicle.py | 37a6460330f22d89bf185cd10671f9b92b10b061 | [
"MIT"
] | permissive | geodimitrov/Python-OOP-SoftUni | c077c03530c7b499d6e5401313e50f318062a061 | f1c6718c878b618b3ab3f174cd4d187bd178940b | refs/heads/main | 2023-04-08T12:55:34.108490 | 2021-04-12T09:00:51 | 2021-04-12T09:00:51 | 342,348,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | import unittest
from project.vehicle import Vehicle
class VehicleTests(unittest.TestCase):
def setUp(self):
self.vehicle = Vehicle(50, 100)
def test_vehicle_init__expect_initialization(self):
self.assertEqual(50, self.vehicle.fuel)
self.assertEqual(50, self.vehicle.capacity)
self.assertEqual(100, self.vehicle.horse_power)
self.assertEqual(1.25, self.vehicle.fuel_consumption)
def test_vehicle_drive__if_not_enough_fuel__expect_exception(self):
with self.assertRaises(Exception) as ex:
self.vehicle.drive(100)
self.assertEqual("Not enough fuel", str(ex.exception))
def test_vehicle_drive__if_enough_fuel__execute(self):
self.vehicle.drive(20)
self.assertEqual(25, self.vehicle.fuel)
def test_vehicle_refuel__if_over_capacity__expect_exception(self):
with self.assertRaises(Exception) as ex:
self.vehicle.refuel(25)
self.assertEqual("Too much fuel", str(ex.exception))
def test_vehicle_refuel__if_within_capacity__execute(self):
self.vehicle.drive(20)
self.vehicle.refuel(10)
self.assertEqual(35, self.vehicle.fuel)
def test_vehicle_str__to_return_str_message(self):
expected_msg = f"The vehicle has {self.vehicle.horse_power} " \
f"horse power with {self.vehicle.fuel} fuel left and {self.vehicle.fuel_consumption} fuel consumption"
actual_msg = self.vehicle.__str__()
self.assertEqual(expected_msg, actual_msg)
def test_vehicle_cls_attr__expect_correct_values(self):
self.assertEqual(1.25, self.vehicle.DEFAULT_FUEL_CONSUMPTION)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
c310969b920dc12a8b95c596e49bfff4f00bb239 | 326cdb197d3df311d189642a51a575fb4d7fbe38 | /directioncounter.py | 22f105988f4209dec096df05e25af1ecd17b1092 | [] | no_license | sarwes/python-project-2019-2020- | 6bf7ec0564556c3a9c5533b3a5d0508b6f4f2f36 | 34791465cb4160d1991db3891174ce1b5b6b13d0 | refs/heads/master | 2023-01-28T11:44:18.831441 | 2020-12-08T08:07:55 | 2020-12-08T08:07:55 | 254,576,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,358 | py | import numpy as np
class DirectionCounter:
def __init__(self, directionMode, X, Y):
self.X = X
self.Y = Y
self.directionMode = directionMode
self.totalUp = 0
self.totalDown = 0
self.totalRight = 0
self.totalLeft = 0
self.direction = ""
def find_direction(self, to, centroid):
if self.directionMode == "horizontal":
x = [c[0] for c in to.centroids]
delta = centroid[0] - np.mean(x)
if delta < 0:
self.direction = "left"
elif delta > 0:
self.direction = "right"
elif self.directionMode == "vertical":
y = [c[1] for c in to.centroids]
delta = centroid[1] - np.mean(y)
if delta < 0:
self.direction = "up"
elif delta > 0:
self.direction = "down"
def count_object(self, to, centroid):
output = []
if self.directionMode == "horizontal":
leftOfCenter = centroid[0] < self.X
if self.direction == "left" and leftOfCenter:
self.totalLeft += 1
to.counted = True
elif self.direction == "right" and not leftOfCenter:
self.totalRight += 1
to.counted = True
output = [("Left", self.totalLeft),
("Right",self.totalRight)]
elif self.directionMode == "vertical":
aboveMiddle = centroid[1] < self.Y
if self.direction == "up" and aboveMiddle:
self.totalUp += 1
to.counted = True
elif self.direction == "down" and not aboveMiddle:
self.totalDown += 1
to.counted = True
output = [("Count", self.totalDown)]
return output
| [
"[email protected]"
] | |
c493d0af0665ae3e5afafb9518ef0d50a111c57f | dc6fec9f205f264943c504f72ab9db0a837ac6bd | /generate_sentences_from_templates.py | cb6daad498ee4d262773ed6464a14391f1f1c3c8 | [] | no_license | jagol/XWEAT | b7536bc93042d3f3731ecea9138b3f8c209ecefc | d0c19aae337f0b952b7f0bd67aa41a0ddcf1f01f | refs/heads/master | 2022-12-08T10:51:28.093370 | 2020-08-27T15:25:19 | 2020-08-27T15:25:19 | 273,503,535 | 0 | 0 | null | 2020-06-19T13:42:18 | 2020-06-19T13:42:18 | null | UTF-8 | Python | false | false | 1,088 | py | import os
def generate_cmds_names(data_dir, exec_dir):
cmds = []
os.chdir(data_dir)
name_files = [fn for fn in os.listdir() if fn.endswith('_names.txt')]
os.chdir(exec_dir)
name_file_dict = {nf: [] for nf in name_files}
for nf in name_file_dict:
fn = nf[:-4]
sent_fn = fn + '_sentences'
sent_fnf = sent_fn + '_female'
sent_fnm = sent_fn + '_male'
name_file_dict[nf] = [fname + '.txt' for fname in [sent_fn, sent_fnf, sent_fnm]]
for nf in name_file_dict:
for sent_fn, ttype in zip(name_file_dict[nf], ['name', 'fname', 'mname']):
cmds.append(f'python3 templates.py -i {data_dir}{nf} -o {data_dir}{sent_fn} -t {ttype}')
return cmds
def main():
data_dir = '/mnt/storage/harlie/users/jgoldz/bias_germ_embeddings/data/word_lists/'
exec_dir = '/home/user/jgoldz/bias/xweat'
cmds = generate_cmds_names(data_dir, exec_dir)
num_cmds = len(cmds)
for i, cmd in enumerate(cmds):
print(f'{i+1}/{num_cmds}: {cmd}')
os.system(cmd)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a59e76a8402c6f58836ecd05bd6ac2f1c90e2336 | d560de760fb101b1221e2ef55f21f61a0fa1ee82 | /lesson6/loop.py | 433c02ffea3477181b5fbd6780132e9db06465f3 | [] | no_license | hamidch97/itstep | e1bb9f2fe6eb4cea67c6b09ff506434206dc9f4c | 65206e87b35eb5bceedeb874b2f879dd969fe2c4 | refs/heads/master | 2023-05-11T03:08:03.460110 | 2021-06-01T21:26:58 | 2021-06-01T21:26:58 | 334,538,174 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | for i in range(1, 200):
if i % 5 == 0:
print(i)
if i == 115:
break
| [
"[email protected]"
] | |
8b64ca1a519d1c6df6c658821fcdb4173750b95f | f89812f0c9c8393bb9ab1d807b4a6052105841c7 | /myTool/findCheckTool.py | 045b14b1e92fc6cceef4b85241d49e91ac0d2521 | [] | no_license | forestdan/cyGetData | 58ae70027d7739b97af0ce91d36a680ad1cfd885 | 6f5cdb3d72f85edceb26a3028ca57c7e62889bfe | refs/heads/master | 2020-12-27T15:51:14.455557 | 2020-11-06T07:03:21 | 2020-11-06T07:03:21 | 237,958,508 | 0 | 1 | null | 2020-11-06T07:03:22 | 2020-02-03T12:21:12 | Python | UTF-8 | Python | false | false | 262 | py | # -*- coding: utf-8 -*-
import re
def checkPatternList(pattern, str):
return re.compile(pattern).findall(str)
def findPattern(pattern, str):
textList = checkPatternList(pattern, str)
if len(textList) == 0:
return None
return textList[0] | [
"[email protected]"
] | |
e2207eb632375240c9d6fe38f28ae9443f3d0d78 | ea989ff714319408be9b9039199481c45e722dd6 | /RMRB raw/generate the correct count for 2007_2016.py | 0e393ed226e902705bfb989e87310d330b2f2393 | [
"MIT"
] | permissive | DingqianL/China-Economic-Policy-Uncertainty | b8c7219bcc2243c4bda735227c2529a9a391949c | aa7305226028db60fb7fcb36b47f262e53e1deb5 | refs/heads/master | 2023-07-11T12:02:35.638502 | 2023-07-01T15:00:45 | 2023-07-01T15:00:45 | 273,489,634 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 08:48:50 2021
@author: dingq
"""
import os
import pandas as pd
path = r'C:\Users\dingq\Documents\200_academic\220_Academic_My dissertation\225_data_txt\RMRB_compelete\2007_2016'
os.chdir(path)
files = os.listdir(path)
files = [f for f in files if f.endswith('.xlsx')]
counts_dup = []
counts_clean = []
dates = []
for i in files:
date0 = i[6:14]
dates.append(date0)
df = pd.read_excel(i)
df = df[['title']]
counts_dup.append(len(df))
df = df.drop_duplicates()
counts_clean.append(len(df))
counts_2007_2016 = pd.DataFrame(data = {'date':dates, 'dup_count':counts_dup, 'count': counts_clean})
counts_2007_2016['date'] = pd.to_datetime(counts_2007_2016['date'])
counts_2007_2016['year'] = counts_2007_2016['date'].dt.year
counts_2007_2016['month'] = counts_2007_2016['date'].dt.month
counts_2007_2016['day'] = counts_2007_2016['date'].dt.day
'''
## to check if I downloaded all the counts for each day
check = counts_2007_2016.groupby(['year', 'month']).count()
check.to_csv(r'check counts.csv')
'''
counts_2007_2016.to_csv(r'counts_2007_2016.csv', index = False)
| [
"[email protected]"
] | |
e86b7178023c984c678439147625a2ed9059ccd6 | cdbe9863537de099b9aaa0ab93bc86d153aaf8ee | /modules/config.py | c86d21a1b502123be0323cba37a7dc22cc2a8fd5 | [] | no_license | sina-yeganeh/kid-keeper | fdaa010d5cd51754553871ccae37d7d8e9467bce | 2df065b623cb7ba75b7c6ade678fa02d170a8303 | refs/heads/main | 2023-06-12T13:44:16.822928 | 2021-07-14T17:01:41 | 2021-07-14T17:01:41 | 358,829,646 | 10 | 1 | null | 2021-07-14T17:01:42 | 2021-04-17T08:51:20 | Python | UTF-8 | Python | false | false | 154 | py | import json
def load_config_data(config_file_path: str):
with open(config_file_path, 'r') as config_file:
return json.load(config_file_path)
| [
"[email protected]"
] | |
f701bd5a7d50e7129fc3a405947514721a873363 | 1b38fb5610ce6c4d2c99dcb0ba5a07fd22db8e79 | /large_margin_softmax.py | 08a4a2f0005a458074747229bbd21eafe152184c | [
"MIT"
] | permissive | doublefish20170305/pytorch-loss | 4189f842632fb5d82541d704755c287f69084d0a | a981ceeb0aeef6aab3ec72e5518acbb875a2b6f1 | refs/heads/master | 2022-11-17T10:53:42.108472 | 2020-07-12T03:10:45 | 2020-07-12T03:10:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,969 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
proposed in the BMVC2019 paper: [Large Margin in Softmax Cross-Entropy Loss
link to paper](https://staff.aist.go.jp/takumi.kobayashi/publication/2019/BMVC2019.pdf)
'''
##
# version 1: use torch.autograd
class LargeMarginSoftmaxV1(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV1, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
self.ce_crit = nn.CrossEntropyLoss(
reduction='none', ignore_index=ignore_index)
def forward(self, logits, label):
'''
args: logits: tensor of shape (N, C, H, W, ...)
args: label: tensor of shape(N, H, W, ...)
'''
# overcome ignored label
logits = logits.float()
logits.retain_grad()
logits.register_hook(lambda grad: grad)
with torch.no_grad():
num_classes = logits.size(1)
coeff = 1. / (num_classes - 1.)
lb = label.clone().detach()
mask = label == self.ignore_index
lb[mask] = 0
idx = torch.zeros_like(logits).scatter_(1, lb.unsqueeze(1), 1.)
lgts = logits - idx * 1.e6
q = lgts.softmax(dim=1)
q = q * (1. - idx)
log_q = lgts.log_softmax(dim=1)
log_q = log_q * (1. - idx)
mg_loss = ((q - coeff) * log_q) * (self.lam / 2)
mg_loss = mg_loss * (1. - idx)
mg_loss = mg_loss.sum(dim=1)
ce_loss = self.ce_crit(logits, label)
loss = ce_loss + mg_loss
loss = loss[mask == 0]
if self.reduction == 'mean':
loss = loss.mean()
if self.reduction == 'sum':
loss = loss.sum()
return loss
##
# version 2: user derived grad computation
class LargeMarginSoftmaxV2(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV2, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
def forward(self, logits, labels):
'''
args: logits: tensor of shape (N, C, H, W, ...)
args: label: tensor of shape(N, H, W, ...)
'''
logits = logits.float()
mask = labels == self.ignore_index
lb = labels.clone().detach()
lb[mask] = 0
loss = LargeMarginSoftmaxFuncV2.apply(logits, lb, self.lam)
loss = loss[mask == 0]
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
class LargeMarginSoftmaxFuncV2(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, lam=0.3):
num_classes = logits.size(1)
coeff = 1. / (num_classes - 1.)
idx = torch.zeros_like(logits).scatter_(1, labels.unsqueeze(1), 1.)
lgts = logits.clone()
lgts[idx.bool()] = -1.e6
q = lgts.softmax(dim=1)
log_q = lgts.log_softmax(dim=1)
losses = q.sub_(coeff).mul_(log_q).mul_(lam / 2.)
losses[idx.bool()] = 0
losses = losses.sum(dim=1).add_(F.cross_entropy(logits, labels, reduction='none'))
ctx.variables = logits, labels, idx, coeff, lam
return losses
@staticmethod
def backward(ctx, grad_output):
'''
compute gradient
'''
logits, labels, idx, coeff, lam = ctx.variables
num_classes = logits.size(1)
p = logits.softmax(dim=1)
lgts = logits.clone()
lgts[idx.bool()] = -1.e6
q = lgts.softmax(dim=1)
qx = q * lgts
qx[idx.bool()] = 0
grad = qx + q - q * qx.sum(dim=1).unsqueeze(1) - coeff
grad = grad * lam / 2.
grad[idx.bool()] = -1
grad = grad + p
grad.mul_(grad_output.unsqueeze(1))
return grad, None, None
#
# version 3: implement wit cpp/cuda to save memory and accelerate
class LargeMarginSoftmaxV3(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV3, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
def forward(self, logits, labels):
'''
args: logits: tensor of shape (N, C, H, W, ...)
args: label: tensor of shape(N, H, W, ...)
'''
logits = logits.float()
losses = LargeMarginSoftmaxFuncV3.apply(
logits, labels, self.lam, self.ignore_index)
if self.reduction == 'mean':
n_valid = (labels != self.ignore_index).sum()
losses = losses.sum() / n_valid
elif self.reduction == 'sum':
losses = losses.sum()
return losses
import large_margin_cpp
class LargeMarginSoftmaxFuncV3(torch.autograd.Function):
'''
use cpp/cuda to accelerate and shrink memory usage
'''
@staticmethod
def forward(ctx, logits, labels, lam=0.3, ignore_index=255):
losses = large_margin_cpp.l_margin_forward(logits, labels, lam, ignore_index)
ctx.variables = logits, labels, lam, ignore_index
return losses
@staticmethod
def backward(ctx, grad_output):
'''
compute gradient
'''
logits, labels, lam, ignore_index = ctx.variables
grads = large_margin_cpp.l_margin_backward(
logits, labels, lam, ignore_index)
grads.mul_(grad_output.unsqueeze(1))
return grads, None, None, None
if __name__ == '__main__':
import torchvision
import torch
import numpy as np
import random
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
net = torchvision.models.resnet18(pretrained=False)
self.conv1 = net.conv1
self.bn1 = net.bn1
self.maxpool = net.maxpool
self.relu = net.relu
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
self.out = nn.Conv2d(512, 3, 3, 1, 1)
def forward(self, x):
feat1 = self.conv1(x)
feat2 = self.bn1(feat1)
feat3 = self.relu(feat2)
# feat4 = self.maxpool(feat3)
feat5 = self.layer1(feat3)
feat6 = self.layer2(feat5)
feat7 = self.layer3(feat6)
feat8 = self.layer4(feat7)
feat9 = self.out(feat8)
out = feat9
feat8.retain_grad()
feat8.register_hook(lambda grad: grad*100000)
return out, feat8
net1 = Model()
net2 = Model()
from copy import deepcopy
net2.load_state_dict(deepcopy(net1.state_dict()))
# criteria1 = nn.CrossEntropyLoss(reduction='mean')
# criteria2 = nn.CrossEntropyLoss(reduction='mean')
criteria1 = LargeMarginSoftmaxV1(reduction='mean')
criteria2 = LargeMarginSoftmaxV3(reduction='mean')
net1.cuda()
net2.cuda()
net1.train()
net2.train()
criteria1.cuda()
criteria2.cuda()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs = 32
for it in range(1000):
inten = torch.randn(bs, 3, 256, 256).cuda()
lbs = torch.randint(0, 3, (bs, 16, 16)).cuda()
lbs[16:, :, :10] = 255
# s = lbs.cpu().detach().numpy()
# np.save('../lb.npy', s)
logits, feat = net1(inten.clone())
loss1 = criteria1(logits, lbs.clone())#.div(bs * 8 * 8)
optim1.zero_grad()
loss1.backward()
optim1.step()
# s = logits.cpu().detach().numpy()
# np.save('../logitsv2.npy', s)
logits, feat = net2(inten.clone())
loss2 = criteria2(logits, lbs.clone())#.div(bs * 8 * 8)
optim2.zero_grad()
loss2.backward()
optim2.step()
# s = logits.cpu().detach().numpy()
# np.save('../logitsv3.npy', s)
# print('net2.weight: ', net2.out.weight[0, 0, :, 0])
# net2.load_state_dict(net1.state_dict())
with torch.no_grad():
if (it+1) % 50 == 0:
# if True:
# print(loss1.item())
# print(loss2.item())
# break
print('iter: {}, ================='.format(it+1))
print('out.weight: ', torch.mean(torch.abs(net1.out.weight - net2.out.weight)).item())
print('conv1.weight: ', torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
# print(net1.out.weight.mean().item())
# print(net2.out.weight.mean().item())
print('\nloss: ', loss1.item() - loss2.item())
# === proycon/wikiente :: /setup.py ===
#! /usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import print_function
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname),'r',encoding='utf-8').read()
setup(
name = "WikiEnte",
version = "0.2.1", #also edit in __init__.py
author = "Maarten van Gompel",
author_email = "[email protected]",
description = ("Entity extraction using DBPedia through spotlight"),
license = "GPL",
keywords = "nlp computational_linguistics entities wikipedia dbpedia linguistics",
url = "https://github.com/proycon/wikiente",
packages=['wikiente'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Text Processing :: Linguistic",
"Programming Language :: Python :: 3",
"Operating System :: POSIX",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
],
zip_safe=False,
include_package_data=True,
#package_data = { 'babelente': ['babelente.config.yml'] },
install_requires=[ 'pyspotlight','folia >= 2.1.3'],
entry_points = { 'console_scripts': [ 'wikiente = wikiente.wikiente:main' ] }
)
# === stephen-bartell/py-find-home :: /src/test/test_visit.py ===
from unittest import TestCase
from src import Visit
import datetime
class VisitTestCase(TestCase):
def test_full_window(self):
twelve_hours = 43200
visit = Visit(None, None, '3/17/2017 20:00:00', '3/18/2017 08:00:00')
self.assertEqual(visit.get_applicable_duration(), twelve_hours)
def test_beginning_window_edge(self):
one_second = 1
visit = Visit(None, None, '3/17/2017 12:00:00', '3/17/2017 20:00:01')
self.assertEqual(visit.get_applicable_duration(), one_second)
def test_before_beginning_window(self):
no_seconds = 0
visit = Visit(None, None, '3/18/2017 18:00:00', '3/18/2017 20:00:00')
self.assertEqual(visit.get_applicable_duration(), no_seconds)
def test_end_window_edge(self):
no_seconds = 0
visit = Visit(None, None, '3/18/2017 8:00:00', '3/18/2017 12:00:00')
self.assertEqual(visit.get_applicable_duration(), no_seconds)
def test_before_end_window_edge(self):
one_second = 1
visit = Visit(None, None, '3/18/2017 7:59:59', '3/18/2017 12:00:00')
self.assertEqual(visit.get_applicable_duration(), one_second)
def test_partial_beginning_window(self):
one_hour = 3600
visit = Visit(None, None, '3/18/2017 18:00:00', '3/18/2017 21:00:01')
self.assertEqual(visit.get_applicable_duration(), one_hour)
def test_partial_end_window(self):
three_hours = 10800
visit = Visit(None, None, '3/18/2017 5:00:00', '3/18/2017 10:00:00')
self.assertEqual(visit.get_applicable_duration(), three_hours)
def test_parse_timestring(self):
visit = Visit(None, None, '3/17/2017 20:42:43', '3/17/2017 20:42:43')
self.assertEqual(
visit._parse_time('3/17/2017 20:42:43'),
datetime.datetime(2017, 3, 17, 20, 42, 43)
)
# === ChrisY0910/Python-Proj-by-Hackermans :: /main.py ===
# Game Menu
import time
import random
import math
import turtle
t = turtle.Screen()
t.bgcolor('black')
turtle.speed(0)
for i in range(20):
    for colour in ['red', 'cyan', 'green', 'turquoise']:  # fixed typo: "colurs"
        turtle.color(colour)
turtle.circle(100)
turtle.left(10)
print("Welcome to Hackerman's Minigames!\n")
print("1: Dice Game") # Harrison Ng
print("2: Coin Flip Game") # Chris Yang
print("3: Math Problem Game") # Partho Nath
print("4: Guessing The Number Game") # Harrison Ng
gamegamegame = int(input("What game would you like to play? Pick 1/2/3/4: "))
if gamegamegame == 1:
print("Welcome to Roll of the Dice!\n")
print("Rules: ")
print("Guess the number on the dice between 1-6.")
print("If you guess correctly, you get 6 points.")
print("If you get it wrong, you lose 1 point.\n")
rounds = input("How many rolls do you want to play?")
print("You will be playing " + rounds + " rounds.")
rollcounter = 0 # number of rounds
pointscore = 0 # score counter
print("Round: " + str(rollcounter))
while str(rounds) != str(rollcounter): # while the game is not over
random_number = random.randint(1,6) # random number generator between 1-6
user_guess = int(input("Guess a number between 1-6: "))
if(user_guess == random_number):
print("You guessed correctly which was: " + str(random_number))
pointscore += 6
print("You now have " + str(pointscore) + " points.")
command = input("Continue? y/n ")
if command == 'y':
rollcounter += 1
print("Round: " + str(rollcounter))
else: # user doesn't want to continue playing
break
else:
print("You guessed " + str(user_guess) + " which is incorrect!")
print("The number was: " + str(random_number))
pointscore -= 1
print("You now have " + str(pointscore) + " points.")
command = input("Continue? y/n ")
if command == 'y':
rollcounter += 1
print("Round: " + str(rollcounter))
else: # user doesn't want to continue playing
break
print("You played " + str(rollcounter) + " rounds.")
print("Your total score is: " + str(pointscore) + " points.")
print("Thanks for playing!")
elif gamegamegame == 2:
#Coin Game By Chris Yang
def coinGame():
print("Instructions Guess which side the coin would fall on! You can have one try!")
score = 0
coin = ["Heads" , "Tails"]
toss = random.choice(coin)
choice = input("Heads or Tails? : " + "Enter as Heads/Tails: ")
if choice == toss:
print("You Win! The coin landed on " + toss)
score += 1
print("Your score is " + str(score))
else:
print("You Lose! The coin landed on " + toss)
print("Your score is " + str(score))
coinGame()
elif gamegamegame == 3:
#The math game by Partho Nath
#Choice variable decides whether to restart the program or not
choic = 4
while choic == 4:
#Operation Menu
print("Operations to choose : (write in words) ")
print("1. Addition")
print("2. Subtraction")
print("3. Multiplication")
print("4. Division \n")
#taking operation from user input
op = input("What operation do you want?\n")
print("")
#Difficulty level menu
print("Difficulty levels to choose :")
print("Level 1 being 1 Digit problems")
print("Level 2 being 2 digit problems")
print("Level 3 being 3 digit problems")
print("Level 4 being 4 digit problems")
print("Level 5 being 5 digit problems")
print("Level 6 being 6 digit problems")
print("Level 7 being 7 digit problems")
print("Level 8 being 8 digit problems \n")
#Taking difficulty level from user
dlvl = int(input("Please Choose difficulty between the 8 levels ?\n"))
print("")
#These variables are used to store random numbers
r1 = 0
r2 = 0
#Correct variable stores the number of correct answers given by user
correct = 0
#Starting the timer
t0= time.perf_counter()
#For loop to give user 5 questions
for i in range(0,5):
#Generating random numbers
if dlvl == 1:
r1 = random.randint(1,9)
r2 = random.randint(1,9)
elif dlvl == 2:
r1 = random.randint(10,99)
r2 = random.randint(10,99)
elif dlvl == 3:
r1 = random.randint(100,999)
r2 = random.randint(100,999)
elif dlvl == 4:
r1 = random.randint(1000,9999)
r2 = random.randint(1000,9999)
elif dlvl == 5:
r1 = random.randint(10000, 99999)
r2 = random.randint(10000, 99999)
elif dlvl == 6:
r1 = random.randint(100000, 999999)
r2 = random.randint(100000, 999999)
elif dlvl == 7:
r1 = random.randint(1000000, 9999999)
r2 = random.randint(1000000, 9999999)
elif dlvl == 8:
r1 = random.randint(10000000, 99999999)
r2 = random.randint(10000000, 99999999)
#Depending on chosen operation, asking user appropriate questions
#based on previously generated random variables.
if op == "Addition":
print(str(r1) + " + " + str(r2))
answer = int(input())
if answer == (r1+r2):
correct = correct + 1
elif op == "Subtraction":
print(str(r1) + " - " + str(r2))
answer = int(input())
if answer == (r1-r2):
correct = correct + 1
elif op == "Multiplication":
print(str(r1) + " * " + str(r2))
answer = int(input())
if answer == (r1*r2):
correct = correct + 1
elif op == "Division":
                print(str(r1) + " / " + str(r2))  # fixed: `wtr` was a typo for `str`
                answer = int(input())
                # an int answer will almost never equal the float quotient exactly,
                # so compare against the rounded quotient
                if answer == round(r1 / r2):
                    correct = correct + 1
#Counting time required to answer these questions
t1 = time.perf_counter() - t0
#printing the result and time taken
print("Congratulations you have completed "+str(correct)+" right answers. Within " + str(round(t1,2))+ " Seconds.")
elif gamegamegame == 4:
# introduction and instructions
print("Welcome to the Guessing Number game!")
print("You will be chosen an option for the diffculty of the game.")
print("Try guessing the correct number, it will either tell you the number is too high or too low.")
print("Once you got it, you will be displayed the amount of attempts it took to do so.\n")
# diffculty selection
print("Easy(1): 1 to 100")
print("Medium(2): 1 to 1000")
print("Hard(3): 1 to 10000")
def guess_check(number_range):
guesscounter = 1
guess = int(input("Enter a number between " + number_range + ": "))
while(guess != random_number):
if(guess > random_number):
print(str(guess) + " is too high!")
guess = int(input("Enter a number between " + number_range + ": "))
guesscounter += 1
elif(guess < random_number):
print(str(guess) + " is too low!")
guess = int(input("Enter a number between " + number_range + ": "))
guesscounter += 1
if(guess == random_number):
print("You guessed the right number!")
print("The number is: " + str(random_number) + "!")
print("It took " + str(guesscounter) + " attempts!")
        difficulty = int(input("What difficulty do you choose? Enter 1/2/3: "))
        if difficulty == 1:
            random_number = (random.randint(1, 100)) # generates random num
            guess_check("1-100")
        elif difficulty == 2:
            random_number = (random.randint(1, 1000))
            guess_check("1-1000")
        elif difficulty == 3:
random_number = (random.randint(1, 10000))
guess_check("1-10000") | [
"[email protected]"
] | |
11c8dee9d67029a97d8822180a40112a671a3477 | 1fcaed157c853449172423e91faa3e4afb669bd4 | /Parliament.py | a7cc836abc81bc7bfe885dab7ebf49937367ad93 | [] | no_license | hplisiecki/Parliament | a4ccf53f1acd90c8104eb00903a105b55261676e | 316c7a43c5ec315d55750a7888298c67b605421a | refs/heads/main | 2023-03-14T08:17:24.607531 | 2021-03-04T18:39:14 | 2021-03-04T18:39:14 | 330,198,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,901 | py | from selenium import webdriver
from bs4 import BeautifulSoup
import re
import os
import requests
import sys
import pdfminer
import urllib
import fitz
from io import StringIO
from io import BytesIO
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter, XMLConverter, HTMLConverter
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from lxml import etree as et
from tqdm import tqdm
import pickle
import textract
import PyPDF2
from tika import parser
import validators
"""
First initialize some useful functions
natural_keys; atoi - functions that the class will use to sort statements
review - takes a list of statements and allows you to inspect them one by one.
Pass the word 'end' to kill this function.
"""
def atoi(text): # Utility
return int(text) if text.isdigit() else text
def natural_keys(text): # Utility
return [atoi(c) for c in re.split(r'(\d+)', text[0][11:16])]
def review(name):
curr = 0 # Variable that stores the number of the current utterance
while curr !='end':
curr = input() # Choose which utterance to inspect
        if curr == 'delete':  # delete an utterance by its index
            curr = input()
            del name[int(curr)]
continue
if curr == 'len': # See how many utterances are in the list
print(len(name))
continue
try:
print(name[int(curr)]) # Tries to print the utterance under the current number
except: # If the list is shorter than the called number - prints the list's length
print(f'Out of reach. The max is {len(name)-1}')
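# Example use once a statement list exists (see the bottom of this file):
#   review(sejm.orationes)   # then type an index, 'len', 'delete', or 'end'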
class Parliament:
def __init__(self,path):
"""
Opens the browser and accesses the government website
Initializes class variables
Sets up data folders in the directory with which the class was initialized
path = directory
"""
driver = webdriver.Chrome("/Users/hubertplisiecki/PycharmProjects/NLP/chromedriver") # loads the chromedriver (needed to run)
driver.get("https://www.sejm.gov.pl/sejm9.nsf/stenogramy.xsp") # loads the governmetnal website
self.path = path # the path that was passed into the class
self.orationes = [] # variable that will store statement (Orationes - parliament statements from latin)
self.osoby = [] # variable that will store all members of the parliament
content = driver.page_source # loads the contents of the website initialized earlier
self.soup = BeautifulSoup(content, features='html.parser') # converts the contents of the website to BeautifulSoup format
        self.number = 54  # number of proceedings (could be automated)
        self.finder = []  # a list that will store the names of the politicians whose statements are currently of interest
self.digits = re.compile('[0-9][0-9][0-9]') # Useful constants intiialized here
self.digitu = re.compile('[0-9][0-9]') # for more clarity later
self.digit = re.compile('[0-9]') #
try:
os.mkdir(path + 'parliament' + '/html')
os.mkdir(path + 'parliament' + '/txt')
os.mkdir(path + 'parliament' + '/pis')
os.mkdir(path + 'parliament' + '/ko')
os.mkdir(path + 'parliament' + '/psl') # Tries to create folders
os.mkdir(path + 'parliament' + '/lewica')
os.mkdir(path + 'parliament' + '/konfederacja')
os.mkdir(path + 'parliament' + '/mn')
os.mkdir(path + 'parliament' + '/rest')
os.mkdir(path + 'parliament' + '/savedata')
except: # Does not create folders if they were already created
pass
def checker(self,name):
"""
Used to find a name of a certain politician and fill the self.finder variable with it in order
to look up his statements later.
Takes the name of the politician (str) displays matching names and asks for input. If the input is 'y'
it appends the name to the self.finder variable.
name(str) = name of the politician
"""
for i in self.osoby: # Looks for a similar name in the parliament members list
if name in i[0]:
print(i) # Shows what it has found
if input()=='y': # Asks you for permission to add it to the finder
self.finder.append(i[0])
def convert_pdf_to_html(self):
"""
Converts the pdf that is currently stored in the temporary file inside the repository to an html object
"""
rsrcmgr = PDFResourceManager() # Magic (simply functional)
retstr = BytesIO()
codec = 'utf-8'
laparams = LAParams()
device = HTMLConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(self.path+'parliament/repository/temp.pdf', 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
interpreter.process_page(page)
fp.close()
device.close()
self.text = retstr.getvalue()
retstr.close()
def khkhm(self):
"""
Clears the lists containing the names and statements of the politicians.
"""
self.orationes = []
self.finder = []
def load(self):
"""
Downloads all proceeding files from the governmental website and stores them in specified folders.
(May take long to compute)
"""
title = 1
# Opens each pdf file present on the governmental website
for a in tqdm(self.soup.findAll('a', href=True, attrs={'class': 'pdf'})):
url = a.get('href')
urllib.request.urlretrieve(url, self.path+'parliament/repository/temp.pdf')
self.convert_pdf_to_html() # Converts them first to the html format
with open(self.path + f'parliament/html/{title}.pickle', 'wb') as f:
pickle.dump(self.text, f, pickle.HIGHEST_PROTOCOL) # and saves
            file_data = parser.from_file(self.path + 'parliament/repository/temp.pdf')
self.text = file_data['content'] # Then to the txt format
            with open(self.path + f'parliament/txt/{title}.txt', 'w') as f:
                f.write(self.text)  # and saves
title+=1
        self.number = title - 1  # title was incremented once past the last file
def orator(self):
"""
Loads all the statements made by the politician who's name is currently in the self.finder variable
"""
        rue = self.path + 'parliament/rest/' + self.finder[0] + '.pkl'  # Modifies the path to load from
if self.finder in self.pis:
rue = self.path + 'parliament/pis/' + self.finder[0] + '.pkl'
if self.finder in self.ko:
rue = self.path + 'parliament/ko/' + self.finder[0] + '.pkl'
if self.finder in self.psl:
rue = self.path + 'parliament/psl/' + self.finder[0] + '.pkl'
if self.finder in self.lewica:
rue = self.path + 'parliament/lewica/' + self.finder[0] + '.pkl'
if self.finder in self.konfederacja:
rue = self.path + 'parliament/konfederacja/' + self.finder[0] + '.pkl'
if self.finder in self.mn:
rue = self.path + 'parliament/mn/' + self.finder[0] + '.pkl'
with open(rue, 'rb') as f: # Loads
self.orationes = pickle.load(f)
def party(self, name):
"""
Searches for all the party member's statements and saves them in specific folders.
self.name = name of the party (pis / ko / psl / lewica / konfederacja / mn / rest)
"""
if name == 'pis':
party = self.pis
if name == 'ko':
party = self.ko # Checks which party was called
if name == 'psl':
party = self.psl
if name == 'lewica':
party = self.lewica
if name == 'konfederacja':
party = self.konfederacja
if name == 'mn':
party = self.mn
if name == 'rest':
party = [['Duda:'],['Marszałek:']]
self.khkhm() # Clears variables
count = 0
for i in tqdm(party): # Searches for the statements made by each of the
count+=1 # party members
self.finder = i
self.rostrum()
if len(self.orationes)>0: # Ignores the members who did not speak at all
ordered = self.sort() # Sorts the statements
self.save(ordered,i[0],name) # Saves the statements of each of the party
self.khkhm() # members
def rostrum(self):
"""
Uses the self.finder variable to find all the statements made by the politicians contained in it.
Makes a call to the voice() function.
"""
for i in tqdm(range(self.number)): # Opens each proceeding file
i+=1
            with open(self.path + f'parliament/html/{i}.pickle', 'rb') as f:
                self.html = pickle.load(f)
            with open(self.path + f'parliament/txt/{i}.txt', 'r') as f:
self.txt = f.read()
self.zupa = BeautifulSoup(self.html)
self.voices() # And looks for the statement made by the politician in each of them
self.orationes = list(dict.fromkeys(self.orationes)) # Deletes duplicates
def save(self,object,title,dir):
"""
Saves an object in a certain subfolder.
object = object (list, string, whatever)
title = the title of the created file
dir = subdirectory; usually one of these: pis / ko / lewica / psl / konfederacja / mn / repository
"""
with open(self.path + f'parliament/{dir}/' + f'{title}.pkl', 'wb') as f:
pickle.dump(object, f)
def set(self):
"""
Loads the lists of parliament members.
Should be called everytime just after the class is initialized (unless the variables have not been procured yet)
self.osoby = all members
self.pis = members elected from the PiS elective lists
self.ko = members elected from the KO elective lists
self.psl = members elected from the PSL elective lists
self.lewica = members elected from the elective lists of "Lewica"
self.konfederacja = members elected from the elective lists of "Konfederacja"
self.MN = members elected from the elective list of the German minority
"""
with open(self.path + 'parliament/repository/os.pkl', 'rb') as f: # Just loads variables
self.osoby = pickle.load(f)
with open(self.path + 'parliament/repository/pis.pkl', 'rb') as f:
self.pis = pickle.load(f)
with open(self.path + 'parliament/repository/ko.pkl', 'rb') as f:
self.ko = pickle.load(f)
with open(self.path + 'parliament/repository/psl.pkl', 'rb') as f:
self.psl = pickle.load(f)
with open(self.path + 'parliament/repository/lewica.pkl', 'rb') as f:
self.lewica = pickle.load(f)
with open(self.path + 'parliament/repository/konfederacja.pkl', 'rb') as f:
self.konfederacja = pickle.load(f)
with open(self.path + 'parliament/repository/mn.pkl', 'rb') as f:
self.mn = pickle.load(f)
def sort(self):
"""
Sorts the content of the self.orationes variable and outputs statements sorted by date and time.
"""
self.orationes.reverse() # First reverses the order of the statement list
split = [i.splitlines()[0] for i in self.orationes] # Creates an alternative list which contains only
count = 0 # the statement dates
for i in range(len(split[:])): #
split[i] = str(i) + ' ' + split[i] # Numbers the dates so that they can be later used
count += 1 # to order the original list
curr = split[0][-20:]
full = []
temp = []
for i in split: # Creates an embedded list where the statements
if i[-20:] != curr: # that were uttered on the same day are grouped
curr = i[-20:] # together
full.insert(0, temp)
temp = [i]
else:
temp.append(i)
full.insert(0, temp)
first = [['stycznia'], ['lutego'], ['marca'], ['kwietnia'], ['maja'], ['czerwca'], ['lipca'], ['sierpnia'],
['września'], ['października'], ['listopada'], ['grudnia']]
for i in full:
stop = False # Embedds the list one more time, this time
for j in range(len(first)): # storing the lists of utterances made in the same
if first[j][0] in i[0]: # month together.
first[j].append(i)
stop = True
if stop == True:
break
for i in first: # Sorts the elements of each of the month lists
del i[0] # according to the order of the days on which they were
i = i.sort(key=natural_keys) # uttered.
ordered = sum(first, [])
ordered = sum(ordered, []) # Flattens the list
numbers = [int(i[0:3].replace(' ', '')) for i in ordered]
new = []
for i in numbers: # Sorts the actual utterances
new.append(self.orationes[i])
return new # Returns a list of sorted utterances
def voices(self):
"""
Finds all statements made by the politician, who's name is currently stored in the self.finder variable
and saves them as the orationes list
"""
for name in self.finder: # an artifact, maybe useful later
            read = False  # The algorithm will read every line for which the read var will be True
b = self.zupa.body.find(text=re.compile('t e n o g r a f i c z n e')) # Used to easily get the date
head = b.parent.parent.next_sibling.next_sibling.get_text() + '\n' # of the proceeding
for line in self.txt.splitlines():
if name in line.strip().split()[-1:]:
read = True # In basic terms: for each line in the string
voice='' # see if the searched name is present
continue # and read all of the next lines if it is
if read == True:
# Until certain key words or another politician's name is encountered:
                    if line.strip().split()[-1:] in self.osoby or all(w in line for w in ('(Marszałek', 'trzykrotnie', 'uderza', 'laską')) or ('(Przerwa w posiedzeniu o godz.' in line):
clean = head
for verse in voice.splitlines()[:]: # Then split the recovered statement
find = False
for os in self.osoby:
if os[0] in verse and '(' not in verse:
find = True
if find == True:
break
# And clean it:
                            if not (all(w in verse for w in ('Informacja', 'dla', 'Sejmu', 'i', 'Senatu', 'RP'))
                                    or all(w in verse for w in ('(Początek', 'posiedzenia'))
                                    or all(w in verse for w in ('Spis', 'treści'))
                                    or all(w in verse for w in ('posiedzenie', 'Sejmu', 'w', 'dniu'))):
if len(verse)>4:
if validators.url(verse.strip())!=True:
if verse.strip() != ('Sp' and 'is' and 't' and 're' and 'śc' and 'i'):
if 'Page' not in verse:
if re.match(self.digits, verse) == None and re.match(self.digitu,verse) == None and re.match(self.digit, verse) == None:
if verse != '':
clean = clean + verse + '\n'
# Finally append it to the self.orationes variable
self.orationes.append(clean)
read = False # and set read back to False
continue
voice += line + '\n'
sejm = Parliament('/Users/hubertplisiecki/PycharmProjects/NLP/') # Initialize the function with the directory that
# you would like to use
sejm.load() # Start by downloading the proceeding files
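# A possible follow-up session using the methods defined above (sketch only;
# the surname is just an example, not a real entry):
# sejm.set()                # load the member and party lists from the repository
# sejm.checker('Kowalski')  # put a matching politician's name into sejm.finder
# sejm.rostrum()            # collect that politician's statements
# sejm.party('pis')         # or gather and save statements for a whole party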
| [
"[email protected]"
] | |
56d20081ed42a3a0a4df7ef1c8a4aa8e92591758 | 8d45129798941128a51e26754892f551ee570365 | /main.py | 36805be109a378233c6c11f4a24b9f2235864955 | [] | no_license | lyimeng/Machine-Learning | 215a47fa4fe3abbd2384ca2406df865e12f90c5d | e9c1805a7bc02dc1a8158f0cf124b0941b1c2e3f | refs/heads/main | 2023-04-12T02:07:19.573991 | 2021-04-22T13:14:25 | 2021-04-22T13:14:25 | 360,523,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,817 | py | from build_dataset import build_dataset
from pyimagesearch import config
from extract_features import extract_features
from shutil import move,rmtree,make_archive
from train import train
import numpy as np
import shutil
import os
import time
# initialize the path to the *original* input directory of images
ORIG_INPUT_DATASET = "ForG"
# initialize the base path to the *new* directory that will contain
# # # our images after computing the training and testing split\
# BASE = "F:\\College\\MachineLearning\\ShortDistanceProfile\\images\\original\\original_right"
# BASE_list = ["wl_340\\wl_340_ol_0"
# ,"wl_380\\wl_380_ol_100","wl_380\\wl_380_ol_75","wl_380\\wl_380_ol_50","wl_380\\wl_380_ol_25","wl_380\\wl_380_ol_0"
# ,"wl_400\\wl_400_ol_100","wl_400\\wl_400_ol_75","wl_400\\wl_400_ol_50","wl_400\\wl_400_ol_25","wl_400\\wl_400_ol_0"
# ,"wl_420\\wl_420_ol_100","wl_420\\wl_420_ol_75","wl_420\\wl_420_ol_50","wl_420\\wl_420_ol_25","wl_420\\wl_420_ol_0"
# ,"wl_440\\wl_440_ol_100","wl_440\\wl_440_ol_75","wl_440\\wl_440_ol_25","wl_440\\wl_440_ol_0"]
# define the names of the training, testing, and validation
# directories
TRAIN = "training"
TEST = "evaluation"
VAL = "validation"
# initialize the list of class label names
CLASSES = ["gap", "foliage", "unsure"]
# set the batch size
BATCH_SIZE = 16
# initialize the label encoder file path and the output directory to
# where the extracted features (in CSV file format) will be stored
LE_PATH = os.path.sep.join(["output", "le.cpickle"])
BASE_CSV_PATH = "output"
# set the path to the serialized model after training
MODEL_PATH = os.path.sep.join(["output", "model.cpickle"])
index = np.load("e_t_index.npy", allow_pickle=True)
BASE_list = []
set_list = os.listdir("F:\\College\\MachineLearning\\ShortDistanceProfile\\images\\WL")
for s in set_list:
ol_list = os.listdir("F:\\College\\MachineLearning\\ShortDistanceProfile\\images\\WL\\" + s)
for i in ol_list:
BASE_list.append("WL\\"+s+"\\"+i)
# BASE_list = ["WL\\wl_1960\\wl_1960_ol_50"]
for i in range(len(BASE_list)):
BASE = BASE_list[i]
deliminator = "_"
name_base = BASE.split("\\")[2]
BASE = "F:\\College\\MachineLearning\\ShortDistanceProfile\\images\\" + BASE
BASE_PATH = BASE + "\\" + name_base
print(BASE_PATH)
s = time.time()
os.chdir(BASE)
if not os.path.exists(ORIG_INPUT_DATASET):
os.makedirs(ORIG_INPUT_DATASET)
if not os.path.exists(BASE_PATH):
os.makedirs(BASE_PATH)
if not os.path.exists(ORIG_INPUT_DATASET + "\\evaluation"):
os.makedirs(ORIG_INPUT_DATASET + "\\evaluation")
if not os.path.exists(ORIG_INPUT_DATASET + "\\training"):
os.makedirs(ORIG_INPUT_DATASET + "\\training")
if not os.path.exists("output"):
os.makedirs("output")
# print("Random Classify ... ")
# # Random selective
# image_list = os.listdir()
# image_list = [str(i) for i in image_list if '.jpg' in str(i)]
# image_list = [str(i) for i in image_list if i.split("_")[0] != "2"]
# total = len(image_list)
# x = np.random.choice(range(total), total, replace=False)
# e = int(total / 5)
# for i in x[:e]:
# shutil.move(image_list[i], ORIG_INPUT_DATASET + "\\evaluation\\" + image_list[i])
# for i in x[e:]:
# shutil.move(image_list[i], ORIG_INPUT_DATASET + "\\training\\" + image_list[i])
print("Building raw image folders ...")
print("Building evaluation ...")
for i in index[0]:
if (len(i.split("_")) <3):
continue
image_name = i+"_"+name_base + ".jpg"
move(image_name, "ForG\\evaluation\\" + image_name)
print("Building training ...")
for i in index[1]:
if (len(i.split("_")) <3):
continue
image_name = i+"_"+name_base+".jpg"
move(image_name, "ForG\\training\\" + image_name)
build_dataset(TRAIN, TEST, VAL, ORIG_INPUT_DATASET, CLASSES, BASE_PATH)
rmtree('ForG')
extract_features(TRAIN, TEST, BASE_PATH, BASE_CSV_PATH, BATCH_SIZE, LE_PATH)
# train(info = "Result of Right Mic with Feature Extraction - VGG\nRight Mic\nRevised each of the spectrogram\n\t1. with half of overlap.\n\t2. with half of overlap and half of window length\nTotal amount of images 30000")
train("Result of Right Mic with Feature Extraction - VGG\nRandom choose 500 gap 500 foliage for all 10 datasets \nRevised each of the spectrogram\n\twindow length: longest, overlap: 0\nTotal amount of images 30000", BASE_CSV_PATH, TRAIN, TEST, MODEL_PATH, LE_PATH)
print("Archiving file ...")
make_archive(name_base, 'zip', BASE_PATH)
print("Removing files ...")
rmtree(BASE_PATH)
os.remove(BASE+"\\"+"output"+"\\"+TRAIN+".csv")
os.remove(BASE+"\\"+"output"+"\\"+TEST +".csv")
e = time.time()
print("Duration: " + str(e-s))
| [
"[email protected]"
] | |
cd10d8c80e65a59e8a288cb9b795509c3864b1c0 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Whatsapp Automation/whatsapp_attachment.py | ccdb8123b93491e448914e300a00a0c852f81d37 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d121775ff8916c1d15d64101a4aa7195719f75e69ad16c9e45b21a9a863dd354
size 719
| [
"[email protected]"
] | |
c61187564431a8316c2ea92e0e799f63e8e7d109 | e8ef621a0c3a7b602c261cb81feb11e676f12517 | /python-tdd/python_tdd/lists/views.py | 42c380fc6ef7540d034bf516daef49d6cc96109f | [] | no_license | hwshim0810/studyData | 3f32ab80de2e6c3714bce6cb18bad7537de07350 | 0f71f0750ecd77e71a7652a368d51faed9dadc69 | refs/heads/master | 2021-01-11T15:59:52.534627 | 2018-03-16T14:39:04 | 2018-03-16T14:39:04 | 79,977,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | from django.shortcuts import render, redirect
from django.core.exceptions import ValidationError
from lists.models import Item, List
from lists.forms import ItemForm
def index(request):
    return render(request, 'lists/index.html', {'form': ItemForm()})
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ItemForm()
if request.method == 'POST':
form = ItemForm(data=request.POST)
if form.is_valid():
Item.objects.create(text=request.POST.get('task', False), list=list_)
return redirect(list_)
return render(request, 'lists/list.html', {'list': list_, 'form': form})
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
Item.objects.create(text=request.POST.get('task', False), list=list_)
        # Passing the object makes redirect() call a special method on it (get_absolute_url)
return redirect(list_)
else:
return render(request, 'lists/index.html', {'form': form})
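# A matching URLconf might look like this (hypothetical sketch; the routes and
# names are assumed, not taken from the project):
# from django.urls import path
# from lists import views
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('lists/new', views.new_list, name='new_list'),
#     path('lists/<int:list_id>/', views.view_list, name='view_list'),
# ]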
| [
"[email protected]"
] | |
ec400a4b47d9e265b8637a811cbf97d821d30418 | be493cd376e50985a4b5312bbae5cc4e8fb970bb | /trading_src/aml_experiments/BoolingerB/boolingerB_entry.py | 673ec33845fbf100cd48a1d3f642ece40d590156 | [] | no_license | RGuseynov/Trading_AML | 5b850153f11aa1f4c9bddc6a02a08efc3558ff38 | f7e215c0e76f4491f13e525544020f041823f5d7 | refs/heads/master | 2023-01-20T07:09:20.384354 | 2020-11-28T21:10:49 | 2020-11-28T21:10:49 | 287,303,844 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | import azureml.core
from azureml.core import Environment, Experiment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.estimator import Estimator
from azureml.core import Model
from azureml.core import Workspace, Datastore, Dataset
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
cluster_name = "aml-cluster"
# Verify that cluster exists
try:
training_cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# If not, create it
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2', max_nodes=4)
training_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
training_cluster.wait_for_completion(show_output=True)
# Get python environment
registered_env = Environment.get(ws, 'xgboost-env')
# Get the training dataset
data = ws.datasets.get("bitcoin 1H tabular dataset")
# Create an estimator
estimator = Estimator(source_directory="trading_src",
inputs=[data.as_named_input('bitcoin')],
compute_target = cluster_name, # Use the compute target created previously
environment_definition = registered_env,
entry_script='aml_experiments/BoolingerB/boolingerB_script.py')
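# Note: Estimator is the older azureml-sdk submission API; newer SDK releases
# provide ScriptRunConfig for the same role.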
# Create an experiment
experiment = Experiment(workspace = ws, name = 'BoolingerB-training')
# Run the experiment
run = experiment.submit(config=estimator)
run.wait_for_completion()
# Register the model
run.register_model(model_path='outputs/BoolingerB_model.pkl', model_name='BoolingerB_model',
tags={'Training context':'Azure ML compute'}, properties={'return no fee': run.get_metrics()['return no fee'],
'return with fee': run.get_metrics()['return with fee']}) | [
"[email protected]"
] | |
5694f64e95738675e7ab87398f94d54ae3f259d9 | 627746e4afafcf7342bba114e089ffe7c7c754b3 | /webroot/Pj/mysite/mysite/polls/migrations/0003_userprofile_image.py | 60719e1586e61fc554100fc560439c96b2ab76f9 | [
"Apache-2.0"
] | permissive | dinhkute/Incisive-AIESEC | 168855f7076181112aa8b1dcbf3818f07ba6cb91 | b83db81f09596d05e4bbb6362fab7af931b63e9f | refs/heads/development | 2021-01-17T18:09:16.183698 | 2017-08-16T16:22:51 | 2017-08-16T16:22:51 | 95,542,240 | 0 | 0 | null | 2017-06-28T15:39:29 | 2017-06-27T09:35:52 | Python | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 15:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_userprofile'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, upload_to=b'profile_image'),
),
]
| [
"[email protected]"
] | |
671ab89816e4b7577c1598aa39e51fa871991b44 | e7809b43b36b16a1602224ae7ebb4ca15d1003c8 | /userTransfer_API/views.py | 589733352eafbbd0b936df58e459b0974e9d707f | [] | no_license | dinaelgamal/BankingSystem | 9b1cf41fa460545b51a7c48080e5860d7aec1df2 | d0aef584b308b5e7d599a955ae8e9dbd1c3caf3f | refs/heads/master | 2023-05-26T21:32:02.969212 | 2021-06-08T13:04:50 | 2021-06-08T13:04:50 | 375,007,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import *
from userBalance_API.models import userInfo
from rest_framework import status
class userTransferAPIView(APIView):
def post(self, request):
amount = request.data['transferAmount']
currency = request.data['currency']
tSerializer = userTransferSerializer(data=request.data)
try:
senderID = request.data['senderID']
senderUser = userInfo.objects.get(pk=senderID)
except:
return Response("Sender User Does not Exist", status=status.HTTP_404_NOT_FOUND)
try:
receiverID = request.data['receiverID']
receiverUser = userInfo.objects.get(pk=receiverID)
except:
return Response("Receiver User Does not Exist", status=status.HTTP_404_NOT_FOUND)
if senderUser.userAccountBalance >= amount:
if currency == 'USD':
receiverUser.userAccountBalance = receiverUser.userAccountBalance + (amount *15)
receiverUser.save()
else:
receiverUser.userAccountBalance = receiverUser.userAccountBalance + amount
receiverUser.save()
if currency == 'USD':
senderUser.userAccountBalance = senderUser.userAccountBalance - (amount *15)
senderUser.save()
else:
senderUser.userAccountBalance = senderUser.userAccountBalance - amount
senderUser.save()
else:
return Response("Account Balance Is Not Enough", status=status.HTTP_406_NOT_ACCEPTABLE)
        if tSerializer.is_valid():
            tSerializer.save()
            return Response(tSerializer.data, status=status.HTTP_200_OK)
        return Response(tSerializer.errors, status=status.HTTP_400_BAD_REQUEST)
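# Example request body this view expects (illustrative values only):
#   {"senderID": 1, "receiverID": 2, "transferAmount": 100, "currency": "USD"}
# "USD" amounts are converted with the hard-coded rate of 15 before the two
# balances are updated.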
| [
"[email protected]"
] | |
08a114c60dd9db78443445fbbeb7fe8d1e476e47 | 12937dce57bb50eda8540351e02992c59281487c | /kaleido/commands/compound.py | 4d252397c7a93d92f34d5224fab4aefccf3265d2 | [] | no_license | esameth/kaleido | 7902a1c87f4f1e7e886561dd3ec9ffd03fae4679 | b985936f483da52740d7eadf2910be0787cd0136 | refs/heads/main | 2023-03-31T03:16:27.123229 | 2021-03-31T06:54:18 | 2021-03-31T06:54:18 | 352,113,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,451 | py | import sys
from kaleido.plates import Plate
from kaleido.compounds import Compound
from kaleido.commands.well import del_well, valid_plate_well
from kaleido.command import Command, FileCommand
from kaleido.utils.util import write_file, exists
#############################################################################################
# USE CASE #
# As a Biologist, #
# I would like to input a compound ID #
# so that I can store/register a compound #
#############################################################################################
class CompoundCommand(FileCommand, Command):
"""Store/register a compound, or search/delete all plate.wells associated with a compound"""
@classmethod
def init_parser(cls, parser):
parser.add_argument('id', type=str, help='Compound ID')
parser.add_argument('--store', action='store_true', help="Store a compound")
parser.add_argument('--register', action='store_true', help="Register a compound")
parser.add_argument('--search', action='store_true',
help="Search for a compound, its state (stored/registered), and all plates it is in")
parser.add_argument('--delete', action='store_true', help='Remove a compound')
super(CompoundCommand, cls).init_parser(parser)
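    # Example invocations (command name assumed here for illustration):
    #   kaleido <id> --store | --register | --search | --delete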
def run(self):
"""Run compound command"""
self.load_file()
self.comp = self.load_comp()
if self._args.store:
self.store_comp()
write_file(self._args.comp_file, self.compounds)
elif self._args.register:
self.register_comp()
write_file(self._args.comp_file, self.compounds)
elif self._args.search:
self.search_comp()
else:
self.delete_comp()
def store_comp(self):
"""Store a compound"""
        # Not yet stored or registered, so store it
if not self.comp:
self.compounds[self._args.id] = Compound(self._args.id, state='stored').__todict__()
else:
# Assumption: once a compound is registered, it can not be unregistered
if self.comp.state == 'registered':
sys.exit(f'Compound {self.comp._id} is already registered and cannot be changed to stored')
# Give error if compound already stored
else:
sys.exit(f'Compound {self.comp._id} is already stored')
print(f'Successfully stored {self._args.id}')
def register_comp(self):
"""Register a compound"""
# Give error if compound already registered
if self.comp and self.comp.state == 'registered':
sys.exit(f'Compound {self.comp._id} is already registered')
else:
self.compounds[self._args.id] = Compound(self._args.id, state='registered').__todict__()
print(f'Successfully registered {self._args.id}')
def search_comp(self):
"""Search for a compound - gives id, state (stored, registered),
and all plates.wells associated with it"""
if not self.comp:
print(f'Compound {self._args.id} does not exist')
else:
print(f'Compound id: {self.comp._id}')
print(f'State: {self.comp.state}\n')
if self.comp.plate:
print('Associated plates and wells: \n{}'.format('\n'.join(self.comp.plate)))
def delete_comp(self):
"""Delete a compound if it exists"""
# Give error if the compound does not exist
if not self.comp:
sys.exit(f'Compound {self._args.id} does not exist')
# Remove the plate.well the compound is in
for remove in self.comp.plate:
plate, well = valid_plate_well(remove)
del_well(self._args.plate_file, self.plates, Plate(plate, plate=self.plates[plate]), well)
# Remove from compound file
del self.compounds[self.comp._id]
write_file(self._args.comp_file, self.compounds)
print(f'Successfully deleted {self._args.id}')
def load_comp(self):
# Already stored or registered
if exists(self._args.id, self.compounds):
return Compound(self._args.id, props=self.compounds[self._args.id]) | [
"[email protected]"
] | |
5066d36c7f4b1717e5413e09403d6a2fff022a9e | e273bf149407743b0e1458d35b081fbc1797199a | /users/alina/coherence_code/coherence_practice.py | cae8b7c601591703e8054bc3bc49d6bef4885669 | [] | no_license | smerdis/megavista | dad2a8e5ed1df72f8a7345bfda86d86cb3ca19cb | fa9ca02b5a3aa72937d7253d97bcfc7143b27390 | refs/heads/master | 2021-01-18T23:52:32.710292 | 2017-06-29T20:47:04 | 2017-06-29T20:47:04 | 87,128,497 | 0 | 0 | null | 2017-04-03T22:56:33 | 2017-04-03T22:56:33 | null | UTF-8 | Python | false | false | 6,438 | py | # Changed on 1/28/11
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import pickle
import datetime
import vista_utils as tsv # Get it at: https://github.com/arokem/vista_utils
from nitime.fmri.io import time_series_from_file as load_nii
import nitime.timeseries as ts
import nitime.viz as viz
from nitime.analysis import CorrelationAnalyzer, CoherenceAnalyzer
#Import utility functions:
from nitime.utils import percent_change
from nitime.viz import drawmatrix_channels, drawgraph_channels, plot_xcorr
import subjects
reload(subjects) # In case you make changes in there while you analyze
from subjects import subjects, rois
def display_vox(tseries,vox_idx,fig=None):
"""
Display the voxel time-series
"""
if fig is None:
fig = plt.figure()
vox_tseries = ts.TimeSeries(tseries.data[vox_idx],sampling_interval=TR)
fig = viz.plot_tseries(vox_tseries,fig)
fig = viz.plot_tseries(ts.TimeSeries(np.mean(vox_tseries.data,0),
sampling_interval=TR),
yerror=ts.TimeSeries(stats.sem(vox_tseries.data,0),
sampling_interval=TR),fig=fig,
error_alpha = 0.3,ylabel='% signal change',
linewidth=4,
color='r')
return fig
def reshapeTS(t_fix):
# TR=2 seconds, 30 TRs in one movie
segTime=30
# Change to an array (numSess, numROIs, numTime points)
t_fixArray=np.array(t_fix)
t_fixArrayTP=np.transpose(t_fixArray, (1,0,2))
shapeTS=t_fixArrayTP.shape
numRuns=shapeTS[2]/segTime
# This returns rois x runs x TS with runs collapsed by segTime
allROIS=np.reshape(t_fixArrayTP, [shapeTS[0], shapeTS[1]*numRuns, segTime])
return allROIS
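# e.g. 2 sessions of 90 TRs with segTime = 30: a (2, nROIs, 90) input becomes
# (nROIs, 6, 30) - three 30-TR segments per session, stacked per ROI.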
if __name__ == "__main__":
base_path = '/Volumes/Plata1/DorsalVentral/' # Change this to your path
fmri_path = base_path + 'fmri/'
sessionName=['donepazil', 'placebo']
session=1 # 0= donepazil, 1=placebo
TR = 2
allRuns=['fix_nii']
# save filename
date=str(datetime.date.today())
saveFile=base_path+ 'fmri/Results/' + 'CG&CHT&DCAallROIsOrderFix_matr'+sessionName[session] +str(len(allRuns))+'runs_'+ date + '.pck'
# The pass band is f_lb <-> f_ub.
# Also, see: http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency
f_ub = 0.15
f_lb = 0.01
#It depends on your frequency bin resolution needs. delta_freq = sampling_rate/NFFT
#So, say your sampleing rate is 1024 samples/sec and NFFT is 256. Then delta_freq = 4 Hz.
NFFT=16 # 32 for 60 TRs, 1/64= freq limit lower, .25 hertz is upper limit (1/2 of sampling rate) Nyquist freq
n_overlap=8
# The upsample factor between the Inplane and the Gray:
# Inplane Voxels: .867 x .867 x 3.3, Functional voxels: 3 x 3 x 3.3
up_samp = [3.4595,3.4595,1.0000]
# set up dictionaries to store results
corr_all=dict()
coh_all = dict()
for subject in subjects:
# len(subjects[subject])= number of session per subject
# len(subjects[subject][0][1])= number of different types of runs
# len(subjects[subject][1][1]['fix_nii'])= number of nifti files for that session
# Close any opened plots
plt.close('all')
# Get session
sess = subjects[subject][session]
# Get ROIs
roi_names=np.array(rois)
ROI_files=[]
for roi in rois:
ROI_files.append(fmri_path+sess[0]+'/Inplane/ROIs/' +roi +'.mat')
# Get the coordinates of the ROIs, while accounting for the
# up-sampling:
ROI_coords = [tsv.upsample_coords(tsv.getROIcoords(f),up_samp)
for f in ROI_files]
# Initialize lists for each behavioral condition:
t_fix = []
t_left = []
t_right = []
nifti_path = fmri_path +sess[0] + '/%s_nifti/' % sess[0]
# Plot the mean of the TS over SD (SNR) for each ROI
# len(t_fix)= number of ROIs
for runName in allRuns:
for this_fix in sess[1][runName]:
t_fix.append(load_nii(nifti_path+this_fix, ROI_coords,TR,
normalize='percent', average=True, verbose=True))
# reshape ROI matrix
allROIS=reshapeTS(t_fix)
numRuns=allROIS.shape[1]
corr_all[subject] = np.zeros((numRuns,len(rois),len(rois))) * np.nan
coh_all[subject] = np.zeros((numRuns,len(rois),len(rois))) * np.nan
# Get roi correlations and coherence
for runNum in range(allROIS.shape[1]):
#need to load timeseries by run
fixTS=ts.TimeSeries(allROIS[:,runNum,:], sampling_interval=TR)
fixTS.metadata['roi'] = roi_names
# Get plot and correlations
C=CorrelationAnalyzer(fixTS)
fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0, title='Correlation Results Run %i' % runNum)
plt.show()
# Save correlation
corr_all[subject][runNum]=C.corrcoef
# Get coherence
Coh = CoherenceAnalyzer(fixTS)
Coh.method['NFFT'] = NFFT
Coh.method['n_overlap']=n_overlap
# Get the index for the frequencies inside the ub and lb
freq_idx = np.where((Coh.frequencies > f_lb) * (Coh.frequencies < f_ub))[0]
# Extract coherence
# Coher[0]= correlations for first ROI in list with others
coher = np.mean(Coh.coherence[:, :, freq_idx], -1) # Averaging on the last dimension
fig03 = drawmatrix_channels(coher, roi_names, size=[10., 10.], color_anchor=0, title='Coherence Results Run %i' % runNum)
# Save coherence (coher is the average of the coherence over the specified frequency)
coh_all[subject][runNum]=coher
file=open(saveFile, 'w') # write mode
# First file loaded is coherence
pickle.dump(coh_all, file)
# Second file loaded is correlation
pickle.dump(corr_all, file)
# Save roi names
pickle.dump(roi_names, file)
# save subjects
pickle.dump(subjects, file)
file.close()
print 'Saving subject coherence and correlation dictionaries.'
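# To read the results back later (sketch; load order matches the dumps above):
# f = open(saveFile, 'r')
# coh_all = pickle.load(f)
# corr_all = pickle.load(f)
# roi_names = pickle.load(f)
# subjects = pickle.load(f)
# f.close()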
| [
"[email protected]"
] | |
2e13581230ed203e620ba514738fbb9d866670e3 | f7cdfe407d0d33892d9d8769abfb553479b9c305 | /COMP1_Summer_2014_SkelProg_Python32Pub0.0.0_version2.py | dc49c80a84340819d5426cdbe1f929f46db29669 | [] | no_license | HarryVines/Preliminary-Tasks-2014 | 4969b71d8c2003cedd14de9c32c6d49bde41b0c1 | a6e5a196842a4cba5df17d1e9a1f68a8813b48fa | refs/heads/master | 2021-01-23T14:05:04.442079 | 2015-03-23T11:14:42 | 2015-03-23T11:14:42 | 32,727,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,188 | py | # Skeleton Program code for the AQA COMP1 Summer 2014 examination
# this code should be used in conjunction with the Preliminary Material
# written by the AQA Programmer Team
# developed in the Python 3.2 programming environment
# version 2 edited 06/03/2014
from datetime import *
import random
NO_OF_RECENT_SCORES = 3
class TCard():
def __init__(self):
self.Suit = 0
self.Rank = 0
class TRecentScore():
def __init__(self):
self.Name = ''
self.Score = 0
self.Date = None
Deck = [None]
RecentScores = [None]
Choice = ''
def GetRank(RankNo):
Rank = ''
if RankNo == 1:
Rank = 'Ace'
elif RankNo == 2:
Rank = 'Two'
elif RankNo == 3:
Rank = 'Three'
elif RankNo == 4:
Rank = 'Four'
elif RankNo == 5:
Rank = 'Five'
elif RankNo == 6:
Rank = 'Six'
elif RankNo == 7:
Rank = 'Seven'
elif RankNo == 8:
Rank = 'Eight'
elif RankNo == 9:
Rank = 'Nine'
elif RankNo == 10:
Rank = 'Ten'
elif RankNo == 11:
Rank = 'Jack'
elif RankNo == 12:
Rank = 'Queen'
elif RankNo == 13:
Rank = 'King'
return Rank
def GetSuit(SuitNo):
Suit = ''
if SuitNo == 1:
Suit = 'Clubs'
elif SuitNo == 2:
Suit = 'Diamonds'
elif SuitNo == 3:
Suit = 'Hearts'
elif SuitNo == 4:
Suit = 'Spades'
return Suit
def DisplayMenu():
print()
print('MAIN MENU')
print()
print('1. Play game (with shuffle)')
print('2. Play game (without shuffle)')
print('3. Display recent scores')
print('4. Reset recent scores')
print()
print('Select an option from the menu (or enter q to quit): ', end='')
def GetMenuChoice():
Choice = input()
Choice = Choice.lower()
if Choice == "quit":
Choice = "q"
print()
return Choice
def LoadDeck(Deck):
CurrentFile = open('deck.txt', 'r')
Count = 1
while True:
LineFromFile = CurrentFile.readline()
if not LineFromFile:
CurrentFile.close()
break
Deck[Count].Suit = int(LineFromFile)
LineFromFile = CurrentFile.readline()
Deck[Count].Rank = int(LineFromFile)
Count = Count + 1
def ShuffleDeck(Deck):
SwapSpace = TCard()
NoOfSwaps = 1000
for NoOfSwapsMadeSoFar in range(1, NoOfSwaps + 1):
Position1 = random.randint(1, 52)
Position2 = random.randint(1, 52)
SwapSpace.Rank = Deck[Position1].Rank
SwapSpace.Suit = Deck[Position1].Suit
Deck[Position1].Rank = Deck[Position2].Rank
Deck[Position1].Suit = Deck[Position2].Suit
Deck[Position2].Rank = SwapSpace.Rank
Deck[Position2].Suit = SwapSpace.Suit
def DisplayCard(ThisCard):
print()
print('Card is the', GetRank(ThisCard.Rank), 'of', GetSuit(ThisCard.Suit))
print()
def GetCard(ThisCard, Deck, NoOfCardsTurnedOver):
ThisCard.Rank = Deck[1].Rank
ThisCard.Suit = Deck[1].Suit
for Count in range(1, 52 - NoOfCardsTurnedOver):
Deck[Count].Rank = Deck[Count + 1].Rank
Deck[Count].Suit = Deck[Count + 1].Suit
Deck[52 - NoOfCardsTurnedOver].Suit = 0
Deck[52 - NoOfCardsTurnedOver].Rank = 0
def IsNextCardHigher(LastCard, NextCard):
Higher = False
if NextCard.Rank > LastCard.Rank:
Higher = True
return Higher
def GetPlayerName():
print()
Add = input("Do you want to add your name to the scores table: ")
Add = Add.lower()
if Add == "no" or Add =="n":
PlayerName = " "
elif Add == "yes" or Add == "y":
Valid = False
while Valid == False:
PlayerName = input('Please enter your name: ')
PlayerName = PlayerName.strip()
print()
if PlayerName == "" or PlayerName == "":
Valid = False
else:
Valid = True
return PlayerName
def GetChoiceFromUser():
Choice = input('Do you think the next card will be higher than the last card (enter y or n)? ')
Choice = Choice.lower()
if Choice == "yes":
Choice = "y"
elif Choice == "no":
Choice = "n"
return Choice
def DisplayEndOfGameMessage(Score):
print()
print('GAME OVER!')
print('Your score was', Score)
if Score == 51:
print('WOW! You completed a perfect game.')
print()
def DisplayCorrectGuessMessage(Score):
print()
print('Well done! You guessed correctly.')
print('Your score is now ', Score, '.', sep='')
print()
def ResetRecentScores(RecentScores):
for Count in range(1, NO_OF_RECENT_SCORES + 1):
RecentScores[Count].Name = ''
RecentScores[Count].Score = 0
def DisplayRecentScores(RecentScores):
print()
print('Recent Scores: ')
print()
print("{0} {1:>10} {2:>10}".format("Name","Score","Date"))
try:
for Count in range(1, NO_OF_RECENT_SCORES + 1):
print('{0:<10}{1:<10}{2:>10}'.format(RecentScores[Count].Name, RecentScores[Count].Score,RecentScores[Count].Date))
except TypeError:
pass
print('Press the Enter key to return to the main menu')
input()
print()
def UpdateRecentScores(RecentScores, Score):
PlayerName = GetPlayerName()
FoundSpace = False
Count = 1
while (not FoundSpace) and (Count <= NO_OF_RECENT_SCORES):
if RecentScores[Count].Name == '':
FoundSpace = True
else:
Count = Count + 1
if not FoundSpace:
for Count in range(1, NO_OF_RECENT_SCORES):
RecentScores[Count].Name = RecentScores[Count + 1].Name
RecentScores[Count].Score = RecentScores[Count + 1].Score
Count = NO_OF_RECENT_SCORES
RecentScores[Count].Name = PlayerName
RecentScores[Count].Score = Score
Date = datetime.now()
RecentScores[Count].Date = datetime.strftime(Date,"%d/%m/%Y")
def PlayGame(Deck, RecentScores):
LastCard = TCard()
NextCard = TCard()
GameOver = False
GetCard(LastCard, Deck, 0)
DisplayCard(LastCard)
NoOfCardsTurnedOver = 1
while (NoOfCardsTurnedOver < 52) and (not GameOver):
GetCard(NextCard, Deck, NoOfCardsTurnedOver)
Choice = ''
while (Choice != 'y') and (Choice != 'n'):
Choice = GetChoiceFromUser()
DisplayCard(NextCard)
NoOfCardsTurnedOver = NoOfCardsTurnedOver + 1
Higher = IsNextCardHigher(LastCard, NextCard)
if (Higher and Choice == 'y') or (not Higher and Choice == 'n'):
DisplayCorrectGuessMessage(NoOfCardsTurnedOver - 1)
LastCard.Rank = NextCard.Rank
LastCard.Suit = NextCard.Suit
else:
GameOver = True
if GameOver:
DisplayEndOfGameMessage(NoOfCardsTurnedOver - 2)
UpdateRecentScores(RecentScores, NoOfCardsTurnedOver - 2)
else:
DisplayEndOfGameMessage(51)
UpdateRecentScores(RecentScores, 51)
if __name__ == '__main__':
for Count in range(1, 53):
Deck.append(TCard())
for Count in range(1, NO_OF_RECENT_SCORES + 1):
RecentScores.append(TRecentScore())
Choice = ''
while Choice != 'q':
DisplayMenu()
Choice = GetMenuChoice()
if Choice == '1':
LoadDeck(Deck)
ShuffleDeck(Deck)
PlayGame(Deck, RecentScores)
elif Choice == '2':
LoadDeck(Deck)
PlayGame(Deck, RecentScores)
elif Choice == '3':
DisplayRecentScores(RecentScores)
elif Choice == '4':
ResetRecentScores(RecentScores)
| [
"[email protected]"
] | |
28b08ff13cf7bac0a4f3d1d0baa8ea71fd191be3 | 2654bbd5700e5153311f8e6064bee6cbbabe62fa | /qiita/client.py | 40844343b72157d2d131182d961e86ab7814be71 | [
"BSD-3-Clause"
] | permissive | heavenshell/py-qiita | 0a2017fde51f64b8443ba2602e11d9396ffdd3a3 | f39ac0dee0d7e499634fd861b06f9603eeaf2abf | refs/heads/master | 2020-04-05T23:05:52.437425 | 2012-10-22T11:07:15 | 2012-10-22T11:07:15 | 6,228,366 | 4 | 2 | NOASSERTION | 2018-10-30T13:16:04 | 2012-10-15T14:30:22 | Python | UTF-8 | Python | false | false | 3,308 | py | # -*- coding: utf-8 -*-
"""
qiita.client
~~~~~~~~~~~~
Python wrapper for Qiita API v1.
:copyright: (c) 2012 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import requests
from . import json
from .exceptions import on_complte
class Client(object):
ROOT_URL = 'https://qiita.com/api/v1{0}'
requests = None
def __init__(self, **kwargs):
options = ['url_name', 'password', 'token']
for option in options:
if option in kwargs:
setattr(self, option, kwargs[option])
else:
setattr(self, option, None)
if self.requests is None:
# TODO: Use urllib?
self.requests = requests
if self.token is None and self.url_name and self.password:
self.login()
def rate_limit(self):
"""Get api rate limit.
        Maximum of 150 requests per hour.
"""
return self._request('get', '/rate_limit')
def login(self):
"""login
Login to Qiita to get token.
"""
params = {'url_name': self.url_name, 'password': self.password}
response = self._request('post', '/auth', params)
if 'token' in response:
self.token = response['token']
return response
def get(self, path, params=None):
"""GET request.
:param path:
:param params:
"""
return self._request('get', path, params)
def delete(self, path, params=None):
"""DELETE request.
:param path:
:param params:
"""
return self._request('delete', path, params)
def post(self, path, params=None):
"""POST request.
:param path:
:param params:
"""
return self._request('post', path, params)
def put(self, path, params=None):
"""PUT request.
:param path:
:param params:
"""
return self._request('put', path, params)
def _request(self, method, path, params=None):
"""_requests.
_requests depends on _requests library.
see `<http://docs.python-_requests.org/en/latest/>_` more details.
:param method:
:param path:
:param params:
"""
if self.token is not None:
if params is None:
params = {}
params['token'] = self.token
uri = self.ROOT_URL.format(path)
response = None
headers = {'Content-Type': 'application/json'}
if method == 'get':
response = self.requests.get(uri, params=params, headers=headers)
elif method == 'delete':
response = self.requests.delete(uri, params=params,
headers=headers)
elif method == 'post':
response = self.requests.post(uri, data=json.dumps(params),
headers=headers)
elif method == 'put':
response = self.requests.put(uri, data=json.dumps(params),
headers=headers)
if response.ok is False:
on_complte(response.status_code)
result = '' if response.content == '' else json.loads(response.content)
return result
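# Minimal usage sketch (credentials are placeholders, not real values):
#   client = Client(url_name='your_id', password='your_password')
#   client.get('/rate_limit')   # or client.rate_limit()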
| [
"[email protected]"
] | |
ec82224f8f674e04c2d12394464bbe2513bf8ad9 | 90a1f47fd8b73de2f132a90d38709f0d6e354609 | /node_modules/fsevents/build/config.gypi | aa77c1f8d50d8fabacfe7608ac183bda2bee8ccf | [
"MIT"
] | permissive | db630401865/gridsome-lagou | 75e96f0d9186fddd33b76ca70bdfc923d968309b | 3b79b792d831473a299b234f2a75b7988f34ba02 | refs/heads/master | 2023-07-10T09:06:14.814873 | 2021-08-20T04:46:28 | 2021-08-20T04:46:28 | 398,157,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt69l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "69",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "8",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 93,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_nghttp3": "false",
"node_shared_ngtcp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"openssl_quic": "true",
"ossfuzz": "false",
"shlib_suffix": "93.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/chenluyi/Library/Caches/node-gyp/16.3.0",
"standalone_static_library": 1,
"version_git_tag": "true",
"sharp_libvips_binary_host": "https://npm.taobao.org/mirrors/sharp-libvips",
"init_license": "MIT",
"email": "[email protected]",
"_terminus_registry": "http://10.137.10.236:8081/repository/front_group/",
"registry": "https://registry.yarnpkg.com",
"home": "https://www.npmjs.org",
"_terccccminus_registry": "http://10.137.10.236:8081/repository/front_group/",
"version_commit_hooks": "true",
"bin_links": "true",
"__registry_npm_taobao_org__username": "inchint",
"save_prefix": "^",
"strict_ssl": "true",
"version_git_message": "v%s",
"version_git_sign": "",
"ignore_scripts": "",
"user_agent": "yarn/1.22.4 npm/? node/v16.3.0 darwin x64",
"init_version": "1.0.0",
"ignore_optional": "",
"sharp_binary_host": "https://npm.taobao.org/mirrors/sharp",
"version_tag_prefix": "v"
}
}
| [
"[email protected]"
] | |
fe2b27a045824950873461d3f99af2bb57118106 | 4e353bf7035eec30e5ad861e119b03c5cafc762d | /QtGui/QPrintPreviewWidget.py | 076aefa669f30053247e57c1fbe3f1006f686646 | [] | no_license | daym/PyQt4-Stubs | fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5 | 57d880c0d453641e31e1e846be4087865fe793a9 | refs/heads/master | 2022-02-11T16:47:31.128023 | 2017-10-06T15:32:21 | 2017-10-06T15:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,154 | py | # encoding: utf-8
# module PyQt4.QtGui
# from C:\Python27\lib\site-packages\PyQt4\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from QWidget import QWidget
class QPrintPreviewWidget(QWidget):
"""
QPrintPreviewWidget(QPrinter, QWidget parent=None, Qt.WindowFlags flags=0)
QPrintPreviewWidget(QWidget parent=None, Qt.WindowFlags flags=0)
"""
def actionEvent(self, *args, **kwargs): # real signature unknown
pass
def changeEvent(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def closeEvent(self, *args, **kwargs): # real signature unknown
pass
def connectNotify(self, *args, **kwargs): # real signature unknown
pass
def contextMenuEvent(self, *args, **kwargs): # real signature unknown
pass
def create(self, *args, **kwargs): # real signature unknown
pass
def currentPage(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.currentPage() -> int """
return 0
def customEvent(self, *args, **kwargs): # real signature unknown
pass
def destroy(self, *args, **kwargs): # real signature unknown
pass
def disconnectNotify(self, *args, **kwargs): # real signature unknown
pass
def dragEnterEvent(self, *args, **kwargs): # real signature unknown
pass
def dragLeaveEvent(self, *args, **kwargs): # real signature unknown
pass
def dragMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def dropEvent(self, *args, **kwargs): # real signature unknown
pass
def enabledChange(self, *args, **kwargs): # real signature unknown
pass
def enterEvent(self, *args, **kwargs): # real signature unknown
pass
def event(self, *args, **kwargs): # real signature unknown
pass
def fitInView(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.fitInView() """
pass
def fitToWidth(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.fitToWidth() """
pass
def focusInEvent(self, *args, **kwargs): # real signature unknown
pass
def focusNextChild(self, *args, **kwargs): # real signature unknown
pass
def focusNextPrevChild(self, *args, **kwargs): # real signature unknown
pass
def focusOutEvent(self, *args, **kwargs): # real signature unknown
pass
def focusPreviousChild(self, *args, **kwargs): # real signature unknown
pass
def fontChange(self, *args, **kwargs): # real signature unknown
pass
def hideEvent(self, *args, **kwargs): # real signature unknown
pass
def inputMethodEvent(self, *args, **kwargs): # real signature unknown
pass
def keyPressEvent(self, *args, **kwargs): # real signature unknown
pass
def keyReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def languageChange(self, *args, **kwargs): # real signature unknown
pass
def leaveEvent(self, *args, **kwargs): # real signature unknown
pass
def metric(self, *args, **kwargs): # real signature unknown
pass
def mouseDoubleClickEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def mousePressEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def moveEvent(self, *args, **kwargs): # real signature unknown
pass
def numPages(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.numPages() -> int """
return 0
def orientation(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.orientation() -> QPrinter.Orientation """
pass
def pageCount(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.pageCount() -> int """
return 0
def paintEvent(self, *args, **kwargs): # real signature unknown
pass
def paintRequested(self, *args, **kwargs): # real signature unknown
""" QPrintPreviewWidget.paintRequested[QPrinter] [signal] """
pass
def paletteChange(self, *args, **kwargs): # real signature unknown
pass
def previewChanged(self, *args, **kwargs): # real signature unknown
""" QPrintPreviewWidget.previewChanged [signal] """
pass
def print_(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.print_() """
pass
def receivers(self, *args, **kwargs): # real signature unknown
pass
def resetInputContext(self, *args, **kwargs): # real signature unknown
pass
def resizeEvent(self, *args, **kwargs): # real signature unknown
pass
def sender(self, *args, **kwargs): # real signature unknown
pass
def senderSignalIndex(self, *args, **kwargs): # real signature unknown
pass
def setAllPagesViewMode(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setAllPagesViewMode() """
pass
def setCurrentPage(self, p_int): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setCurrentPage(int) """
pass
def setFacingPagesViewMode(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setFacingPagesViewMode() """
pass
def setLandscapeOrientation(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setLandscapeOrientation() """
pass
def setOrientation(self, QPrinter_Orientation): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setOrientation(QPrinter.Orientation) """
pass
def setPortraitOrientation(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setPortraitOrientation() """
pass
def setSinglePageViewMode(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setSinglePageViewMode() """
pass
def setViewMode(self, QPrintPreviewWidget_ViewMode): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setViewMode(QPrintPreviewWidget.ViewMode) """
pass
def setVisible(self, bool): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setVisible(bool) """
pass
def setZoomFactor(self, p_float): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setZoomFactor(float) """
pass
def setZoomMode(self, QPrintPreviewWidget_ZoomMode): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.setZoomMode(QPrintPreviewWidget.ZoomMode) """
pass
def showEvent(self, *args, **kwargs): # real signature unknown
pass
def tabletEvent(self, *args, **kwargs): # real signature unknown
pass
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def updateMicroFocus(self, *args, **kwargs): # real signature unknown
pass
def updatePreview(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.updatePreview() """
pass
def viewMode(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.viewMode() -> QPrintPreviewWidget.ViewMode """
pass
def wheelEvent(self, *args, **kwargs): # real signature unknown
pass
def windowActivationChange(self, *args, **kwargs): # real signature unknown
pass
def winEvent(self, *args, **kwargs): # real signature unknown
pass
def zoomFactor(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.zoomFactor() -> float """
return 0.0
def zoomIn(self, float_factor=1.1): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.zoomIn(float factor=1.1) """
pass
def zoomMode(self): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.zoomMode() -> QPrintPreviewWidget.ZoomMode """
pass
def zoomOut(self, float_factor=1.1): # real signature unknown; restored from __doc__
""" QPrintPreviewWidget.zoomOut(float factor=1.1) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
AllPagesView = 2
CustomZoom = 0
FacingPagesView = 1
FitInView = 2
FitToWidth = 1
SinglePageView = 0
| [
"[email protected]"
] | |
29a513a97699bac4ada9d6548a4147dce1385c6c | 921bb79da08d149569668e7334124992eff1aadd | /python/migrationDataReportHelper.py | ad24e7f2193b5ccc1fb4407d59462c6e6af1a993 | [] | no_license | apaladino/sideprojects | 56b839bf12fd499e4430bd63338d552063a0eb55 | b15d7d69bd4acf8c38815875d3dba35b5452e881 | refs/heads/master | 2023-08-31T15:03:51.302741 | 2020-12-06T16:48:32 | 2020-12-06T16:48:32 | 12,347,334 | 0 | 0 | null | 2022-12-16T01:34:15 | 2013-08-24T17:52:33 | PHP | UTF-8 | Python | false | false | 2,784 | py | #! /usr/bin/env python
import sys
import urllib2
import json
##### MigrationHelper #####
##### Pulls migration related data from the migration service via rest api and #####
##### converts to a CSV file. #####
apiPath="/Service/migration/migrationStatus?migrationStatus={0}&migrationStartTime={1}&limit={2}&offset={3}"
env="live"
migrationStatus="COMPLETED"
startTime="2013-01-01-01:00:00"
limit=10000000
offset=0
partition=4
envMap = {'dev': "https://<dev-env>", 'qa': "https://<qa-env>",
'stage': "https://<stage-env>", 'live': "https://<live-env>"}
csvFileName='dataresults.csv'
printToCSVFile = True
# usage migrationHelper dev 4 COMPLETED 2013-02-08-01:00:00
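# Example invocations (hypothetical; the real endpoints in envMap above are redacted):
#   python migrationDataReportHelper.py dev 4 COMPLETED 2013-02-08-01:00:00
#   python migrationDataReportHelper.py live 2 ROLLED_BACK 2013-01-01-01:00:00 5000 false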
# NOTE: hard-coded override left over from local testing; it would silently
# discard the real command-line arguments parsed below, so it is disabled here.
# sys.argv = ["migrationHelper", "live", "4", "ROLLED_BACK", "2013-01-01-01:00:00", "5000", "true"]
if len(sys.argv) >= 2 and sys.argv[1] != "":
env = str(sys.argv[1])
if len(sys.argv) >= 3 and sys.argv[2] != "":
partition = str(sys.argv[2])
if len(sys.argv) >= 4 and sys.argv[3] != "":
migrationStatus = str(sys.argv[3])
if len(sys.argv) >= 5 and sys.argv[4] != "":
startTime=str(sys.argv[4])
if len(sys.argv) >= 6 and sys.argv[5] != "":
limit = int(str(sys.argv[5]))
if len(sys.argv) >= 7 and sys.argv[6] != "" and (sys.argv[6] == "false" or sys.argv[6] == "no" ):
printToCSVFile=False
# build the http get request url
requestUrl = envMap[env].format(partition)
requestPath=apiPath.format(migrationStatus, startTime, limit, offset)
print "migrationHelper request URL: "+requestUrl + requestPath
response = urllib2.urlopen(requestUrl + requestPath)
if response.code != 200:
print "Invalid response code returned: " + response.code
print response.read()
sys.exit(0)
content = response.read()
if not printToCSVFile:
print content
sys.exit(0)
print "Writing response to CSV file: " + csvFileName
jsonObj = json.loads(content)
count = len(jsonObj)
if count > 0:
    firstObj = jsonObj[0]
keys = firstObj.keys()
f = open(csvFileName, 'wb')
for key in keys:
f.write(str(key).replace(",", "") + ",")
f.write("\n")
for r in jsonObj:
for value in r.values():
val = str(value).replace(",", "")
val = val.replace("\r", "")
replaceToken = """
Migration:"""
val = val.replace(replaceToken, " Migration:")
replaceToken = """
Unable"""
val = val.replace(replaceToken, " Unable")
replaceToken = """
"""
val = val.replace(replaceToken," ")
if str(val).startswith('type=org.springframework.web.client.ResourceAccessException message'):
continue
f.write(val + "," )
f.write("\n")
print "Finished."
| [
"[email protected]"
] | |
85f0210572674ceb5890fddb8804e79065b97528 | 096731a4209054fb5aa5be7568b22d5b8360cce4 | /mercury_ml/h2o/containers.py | f475499b15f1d619d48dfd3ae20e644459845473 | [
"MIT"
] | permissive | gabrieloexle/mercury-ml | 7787ef571b7e7272a53b07a4cc44b84503fe050f | cc663f84a26ee66ae105bbfc0cd1cbd5629031cd | refs/heads/master | 2020-04-29T03:39:56.491866 | 2019-03-25T07:14:04 | 2019-03-25T07:14:04 | 175,818,972 | 0 | 0 | NOASSERTION | 2019-03-15T12:47:20 | 2019-03-15T12:47:19 | null | UTF-8 | Python | false | false | 2,442 | py | """
Simple IoC containers that provide direct access to various H2O providers
"""
class ModelDefinitions:
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.deepwater import H2ODeepWaterEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.svd import H2OSingularValueDecompositionEstimator
rf = H2ORandomForestEstimator
mlp = H2ODeepLearningEstimator
deep_water = H2ODeepWaterEstimator
auto_encoder = H2OAutoEncoderEstimator
gbm = H2OGradientBoostingEstimator
glm = H2OGeneralizedLinearEstimator
low_rank = H2OGeneralizedLowRankEstimator
k_means = H2OKMeansEstimator
naive_bayes = H2ONaiveBayesEstimator
pca = H2OPrincipalComponentAnalysisEstimator
svd = H2OSingularValueDecompositionEstimator
class ModelFitters:
from mercury_ml.h2o.providers import model_fitting
fit = model_fitting.fit
class ModelSavers:
from mercury_ml.h2o.providers import model_saving
save_h2o_model = model_saving.save_h2o_model
save_json_details = model_saving.save_model_details
save_pojo = model_saving.save_pojo
save_mojo = model_saving.save_mojo
save_pojo_jar = model_saving.save_pojo_jar
save_mojo_jar = model_saving.save_mojo_jar
class ModelLoaders:
from mercury_ml.h2o.providers import model_loading
load_h2o_model = model_loading.load_h2o_model
load_mojo = model_loading.load_mojo_model
class ModelEvaluators:
from mercury_ml.h2o.providers import model_evaluation
evaluate = model_evaluation.evaluate
evaluate_threshold_metrics = model_evaluation.evaluate_threshold_metrics
class PredictionFunctions:
from mercury_ml.h2o.providers import prediction
predict = prediction.predict
class SessionInitiators:
from mercury_ml.h2o.providers import session
get_or_create_h2o = session.get_or_create_h2o
get_or_create_h2o_sparkling = session.get_or_create_h2o_sparkling | [
"[email protected]"
] | |
c49cf311e1d75d682fef09ca91874244e34aeb60 | b3db0c8aac2eb3cb098c9b4e811747684617534e | /flask_app/main.py | ddec340dd46a0568092f4a572dec7830b2291ecf | [] | no_license | solashirai/FoodRec | 814c58431c134a1e561414dbfe2c64f842cd53fd | a14fcf2cafaf26c5bb6396485a16052c6925aa6f | refs/heads/master | 2023-04-07T04:40:13.249868 | 2021-04-19T02:29:36 | 2021-04-19T02:29:36 | 359,238,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,133 | py | from flask import Flask, request, abort
import rdflib
from food_rec.services.food import (
RemoteGraphFoodKgQueryService,
LocalGraphFoodKgQueryService,
RecipeEmbeddingService,
)
from food_rec.services.user import (
RemoteGraphUserKgQueryService,
LocalGraphUserKgQueryService,
)
from food_rec.services.guideline import LocalGraphGuidelineKgQueryService
from food_rec.services import (
GraphIngredientSubstitutionService,
GraphExplainableFoodRecommenderService,
)
from food_rec.services.exceptions import NotFoundException, MalformedContentException
from food_rec.utils.path import DATA_DIR
recipe_ns = rdflib.Namespace("http://idea.rpi.edu/heals/kb/recipe/")
def create_app(*, TESTING=False):
app = Flask(__name__)
user_files = tuple(
(DATA_DIR / file).resolve()
for file in ["user_example.trig", "diet_restrictions.trig"]
)
USERKG_SHARED = LocalGraphUserKgQueryService(file_paths=user_files)
if TESTING:
# for testing locally
food_kg_files = tuple(
(DATA_DIR / file).resolve()
for file in [
"food_kg_test_dataset.trig",
"simplified_test_usda.ttl",
"foodon_total.trig",
"food_kg_test_precomputed_nutrition.ttl",
"manual_ingredient_substitution_sets.trig",
]
)
FOODKG_SHARED = LocalGraphFoodKgQueryService(file_paths=food_kg_files)
RECIPE_EMBEDDINGS = RecipeEmbeddingService(
id_file=(DATA_DIR / "food_kg_embeddings" / "testfoodkg_id.pkl").resolve(),
embedding_file=(
DATA_DIR / "food_kg_embeddings" / "testfoodkg_emb.pkl"
).resolve(),
)
else:
FOODKG_SHARED = RemoteGraphFoodKgQueryService(
sparql_endpoint="http://localhost:9999/blazegraph/sparql" # "http://twks-server:8080/sparql/assertions"
)
# USERKG_SHARED = RemoteGraphUserKgQueryService(
# sparql_endpoint="http://localhost:9999/blazegraph/sparql"#"http://twks-server:8080/sparql/assertions"
# )
RECIPE_EMBEDDINGS = RecipeEmbeddingService(
id_file=(DATA_DIR / "food_kg_embeddings" / "5k_foodkg_id.pkl").resolve(),
embedding_file=(
DATA_DIR / "food_kg_embeddings" / "5k_foodkg_emb.pkl"
).resolve(),
)
FOOD_SUBS = GraphIngredientSubstitutionService(
food_kg=FOODKG_SHARED, user_kg=USERKG_SHARED
)
# TODO: cleanup of how guidelineKG and recipe embedding service are set up
guideline_files = tuple(
(DATA_DIR / file).resolve() for file in ["heals-guidelines.owl"]
)
GUIDELINEKG_SHARED = LocalGraphGuidelineKgQueryService(file_paths=guideline_files)
FOOD_REC = GraphExplainableFoodRecommenderService(
food_kg=FOODKG_SHARED,
user_kg=USERKG_SHARED,
guideline_kg=GUIDELINEKG_SHARED,
recipe_embedding_service=RECIPE_EMBEDDINGS,
)
@app.route("/")
def hello_world():
return "Hello, World!"
@app.route("/recipe_subs", methods=["GET"])
def get_subs_for_recipe():
args = request.args
recipe_uri_part = args["recipe_uri"]
recipe_uri = rdflib.URIRef(recipe_uri_part)
try:
recipe = FOODKG_SHARED.get_recipe_by_uri(recipe_uri=recipe_uri)
allsubs = FOOD_SUBS.get_substitutions_for_recipe(recipe=recipe, user=None)
app.logger.info(f"retrieved substitutes for recipe {recipe.name}")
allsubs = [
{
"fromIngredient": sub.from_ing.uri,
"toIngredient": sub.to_ing.uri,
"explanation": sub.explanation,
}
for sub in allsubs
]
return {"substitution_options": allsubs}
except NotFoundException as e:
abort(404, description=e)
except MalformedContentException as e:
abort(500, description=e)
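    # Example request (assumed local run on port 80; the recipe URI below is
    # hypothetical and must be URL-encoded):
    #   curl "http://localhost/recipe_subs?recipe_uri=http%3A%2F%2Fidea.rpi.edu%2Fheals%2Fkb%2Frecipe%2FSOME_RECIPE"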
@app.route("/recipe_user_subs", methods=["GET"])
def get_subs_for_recipe_and_user():
args = request.args
user_uri_part = args["user_uri"]
user_uri = rdflib.URIRef(user_uri_part)
recipe_uri_part = args["recipe_uri"]
recipe_uri = rdflib.URIRef(recipe_uri_part)
try:
recipe = FOODKG_SHARED.get_recipe_by_uri(recipe_uri=recipe_uri)
user = USERKG_SHARED.get_user_by_uri(user_uri=user_uri)
allsubs = FOOD_SUBS.get_substitutions_for_recipe(recipe=recipe, user=user)
app.logger.info(
f"retrieved substitutes for recipe {recipe.name} for user {user.name}"
)
allsubs = [
{
"fromIngredient": sub.from_ing.uri,
"toIngredient": sub.to_ing.uri,
"explanation": sub.explanation,
}
for sub in allsubs
]
return {"substitution_options": allsubs}
except NotFoundException as e:
abort(404, description=e)
except MalformedContentException as e:
abort(500, description=e)
@app.route("/user_info", methods=["GET"])
def get_user_info():
args = request.args
user_uri_part = args["user_uri"]
# /user_info?user_uri=http%3A%2F%2Fidea.rpi.edu%2Fheals%2Fkb%2Fuser%2Fuser_id%2FUSER_001
user_uri = rdflib.URIRef(user_uri_part)
try:
user = USERKG_SHARED.get_user_by_uri(user_uri=user_uri)
user_info = user.to_dict()
return user_info
except NotFoundException as e:
abort(404, description=e)
except MalformedContentException as e:
abort(500, description=e)
@app.route("/user_mealplan_rec", methods=["GET"])
def get_mealplan_for_user():
args = request.args
user_uri_part = args["user_uri"]
# /user_mealplan_rec?user_uri=http%3A%2F%2Fidea.rpi.edu%2Fheals%2Fkb%2Fuser%2Fuser_id%2FUSER_001
user_uri = rdflib.URIRef(user_uri_part)
days = int(args.get("days", 3))
meal_per_day = int(args.get("meals_per_day", 2))
try:
user = USERKG_SHARED.get_user_by_uri(user_uri=user_uri)
mealplan_cand = FOOD_REC.get_meal_plan_for_user(
user=user, number_of_days=days, meals_per_day=meal_per_day
)
mealplan = mealplan_cand.domain_object
mp_days = [
{
"meals": [
{
"recipe_name": recipe_rec.recipe.name,
"explanation": recipe_rec.explanation,
"ingredients": [
ing.label for ing in recipe_rec.recipe.ingredient_set
],
"calories(kcal)": recipe_rec.recipe.total_nutritional_info.energ__kcal,
"sodium(mg)": recipe_rec.recipe.total_nutritional_info.sodium_mg,
"carbohydrates(g)": recipe_rec.recipe.total_nutritional_info.carbohydrt_g,
}
for recipe_rec in day.recipe_recommendations
],
"day_explanation": day.explanation,
}
for day in mealplan.meal_plan_days
]
return {
"mealplan_days": mp_days,
"overall_explanation": mealplan.explanation,
"context_username": mealplan_cand.context.target_user.name,
"context_user_favorite_recipes": [
FOODKG_SHARED.get_recipe_by_uri(recipe_uri=ru).name
for ru in mealplan_cand.context.target_user.favorite_recipe_uri_set
],
}
except NotFoundException as e:
abort(404, description=e)
except MalformedContentException as e:
abort(500, description=e)
return app
app = create_app(TESTING=False)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80)
| [
"[email protected]"
] | |
5efb8685aa7ad459083c04a11139c79ec492517f | bc4b756a5aa0c3966513fc1067228adc08790907 | /tp03_ex03.py | b8cd7ebc161b28deb4bd467e6b103710ecd6b985 | [] | no_license | rafatahmedrs/rafatahmed | b3cffd35dc30e04c7897b79359e6e43f7ce44433 | 92e3ece8f4269ba02c8f0bfc83f7ed930273e7d4 | refs/heads/master | 2020-07-28T06:57:14.390124 | 2019-11-24T22:33:07 | 2019-11-24T22:33:07 | 209,344,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | import math
### Copy your code from the previous exercise
# Functions
def initialisation_lapins() -> int:
    """
    :return: an int (the function takes no parameters) representing the
        number of rabbits entered by the user
    """
    lapins_dep: int = 0
    while lapins_dep < CONDITION_LAPIN:
        lapins_dep = (int(input("How many rabbits at the start (>=5)? ")))
    return lapins_dep
def initialisation_renards() -> int:
    """
    :return: an int (the function takes no parameters) representing the
        number of foxes entered by the user
    """
    renard_dep: int = 0
    while renard_dep < CONDITION_RENARDS:
        renard_dep = (int(input("How many foxes at the start (>=2)? ")))
    return renard_dep
### Variable declaration and initialization
CONDITION_LAPIN: int = 5
CONDITION_RENARDS: int = 2
EVALUATION: int = 50
taux_croissance_lapins: float = 0.4
taux_attaque: float = 0.01
taux_croissance_renards: float = 0.008
taux_mortalite: float = 0.1
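# These parameters drive a discrete Lotka-Volterra style predator-prey update,
# the recurrence used in simulation() below:
#   rabbits_new = rabbits * (1 + growth_rate - attack_rate * foxes)
#   foxes_new   = foxes * (1 + attack_rate * old_rabbits * fox_growth - mortality)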
inferieur_zero = 0.0
nb_renards: float = initialisation_renards()
nb_lapins: float = initialisation_lapins()
en_dessous_cinq_lapin: bool = False
en_dessous_cinq_renard: bool = False
remonte_cinq_lapin: bool = False
remonte_cinq_renard: bool = False
en_dessous_deux_lapin: bool = False
en_dessous_deux_renard: bool = False
# Main program
def simulation(EVALUATION, nb_lapins, nb_renards):
    # Initialize the threshold flags locally: without this, reading them after
    # the loop before any assignment would raise UnboundLocalError.
    en_dessous_cinq_lapin = False
    en_dessous_cinq_renard = False
    en_dessous_deux_lapin = False
    en_dessous_deux_renard = False
    for mois in range(EVALUATION):
        mois += 1
        ancien_lapins = nb_lapins
        nb_lapins = nb_lapins * (1.0 + taux_croissance_lapins - taux_attaque * nb_renards)
        nb_renards = nb_renards * (1.0 + taux_attaque * ancien_lapins * taux_croissance_renards - taux_mortalite)
        if mois == EVALUATION:  # round down with math.floor
            print("After", mois, "months,", "there are", abs(math.floor(nb_lapins)), "rabbits", "and",
                  abs(math.floor(nb_renards)), "foxes")
        if nb_lapins < 5:
            en_dessous_cinq_lapin = True  # the rabbit population fell below 5
        if nb_renards < 5:
            en_dessous_cinq_renard = True  # the fox population fell below 5
        if nb_lapins < 2:
            en_dessous_deux_lapin = True
            nb_lapins = 0  # below 2 rabbits the population is set to 0
        if nb_renards < 2:
            en_dessous_deux_renard = True
            nb_renards = 0  # below 2 foxes the population is set to 0
    if en_dessous_cinq_lapin:
        print("The rabbits were endangered")
    if en_dessous_deux_lapin:
        print("and the rabbits died out :-(")
    if en_dessous_cinq_renard:
        print("The foxes were endangered")
    if en_dessous_deux_renard:
        print("and the foxes died out :-(")
    elif not en_dessous_cinq_lapin and not en_dessous_cinq_renard:  # neither was ever endangered
        print("The rabbits and foxes have stable populations.")
### Sequence of operations
simulation(EVALUATION, nb_lapins, nb_renards)
| [
"[email protected]"
] | |
9c3344e9685c08ce36ce59615225678df3a850bd | 193bf5d7c825e5958f0cccf83a8c353cbaa5faba | /mlchain/cli/prepare.py | d0c4e0a577f18606fd18aa4b3e5e6aeec4e75e4e | [
"MIT"
] | permissive | phamngoclinh96/mlchain-public-dev | 057c4d2a86cfdd42e5b33bf020308a0b6e2cd144 | 56f7e4ebb54821a980f044390fc20cb5f93693f7 | refs/heads/master | 2022-11-29T13:48:16.354938 | 2020-08-12T07:08:54 | 2020-08-12T07:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | import click
import os
import sys
from mlchain import logger
op_config = click.option("--config", "-c", default=None, help="file json or yaml")
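# Example CLI usage (hypothetical invocation and paths):
#   mlchain prepare -c mlconfig.yaml --force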
@click.command("prepare", short_help="Download files from object storage to local.")
@op_config
@click.option('--force/--no-force', default=False)
def prepare_command(config,force):
from mlchain import config as mlconfig
default_config = False
if config is None:
default_config = True
config = 'mlconfig.yaml'
if os.path.isfile(config):
if config.endswith('.json'):
config = mlconfig.load_json(config)
elif config.endswith('.yaml') or config.endswith('.yml'):
config = mlconfig.load_yaml(config)
else:
raise AssertionError("Not support file config {0}".format(config))
else:
if not default_config:
raise FileNotFoundError("Not found file {0}".format(config))
config = {}
mlconfig.prepare(config,force) | [
"[email protected]"
] | |
bdd07269231526dfe07be10f50f71430bc1eeeab | f36e69f4385185150558843ee26379cd88723e24 | /passing_the_parcel/serverclient.py | b12a7fa0a9ec1919ca7aa604b8ede5dec4a45268 | [] | no_license | ayush268/ACA_Project | 896ad6ef3b478641ad1b9b793f56fe86d87e13a1 | c0c3fc85afd15ec900544e42748b123d61f52ca0 | refs/heads/master | 2021-01-19T12:32:58.781439 | 2017-11-10T04:33:10 | 2017-11-10T04:33:10 | 82,320,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | #!/usr/bin/env python2
import zmq, sys, random  # ZeroMQ for messaging, sys for CLI args, random for the seed number
Max = 10**7 # Setting max number (for random generation)
def server():
# ZeroMQ Context
context = zmq.Context()
# Define the socket using the "Context"
sock = context.socket(zmq.REP)
sock.bind("tcp://127.0.0.1:9002")
    # Receive the number passed from the neighbouring node
    num = int(sock.recv())  # blocks until a number arrives
    print ("Received : " + str(num))
    return num
def client(num,flag2):
# ZeroMQ Context
context = zmq.Context()
# Define the socket using the "Context"
sock = context.socket(zmq.REQ)
sock.connect("tcp://127.0.0.1:9003")
    # Send the processed number on to the neighbouring node
if flag2: # Keeps check on not sending to a closed server
sock.send(str(num))
print ("Sent : " + str(num)) # Printing the sent numbers
if num==1:
return 0 # Keeping check on terminating on num = 1
else:
return 1
def main():
if (sys.argv[len(sys.argv)-1] == 'start'):
num = random.choice(range(1001,Max+1)) # Generating a random number
if (num % 2 == 0):
num = num//2
else:
num = (num*3)+1
flag = 1
else:
num = server()
flag = 1
flag2 = 1
while flag:
if (num % 2 == 0):
num = num//2 # Running operations on num
elif (num != 1):
num = (num*3)+1
flag = client(num,flag2) # Calls function for sending to client
if(num==2):
flag2 = 0 # Setting flag2 to keep check on last sent request
if flag:
num = server() # Asking for number from previous point
if __name__ == '__main__':
main()
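# Example run (assumed two-node setup; every instance binds port 9002 and
# connects to 9003, so a real ring needs per-node port configuration):
#   terminal 1: python serverclient.py          # waits on tcp://127.0.0.1:9002
#   terminal 2: python serverclient.py start    # seeds a random number and passes it on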
#END_OF_SOURCE_CODE
| [
"[email protected]"
] | |
ae758187bb319dd9076db7cc30189ce91b587bba | 08aa99127532716e3439be3c7aaea14ba4ebbe38 | /clinic/PreModel_Encoder_CRF/dataloader_utils.py | dd3a1bc5474aac03044c7d0f957f1150d6892288 | [] | no_license | DaserIF/ccks_ner | 2a8ca4469a5a3dcfefd15a3daf9a938bf3a59500 | 2c72021b371c80e95b62c1a42e7e5dc557296332 | refs/heads/master | 2022-12-28T10:59:13.535283 | 2020-10-15T12:07:12 | 2020-10-15T12:07:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,025 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""dataloader.py utils"""
import re
def split_text(text, max_len, split_pat=r'([,。]”?)', greedy=False):
"""文本分片
将超过长度的文本分片成多段满足最大长度要求的最长连续子文本
约束条件:1)每个子文本最大长度不超过max_len;
2)所有的子文本的合集要能覆盖原始文本。
Arguments:
text {str} -- 原始文本
max_len {int} -- 最大长度
Keyword Arguments:
split_pat {str or re pattern} -- 分割符模式 (default: {SPLIT_PAT})
greedy {bool} -- 是否选择贪婪模式 (default: {False})
贪婪模式:在满足约束条件下,选择子文本最多的分割方式
非贪婪模式:在满足约束条件下,选择冗余度最小且交叉最为均匀的分割方式
Returns:
tuple -- 返回子文本列表以及每个子文本在原始文本中对应的起始位置列表
Examples:
text = '今夕何夕兮,搴舟中流。今日何日兮,得与王子同舟。蒙羞被好兮,不訾诟耻。心几烦而不绝兮,得知王子。山有木兮木有枝,心悦君兮君不知。'
sub_texts, starts = split_text(text, maxlen=30, greedy=False)
for sub_text in sub_texts:
print(sub_text)
print(starts)
for start, sub_text in zip(starts, sub_texts):
if text[start: start + len(sub_text)] != sub_text:
print('Start indice is wrong!')
break
"""
    # Texts no longer than max_len are returned unsplit
if len(text) <= max_len:
return [text], [0]
    # Split the string on the delimiter pattern
segs = re.split(split_pat, text)
# init
sentences = []
    # Re-attach each delimiter to the segment it terminates
for i in range(0, len(segs) - 1, 2):
sentences.append(segs[i] + segs[i + 1])
if segs[-1]:
sentences.append(segs[-1])
n_sentences = len(sentences)
sent_lens = [len(s) for s in sentences]
    # All maximal runs of consecutive sentences that satisfy the length constraint
alls = []
for i in range(n_sentences):
length = 0
sub = []
for j in range(i, n_sentences):
if length + sent_lens[j] <= max_len or not sub:
sub.append(j)
length += sent_lens[j]
else:
break
alls.append(sub)
        # Make sure the last sentence is covered
if j == n_sentences - 1:
if sub[-1] != j:
alls.append(sub[1:] + [j])
break
if len(alls) == 1:
return [text], [0]
if greedy:
        # Greedy mode returns every candidate sub-text
sub_texts = [''.join([sentences[i] for i in sub]) for sub in alls]
starts = [0] + [sum(sent_lens[:i]) for i in range(1, len(alls))]
return sub_texts, starts
else:
        # Use dynamic programming to pick the optimal set of sub-spans
        DG = {}  # directed graph
N = len(alls)
for k in range(N):
tmplist = list(range(k + 1, min(alls[k][-1] + 1, N)))
if not tmplist:
tmplist.append(k + 1)
DG[k] = tmplist
routes = {}
routes[N] = (0, -1)
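        # Recurrence: routes[i] = min over j in DG[i] of (overlap(i, j)**2 + routes[j][0]),
        # i.e. choose the successor span j that minimizes the total squared overlap.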
for i in range(N - 1, -1, -1):
templist = []
for j in DG[i]:
cross = set(alls[i]) & (set(alls[j]) if j < len(alls) else set())
                w_ij = sum([sent_lens[k] for k in cross]) ** 2  # squared overlap between span i and span j
                w_j = routes[j][0]  # value of subproblem j
w_i_ = w_ij + w_j
templist.append((w_i_, j))
routes[i] = min(templist)
sub_texts, starts = [''.join([sentences[i] for i in alls[0]])], [0]
k = 0
while True:
k = routes[k][1]
sub_texts.append(''.join([sentences[i] for i in alls[k]]))
starts.append(sum(sent_lens[: alls[k][0]]))
if k == N - 1:
break
return sub_texts, starts
class InputExample(object):
"""a single set of samples of data_src
"""
def __init__(self, sentence, tag):
self.sentence = sentence
self.tag = tag
class InputFeatures(object):
"""
Desc:
a single set of features of data_src
"""
def __init__(self,
input_ids,
input_mask,
tag,
split_to_original_id,
example_id
):
self.input_mask = input_mask
self.input_ids = input_ids
self.tag = tag
# use to split
self.split_to_original_id = split_to_original_id
self.example_id = example_id
def read_examples(data_dir, data_sign):
"""read data_src to InputExamples
:return examples (List[InputExample])
"""
examples = []
# read src data
with open(data_dir / f'{data_sign}/sentences.txt', "r", encoding='utf-8') as f_sen, \
open(data_dir / f'{data_sign}/tags.txt', 'r', encoding='utf-8') as f_tag:
for sen, tag in zip(f_sen, f_tag):
example = InputExample(sentence=sen.strip().split(' '), tag=tag.strip().split(' '))
examples.append(example)
print("InputExamples:", len(examples))
return examples
def convert_examples_to_features(params, examples, tokenizer, pad_sign=True, greed_split=True):
"""convert examples to features.
:param examples (List[InputExamples])
    :param pad_sign: whether to zero-pad
"""
# tag to id
tag2idx = {tag: idx for idx, tag in enumerate(params.tags)}
features = []
# context max len
max_len = params.max_seq_length
split_pad = r'([,.!?,。!?]”?)'
for (example_idx, example) in enumerate(examples):
# split long text
sub_texts, starts = split_text(text=''.join(example.sentence), max_len=max_len,
greedy=greed_split, split_pat=split_pad)
original_id = list(range(len(example.sentence)))
# get split features
for text, start in zip(sub_texts, starts):
            # if tokenize returns nothing, fall back to [UNK]
text_tokens = [tokenizer.tokenize(token)[0] if len(tokenizer.tokenize(token)) == 1 else '[UNK]'
for token in text]
# label id
tag_ids = [tag2idx[tag] for tag in example.tag[start:start + len(text)]]
            # record each sub-text's position in the original text
split_to_original_id = original_id[start:start + len(text)]
assert len(tag_ids) == len(split_to_original_id), 'check the length of tag_ids and split_to_original_id!'
# cut off
if len(text_tokens) > max_len:
text_tokens = text_tokens[:max_len]
tag_ids = tag_ids[:max_len]
split_to_original_id = split_to_original_id[:max_len]
# token to id
text_ids = tokenizer.convert_tokens_to_ids(text_tokens)
# sanity check
assert len(text_ids) == len(tag_ids), f'check the length of text_ids and tag_ids!'
assert len(text_ids) == len(split_to_original_id), f'check the length of text_ids and split_to_original_id!'
# zero-padding up to the sequence length
if len(text_ids) < max_len and pad_sign:
                # pad with zeros
pad_len = max_len - len(text_ids)
# token_pad_id=0
text_ids += [0] * pad_len
tag_ids += [tag2idx['O']] * pad_len
split_to_original_id += [-1] * pad_len
# mask
input_mask = [1 if idx > 0 else 0 for idx in text_ids]
# get features
features.append(
InputFeatures(
input_ids=text_ids,
tag=tag_ids,
input_mask=input_mask,
split_to_original_id=split_to_original_id,
example_id=example_idx
))
return features
| [
"[email protected]"
] | |
6cdc4a60bec700acdb6e1a866de2e1df63ae5908 | dfaa71f8064d3d0773941cf14ab86ff57ff67284 | /part31_part32/contact/forms.py | 2f16b203257459d2405c31b667ec9a24549a61bd | [
"Apache-2.0"
] | permissive | yllew36/WellyGI | e94c5000ff3a7f2fd7316d22ad166fbf7916ea23 | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | refs/heads/master | 2020-09-05T15:49:37.386078 | 2019-11-15T08:16:59 | 2019-11-15T08:16:59 | 220,148,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from django import forms
class ContactForm(forms.Form):
nama = forms.CharField(max_length=255, required=True)
    email = forms.EmailField(label='Email Address', initial='[email protected]')
Pilihan = (
        ('P', 'Male'),
        ('W', 'Female'),
)
jenis_kelamin = forms.ChoiceField(
widget=forms.RadioSelect,
choices=Pilihan)
Tahun = range(1945,2019,1)
tanggal_lahir = forms.DateField(
        widget=forms.SelectDateWidget(years=Tahun)
)
alamat = forms.CharField(widget=forms.Textarea)
# kode_pos = forms.CharField(max_length=5)
# kota = forms.CharField()
# provinsi = forms.CharField()
photo = forms.ImageField(required=False)
agree = forms.BooleanField() | [
"[email protected]"
] | |
c4867da0de6f4149234dd8abc1c301ca4fe9c440 | e51ef1f2505a4d97c7ec8209dd1f124b6fbf8826 | /enrichment_analysis/kegg_analysis/OriTerKeggScrape.py | 53dec3325ed825954049f8c0eafaddf545104af1 | [] | no_license | izaak-coleman/KoremRotation | ddee3d9184d10328c529008a7a85dbdde7c295e4 | 42784610107f69496fe71f135485d885ba1ec038 | refs/heads/master | 2020-05-04T17:42:13.773883 | 2019-06-16T05:34:12 | 2019-06-16T05:34:12 | 179,322,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,044 | py | import sys
import os
import urllib.request
import collections
import re
from subprocess import Popen, PIPE
import gzip
import KeggNames
kn = KeggNames.keggnames
# Set my NCBI API key so NCBI E-utilities allow up to 10 requests/second
os.environ["NCBI_API_KEY"] = "f4142f101db95406745385d940b13c37ab07"
# Make data struct to store ori info
Ori = collections.namedtuple('Ori', ['seq','len','start','end'])
def get_kegg_organism_list():
url = 'http://rest.kegg.jp/list/organism'
data = urllib.request.urlopen(url).read().decode('utf-8').split('\n')
data.pop()
return data
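# Each returned entry is a tab-separated KEGG organism record, illustratively:
#   "T01001\thsa\tHomo sapiens (human)\tEukaryotes;Animals;Vertebrates;Mammals"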
class Organism:
"""Container class storing data scraped from KEGG, NCBI and TUBIC relating
to a single organism present in KEGG organisms list."""
def __init__(self, kegg_genome_id, kegg_organism_id = None, strain_name = None, taxa_string = None):
self.kegg_genome_id = kegg_genome_id # e.g T01001
self.kegg_organism_id = kegg_organism_id # e.g hsa
self.strain_name = strain_name # e.g Homo sapiens (human)
self.taxa_string = taxa_string # e.g Eukaryotes;Animals;Vertebrates;Mammals
self.annotated_gff = []
self.refseq = ''
self.genbank = ''
self.get_metadata()
self.doric = ''
def in_taxonomic_group(self, group):
"""Returns True of group in self.taxa_string, False otherwise"""
return group in self.taxa_string
def get_genome_entry_from_kegg(self):
url = f'http://rest.kegg.jp/get/gn:{self.kegg_genome_id}'
# Download webpage from kegg, read, and convert to regular str type
return urllib.request.urlopen(url).read().decode('utf-8')
def search_ncbi_for_refseq_id(self, gb_id):
cmd = f"esearch -db nuccore -query {gb_id} | elink -target nuccore -name nuccore_nuccore_gbrs | efetch -format docsum | xtract -pattern DocumentSummary -element AccessionVersion Slen Title | sed 's/,.*//' | sort -t $'\t' -k 2,2nr"
result,err = Popen(cmd, stdout=PIPE,stderr=PIPE,shell=True).communicate()
result = result.decode('utf-8').strip().split('\n')
if len(result) != 1:
log = open(self.kegg_organism_id + '_multiple_refseq_ids.log','w')
log.write('\n'.join(result))
log.close()
return result[0].split('\t')[0]
return result[0].split('\t')[0]
def get_metadata(self):
"""Makes request to rest.kegg.jp/get/kegg_genome_id to get various metadata:
genome length; the sequence id of the genome used for kegg's annotations
(either refseq or genbank, if genbank, ncbi is searched to get refseq_id
if available); and ncbi taxonomy"""
entry = self.get_genome_entry_from_kegg()
# Make regex engine
match = lambda a, b : re.findall(rf'{a}\s+(\S.*)\n',b)
# Resolve basic metadata
self.genome_length = match(kn.LENGTH, entry)
if len(self.genome_length) > 0:
self.genome_length = int(self.genome_length[0])
        if self.kegg_organism_id is None:
            # match() returns a list; take the first hit and drop trailing aliases
            names = match(kn.NAME, entry)
            if len(names) > 0:
                self.kegg_organism_id = names[0].split(',')[0]
        if self.strain_name is None:
            self.strain_name = match(kn.DEFINITION, entry)
            if len(self.strain_name) > 0:
                self.strain_name = self.strain_name[0]
        if self.taxa_string is None:
            self.taxa_string = match(kn.LINEAGE, entry)
            if len(self.taxa_string) > 0:
                self.taxa_string = self.taxa_string[0]
self.assembly = match(kn.DATA_SOURCE, entry)
if len(self.assembly) > 0:
self.assembly = self.assembly[0]
self.assembly = re.findall('Assembly:([\.\w]+)', self.assembly)[0]
# Attempt to determine the Refseq sequence id used for this kegg organism
seq = match(kn.SEQUENCE, entry)
if len(seq) > 0:
seq = seq[0]
if seq[0:2] == 'RS':
self.refseq, self.genbank = re.findall(r'RS:(\S+)\s+\(GB:(\S+)\)',seq)[0]
if seq[0:2] == 'GB':
self.genbank = re.findall(r'GB:(\S+)',seq)[0]
self.refseq = self.search_ncbi_for_refseq_id(self.genbank)
def get_kegg_gene_annotations(self, annotation):
url = f'http://rest.kegg.jp/link/{annotation}/{self.kegg_genome_id}'
data = urllib.request.urlopen(url).read().decode('utf-8').split('\n')
data = [tuple(e.split('\t')) for e in data]
data.pop()
return data
def main():
pass
if __name__ == '__main__':
main()
# to do
# def generate_index(self, gff, field):
# match = lambda a,b : re.findall(rf'[^\w]{a}=([\w]+)',b)
# d = dict()
# for index, line in enumerate(gff):
# tag = match(field, line)
# if len(tag) == 1:
# d[tag[0]] = index
# return d
#
# def set_gff_filename(self, file_name):
# self.gff_filename = file_name
#
# def kegg_annotate_gff(self):
# """Adds KEGG pathway, Ontology and other annotations to gff"""
# if len(self.annotated_gff) == 0:
# with gzip.open(self.gff_filename, 'rb') as f:
# gff = f.read().decode('utf-8').split('\n')
# gff.pop()
# gff = [g for g in gff if g[0] != '#']
# gff = [g for g in gff if (g.split('\t')[2] == 'gene' and g.split('\t')[0] == 'NC_002663.1')]
# self.annotated_gff = gff
#
# old_tag_idx = self.generate_index(gff, 'old_locus_tag')
# tag_idx = self.generate_index(gff, 'locus_tag')
# annotations = self.get_kegg_gene_annotations('pathway')
# remove_header = lambda x : re.findall(r'.*:(.*)',x)[0]
#
# log = open(self.kegg_genome_id + '_failed_kegg_annotation.log','w')
# for gene_id, kegg_anno in annotations:
# gene_id = remove_header(gene_id)
# idx = old_tag_idx.get(gene_id, -1)
# if idx == -1:
# idx = tag_idx.get(gene_id, -1)
# if idx == -1:
# log.write(gene_id + '\n')
# continue
# self.annotated_gff[idx] += '\t' + kegg_anno
# log.close()
#
# def write_annotated_gff(self):
# data = [(int(e.split('\t')[3]),e) for e in self.annotated_gff]
# data = sorted(data, key=lambda x: x[0])
# self.annotated_gff = [e for _,e in data]
# with gzip.open(self.gff_filename, 'wb') as f:
# f.write('\n'.join(self.annotated_gff))
| [
"[email protected]"
] | |
217874844c539b95323467ba4a4af14e88e57b2b | 32617b06437120912e7eb23fcf79b8fbae4f94e8 | /DP/3. Longest Substring Without Repeating Characters.py | f2efbffba394b6fdf425c8876344921c394a5d8f | [
"MIT"
] | permissive | xli1110/LC | e537c3bbc40d480dd9426ce515ad4a9464fcb7ce | 3c18b8809c5a21a62903060eef659654e0595036 | refs/heads/main | 2023-06-03T03:28:26.573215 | 2021-06-21T07:34:38 | 2021-06-21T07:34:38 | 351,021,807 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
loop:
            if char in dic:
# use the SMALLER to iterate
if i - dic[char] <= dp[i - 1]:
dp[i] = i - dic[char]
else:
dp[i] = dp[i - 1] + 1
else:
dp[i] = dp[i - 1] + 1
dic[char] = i
"""
if not s:
return 0
dic = {s[0]: 0} # dic[char] = index, where index denotes the char's last appearance position.
dp = [1] * len(s) # dp[i] stores the maximal sub-string length ending at s[j].
for i in range(1, len(s)):
char = s[i]
if char in dic:
if i - dic[char] <= dp[i - 1]:
# Note the LEQ here.
# BXXXAXXBA, dp[j - 1] = 7, j - index = 4, dp[j] = 4 (XXBA)
dp[i] = i - dic[char]
else:
# AXXXXXBXXXBA, dp[j - 1] = 4, j - index = 11, dp[j] = 5 (XXXBA)
dp[i] = dp[i - 1] + 1
else:
# XXXXXBA, dp[j - 1] = 6, dp[j] = 7 (XXXXXBA)
dp[i] = dp[i - 1] + 1
dic[char] = i # do not forget this
return max(dp)
| [
"[email protected]"
] | |
870e15f0faa34be390ad5258327d88781bb1831f | bf5935cecf1b65461b1de9a67a013f0b03c3d1d0 | /boutique/migrations/0041_remove_product_gender.py | c1da06f004ff15a49e902dda6e25be493916489f | [] | no_license | meriemay/Shop | 53287aab6382163e6069130c8e5304ed7ffd0e3b | f5d44193e030c4ef9b5cf27896754767beaee3ef | refs/heads/master | 2021-01-18T17:50:49.688779 | 2017-08-29T14:34:34 | 2017-08-29T14:34:34 | 100,497,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-21 20:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boutique', '0040_product_gender'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='gender',
),
]
| [
"[email protected]"
] | |
99a61140b450e7d909ba4008b6398761c693d5d1 | ce7eb9e245787df912c9991e8be1d22369e88dfa | /despace.py | 155a02b82bfbdc646c8a44b8003987868fc0b47d | [] | no_license | ForgottenUmbrella/misc-python | 80d0fc97c33a863fc7138fc2ee272fb305958ca9 | 99bc8784b97218759f86656ee49b5a4100431d39 | refs/heads/master | 2020-06-25T07:59:42.992701 | 2019-01-08T07:11:10 | 2019-01-08T07:11:10 | 96,966,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | #!/usr/bin/env python3
import sys
def remove_chars(text, chars):
"""Return `text` with all `chars` removed"""
new_text = "".join([char for char in text if char not in chars])
return new_text
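# e.g. remove_chars('a b c', ' ') -> 'abc'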
def main(text):
"""Print and return despaced `text`."""
despaced = remove_chars(text, ' ')
print(despaced)
return despaced
if __name__ == '__main__':
if sys.argv[1:]:
text = ' '.join(sys.argv[1:])
else:
text = input('Text to despace: ')
main(text)
| [
"[email protected]"
] | |
0a5faa27aaff6e0df9efc8434f5fd9fcd50c805c | 0221baccb984a9412c59ddf8faeb8882d7ff91ea | /predict_nuber_of_clusters.py | e0c29ecf5c218b97ec0a31e45f8c1b0523ce0b59 | [] | no_license | roysgitprojects/Unsupervised-Learning-Middle-Project | 095e511843e526ace8389dbde28231dad6440c5c | 45a3e10c7e1c7de788940b98578301365d965422 | refs/heads/main | 2023-02-14T20:21:07.497837 | 2021-01-07T09:21:15 | 2021-01-07T09:21:15 | 327,015,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import silhouette_score, silhouette_samples
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from yellowbrick.cluster import KElbowVisualizer
import clustering
def perform_elbow_method(points, method):
"""
Perform and visualize elbow method.
:param points: the data's points
:param method: clustering method - K means or Hierarchical
:return: None
"""
if method == 'K means':
model = KMeans()
elif method == 'Hierarchical':
model = AgglomerativeClustering()
else:
        raise Exception('The elbow method is implemented only for K means and Hierarchical')
visualizer = KElbowVisualizer(model, k=(1, 12))
# Fit the data to the visualizer
visualizer.fit(points)
visualizer.set_title("The Elbow Method")
visualizer.show()
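# Example (assumed 2-D feature matrix):
#   perform_elbow_method(np.random.rand(100, 2), 'K means')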
def perform_silhouette_method(points, method):
"""
Calculate and visualize silhouette scores
:param points: data's points
:param method: clustering method
:return: None
"""
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a figure
fig = plt.figure()
fig.set_size_inches(18, 7)
ax = fig.add_subplot(111)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax.set_ylim([0, len(points) + (n_clusters + 1) * 10])
# find the labels for the clustering method and number of clusters
cluster_labels = clustering.cluster(points, n_clusters, method)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(points, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(points, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax.set_title("The silhouette plot for the various clusters.")
ax.set_xlabel("The silhouette coefficient values")
ax.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax.axvline(x=silhouette_avg, color="red", linestyle="--")
ax.set_yticks([]) # Clear the yaxis labels / ticks
ax.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.suptitle(("The Silhouette score with %d clusters is %f" % (n_clusters, silhouette_avg)),
fontsize=14, fontweight='bold')
plt.show()
| [
"[email protected]"
] | |
62c0121b37951ff165e2f0c7a7fa47b6dc1ec980 | bc2742cac4347eb8652295a0d4aeb8633eea7c1b | /tests/layers/connections/test_connections_basic.py | 1c0996b19decffb44a73adbda978bd60ee85ad64 | [
"MIT"
] | permissive | temp3rr0r/neupy | 933648658cc2a5e85e0fc3955de0a3de65ea97c1 | f36071f5f46bf79ffd18485acca941db578656e8 | refs/heads/master | 2023-05-25T09:55:10.654122 | 2018-12-17T16:56:30 | 2018-12-17T16:56:30 | 163,117,180 | 0 | 0 | MIT | 2023-05-22T21:44:38 | 2018-12-25T23:24:17 | Python | UTF-8 | Python | false | false | 4,433 | py | import numpy as np
from neupy import layers, algorithms
from neupy.utils import asfloat
from base import BaseTestCase
class ConnectionsTestCase(BaseTestCase):
def test_connection_initializations(self):
possible_connections = (
(2, 3, 1),
# as a list
[layers.Input(2), layers.Sigmoid(3), layers.Tanh(1)],
# as forward sequence with inline operators
layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),
# as backward sequence with inline operators
layers.Tanh(1) < layers.Relu(10) < layers.Input(2),
layers.Tanh(1) << layers.Relu(10) << layers.Input(2),
)
for i, connection in enumerate(possible_connections, start=1):
network = algorithms.GradientDescent(connection)
message = "[Test #{}] Connection: {}".format(i, connection)
self.assertEqual(len(network.layers), 3, msg=message)
def test_connection_inside_connection_mlp(self):
connection = layers.join(
layers.Input(2),
layers.Relu(10),
layers.Relu(4) > layers.Relu(7),
layers.Relu(3) > layers.Relu(1),
)
expected_sizes = [2, 10, 4, 7, 3, 1]
for layer, expected_size in zip(connection, expected_sizes):
self.assertEqual(expected_size, layer.size)
def test_connection_inside_connection_conv(self):
connection = layers.join(
layers.Input((28, 28, 1)),
layers.Convolution((3, 3, 8)) > layers.Relu(),
layers.Convolution((3, 3, 8)) > layers.Relu(),
layers.MaxPooling((2, 2)),
layers.Reshape(),
layers.Softmax(1),
)
self.assertEqual(8, len(connection))
expected_order = [
layers.Input, layers.Convolution, layers.Relu,
layers.Convolution, layers.Relu, layers.MaxPooling,
layers.Reshape, layers.Softmax
]
for actual_layer, expected_layer in zip(connection, expected_order):
self.assertIsInstance(actual_layer, expected_layer)
def test_connection_shapes(self):
connection = layers.Input(2) > layers.Relu(10) > layers.Tanh(1)
self.assertEqual(connection.input_shape, (2,))
self.assertEqual(connection.output_shape, (1,))
def test_connection_output(self):
input_value = asfloat(np.random.random((10, 2)))
connection = layers.Input(2) > layers.Relu(10) > layers.Relu(1)
output_value = self.eval(connection.output(input_value))
self.assertEqual(output_value.shape, (10, 1))
def test_connection_wrong_number_of_input_values(self):
input_value_1 = asfloat(np.random.random((10, 2)))
input_value_2 = asfloat(np.random.random((10, 2)))
connection = layers.Input(2) > layers.Relu(10) > layers.Relu(1)
with self.assertRaisesRegexp(ValueError, "but 2 inputs was provided"):
connection.output(input_value_1, input_value_2)
def test_one_to_many_parallel_connection_output(self):
input_connection = layers.Input(4)
parallel_connections = layers.parallel(
layers.Linear(11),
layers.Linear(12),
layers.Linear(13),
)
layers.join(input_connection, parallel_connections)
input_value = asfloat(np.random.random((10, 4)))
actual_output = self.eval(parallel_connections.output(input_value))
self.assertEqual(actual_output[0].shape, (10, 11))
self.assertEqual(actual_output[1].shape, (10, 12))
self.assertEqual(actual_output[2].shape, (10, 13))
def test_many_to_many_parallel_connection_output(self):
connection = layers.parallel(
layers.Input(1) > layers.Linear(11),
layers.Input(2) > layers.Linear(12),
layers.Input(3) > layers.Linear(13),
)
input_value_1 = asfloat(np.random.random((10, 1)))
input_value_2 = asfloat(np.random.random((20, 2)))
input_value_3 = asfloat(np.random.random((30, 3)))
actual_output = self.eval(
connection.output(input_value_1, input_value_2, input_value_3))
self.assertEqual(actual_output[0].shape, (10, 11))
self.assertEqual(actual_output[1].shape, (20, 12))
self.assertEqual(actual_output[2].shape, (30, 13))
| [
"[email protected]"
] | |
7d8cd2c126385050c66363fa4a92a053fa6ef9b3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03096/s328164681.py | 831bf64bddd7e866af446ff2b39f600d31c60135 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import sys
DBG = False  # flip to True for debug tracing
R = 1000000007
MAXN = 200000
n = int(input())
c = []
for i in range(n):
c.append(int(input()))
tot = 1
s = [0] * (MAXN+1)
for i in range(n):
if i > 0 and c[i] == c[i-1]:
continue
x = s[c[i]]
tot = (tot + x) % R
s[c[i]] = tot
if DBG:
print("i {} sci {} tot {} newt {}".format(i,x,s[c[i]],tot))
print(s)
print(tot)
| [
"[email protected]"
] | |
90b7225eaba6669d66568ab1341e99ac26bae5a5 | e53d0217591a2dbbef1a5c6c126e14d2990e556b | /preprocessor/case_insensitive.py | 60f23c25528af34e6dafff0d83b057e7c4d86f5d | [] | no_license | wikty/EmailFilter | ee4a0f918bb74418a4e1f608f351be25cb903f8c | eb47aa1ff736ec6663b668540fb09df0e6b2ca38 | refs/heads/master | 2020-03-28T15:30:36.691360 | 2018-09-13T08:01:03 | 2018-09-13T08:01:03 | 148,601,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | def lowercase(tokens):
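    """Lowercase a single token or each token in a list."""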
if isinstance(tokens, list):
return [str(token).lower() for token in tokens]
else:
return str(tokens).lower()
def uppercase(tokens):
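    """Uppercase a single token or each token in a list."""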
if isinstance(tokens, list):
return [str(token).upper() for token in tokens]
else:
return str(tokens).upper() | [
"[email protected]"
] | |
7dda66d9812ab3a69b95ce0766814ca54af5f977 | 4631cb70c5af6530e2e493abbc34cedfcb82917d | /gallery/settings.py | a07394397d1d64023b1e7ba3605b318fc5e5ab7b | [] | no_license | nimomaina/Gallery | e8d55cd7cda3ab17aae9f75db2e34854391c2fe1 | 959181666c4996793800f59912063de925c0435e | refs/heads/master | 2020-04-26T16:43:59.042466 | 2019-03-05T07:22:44 | 2019-03-05T07:22:44 | 173,689,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | """
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG')
# development
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
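# Example .env for local development (illustrative values only; keys match the
# config() lookups above):
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=gallery
#   DB_USER=postgres
#   DB_PASSWORD=secret
#   DB_HOST=127.0.0.1
#   ALLOWED_HOSTS=localhost,127.0.0.1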
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'o%*pk@yz0uc-r6ycx*fb9s5r%54=hjaa8dsj#&cr%d1y$#$n+f'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'images',
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static'),
]
# Media configuration
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | [
"[email protected]"
] | |
3ca2c8aa7dd8e24c800b0e1dbee3e0bc4b3b4fb8 | 7c20132b8f5b4171e31bd4f837faf5d39a108325 | /setup.py | 4e42151e833c137be1fbd1c49fcaea3db677bf05 | [
"MIT"
] | permissive | holzenburg/feedshare | c52b7c22ed84a438dbfa409df34c6ab4cf270d40 | 44e9bfb7d92ed0707b6dd196ead4387bf8cfb588 | refs/heads/master | 2020-06-09T00:50:19.704167 | 2014-01-20T11:04:58 | 2014-01-20T11:04:58 | 15,700,850 | 10 | 0 | null | 2014-01-13T20:41:24 | 2014-01-07T09:55:56 | Python | UTF-8 | Python | false | false | 739 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
modules = ['feedshare', 'feedshare.feedlists', 'feedshare.feedlists.management.commands']
setup(
name='feedshare',
version='0.1',
description='Feedshare is a service to share and discover OPML feed lists.',
author='Arne Holzenburg',
author_email='[email protected]',
url='https://github.com/holzenburg/feedshare/',
#packages=find_packages(),
py_modules=modules,
license='License :: GNU GENERAL PUBLIC LICENSE',
# Enable django-setuptest
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=(
'django-setuptest',
# Required by django-setuptools on Python 2.6
'argparse'
),
)
| [
"[email protected]"
] | |
d706aa526770897cc82119a2a3456d0f25856e57 | 1b8d87b37cc6de4b0ffaedf0d5dc3877888865c3 | /fhirclient/r4models/capabilitystatement.py | d55536a775c0d53de8ef439c27d3deb87fedfc6d | [] | no_license | Healthedata1/Flask-Alerts-Sender | d222e689de01daaa59d51aea2054d538db231cf9 | 0637cb1bb2c8af18243fce3aecc09723c2fdd155 | refs/heads/master | 2022-12-12T14:14:04.708052 | 2021-05-05T20:52:49 | 2021-05-05T20:52:49 | 231,147,534 | 1 | 0 | null | 2022-12-08T03:22:29 | 2019-12-31T21:20:13 | Python | UTF-8 | Python | false | false | 32,417 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/CapabilityStatement) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class CapabilityStatement(domainresource.DomainResource):
""" A statement of system capabilities.
A Capability Statement documents a set of capabilities (behaviors) of a
FHIR Server for a particular version of FHIR that may be used as a
statement of actual server functionality or a statement of required or
desired server implementation.
"""
resource_type = "CapabilityStatement"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the capability statement.
Type `str`. """
self.document = None
""" Document definition.
List of `CapabilityStatementDocument` items (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.fhirVersion = None
""" FHIR Version the system supports.
Type `str`. """
self.format = None
""" formats supported (xml | json | ttl | mime type).
List of `str` items. """
self.implementation = None
""" If this describes a specific instance.
Type `CapabilityStatementImplementation` (represented as `dict` in JSON). """
self.implementationGuide = None
""" Implementation guides supported.
List of `str` items. """
self.imports = None
""" Canonical URL of another capability statement this adds to.
List of `str` items. """
self.instantiates = None
""" Canonical URL of another capability statement this implements.
List of `str` items. """
self.jurisdiction = None
""" Intended jurisdiction for capability statement (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.kind = None
""" instance | capability | requirements.
Type `str`. """
self.messaging = None
""" If messaging is supported.
List of `CapabilityStatementMessaging` items (represented as `dict` in JSON). """
self.name = None
""" Name for this capability statement (computer friendly).
Type `str`. """
self.patchFormat = None
""" Patch formats supported.
List of `str` items. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this capability statement is defined.
Type `str`. """
self.rest = None
""" If the endpoint is a RESTful one.
List of `CapabilityStatementRest` items (represented as `dict` in JSON). """
self.software = None
""" Software that is covered by this capability statement.
Type `CapabilityStatementSoftware` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.title = None
""" Name for this capability statement (human friendly).
Type `str`. """
self.url = None
""" Canonical identifier for this capability statement, represented as
a URI (globally unique).
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the capability statement.
Type `str`. """
super(CapabilityStatement, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatement, self).elementProperties()
js.extend([
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, True),
("description", "description", str, False, None, False),
("document", "document", CapabilityStatementDocument, True, None, False),
("experimental", "experimental", bool, False, None, False),
("fhirVersion", "fhirVersion", str, False, None, True),
("format", "format", str, True, None, True),
("implementation", "implementation", CapabilityStatementImplementation, False, None, False),
("implementationGuide", "implementationGuide", str, True, None, False),
("imports", "imports", str, True, None, False),
("instantiates", "instantiates", str, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("kind", "kind", str, False, None, True),
("messaging", "messaging", CapabilityStatementMessaging, True, None, False),
("name", "name", str, False, None, False),
("patchFormat", "patchFormat", str, True, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("rest", "rest", CapabilityStatementRest, True, None, False),
("software", "software", CapabilityStatementSoftware, False, None, False),
("status", "status", str, False, None, True),
("title", "title", str, False, None, False),
("url", "url", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
from . import backboneelement
class CapabilityStatementDocument(backboneelement.BackboneElement):
""" Document definition.
A document definition.
"""
resource_type = "CapabilityStatementDocument"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.documentation = None
""" Description of document support.
Type `str`. """
self.mode = None
""" producer | consumer.
Type `str`. """
self.profile = None
""" Constraint on the resources used in the document.
Type `str`. """
super(CapabilityStatementDocument, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementDocument, self).elementProperties()
js.extend([
("documentation", "documentation", str, False, None, False),
("mode", "mode", str, False, None, True),
("profile", "profile", str, False, None, True),
])
return js
class CapabilityStatementImplementation(backboneelement.BackboneElement):
""" If this describes a specific instance.
Identifies a specific implementation instance that is described by the
capability statement - i.e. a particular installation, rather than the
capabilities of a software program.
"""
resource_type = "CapabilityStatementImplementation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.custodian = None
""" Organization that manages the data.
Type `FHIRReference` (represented as `dict` in JSON). """
self.description = None
""" Describes this specific instance.
Type `str`. """
self.url = None
""" Base URL for the installation.
Type `str`. """
super(CapabilityStatementImplementation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementImplementation, self).elementProperties()
js.extend([
("custodian", "custodian", fhirreference.FHIRReference, False, None, False),
("description", "description", str, False, None, True),
("url", "url", str, False, None, False),
])
return js
class CapabilityStatementMessaging(backboneelement.BackboneElement):
""" If messaging is supported.
A description of the messaging capabilities of the solution.
"""
resource_type = "CapabilityStatementMessaging"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.documentation = None
""" Messaging interface behavior details.
Type `str`. """
self.endpoint = None
""" Where messages should be sent.
List of `CapabilityStatementMessagingEndpoint` items (represented as `dict` in JSON). """
self.reliableCache = None
""" Reliable Message Cache Length (min).
Type `int`. """
self.supportedMessage = None
""" Messages supported by this system.
List of `CapabilityStatementMessagingSupportedMessage` items (represented as `dict` in JSON). """
super(CapabilityStatementMessaging, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementMessaging, self).elementProperties()
js.extend([
("documentation", "documentation", str, False, None, False),
("endpoint", "endpoint", CapabilityStatementMessagingEndpoint, True, None, False),
("reliableCache", "reliableCache", int, False, None, False),
("supportedMessage", "supportedMessage", CapabilityStatementMessagingSupportedMessage, True, None, False),
])
return js
class CapabilityStatementMessagingEndpoint(backboneelement.BackboneElement):
""" Where messages should be sent.
An endpoint (network accessible address) to which messages and/or replies
are to be sent.
"""
resource_type = "CapabilityStatementMessagingEndpoint"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.address = None
""" Network address or identifier of the end-point.
Type `str`. """
self.protocol = None
""" http | ftp | mllp +.
Type `Coding` (represented as `dict` in JSON). """
super(CapabilityStatementMessagingEndpoint, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementMessagingEndpoint, self).elementProperties()
js.extend([
("address", "address", str, False, None, True),
("protocol", "protocol", coding.Coding, False, None, True),
])
return js
class CapabilityStatementMessagingSupportedMessage(backboneelement.BackboneElement):
""" Messages supported by this system.
References to message definitions for messages this system can send or
receive.
"""
resource_type = "CapabilityStatementMessagingSupportedMessage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.definition = None
""" Message supported by this system.
Type `str`. """
self.mode = None
""" sender | receiver.
Type `str`. """
super(CapabilityStatementMessagingSupportedMessage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementMessagingSupportedMessage, self).elementProperties()
js.extend([
("definition", "definition", str, False, None, True),
("mode", "mode", str, False, None, True),
])
return js
class CapabilityStatementRest(backboneelement.BackboneElement):
""" If the endpoint is a RESTful one.
A definition of the restful capabilities of the solution, if any.
"""
resource_type = "CapabilityStatementRest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.compartment = None
""" Compartments served/used by system.
List of `str` items. """
self.documentation = None
""" General description of implementation.
Type `str`. """
self.interaction = None
""" What operations are supported?.
List of `CapabilityStatementRestInteraction` items (represented as `dict` in JSON). """
self.mode = None
""" client | server.
Type `str`. """
self.operation = None
""" Definition of a system level operation.
List of `CapabilityStatementRestResourceOperation` items (represented as `dict` in JSON). """
self.resource = None
""" Resource served on the REST interface.
List of `CapabilityStatementRestResource` items (represented as `dict` in JSON). """
self.searchParam = None
""" Search parameters for searching all resources.
List of `CapabilityStatementRestResourceSearchParam` items (represented as `dict` in JSON). """
self.security = None
""" Information about security of implementation.
Type `CapabilityStatementRestSecurity` (represented as `dict` in JSON). """
super(CapabilityStatementRest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRest, self).elementProperties()
js.extend([
("compartment", "compartment", str, True, None, False),
("documentation", "documentation", str, False, None, False),
("interaction", "interaction", CapabilityStatementRestInteraction, True, None, False),
("mode", "mode", str, False, None, True),
("operation", "operation", CapabilityStatementRestResourceOperation, True, None, False),
("resource", "resource", CapabilityStatementRestResource, True, None, False),
("searchParam", "searchParam", CapabilityStatementRestResourceSearchParam, True, None, False),
("security", "security", CapabilityStatementRestSecurity, False, None, False),
])
return js
class CapabilityStatementRestInteraction(backboneelement.BackboneElement):
""" What operations are supported?.
A specification of restful operations supported by the system.
"""
resource_type = "CapabilityStatementRestInteraction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" transaction | batch | search-system | history-system.
Type `str`. """
self.documentation = None
""" Anything special about operation behavior.
Type `str`. """
super(CapabilityStatementRestInteraction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestInteraction, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("documentation", "documentation", str, False, None, False),
])
return js
class CapabilityStatementRestResource(backboneelement.BackboneElement):
""" Resource served on the REST interface.
A specification of the restful capabilities of the solution for a specific
resource type.
"""
resource_type = "CapabilityStatementRestResource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.conditionalCreate = None
""" If allows/uses conditional create.
Type `bool`. """
self.conditionalDelete = None
""" not-supported | single | multiple - how conditional delete is
supported.
Type `str`. """
self.conditionalRead = None
""" not-supported | modified-since | not-match | full-support.
Type `str`. """
self.conditionalUpdate = None
""" If allows/uses conditional update.
Type `bool`. """
self.documentation = None
""" Additional information about the use of the resource type.
Type `str`. """
self.interaction = None
""" What operations are supported?.
List of `CapabilityStatementRestResourceInteraction` items (represented as `dict` in JSON). """
self.operation = None
""" Definition of a resource operation.
List of `CapabilityStatementRestResourceOperation` items (represented as `dict` in JSON). """
self.profile = None
""" Base System profile for all uses of resource.
Type `str`. """
self.readHistory = None
""" Whether vRead can return past versions.
Type `bool`. """
self.referencePolicy = None
""" literal | logical | resolves | enforced | local.
List of `str` items. """
self.searchInclude = None
""" _include values supported by the server.
List of `str` items. """
self.searchParam = None
""" Search parameters supported by implementation.
List of `CapabilityStatementRestResourceSearchParam` items (represented as `dict` in JSON). """
self.searchRevInclude = None
""" _revinclude values supported by the server.
List of `str` items. """
self.supportedProfile = None
""" Profiles for use cases supported.
List of `str` items. """
self.type = None
""" A resource type that is supported.
Type `str`. """
self.updateCreate = None
""" If update can commit to a new identity.
Type `bool`. """
self.versioning = None
""" no-version | versioned | versioned-update.
Type `str`. """
super(CapabilityStatementRestResource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestResource, self).elementProperties()
js.extend([
("conditionalCreate", "conditionalCreate", bool, False, None, False),
("conditionalDelete", "conditionalDelete", str, False, None, False),
("conditionalRead", "conditionalRead", str, False, None, False),
("conditionalUpdate", "conditionalUpdate", bool, False, None, False),
("documentation", "documentation", str, False, None, False),
("interaction", "interaction", CapabilityStatementRestResourceInteraction, True, None, False),
("operation", "operation", CapabilityStatementRestResourceOperation, True, None, False),
("profile", "profile", str, False, None, False),
("readHistory", "readHistory", bool, False, None, False),
("referencePolicy", "referencePolicy", str, True, None, False),
("searchInclude", "searchInclude", str, True, None, False),
("searchParam", "searchParam", CapabilityStatementRestResourceSearchParam, True, None, False),
("searchRevInclude", "searchRevInclude", str, True, None, False),
("supportedProfile", "supportedProfile", str, True, None, False),
("type", "type", str, False, None, True),
("updateCreate", "updateCreate", bool, False, None, False),
("versioning", "versioning", str, False, None, False),
])
return js
class CapabilityStatementRestResourceInteraction(backboneelement.BackboneElement):
""" What operations are supported?.
Identifies a restful operation supported by the solution.
"""
resource_type = "CapabilityStatementRestResourceInteraction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" read | vread | update | patch | delete | history-instance |
history-type | create | search-type.
Type `str`. """
self.documentation = None
""" Anything special about operation behavior.
Type `str`. """
super(CapabilityStatementRestResourceInteraction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestResourceInteraction, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("documentation", "documentation", str, False, None, False),
])
return js
class CapabilityStatementRestResourceOperation(backboneelement.BackboneElement):
""" Definition of a resource operation.
Definition of an operation or a named query together with its parameters
and their meaning and type. Consult the definition of the operation for
details about how to invoke the operation, and the parameters.
"""
resource_type = "CapabilityStatementRestResourceOperation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.definition = None
""" The defined operation/query.
Type `str`. """
self.documentation = None
""" Specific details about operation behavior.
Type `str`. """
self.name = None
""" Name by which the operation/query is invoked.
Type `str`. """
super(CapabilityStatementRestResourceOperation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestResourceOperation, self).elementProperties()
js.extend([
("definition", "definition", str, False, None, True),
("documentation", "documentation", str, False, None, False),
("name", "name", str, False, None, True),
])
return js
class CapabilityStatementRestResourceSearchParam(backboneelement.BackboneElement):
""" Search parameters supported by implementation.
Search parameters for implementations to support and/or make use of -
either references to ones defined in the specification, or additional ones
defined for/by the implementation.
"""
resource_type = "CapabilityStatementRestResourceSearchParam"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.definition = None
""" Source of definition for parameter.
Type `str`. """
self.documentation = None
""" Server-specific usage.
Type `str`. """
self.name = None
""" Name of search parameter.
Type `str`. """
self.type = None
""" number | date | string | token | reference | composite | quantity |
uri | special.
Type `str`. """
super(CapabilityStatementRestResourceSearchParam, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestResourceSearchParam, self).elementProperties()
js.extend([
("definition", "definition", str, False, None, False),
("documentation", "documentation", str, False, None, False),
("name", "name", str, False, None, True),
("type", "type", str, False, None, True),
])
return js
class CapabilityStatementRestSecurity(backboneelement.BackboneElement):
""" Information about security of implementation.
Information about security implementation from an interface perspective -
what a client needs to know.
"""
resource_type = "CapabilityStatementRestSecurity"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.cors = None
""" Adds CORS Headers (http://enable-cors.org/).
Type `bool`. """
self.description = None
""" General description of how security works.
Type `str`. """
self.service = None
""" OAuth | SMART-on-FHIR | NTLM | Basic | Kerberos | Certificates.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(CapabilityStatementRestSecurity, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementRestSecurity, self).elementProperties()
js.extend([
("cors", "cors", bool, False, None, False),
("description", "description", str, False, None, False),
("service", "service", codeableconcept.CodeableConcept, True, None, False),
])
return js
class CapabilityStatementSoftware(backboneelement.BackboneElement):
""" Software that is covered by this capability statement.
Software that is covered by this capability statement. It is used when the
capability statement describes the capabilities of a particular software
version, independent of an installation.
"""
resource_type = "CapabilityStatementSoftware"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" A name the software is known by.
Type `str`. """
self.releaseDate = None
""" Date this version was released.
Type `FHIRDate` (represented as `str` in JSON). """
self.version = None
""" Version covered by this statement.
Type `str`. """
super(CapabilityStatementSoftware, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CapabilityStatementSoftware, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("releaseDate", "releaseDate", fhirdate.FHIRDate, False, None, False),
("version", "version", str, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
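# --- Hedged usage sketch (not part of the generated file): constructing a minimal,
# strictly-validated CapabilityStatement. The JSON keys below are exactly the fields
# flagged as required (last tuple element True) in elementProperties() above; the
# concrete values are illustrative assumptions. ---
if __name__ == '__main__':
    minimal = {
        "resourceType": "CapabilityStatement",
        "status": "active",
        "date": "2019-01-01",
        "kind": "instance",
        "fhirVersion": "4.0.0",
        "format": ["json"],
    }
    cs = CapabilityStatement(jsondict=minimal, strict=True)
    print(cs.as_json())  # round-trips back to a validated JSON dict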
# ===== file: lists/migrations/0004_auto__add_tag.py (repo: JamieHouston/writenow, no license) =====
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table('lists_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('lists', ['Tag'])
# Adding M2M table for field tags on 'Item'
db.create_table('lists_item_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('item', models.ForeignKey(orm['lists.item'], null=False)),
('tag', models.ForeignKey(orm['lists.tag'], null=False))
))
db.create_unique('lists_item_tags', ['item_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('lists_tag')
# Removing M2M table for field tags on 'Item'
db.delete_table('lists_item_tags')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 10, 21, 0, 54, 664000)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 10, 21, 0, 54, 664000)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lists.item': {
'Meta': {'ordering': "['order']", 'object_name': 'Item'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Tag']", 'symmetrical': 'False'})
},
'lists.list': {
'Meta': {'object_name': 'List'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'lists.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['lists']
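# Usage note (illustrative): with South installed, this migration is applied with
# `python manage.py migrate lists`, which runs forwards(orm) above; backwards(orm)
# is invoked when migrating back to a revision before 0004.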
# ===== file: modelos/test_tevi.py (repo: Chrpob/heroku-telegram-bot, MIT license) =====
import unittest
from modelos.tevi import Tevi, SOBRESALIENTE, MUY_BUENO, NORMAL, RETRASO_LEVE, RETRASO_GRAVE, PUNTAJE_NO_VALIDO


class TestTevi(unittest.TestCase):

    def test_edad_2(self):
        sobresaliente = Tevi(2, 28, 0)
        muy_bueno = Tevi(2, 20, 0)
        normal = Tevi(2, 12, 0)
        retraso_leve = Tevi(2, 5, 0)
        retraso_grave = Tevi(2, 1, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_3(self):
        sobresaliente = Tevi(3, 35, 0)
        muy_bueno = Tevi(3, 30, 0)
        normal = Tevi(3, 24, 0)
        retraso_leve = Tevi(3, 18, 0)
        retraso_grave = Tevi(3, 13, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_4(self):
        sobresaliente = Tevi(4, 51, 0)
        muy_bueno = Tevi(4, 42, 0)
        normal = Tevi(4, 34, 0)
        retraso_leve = Tevi(4, 26, 0)
        retraso_grave = Tevi(4, 18, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_5(self):
        sobresaliente = Tevi(5, 62, 0)
        muy_bueno = Tevi(5, 46, 0)
        normal = Tevi(5, 46, 7)
        retraso_leve = Tevi(5, 31, 0)
        retraso_grave = Tevi(5, 24, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_6(self):
        sobresaliente = Tevi(6, 61, 0)
        muy_bueno = Tevi(6, 54, 0)
        normal = Tevi(6, 47, 0)
        retraso_leve = Tevi(6, 41, 0)
        retraso_grave = Tevi(6, 34, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_7(self):
        puntaje_no_valido = Tevi(7, 28, 0)
        sobresaliente = Tevi(7, 71, 0)
        muy_bueno = Tevi(7, 64, 0)
        normal = Tevi(7, 56, 0)
        retraso_leve = Tevi(7, 48, 0)
        retraso_grave = Tevi(7, 40, 0)
        self.assertEqual(puntaje_no_valido.resultado, PUNTAJE_NO_VALIDO)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_8(self):
        sobresaliente = Tevi(8, 71, 0)
        muy_bueno = Tevi(8, 64, 0)
        normal = Tevi(8, 64, 8)
        retraso_leve = Tevi(8, 48, 0)
        retraso_grave = Tevi(8, 40, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_9_10(self):
        sobresaliente = Tevi(9, 84, 0)
        muy_bueno = Tevi(10, 74, 0)
        normal = Tevi(9, 63, 0)
        retraso_leve = Tevi(10, 51, 0)
        retraso_grave = Tevi(9, 40, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_11_12(self):
        sobresaliente = Tevi(12, 104, 0)
        muy_bueno = Tevi(11, 88, 0)
        normal = Tevi(12, 73, 0)
        retraso_leve = Tevi(11, 58, 0)
        retraso_grave = Tevi(12, 42, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_13_14(self):
        sobresaliente = Tevi(14, 111, 0)
        muy_bueno = Tevi(13, 97, 0)
        normal = Tevi(14, 83, 0)
        retraso_leve = Tevi(13, 69, 0)
        retraso_grave = Tevi(14, 55, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)

    def test_edad_15_16_17_18(self):
        sobresaliente = Tevi(15, 116, 0)
        muy_bueno = Tevi(16, 107, 0)
        normal = Tevi(17, 97, 0)
        retraso_leve = Tevi(18, 86, 0)
        retraso_grave = Tevi(15, 76, 0)
        self.assertEqual(sobresaliente.resultado, SOBRESALIENTE)
        self.assertEqual(muy_bueno.resultado, MUY_BUENO)
        self.assertEqual(normal.resultado, NORMAL)
        self.assertEqual(retraso_leve.resultado, RETRASO_LEVE)
        self.assertEqual(retraso_grave.resultado, RETRASO_GRAVE)


if __name__ == '__main__':
    unittest.main()
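# --- Hypothetical sketch (the real modelos/tevi.py is not included in this dump): one
# plausible shape for the class under test. A raw TEVI score maps to a band through
# per-age cut-offs; only an age-2 row is shown, with cut values invented so that the
# age-2 expectations above would hold. Everything here is an assumption for illustration.
class TeviSketch(object):
    BANDS = {2: [(25, SOBRESALIENTE), (17, MUY_BUENO), (9, NORMAL),
                 (3, RETRASO_LEVE), (0, RETRASO_GRAVE)]}

    def __init__(self, edad, puntaje, errores):
        self.resultado = PUNTAJE_NO_VALIDO  # default for ages/scores outside the table
        for cut, banda in self.BANDS.get(edad, []):
            if puntaje >= cut:
                self.resultado = banda
                break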
# ===== file: db_builder.py (repo: mkorsun/softdev09, no license) =====
import sqlite3  # enable control of an sqlite database
import csv  # facilitates CSV I/O

f = "discobandit.db"
db = sqlite3.connect(f)  # open if f exists, otherwise create
c = db.cursor()  # facilitate db ops

# ==========================================================
# INSERT YOUR POPULATE CODE IN THIS ZONE

def tblmkr(textfile, args):  # all we need is the filename
    csvfile = open(textfile + ".csv", "r")
    data = csv.DictReader(csvfile)
    fields = data.fieldnames  # list of field names
    c.execute("CREATE TABLE " + textfile + "(" + args + ")")  # creates the table
    for record in data:  # look at each record
        values = '('  # start the values
        for field in fields:  # look at each field in the record
            if field != 'name' and field != 'code':  # numeric fields need no quotes
                values += record[field] + ', '
            else:  # TEXT fields ('name', 'code') must be quoted
                values += '"' + record[field] + '", '
        values = values[:-2]  # take away the trailing ", "
        values += ')'  # add the )
        insert = "INSERT INTO " + textfile + " VALUES "  # basic command
        c.execute(insert + values)  # concatenates the command with the values
    csvfile.close()
    return

tblmkr("peeps", "name TEXT, age INTEGER, id INTEGER PRIMARY KEY")
tblmkr("course", "code TEXT, mark INTEGER, id INTEGER")

# ==========================================================
# db.commit()  # save changes
# db.close()   # close database
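# Illustrative sanity check (assumes peeps.csv / course.csv used the headers above);
# run these on the same cursor before closing the connection.
for row in c.execute("SELECT name, age FROM peeps ORDER BY age"):
    print(row)
for row in c.execute("SELECT code, AVG(mark) FROM course GROUP BY code"):
    print(row)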
# ===== file: day1.py (repo: jameswmccarty/AdventOfCode2018, no license) =====
#!/usr/bin/python
"""
--- Day 1: Chronal Calibration ---
"We've detected some temporal anomalies," one of Santa's Elves at the Temporal Anomaly Research and Detection Instrument Station tells you. She sounded pretty worried when she called you down here. "At 500-year intervals into the past, someone has been changing Santa's history!"
"The good news is that the changes won't propagate to our time stream for another 25 days, and we have a device" - she attaches something to your wrist - "that will let you fix the changes with no such propagation delay. It's configured to send you 500 years further into the past every few days; that was the best we could do on such short notice."
"The bad news is that we are detecting roughly fifty anomalies throughout time; the device will indicate fixed anomalies with stars. The other bad news is that we only have one device and you're the best person for the job! Good lu--" She taps a button on the device and you suddenly feel like you're falling. To save Christmas, you need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
After feeling like you've been falling for a few minutes, you look at the device's tiny screen. "Error: Device must be calibrated before first use. Frequency drift detected. Cannot maintain destination lock." Below the message, the device shows a sequence of changes in frequency (your puzzle input). A value like +6 means the current frequency increases by 6; a value like -3 means the current frequency decreases by 3.
For example, if the device displays frequency changes of +1, -2, +3, +1, then starting from a frequency of zero, the following changes would occur:
Current frequency 0, change of +1; resulting frequency 1.
Current frequency 1, change of -2; resulting frequency -1.
Current frequency -1, change of +3; resulting frequency 2.
Current frequency 2, change of +1; resulting frequency 3.
In this example, the resulting frequency is 3.
Here are other example situations:
+1, +1, +1 results in 3
+1, +1, -2 results in 0
-1, -2, -3 results in -6
Starting with a frequency of zero, what is the resulting frequency after all of the changes in frequency have been applied?
To begin, get your puzzle input.
--- Part Two ---
You notice that the device repeats the same frequency change list over and over. To calibrate the device, you need to find the first frequency it reaches twice.
For example, using the same list of changes above, the device would loop as follows:
Current frequency 0, change of +1; resulting frequency 1.
Current frequency 1, change of -2; resulting frequency -1.
Current frequency -1, change of +3; resulting frequency 2.
Current frequency 2, change of +1; resulting frequency 3.
(At this point, the device continues from the start of the list.)
Current frequency 3, change of +1; resulting frequency 4.
Current frequency 4, change of -2; resulting frequency 2, which has already been seen.
In this example, the first frequency reached twice is 2. Note that your device might need to repeat its list of frequency changes many times before a duplicate frequency is found, and that duplicates might be found while in the middle of processing the list.
Here are other examples:
+1, -1 first reaches 0 twice.
+3, +3, +4, -2, -4 first reaches 10 twice.
-6, +3, +8, +5, -6 first reaches 5 twice.
+7, +7, -2, -7, -4 first reaches 14 twice.
What is the first frequency your device reaches twice?
Although it hasn't changed, you can still get your puzzle input.
"""
if __name__ == "__main__":
#Part 1 solution
freq = 0
with open("day1_input", "r") as infile:
for line in infile.readlines():
freq += int(line.strip())
print "Part 1 solution: " + str(freq)
#Part 2 solution
freq = 0
inputs = []
seen_set = set()
found = False
with open("day1_input", "r") as infile:
for line in infile.readlines():
inputs.append(int(line.strip()))
#loop inputs as ring buffer
while not found:
for item in inputs:
freq += item
if freq in seen_set:
print "Part 2 solution " + str(freq)
found = True
break;
else:
seen_set.add(freq)
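    # Equivalent part-2 sketch using itertools.cycle (illustrative addition, not in the
    # original; it reuses the `inputs` list read above and the same seen-set idea):
    from itertools import cycle
    freq2, seen2 = 0, set([0])
    for delta in cycle(inputs):
        freq2 += delta
        if freq2 in seen2:
            print "Part 2 (itertools) solution " + str(freq2)
            break
        seen2.add(freq2)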
# ===== file: figures/eulerian/vorticity.py (repo: lento234/masterthesis, no license) =====
from dolfin import *
...
# Define Function spaces
# $X$ : scalar-valued vorticity function space $W$
X = FunctionSpace(mesh, 'CG', 1) # 1st order, Continuous-Galerkin
# Define the trial and test function
omega = TrialFunction(X) # vorticity $\omega{\in}X$
v = TestFunction(X) # test function $v{\in}\hat{X}$
...
# Define the variation problem for vorticity
a = inner(omega,v)*dx # $ \langle{\omega,v}\rangle$
b = inner(curl(u),v)*dx # $ \langle{\nabla{\times}u,v}\rangle$
# Pre-Assemble the LHS
A = assemble(a)
...
# During the time-stepping
omega = Function(X) # Define the function
B = assemble(b) # Assemble b
solve(A, omega.vector(), B) # Solve for vorticity
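# Equivalent shortcut (a sketch): dolfin's project() assembles and solves the same
# L2-projection internally, at the cost of re-assembling the system on every call:
# omega = project(curl(u), X)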
# ===== file: Analyses/ATLAS_2013_CONF_2013_024/stopL.py (repo: kazuki-sakurai/Atom-validation, no license) =====
#! /usr/bin/env python
__author__ = "Kazuki Sakurai <[email protected]>"
__version__ = "0.1"
import sys
import os
from math import *
sys.path.append('../..')
from functions import *
if __name__ == '__main__':

    vname = 'stopL'

    table_name = '$\\tilde t_L \\tilde t_L^* \\to t \\tilde \\chi_1^0 \\bar t \\tilde \\chi_1^0$ (ATLAS\\_CONF\\_2013\\_024)'

    description = '''
    \\begin{itemize}
    \\item Process: $\\tilde t_{L/R} \\tilde t_{L/R}^* \\to t \\tilde \\chi_1^0 \\bar t \\tilde \\chi_1^0$.
    \\item Mass: $m_{\\tilde t_{L/R}} = 600$~GeV, $m_{\\tilde \\chi_1^0} = 0$~GeV.
    \\item The number of Atom MC events: $10^4$.
    \\item Event Generator: {\\tt Herwig++ 2.5.2}.
    \\end{itemize}
    '''

    table_caption = '''
    The cut-flow table for the $\\tilde t_L \\tilde t_L^*$.
    '''

    inputfile = vname + '.root'
    if len(sys.argv) == 2: inputfile = sys.argv[1]

    ananame, eff_dict, err_dict, pid = GetEfficiencies(inputfile)

    Ntot_exp = 250000.
    per = 100.
    eff_dict['No-cut'] = 1.
    err_dict['No-cut'] = 0.

    initial_list = [  # ATLAS results: numbers are scaled to the 600 GeV stop xsec with 20.5/fb
        ['No-cut', 507.3, 'No cut'],
        ["Muon veto", 382.2, '$\\mu$ veto'],
        ["Electron veto", 292.3, '$e$ veto'],
        ["MET > 130", 270.1, 'MET $>$ 130'],
        ["Jet multiplicity and pT", 92.2, '$N_{\\rm jets}$ and $p_T$'],
        ["MET_track > 30", 90.5, '$\\rm MET_{track} > 30$'],
        ["delPhi(MET, MET_track) < pi/3", 84.3, '$\\Delta \\phi (\\rm MET, MET_{track}) < \\pi/3$'],
        ["delPhi(jet, MET) > pi/5", 72.0, '$\\Delta \\phi (\\rm jet, MET) > \\pi/5$'],
        ["Tau veto", 61.9, '$\\tau$ veto'],
        [">= 2-bjet", 31.5, '$\ge$ 2-bjets'],
        ["mT(bjet, MET) > 175", 23.6, '$m_T(\\rm bjet, MET) > 175$'],
        ["80 < m^0_jjj < 270", 20.4, '$80 < m^0_{jjj} < 270$'],
        ["80 < m^1_jjj < 270", 11.9, '$80 < m^1_{jjj} < 270$'],
        ["SR1: MET > 200", 11.2, 'SR1: $\\rm MET > 200$'],
        ["SR2: MET > 300", 8.3, 'SR2: $\\rm MET > 300$'],
        ["SR3: MET > 350", 6.6, 'SR3: $\\rm MET > 350$']
    ]

    NMC_first = Ntot_exp * initial_list[0][1] / initial_list[0][1]

    texlines = cutflow_generation(ananame, vname, table_caption, initial_list, eff_dict, err_dict, NMC_first)

    fout = open(vname + '.tex', 'w')
    tex = tex_format()
    fout.write(tex.begin_document)
    fout.write('\n')
    fout.write('\\subsection{' + table_name + '} \n')
    fout.write('\n')
    fout.write(description)
    fout.write('\n')
    for t in texlines: fout.write(t + '\n')
    fout.write(tex.end_document)
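    # Illustrative post-processing (not in the original script): relative efficiency of
    # each ATLAS cut with respect to the preceding one, from the numbers listed above.
    prev = initial_list[0][1]
    for key, nevt, label in initial_list[1:]:
        print('%-32s relative eff = %.3f' % (key, nevt / prev))
        prev = nevt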
# ===== file: FSD/Game/board.py (repo: morettimatheus/python, no license) =====
import pygame
from constants import *


class Board:
    def __init__(self, rows, screen):  # constructor of our Board class
        self.rows = rows
        self.screen = screen

    def draw(self, rows, screen):
        screen.fill(BOARDBG)  # color defined inside constants.py
        pygame.draw.rect(screen, WHITE, [0, 0, WIDTH, HEIGHT], 10)
        # this piece of code draws rects using the number of rows we have
        for i in range(0, self.rows):
            i *= WIDTH / rows
            for j in range(0, self.rows):
                j *= HEIGHT / rows
                pygame.draw.rect(screen, WHITE, [i, j, WIDTH / rows, HEIGHT / rows], 5)
        pygame.display.flip()
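# Minimal usage sketch (assumes constants.py defines WIDTH, HEIGHT, BOARDBG and WHITE,
# as the class above already does):
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    board = Board(8, screen)
    board.draw(board.rows, screen)
    pygame.time.wait(2000)  # keep the window visible briefly
    pygame.quit()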
# ===== file: Dealer.py (repo: galievaregina/teorver, no license) =====
import random
from pai_gow.Pack import Pack
from pai_gow.Cards import Card, Cards


def make_pack():
    pack = []
    for value in Cards.CardsValue:
        if value == Cards.CardsValue.JOKER:
            pack.append(Card(Cards.CardsValue.JOKER, Cards.CardsSuits.JOKER))
        else:
            pack.append(Card(value, Cards.CardsSuits.HEARTS))
            pack.append(Card(value, Cards.CardsSuits.DIAMONDS))
            pack.append(Card(value, Cards.CardsSuits.CLUBS))
            pack.append(Card(value, Cards.CardsSuits.SPADES))
    return pack


class Dealer:
    def __init__(self):
        self.pack = make_pack()

    @staticmethod
    def get_five_cards(pack):
        list_comb_five = []
        for i in range(0, 5):
            card_index = random.randint(0, len(pack) - 1)
            list_comb_five.append(pack[card_index])
            pack.pop(card_index)
        return list_comb_five

    @staticmethod
    def get_two_cards(pack):
        list_comb_two = []
        for i in range(0, 2):
            # index into the cards actually left, not a fixed 0..53 range
            card_index = random.randint(0, len(pack) - 1)
            list_comb_two.append(pack[card_index])
            pack.pop(card_index)
        return list_comb_two
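# Usage sketch (assumes the pai_gow package above is importable; with thirteen ranks
# plus the joker, make_pack() yields a 53-card pack):
if __name__ == '__main__':
    dealer = Dealer()
    hand = Dealer.get_five_cards(dealer.pack)
    low = Dealer.get_two_cards(dealer.pack)
    print(len(hand), len(low), len(dealer.pack))  # expected: 5 2 46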
# ===== file: data/build_errorchecker_data.py (repo: shiccai/Image2Katex, MIT license) =====
'''
File: generate_sequence.py
Project: utils
File Created: Monday, 24th December 2018 12:23:26 pm
Author: xiaofeng ([email protected])
-----
Last Modified: Monday, 24th December 2018 12:23:37 pm
Modified By: xiaofeng ([email protected]>)
-----
2018.06 - 2018 Latex Math, Latex Math
'''
from __future__ import absolute_import, division, print_function
import collections
import json
import os
import pickle
import re
import sys
import random
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats  # norm.pdf is used in plot_histo_lengths below
import tensorflow as tf
from sklearn.cluster import KMeans
from tensorflow.python.platform import gfile
from collections import defaultdict
from config_dataset import SequenceVocabulary
from get_logger import init_logger
sys.path.append('..')
sys.path.insert(0, os.path.dirname(os.path.abspath(os.getcwd())))
print(sys.path)
from models.evaluate.text import cal_score
from utils.render_image import latex_to_image
""" This progress is designed to generate the sequence that can not generate the png file
- 使用公式预测网络预测生成latex储存到对应的文件夹中,同时,存在真实的label,本程序主要为了进行nmt网络的搭建
,使用预测的latex作为源输入,使用对应的label作为目标输出nmt
"""
class ErrorChecker(object):
def __init__(self, logger, vocabulary):
self.logger = logger
self.vocabulary = vocabulary
self.token_to_idx = self.vocabulary.token_to_idx
self.idx_to_token = self.vocabulary.idx_to_token
self.bucket = self.vocabulary.bucket_size
self.logger.info('Process the ErrorChecker function')
def readfile(self, files):
if files.endswith('.txt'):
return [i.strip().split() for i in open(files, 'r').readlines()]
elif files.endswith('.dat') or files.endswith('.pkl'):
return pickle.load(open(files, 'rb'))
def readfilenums(self, files):
if files.endswith('.txt'):
return [len(i.strip().split()) for i in open(files, 'r').readlines()]
elif files.endswith('.dat') or files.endswith('.pkl'):
file_list = pickle.load(open(files, 'rb'))
return [len(i) for i in file_list]
def writefile(self, input_data, files):
if files.endswith('.txt'):
with open(files, 'w') as wr:
for line in input_data:
wr.write(' '.join([str(j) for j in line]) + '\n')
elif files.endswith('.dat') or files.endswith('.pkl'):
with open(files, 'wb') as wr:
pickle.dump(input_data, wr, True)
def fit_plot_kmeans_model(self, n, X):
""" Kmeans to predict the bucket size """
# print('clustering kmeans ...')
kmean = KMeans(n_clusters=n, max_iter=1000, tol=0.01, init='k-means++', n_jobs=-1)
kmean.fit(X)
# print('kmenas: k={} ,cost={}'.format(n, int(kmean.inertia_)))
# print('centers: {}'.format(kmean.cluster_centers_))
return kmean.cluster_centers_, kmean.inertia_
def _cal_buckets(
self, source_file, target_file, bucket_file, prepared_dir, min_val=4, max_val=20):
# cal buckets based the kmeans
self.logger.info('Cal the bucket size ...')
with open(bucket_file, 'w')as k:
x = self.readfilenums(source_file)
y = self.readfilenums(target_file)
self.logger.info('source line nums is [{}]'.format(len(x)))
self.logger.info('target line nums is [{}]'.format(len(y)))
data = np.stack([x, y], axis=1)
temp = dict()
loss = []
for i in range(min_val, max_val):
centers, distance = self.fit_plot_kmeans_model(i, data)
centers = centers.tolist()
centers_ori = [[int(i[0]), int(i[1])] for i in centers]
centers_sort = sorted(centers_ori, key=lambda k: k[0])
temp[str(i)+'_source_target'] = centers_sort
temp[str(i) + '_distance'] = distance / len(x)
loss.append(distance/len(x))
self.logger.info('Save bucket details to the file [{}]'.format(bucket_file))
json.dump(temp, k)
self.plot_scatter_lengths(
title='loss', x_title='k_iter', y_title='distance',
x_lengths=list(range(min_val, max_val)),
y_lengths=loss, out_file=prepared_dir)
def _cal_score(self, source_file, target_file):
""" calculate score between predict and target """
score = cal_score(source_file, target_file)
out = {}
out['description'] = u"The evaluation score for the predict and label"
out['evaluation'] = score
with open('score.json', 'w') as js:
json.dump(out, js)
def plot_scatter_lengths(self, title, x_title, y_title, x_lengths, y_lengths, out_file):
plt.figure()
plt.scatter(x_lengths, y_lengths)
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.ylim(0, max(y_lengths))
plt.xlim(0, max(x_lengths))
# plt.show()
plt.savefig(os.path.join(out_file, '{}.png'.format(title)))
def plot_histo_lengths(self, title, lengths):
plt.figure()
mu = np.mean(lengths)  # scipy.stats.norm.pdf expects (x, loc=mean, scale=std)
sigma = np.std(lengths)
x = np.array(lengths)
n, bins, patches = plt.hist(x, 50, facecolor='green', alpha=0.5)
y = scipy.stats.norm.pdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.title(title)
plt.xlabel("Length")
plt.ylabel("Number of Sequences")
plt.xlim(0, max(lengths))
plt.savefig('{}.png'.format(title))
# plt.show()
def analysisfile(self, source_file, target_file, figure_file, plot_histograms=True,
plot_scatter=True):
""" Anaylsis and display the file """
source_lengths = []
target_lengths = []
with gfile.GFile(source_file, mode="r") as s_file:
with gfile.GFile(target_file, mode="r") as t_file:
source = s_file.readline()
target = t_file.readline()
counter = 0
while source and target:
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
num_source_ids = len(source.split())
source_lengths.append(num_source_ids)
num_target_ids = len(target.split()) + 1 # plus 1 for EOS token
target_lengths.append(num_target_ids)
source, target = s_file.readline(), t_file.readline()
# print(target_lengths, source_lengths)
if plot_histograms:
self.plot_histo_lengths("target lengths", target_lengths)
self.plot_histo_lengths("source_lengths", source_lengths)
if plot_scatter:
self.plot_scatter_lengths("target vs source length", "source length",
"target length", source_lengths, target_lengths, figure_file)
def merge_sequence(self, source_dir, target_dir, write_source, write_target):
""" merge the files that predicted latex and label latex into one file """
self.logger.info('Merge the files ...')
# merge the many per-evaluation label and predict files into single files, so that one
# machine-translation model can be used for error detection and correction
assert os.path.exists(source_dir), '[{}] do not exist'.format(source_dir)
source_file_list = self.getRawFileList(source_dir)
target_file_list = self.getRawFileList(target_dir)
merged_source, merged_target = [], []
for idx in range(len(source_file_list)):
_source_file = source_file_list[idx]
_child = _source_file.split('/')[-2]
_target_file = [i for i in target_file_list if i.split('/')[-2] == _child][0]
print(_source_file)
print(_target_file)
_source_details = self.readfile(_source_file)
_target_details = self.readfile(_target_file)
assert len(_source_details) == len(_target_details), ' sequence num must be same'
merged_source.extend(_source_details)
merged_target.extend(_target_details)
self.logger.info('Source file nums is [{:d}]'.format(len(merged_source)))
self.logger.info('Target file nums is [{:d}]'.format(len(merged_target)))
self.writefile(merged_source, write_source)
self.writefile(merged_target, write_target)
self.logger.info('Merge the files done')
def convert_char_idx(self, input_file, out_file):
""" Convert the char to the idx based the vocabulayer dictionary """
out = []
missing = {}
self.logger.info('Convert the char file to ids file for the [{}]'.format(input_file))
with tf.gfile.GFile(input_file, mode='r') as ip:
source = ip.readline().strip()
counter = 0
while source:
counter += 1
if counter % 1000 == 0:
print('Reading data line %d' % counter)
source_list = source.split()
temp = [self.vocabulary.START_ID]
for char in source_list:
try:
temp += [self.token_to_idx[char]]
except:
if char not in missing.keys():
missing[char] = 0
missing[char] += 1
temp += [self.vocabulary.UNK_ID]
temp += [self.vocabulary.EOS_ID]
out.append(temp)
source = ip.readline()
self.logger.info(' missing char is {}:'.format(missing.keys()))
with open(out_file, 'w') as ou:
for i in out:
ou.write(' '.join([str(j) for j in i]) + '\n')
def getRawFileList(self, path):
files = []
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('.txt'):
files.append(os.path.join(path, f))
if os.path.isdir(os.path.join(path, f)):
temp = self.getRawFileList(os.path.join(path, f))
files.extend(temp)
return files
def generata_sequence_dataset(
self, source_path, target_path, dataset_file):
# place each pair into the first (smallest) bucket that fits both lengths
data_set = defaultdict(list)
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline().strip(), target_file.readline().strip()
counter = 0
while source and target:
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
for bucket_id, (source_size, target_size) in enumerate(self.bucket):
# if str(self.bucket[bucket_id]) not in data_set:
# data_set[str(self.bucket[bucket_id])] = []
if len(source_ids) < source_size and len(target_ids) < target_size:
# random dropout: with probability 0.5, delete one token from the source sequence
source_length = len(source_ids)
_source_idx = source_ids
if random.random() > 0.5:
    dropout = random.randrange(source_length)
    _source_idx = source_ids[0:dropout] + \
        source_ids[dropout + 1:source_length]
data_set[self.bucket[bucket_id]].append([_source_idx, target_ids])
break
source, target = source_file.readline(), target_file.readline()
np.save(os.path.join(dataset_file, 'ErrorChecker_dataset'), data_set)
self.logger.info('Saving dataset to [{}]'.format(
os.path.join(dataset_file, 'ErrorChecker_dataset')))
del data_set
def split_train_val_test(self, numpy_datapath, dataset_file):
self.logger.info('Split train test and validate')
dataset_details = np.load(numpy_datapath).tolist()
key_list = dataset_details.keys()
train_perp = 0.9
test_perp = 0.98
train_dataset, test_dataset, val_dataset = defaultdict(
list), defaultdict(list), defaultdict(list)
for key in key_list:
bucket_details = dataset_details[key]
nums = len(bucket_details)
# shuffle
random.shuffle(bucket_details)
_train_num = int(train_perp * nums)
_test_num = int(test_perp*nums)
train_dataset[key].extend(bucket_details[:_train_num])
test_dataset[key].extend(bucket_details[_train_num:_test_num])
val_dataset[key].extend(bucket_details[_test_num:])
np.save(os.path.join(dataset_file, 'train_buckets'), train_dataset)
np.save(os.path.join(dataset_file, 'test_buckets'), test_dataset)
np.save(os.path.join(dataset_file, 'validate_buckets'), val_dataset)
self.logger.info('Split train, test and validate done...')
del train_dataset, test_dataset, val_dataset
def rendered_filter(
self, source_token, target_token, render_path, source_filtered_path,
target_filtered_path):
filtered_data = defaultdict(list)
source_filtered_token, target_filtered_token = [], []
# current path
pwd = os.path.abspath(os.getcwd())
# switch the directory to the render path
render_path = os.path.abspath(render_path)
source_token_list = open(source_token).readlines()
target_token_list = open(target_token).readlines()
assert len(source_token_list) == len(
target_token_list), 'The length of source and target must be same'
nums = len(source_token_list)
for idx in range(nums):
source = source_token_list[idx].strip()
target = target_token_list[idx].strip()
if render_path not in pwd:
os.chdir(render_path)
render_flag = latex_to_image(source, str(idx), self.logger)
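            # latex_to_image is a project-local helper (defined elsewhere); it
            # is assumed to return True when `source` renders to <idx>.png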
            # switch back to the original working directory
os.chdir(pwd)
            # if the source renders successfully, skip it -- we only keep
            # sequences that fail to render
if render_flag:
render_img = os.path.join(render_path, str(idx) + '.png')
                assert os.path.exists(
                    render_img), 'file does not exist: [{:s}]'.format(render_img)
os.remove(render_img)
continue
else:
source_filtered_token.append(source)
target_filtered_token.append(target)
assert len(source_filtered_token) == len(
target_filtered_token), 'Filter token nums must be same'
self.writefile(source_filtered_token, source_filtered_path)
self.writefile(target_filtered_token, target_filtered_path)
if __name__ == "__main__":
logger = init_logger(log_path='sequence_dataset.log', logger_name='ErrorCheck')
logger.info('Load logger done...')
preprocess = ErrorChecker(logger=logger, vocabulary=SequenceVocabulary)
prepared_dir = './errorchecker_dataset/prepared'
temp_dir = './errorchecker_dataset/temp'
source_dir = './errorchecker_dataset/eval_files_from_im2latex/predict'
    target_dir = './errorchecker_dataset/eval_files_from_im2latex/label'
write_source_token = './errorchecker_dataset/temp/merged_source_token.txt'
    # filter the source sequences, keeping only those that fail to render
source_filtered_path = './errorchecker_dataset/temp/filtered_source_token.txt'
    # filter the target sequences so their count matches the filtered sources
target_filtered_path = './errorchecker_dataset/temp/filtered_target_token.txt'
write_source_ids = './errorchecker_dataset/temp/merged_source_ids.txt'
write_target_token = './errorchecker_dataset/temp/merged_target_token.txt'
write_target_ids = './errorchecker_dataset/temp/merged_target_ids.txt'
buckets_file = './errorchecker_dataset/prepared/buckets.json'
train_dataset = './errorchecker_dataset/prepared/train_buckets.npy'
render_path = './errorchecker_dataset/rendered'
    # merge the per-file sequences into single source/target token files
if not os.path.exists(write_source_token) and not os.path.exists(write_target_token):
        preprocess.merge_sequence(source_dir, target_dir, write_source_token, write_target_token)
    # filter out sequences that still render successfully (the char-to-idx
    # conversion happens in the commented-out block below)
if not os.path.exists(source_filtered_path) or not os.path.exists(target_filtered_path):
preprocess.rendered_filter(write_source_token, write_target_token,
render_path, source_filtered_path, target_filtered_path)
""" # source file
if not os.path.exists(write_source_ids):
preprocess.convert_char_idx(input_file=write_source_token, out_file=write_source_ids)
# target file
if not os.path.exists(write_target_ids):
preprocess.convert_char_idx(input_file=write_target_token, out_file=write_target_ids)
# cal buckets
if not os.path.exists(buckets_file):
preprocess._cal_buckets(write_source_token, write_target_token, buckets_file, prepared_dir)
# Generate dataset for the numpy format
if not os.path.exists(os.path.join(temp_dir, 'ErrorChecker_dataset.npy')):
        preprocess.generate_sequence_dataset(
source_path=write_source_ids, target_path=write_target_ids, dataset_file=temp_dir)
# Split train test and validate
if not os.path.exists(train_dataset):
preprocess.split_train_val_test(os.path.join(
temp_dir, 'ErrorChecker_dataset.npy'), prepared_dir) """
| [
"[email protected] "
] | |
138debdadb0b507ae49ebdb3aba050fcc43976fd | 8da2d814ebe7b4393e04502e880a892307698688 | /lib/python2.7/site-packages/riak/tests/test_kv.py | 7db30a081269b3359feccad29c2339fc5d5b30e8 | [] | no_license | Eduardo-Baranowski/riakConnectPython27Django1e11e24RiakStartadoNoDocker | 3c1eb94e0b6ef40eb5a6f92d1988d87410986fa1 | 7a5638e98f9499b0acc15d20a969ebb7ced9b175 | refs/heads/master | 2022-12-12T02:41:09.692829 | 2019-10-16T22:42:05 | 2019-10-16T22:42:05 | 213,001,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 28,107 | py | # -*- coding: utf-8 -*-
import copy
import os
import sys
import unittest
from six import string_types, PY2, PY3
from time import sleep
from riak import ConflictError, RiakBucket, RiakError
from riak.resolver import default_resolver, last_written_resolver
from riak.tests import RUN_KV, RUN_RESOLVE, PROTOCOL
from riak.tests.base import IntegrationTestBase
from riak.tests.comparison import Comparison
try:
import simplejson as json
except ImportError:
import json
if PY2:
import cPickle
test_pickle_dumps = cPickle.dumps
test_pickle_loads = cPickle.loads
else:
import pickle
test_pickle_dumps = pickle.dumps
test_pickle_loads = pickle.loads
testrun_sibs_bucket = 'sibsbucket'
testrun_props_bucket = 'propsbucket'
def setUpModule():
if not RUN_KV:
return
c = IntegrationTestBase.create_client()
c.bucket(testrun_sibs_bucket).allow_mult = True
c.close()
def tearDownModule():
if not RUN_KV:
return
c = IntegrationTestBase.create_client()
c.bucket(testrun_sibs_bucket).clear_properties()
c.bucket(testrun_props_bucket).clear_properties()
c.close()
class NotJsonSerializable(object):
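    """Value type with no JSON representation, used to exercise the custom
    (pickle-based) encoder/decoder tests below."""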
def __init__(self, *args, **kwargs):
self.args = list(args)
self.kwargs = kwargs
def __eq__(self, other):
if len(self.args) != len(other.args):
return False
if len(self.kwargs) != len(other.kwargs):
return False
for name, value in self.kwargs.items():
if other.kwargs[name] != value:
return False
value1_args = copy.copy(self.args)
value2_args = copy.copy(other.args)
value1_args.sort()
value2_args.sort()
for i in range(len(value1_args)):
if value1_args[i] != value2_args[i]:
return False
return True
@unittest.skipUnless(RUN_KV, 'RUN_KV is 0')
class BasicKVTests(IntegrationTestBase, unittest.TestCase, Comparison):
def test_no_returnbody(self):
bucket = self.client.bucket(self.bucket_name)
o = bucket.new(self.key_name, "bar").store(return_body=False)
self.assertEqual(o.vclock, None)
@unittest.skipUnless(PROTOCOL == 'pbc', 'Only available on pbc')
def test_get_no_returnbody(self):
bucket = self.client.bucket(self.bucket_name)
o = bucket.new(self.key_name, "Ain't no body")
o.store()
stored_object = bucket.get(self.key_name, head_only=True)
self.assertFalse(stored_object.data)
list_of_objects = bucket.multiget([self.key_name], head_only=True)
for stored_object in list_of_objects:
self.assertFalse(stored_object.data)
def test_many_link_headers_should_work_fine(self):
bucket = self.client.bucket(self.bucket_name)
o = bucket.new("lots_of_links", "My god, it's full of links!")
for i in range(0, 300):
link = ("other", "key%d" % i, "next")
o.add_link(link)
o.store()
stored_object = bucket.get("lots_of_links")
self.assertEqual(len(stored_object.links), 300)
def test_is_alive(self):
self.assertTrue(self.client.is_alive())
def test_store_and_get(self):
bucket = self.client.bucket(self.bucket_name)
rand = self.randint()
obj = bucket.new('foo', rand)
obj.store()
obj = bucket.get('foo')
self.assertTrue(obj.exists)
self.assertEqual(obj.bucket.name, self.bucket_name)
self.assertEqual(obj.key, 'foo')
self.assertEqual(obj.data, rand)
# unicode objects are fine, as long as they don't
# contain any non-ASCII chars
if PY2:
self.client.bucket(unicode(self.bucket_name)) # noqa
else:
self.client.bucket(self.bucket_name)
if PY2:
self.assertRaises(TypeError, self.client.bucket, u'búcket')
self.assertRaises(TypeError, self.client.bucket, 'búcket')
else:
self.client.bucket(u'búcket')
self.client.bucket('búcket')
bucket.get(u'foo')
if PY2:
self.assertRaises(TypeError, bucket.get, u'føø')
self.assertRaises(TypeError, bucket.get, 'føø')
self.assertRaises(TypeError, bucket.new, u'foo', 'éå')
self.assertRaises(TypeError, bucket.new, u'foo', 'éå')
self.assertRaises(TypeError, bucket.new, 'foo', u'éå')
self.assertRaises(TypeError, bucket.new, 'foo', u'éå')
else:
bucket.get(u'føø')
bucket.get('føø')
bucket.new(u'foo', 'éå')
bucket.new(u'foo', 'éå')
bucket.new('foo', u'éå')
bucket.new('foo', u'éå')
obj2 = bucket.new('baz', rand, 'application/json')
obj2.charset = 'UTF-8'
obj2.store()
obj2 = bucket.get('baz')
self.assertEqual(obj2.data, rand)
def test_store_obj_with_unicode(self):
bucket = self.client.bucket(self.bucket_name)
data = {u'føø': u'éå'}
obj = bucket.new('foo', data)
obj.store()
obj = bucket.get('foo')
self.assertEqual(obj.data, data)
def test_store_unicode_string(self):
bucket = self.client.bucket(self.bucket_name)
data = u"some unicode data: \u00c6"
obj = bucket.new(self.key_name, encoded_data=data.encode('utf-8'),
content_type='text/plain')
obj.charset = 'utf-8'
obj.store()
obj2 = bucket.get(self.key_name)
self.assertEqual(data, obj2.encoded_data.decode('utf-8'))
def test_string_bucket_name(self):
# Things that are not strings cannot be bucket names
for bad in (12345, True, None, {}, []):
with self.assert_raises_regex(TypeError, 'must be a string'):
self.client.bucket(bad)
with self.assert_raises_regex(TypeError, 'must be a string'):
RiakBucket(self.client, bad, None)
# Unicode bucket names are not supported in Python 2.x,
# if they can't be encoded to ASCII. This should be changed in a
# future release.
if PY2:
with self.assert_raises_regex(TypeError,
'Unicode bucket names '
'are not supported'):
self.client.bucket(u'føø')
else:
self.client.bucket(u'føø')
# This is fine, since it's already ASCII
self.client.bucket('ASCII')
def test_generate_key(self):
# Ensure that Riak generates a random key when
# the key passed to bucket.new() is None.
bucket = self.client.bucket(self.bucket_name)
o = bucket.new(None, data={})
self.assertIsNone(o.key)
o.store()
self.assertIsNotNone(o.key)
self.assertNotIn('/', o.key)
existing_keys = bucket.get_keys()
self.assertEqual(len(existing_keys), 1)
def maybe_store_keys(self):
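        """Populate 'random_key_bucket' with ~1000 randomly keyed objects,
        once per cluster; the 'rkb-init' sentinel key marks completion."""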
skey = 'rkb-init'
bucket = self.client.bucket('random_key_bucket')
sobj = bucket.get(skey)
if sobj.exists:
return
for key in range(1, 1000):
o = bucket.new(None, data={})
o.store()
o = bucket.new(skey, data={})
o.store()
def test_stream_keys(self):
self.maybe_store_keys()
bucket = self.client.bucket('random_key_bucket')
regular_keys = bucket.get_keys()
self.assertNotEqual(len(regular_keys), 0)
streamed_keys = []
for keylist in bucket.stream_keys():
self.assertNotEqual([], keylist)
for key in keylist:
self.assertIsInstance(key, string_types)
streamed_keys += keylist
self.assertEqual(sorted(regular_keys), sorted(streamed_keys))
def test_stream_keys_timeout(self):
self.maybe_store_keys()
bucket = self.client.bucket('random_key_bucket')
streamed_keys = []
with self.assertRaises(RiakError):
for keylist in self.client.stream_keys(bucket, timeout=1):
self.assertNotEqual([], keylist)
for key in keylist:
self.assertIsInstance(key, string_types)
streamed_keys += keylist
def test_stream_keys_abort(self):
self.maybe_store_keys()
bucket = self.client.bucket('random_key_bucket')
regular_keys = bucket.get_keys()
self.assertNotEqual(len(regular_keys), 0)
try:
for keylist in bucket.stream_keys():
raise RuntimeError("abort")
except RuntimeError:
pass
# If the stream was closed correctly, this will not error
robj = bucket.get(regular_keys[0])
self.assertEqual(len(robj.siblings), 1)
self.assertEqual(True, robj.exists)
def test_bad_key(self):
bucket = self.client.bucket(self.bucket_name)
obj = bucket.new()
with self.assertRaises(TypeError):
bucket.get(None)
with self.assertRaises(TypeError):
self.client.get(obj)
with self.assertRaises(TypeError):
bucket.get(1)
def test_binary_store_and_get(self):
bucket = self.client.bucket(self.bucket_name)
# Store as binary, retrieve as binary, then compare...
rand = str(self.randint())
if PY2:
rand = bytes(rand)
else:
rand = bytes(rand, 'utf-8')
obj = bucket.new(self.key_name, encoded_data=rand,
content_type='text/plain')
obj.store()
obj = bucket.get(self.key_name)
self.assertTrue(obj.exists)
self.assertEqual(obj.encoded_data, rand)
# Store as JSON, retrieve as binary, JSON-decode, then compare...
data = [self.randint(), self.randint(), self.randint()]
key2 = self.randname()
obj = bucket.new(key2, data)
obj.store()
obj = bucket.get(key2)
self.assertEqual(data, json.loads(obj.encoded_data.decode()))
def test_blank_binary_204(self):
bucket = self.client.bucket(self.bucket_name)
# this should *not* raise an error
empty = ""
if PY2:
empty = bytes(empty)
else:
empty = bytes(empty, 'utf-8')
obj = bucket.new('foo2', encoded_data=empty, content_type='text/plain')
obj.store()
obj = bucket.get('foo2')
self.assertTrue(obj.exists)
self.assertEqual(obj.encoded_data, empty)
def test_custom_bucket_encoder_decoder(self):
bucket = self.client.bucket(self.bucket_name)
# Teach the bucket how to pickle
bucket.set_encoder('application/x-pickle', test_pickle_dumps)
bucket.set_decoder('application/x-pickle', test_pickle_loads)
data = {'array': [1, 2, 3], 'badforjson': NotJsonSerializable(1, 3)}
obj = bucket.new(self.key_name, data, 'application/x-pickle')
obj.store()
obj2 = bucket.get(self.key_name)
self.assertEqual(data, obj2.data)
def test_custom_client_encoder_decoder(self):
bucket = self.client.bucket(self.bucket_name)
# Teach the client how to pickle
self.client.set_encoder('application/x-pickle', test_pickle_dumps)
self.client.set_decoder('application/x-pickle', test_pickle_loads)
data = {'array': [1, 2, 3], 'badforjson': NotJsonSerializable(1, 3)}
obj = bucket.new(self.key_name, data, 'application/x-pickle')
obj.store()
obj2 = bucket.get(self.key_name)
self.assertEqual(data, obj2.data)
def test_unknown_content_type_encoder_decoder(self):
# Bypass the content_type encoders
bucket = self.client.bucket(self.bucket_name)
data = "some funny data"
if PY3:
# Python 3.x needs to store binaries
data = data.encode()
obj = bucket.new(self.key_name,
encoded_data=data,
content_type='application/x-frobnicator')
obj.store()
obj2 = bucket.get(self.key_name)
self.assertEqual(data, obj2.encoded_data)
def test_text_plain_encoder_decoder(self):
bucket = self.client.bucket(self.bucket_name)
data = "some funny data"
obj = bucket.new(self.key_name, data, content_type='text/plain')
obj.store()
obj2 = bucket.get(self.key_name)
self.assertEqual(data, obj2.data)
def test_missing_object(self):
bucket = self.client.bucket(self.bucket_name)
obj = bucket.get(self.key_name)
self.assertFalse(obj.exists)
# Object with no siblings should not raise the ConflictError
self.assertIsNone(obj.data)
def test_delete(self):
bucket = self.client.bucket(self.bucket_name)
rand = self.randint()
obj = bucket.new(self.key_name, rand)
obj.store()
obj = bucket.get(self.key_name)
self.assertTrue(obj.exists)
obj.delete()
obj.reload()
self.assertFalse(obj.exists)
def test_bucket_delete(self):
bucket = self.client.bucket(self.bucket_name)
rand = self.randint()
obj = bucket.new(self.key_name, rand)
obj.store()
bucket.delete(self.key_name)
obj.reload()
self.assertFalse(obj.exists)
def test_set_bucket_properties(self):
bucket = self.client.bucket(testrun_props_bucket)
# Test setting allow mult...
bucket.allow_mult = True
# Test setting nval...
bucket.n_val = 1
c2 = self.create_client()
bucket2 = c2.bucket(testrun_props_bucket)
self.assertTrue(bucket2.allow_mult)
self.assertEqual(bucket2.n_val, 1)
# Test setting multiple properties...
bucket.set_properties({"allow_mult": False, "n_val": 2})
c3 = self.create_client()
bucket3 = c3.bucket(testrun_props_bucket)
self.assertFalse(bucket3.allow_mult)
self.assertEqual(bucket3.n_val, 2)
# clean up!
c2.close()
c3.close()
def test_if_none_match(self):
bucket = self.client.bucket(self.bucket_name)
obj = bucket.get(self.key_name)
obj.delete()
obj.reload()
self.assertFalse(obj.exists)
obj.data = ["first store"]
obj.content_type = 'application/json'
obj.store()
obj.data = ["second store"]
with self.assertRaises(Exception):
obj.store(if_none_match=True)
def test_siblings(self):
# Set up the bucket, clear any existing object...
bucket = self.client.bucket(testrun_sibs_bucket)
obj = bucket.get(self.key_name)
bucket.allow_mult = True
# Even if it previously existed, let's store a base resolved version
# from which we can diverge by sending a stale vclock.
obj.data = 'start'
obj.content_type = 'text/plain'
obj.store()
vals = set(self.generate_siblings(obj, count=5))
# Make sure the object has five siblings...
obj = bucket.get(self.key_name)
self.assertEqual(len(obj.siblings), 5)
# When the object is in conflict, using the shortcut methods
# should raise the ConflictError
with self.assertRaises(ConflictError):
obj.data
# Get each of the values - make sure they match what was
# assigned
vals2 = set([sibling.data for sibling in obj.siblings])
self.assertEqual(vals, vals2)
# Resolve the conflict, and then do a get...
resolved_sibling = obj.siblings[3]
obj.siblings = [resolved_sibling]
self.assertEqual(len(obj.siblings), 1)
obj.store()
self.assertEqual(len(obj.siblings), 1)
self.assertEqual(obj.data, resolved_sibling.data)
@unittest.skipUnless(RUN_RESOLVE, "RUN_RESOLVE is 0")
def test_resolution(self):
bucket = self.client.bucket(testrun_sibs_bucket)
obj = bucket.get(self.key_name)
bucket.allow_mult = True
# Even if it previously existed, let's store a base resolved version
# from which we can diverge by sending a stale vclock.
obj.data = 'start'
obj.content_type = 'text/plain'
obj.store()
vals = self.generate_siblings(obj, count=5, delay=1.01)
# Make sure the object has five siblings when using the
# default resolver
obj = bucket.get(self.key_name)
obj.reload()
self.assertEqual(len(obj.siblings), 5)
# Setting the resolver on the client object to use the
# "last-write-wins" behavior
self.client.resolver = last_written_resolver
obj.reload()
self.assertEqual(obj.resolver, last_written_resolver)
self.assertEqual(1, len(obj.siblings))
self.assertEqual(obj.data, vals[-1])
# Set the resolver on the bucket to the default resolver,
# overriding the resolver on the client
bucket.resolver = default_resolver
obj.reload()
self.assertEqual(obj.resolver, default_resolver)
self.assertEqual(len(obj.siblings), 5)
# Define our own custom resolver on the object that returns
# the maximum value, overriding the bucket and client resolvers
def max_value_resolver(obj):
obj.siblings = [max(obj.siblings, key=lambda s: s.data), ]
obj.resolver = max_value_resolver
obj.reload()
self.assertEqual(obj.resolver, max_value_resolver)
self.assertEqual(obj.data, max(vals))
# Setting the resolver to None on all levels reverts to the
# default resolver.
obj.resolver = None
self.assertEqual(obj.resolver, default_resolver) # set by bucket
bucket.resolver = None
self.assertEqual(obj.resolver, last_written_resolver) # set by client
self.client.resolver = None
self.assertEqual(obj.resolver, default_resolver) # reset
self.assertEqual(bucket.resolver, default_resolver) # reset
self.assertEqual(self.client.resolver, default_resolver) # reset
@unittest.skipUnless(RUN_RESOLVE, "RUN_RESOLVE is 0")
def test_resolution_default(self):
# If no resolver is setup, be sure to resolve to default_resolver
bucket = self.client.bucket(testrun_sibs_bucket)
self.assertEqual(self.client.resolver, default_resolver)
self.assertEqual(bucket.resolver, default_resolver)
def test_tombstone_siblings(self):
# Set up the bucket, clear any existing object...
bucket = self.client.bucket(testrun_sibs_bucket)
obj = bucket.get(self.key_name)
bucket.allow_mult = True
obj.data = 'start'
obj.content_type = 'text/plain'
obj.store(return_body=True)
obj.delete()
vals = set(self.generate_siblings(obj, count=4))
obj = bucket.get(self.key_name)
        # TODO: this used to always be 5; depending on the server version the
        # tombstone may or may not be returned as a sibling
siblen = len(obj.siblings)
self.assertTrue(siblen == 4 or siblen == 5)
non_tombstones = 0
for sib in obj.siblings:
if sib.exists:
non_tombstones += 1
self.assertTrue(not sib.exists or sib.data in vals)
self.assertEqual(non_tombstones, 4)
def test_store_of_missing_object(self):
bucket = self.client.bucket(self.bucket_name)
# for json objects
o = bucket.get(self.key_name)
self.assertEqual(o.exists, False)
o.data = {"foo": "bar"}
o.content_type = 'application/json'
o = o.store()
self.assertEqual(o.data, {"foo": "bar"})
self.assertEqual(o.content_type, "application/json")
o.delete()
# for binary objects
o = bucket.get(self.randname())
self.assertEqual(o.exists, False)
if PY2:
o.encoded_data = "1234567890"
else:
o.encoded_data = "1234567890".encode()
o.content_type = 'application/octet-stream'
o = o.store()
if PY2:
self.assertEqual(o.encoded_data, "1234567890")
else:
self.assertEqual(o.encoded_data, "1234567890".encode())
self.assertEqual(o.content_type, "application/octet-stream")
o.delete()
def test_store_metadata(self):
bucket = self.client.bucket(self.bucket_name)
rand = self.randint()
obj = bucket.new(self.key_name, rand)
obj.usermeta = {'custom': 'some metadata'}
obj.store()
obj = bucket.get(self.key_name)
self.assertEqual('some metadata', obj.usermeta['custom'])
def test_list_buckets(self):
bucket = self.client.bucket(self.bucket_name)
bucket.new("one", {"foo": "one", "bar": "red"}).store()
buckets = self.client.get_buckets()
self.assertTrue(self.bucket_name in [x.name for x in buckets])
def test_stream_buckets(self):
bucket = self.client.bucket(self.bucket_name)
bucket.new(self.key_name, data={"foo": "one",
"bar": "baz"}).store()
buckets = []
for bucket_list in self.client.stream_buckets():
buckets.extend(bucket_list)
self.assertTrue(self.bucket_name in [x.name for x in buckets])
def test_stream_buckets_abort(self):
bucket = self.client.bucket(self.bucket_name)
bucket.new(self.key_name, data={"foo": "one",
"bar": "baz"}).store()
try:
for bucket_list in self.client.stream_buckets():
raise RuntimeError("abort")
except RuntimeError:
pass
robj = bucket.get(self.key_name)
self.assertTrue(robj.exists)
self.assertEqual(len(robj.siblings), 1)
def test_get_params(self):
bucket = self.client.bucket(self.bucket_name)
bucket.new(self.key_name, data={"foo": "one",
"bar": "baz"}).store()
bucket.get(self.key_name, basic_quorum=False)
bucket.get(self.key_name, basic_quorum=True)
bucket.get(self.key_name, notfound_ok=True)
bucket.get(self.key_name, notfound_ok=False)
missing = bucket.get('missing-key', notfound_ok=True,
basic_quorum=True)
self.assertFalse(missing.exists)
def test_preflist(self):
nodes = ['[email protected]', '[email protected]']
bucket = self.client.bucket(self.bucket_name)
bucket.new(self.key_name, data={"foo": "one",
"bar": "baz"}).store()
try:
preflist = bucket.get_preflist(self.key_name)
preflist2 = self.client.get_preflist(bucket, self.key_name)
for pref in (preflist, preflist2):
self.assertEqual(len(pref), 3)
self.assertIn(pref[0]['node'], nodes)
[self.assertTrue(node['primary']) for node in pref]
except NotImplementedError as e:
raise unittest.SkipTest(e)
def generate_siblings(self, original, count=5, delay=None):
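        """Create `count` conflicting siblings of `original` by repeatedly
        storing distinct random values with the original's (stale) vclock."""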
vals = []
for _ in range(count):
while True:
randval = str(self.randint())
if randval not in vals:
break
other_obj = original.bucket.new(key=original.key,
data=randval,
content_type='text/plain')
other_obj.vclock = original.vclock
other_obj.store()
vals.append(randval)
if delay:
sleep(delay)
return vals
@unittest.skipUnless(RUN_KV, 'RUN_KV is 0')
class BucketPropsTest(IntegrationTestBase, unittest.TestCase):
def test_rw_settings(self):
bucket = self.client.bucket(testrun_props_bucket)
self.assertEqual(bucket.r, "quorum")
self.assertEqual(bucket.w, "quorum")
self.assertEqual(bucket.dw, "quorum")
self.assertEqual(bucket.rw, "quorum")
bucket.w = 1
self.assertEqual(bucket.w, 1)
bucket.r = "quorum"
self.assertEqual(bucket.r, "quorum")
bucket.dw = "all"
self.assertEqual(bucket.dw, "all")
bucket.rw = "one"
self.assertEqual(bucket.rw, "one")
bucket.set_properties({'w': 'quorum',
'r': 'quorum',
'dw': 'quorum',
'rw': 'quorum'})
bucket.clear_properties()
def test_primary_quora(self):
bucket = self.client.bucket(testrun_props_bucket)
self.assertEqual(bucket.pr, 0)
self.assertEqual(bucket.pw, 0)
bucket.pr = 1
self.assertEqual(bucket.pr, 1)
bucket.pw = "quorum"
self.assertEqual(bucket.pw, "quorum")
bucket.set_properties({'pr': 0, 'pw': 0})
bucket.clear_properties()
def test_clear_bucket_properties(self):
bucket = self.client.bucket(testrun_props_bucket)
bucket.allow_mult = True
self.assertTrue(bucket.allow_mult)
bucket.n_val = 1
self.assertEqual(bucket.n_val, 1)
# Test setting clearing properties...
self.assertTrue(bucket.clear_properties())
self.assertFalse(bucket.allow_mult)
self.assertEqual(bucket.n_val, 3)
@unittest.skipUnless(RUN_KV, 'RUN_KV is 0')
class KVFileTests(IntegrationTestBase, unittest.TestCase):
def test_store_binary_object_from_file(self):
bucket = self.client.bucket(self.bucket_name)
obj = bucket.new_from_file(self.key_name, __file__)
obj.store()
obj = bucket.get(self.key_name)
self.assertNotEqual(obj.encoded_data, None)
is_win32 = sys.platform == 'win32'
self.assertTrue(obj.content_type == 'text/x-python' or
(is_win32 and obj.content_type == 'text/plain') or
obj.content_type == 'application/x-python-code')
def test_store_binary_object_from_file_should_use_default_mimetype(self):
bucket = self.client.bucket(self.bucket_name)
filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, 'README.md')
obj = bucket.new_from_file(self.key_name, filepath)
obj.store()
obj = bucket.get(self.key_name)
self.assertEqual(obj.content_type, 'application/octet-stream')
def test_store_binary_object_from_file_should_fail_if_file_not_found(self):
bucket = self.client.bucket(self.bucket_name)
with self.assertRaises(IOError):
bucket.new_from_file(self.key_name, 'FILE_NOT_FOUND')
obj = bucket.get(self.key_name)
# self.assertEqual(obj.encoded_data, None)
self.assertFalse(obj.exists)
@unittest.skipUnless(RUN_KV, 'RUN_KV is 0')
class CounterTests(IntegrationTestBase, unittest.TestCase):
def test_counter_requires_allow_mult(self):
bucket = self.client.bucket(self.bucket_name)
if bucket.allow_mult:
bucket.allow_mult = False
self.assertFalse(bucket.allow_mult)
with self.assertRaises(Exception):
bucket.update_counter(self.key_name, 10)
def test_counter_ops(self):
bucket = self.client.bucket(testrun_sibs_bucket)
self.assertTrue(bucket.allow_mult)
# Non-existent counter has no value
self.assertEqual(None, bucket.get_counter(self.key_name))
# Update the counter
bucket.update_counter(self.key_name, 10)
self.assertEqual(10, bucket.get_counter(self.key_name))
# Update with returning the value
self.assertEqual(15, bucket.update_counter(self.key_name, 5,
returnvalue=True))
# Now try decrementing
self.assertEqual(10, bucket.update_counter(self.key_name, -5,
returnvalue=True))
| [
"[email protected]"
] | |
e28873a44c9771e1144a08da8c57c43ff1837ea2 | e181d0db8473f4aec034a5084ae5fb7a54185369 | /fractal.py | 4fcbcb8611d84cc273cc3ae67fb6d9507a124d7c | [
"MIT"
] | permissive | mdhender/sweep3d | 5ebfa302548129cf19a0fadc9d4ba26bbfde010f | 7c48690f2b7d3fdf7ae6c809b5e75cf560c42260 | refs/heads/master | 2023-06-22T03:26:57.610205 | 2019-07-09T17:29:11 | 2019-07-09T17:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | from exportmesh import *
from vector import *
from sys import argv
import math  # math.sqrt is used below; don't rely on the star imports for it
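# Builds a recursive "tetra-bump" fractal: each pass subdivides every triangle
# and raises a tetrahedral spike in place of the centre piece, then writes the
# mesh, color-graded by recursion depth, to fractal.stl.
# Usage: python fractal.py [iterations] [height_factor]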
def moreFaces(v1,v2,v3,factor):
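    # Raise a new apex above the centroid of triangle (v1,v2,v3), at a height
    # proportional to the mean edge length, and return the three side faces
    # of the resulting tetrahedral bump.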
m = (1./3)*(v1+v2+v3)
a = v2-v1
b = v3-v1
c = v3-v2
h = factor * math.sqrt(6)/3. * (a.norm()+b.norm()+c.norm()) / 3.
v4 = m + h * a.cross(b).normalize()
return [ (v1,v2,v4), (v2,v3,v4), (v3,v1,v4) ]
v1,v2,v3 = Vector(0,0,0), Vector(10,math.sqrt(3)*10,0), Vector(20,0,0)
nIterations = 2 if len(argv) < 2 else int(argv[1])
factor = 1. if len(argv) < 3 else float(argv[2])
mesh = [ (0,face) for face in moreFaces(v3,v2,v1,1)] + [(0,(v1,v2,v3))]
for iteration in range(1,1+nIterations):
newMesh = []
for i,face in mesh:
v1,v2,v3 = face
newMesh.append((i,(v1,0.5*(v1+v2),0.5*(v1+v3))))
newMesh.append((i,(0.5*(v1+v2), v2, 0.5*(v2+v3))))
newMesh.append((i,(0.5*(v2+v3), v3, 0.5*(v1+v3))))
newMesh += [(i+1,f) for f in moreFaces(0.5*(v2+v3), 0.5*(v1+v3), 0.5*(v1+v2), factor)]
mesh = newMesh
mesh = [ ((i/float(nIterations),0,1.-i/float(nIterations)), face) for i,face in mesh ]
saveSTL("fractal.stl", mesh)
| [
"[email protected]"
] | |
a0946f334eae002ed01c9a1bcfd6ca48c8db193f | 9e683d53295b8d4165ab75c4d15b702103514b44 | /S7/S7-T2.py | 8e345681bdee8626c8948df40ac4a11bdb9385b7 | [] | no_license | pliniusblah/Pyhton_IME | 551d9d56f2b5243ab151a9167156b1889d280e75 | 0fe82ee50e280c2afd026fc047632a70a389b46a | refs/heads/master | 2020-05-09T20:15:45.369153 | 2019-04-16T01:49:36 | 2019-04-16T01:49:36 | 181,400,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | largura = int(input("digite a largura: "))
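# Draws the hollow outline of a largura x altura rectangle using '#' characters.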
altura = int(input("digite a altura: "))
n = largura
m = altura
while altura > 0:
while largura > 0:
        if altura == 1 or altura == m or largura == 1 or largura == n:
print("#", end = "")
else:
print("", end = " ")
largura -= 1
print()
largura = n
altura -= 1 | [
"[email protected]"
] | |
590e08af70b7e6b7a851d92b857c3db5ca05e813 | e5d0eaa6f7a958e47d69c6bcbebf40e0f7cf81b7 | /PlentyReads/mybooks/filters.py | da8b4ea17493abd73d8fb735687f4227ca07ab50 | [] | no_license | TarunVasarla/Plenty-reads | ae9bce417e1803165b15433afa4aa9f2d3f099c9 | 3d3389d37a62d402ee212e30a794fc3d6fbb9dd1 | refs/heads/master | 2021-01-10T15:09:31.183190 | 2015-12-01T11:45:10 | 2015-12-01T11:45:10 | 46,712,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import django_filters
from .models import Book
class BookListFilter(django_filters.FilterSet):
    # Case-insensitive substring filter on the book name. This replaces the
    # broken module-level `Book.objects.filter(name__icontains=search_word)`
    # call, which referenced an undefined `search_word`. `lookup_expr` assumes
    # a recent django-filter; very old versions spell it `lookup_type`.
    bookname = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = Book
fields = ['bookname', 'AuthorName', 'preface','genre','uploadBook'] | [
"YOUR EMAIL ADDRESS"
] | YOUR EMAIL ADDRESS |
e0560da82733d1d3672f233e22996a9fca97de01 | 1fd8fcb30456db588602c9a5539fdac45a92a556 | /BlenderSetup.py | de79f8b196d9e662059f9f05cf24a798238dde89 | [] | no_license | derkling/BlenderRender | f7cd33feb6be1281cbccf62279564ba5855abcb6 | 2a4289e8c433814a8d12a3e7f30aa3645f633735 | refs/heads/master | 2021-01-22T14:32:47.336706 | 2014-06-07T15:43:16 | 2014-06-07T16:08:40 | 20,596,860 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,828 | py |
import bpy
################################################################################
### Render Configuration
################################################################################
# GPU vs CPU rendering
use_gpu = 0
# Cameras to render, list of camera names
# cameras = ['Camera1', 'Camera2', 'Camera3']
cameras = ['Camera']
# Dimensions
samples = 100
res_x = 1920
res_y = 1080
res_p = 20
# Lights Paths
bnc_max = 8
bnc_min = 3
bnc_trasp_max = 8
bnc_trasp_min = 8
bnc_diff = 128
bnc_glos = 128
bnc_tran = 128
no_caustics = True
# Rendering engine tile sizes and ordering (performance tuning)
tile_gpu_x = 128
tile_gpu_y = 128
tile_cpu_x = 32
tile_cpu_y = 32
tile_order = 'CENTER'
# Scene to render
scene = 'Scene'
# Filename for the image to produce
image_name = 'Render_0001'
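# Typical headless invocation (file names here are illustrative):
#   blender -b scene.blend -P BlenderSetup.py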
################################################################################
### Do not touch under this line
################################################################################
def dumpRenderSettings():
print('Compute device type: ', bpy.context.user_preferences.system.compute_device_type)
print('Compute device : ', bpy.context.user_preferences.system.compute_device)
print('Render Configuration')
print(' Engine : ', bpy.data.scenes[scene].render.engine)
print(' ResolutionX : ', bpy.data.scenes[scene].render.resolution_x)
print(' ResolutionY : ', bpy.data.scenes[scene].render.resolution_y)
print(' Resolution% : ', bpy.data.scenes[scene].render.resolution_percentage)
print(' Tile Size X : ', bpy.data.scenes[scene].render.tile_x)
print(' Tile Size Y : ', bpy.data.scenes[scene].render.tile_y)
print('Blender Cycles Configuration')
print(' Device : ', bpy.data.scenes[scene].cycles.device)
print(' Samples : ', bpy.data.scenes[scene].cycles.samples)
print(' Tile order : ', bpy.data.scenes[scene].cycles.tile_order)
print(' No Caustics : ', bpy.data.scenes[scene].cycles.no_caustics)
print(' Bounces Max : ', bpy.data.scenes[scene].cycles.max_bounces)
print(' " Min : ', bpy.data.scenes[scene].cycles.min_bounces)
print(' " TMax : ', bpy.data.scenes[scene].cycles.transparent_max_bounces)
print(' " TMin : ', bpy.data.scenes[scene].cycles.transparent_min_bounces)
print(' " Diff : ', bpy.data.scenes[scene].cycles.diffuse_bounces)
print(' " Glsy : ', bpy.data.scenes[scene].cycles.glossy_bounces)
print(' " Tran : ', bpy.data.scenes[scene].cycles.transmission_bounces)
print('*** Initial configuration')
dumpRenderSettings()
print('*** Compute Device Selection [CUDA: ', use_gpu, ']')
if use_gpu:
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
bpy.data.scenes[scene].cycles.device = 'GPU'
bpy.data.scenes[scene].render.tile_x = tile_gpu_x
bpy.data.scenes[scene].render.tile_y = tile_gpu_y
else:
bpy.context.user_preferences.system.compute_device_type = 'NONE'
bpy.context.user_preferences.system.compute_device = 'CPU'
bpy.data.scenes[scene].cycles.device = 'CPU'
bpy.data.scenes[scene].render.tile_x = tile_cpu_x
bpy.data.scenes[scene].render.tile_y = tile_cpu_y
bpy.data.scenes[scene].render.engine = 'CYCLES'
bpy.data.scenes[scene].render.resolution_x = res_x
bpy.data.scenes[scene].render.resolution_y = res_y
bpy.data.scenes[scene].render.resolution_percentage = res_p
bpy.data.scenes[scene].cycles.samples = samples
bpy.data.scenes[scene].cycles.tile_order = tile_order
bpy.data.scenes[scene].cycles.no_caustics = no_caustics
bpy.data.scenes[scene].cycles.max_bounces = bnc_max
bpy.data.scenes[scene].cycles.min_bounces = bnc_min
bpy.data.scenes[scene].cycles.transparent_max_bounces = bnc_trasp_max
bpy.data.scenes[scene].cycles.transparent_min_bounces = bnc_trasp_min
bpy.data.scenes[scene].cycles.diffuse_bounces = bnc_diff
bpy.data.scenes[scene].cycles.glossy_bounces = bnc_glos
bpy.data.scenes[scene].cycles.transmission_bounces = bnc_tran
print('*** Saving User-Settings...')
bpy.ops.wm.save_userpref()
dumpRenderSettings()
for camera_name in cameras:
    # Skip anything that is not an actual camera object
if (bpy.data.objects[camera_name].type != 'CAMERA'):
continue
# Setup next camera
bpy.context.scene.camera = bpy.data.objects[camera_name]
# Start rendering
print('*** Rendering [' + camera_name + ']... ')
bpy.ops.render.render(write_still=True)
rendered_image = bpy.data.images['Render Result']
# Save render image and a copy based on camera name
rendered_image.save_render(filepath=image_name + '.png')
rendered_image.save_render(filepath=image_name + '_' + camera_name + '.png')
| [
"[email protected]"
] | |
19cdd4b74a624831790377e5ae12bfa50fd6b0b8 | 5cc954e27fd924da0f6f44e7d58691d612a77f80 | /coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py | a61b58650ffd75934658eae62b4940ce7caea054 | [
"BSD-3-Clause"
] | permissive | 1duo/coremltools | e25f1a8423ec368bf1e7dabfaa36e77952578e79 | 37e619d99bf603d2cb9ea0839fa3ebe649996b0a | refs/heads/master | 2021-07-15T08:48:51.930217 | 2020-07-27T20:58:33 | 2020-07-27T20:58:33 | 203,466,876 | 2 | 0 | BSD-3-Clause | 2020-07-22T00:05:02 | 2019-08-20T22:59:50 | Python | UTF-8 | Python | false | false | 7,628 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import types
import numpy as np
import logging
@register_pass(namespace="tensorflow")
def expand_tf_lstm(prog):
"""
    Expand tf_lstm_block_cell to fine-grained SSA ops, as follows:
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
        wci = wcf = wco = 0
i = sigmoid(cs_prev .* wci + i)
f = sigmoid(cs_prev .* wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
    o = sigmoid(cs .* wco + o)
co = tanh(cs)
h = co .* o
Inputs:
prog: Program
"""
for f_name, f in prog.functions.items():
expand_tf_lstm_helper(f)
def expand_tf_lstm_helper(block):
# shallow copy hides changes on f.operations during the loop
for op in block.operations[:]:
for b in op.blocks:
expand_tf_lstm_helper(b)
if op.op_type == "tf_lstm_block_cell":
expand_tf_lstm_block_cell(op)
logging.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
if op.op_type == "tf_lstm_block":
            # only cs and h are supported for now; the other outputs could be
            # added at a performance cost.
i, cs, f, o, ci, co, h = op.outputs
if all(
[
len(ov.child_ops) <= 0 and len(ov.consuming_blocks) <= 0
for ov in [i, f, o, ci, co]
]
):
expand_tf_lstm_block(op)
logging.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
def _lstm_cell_builder(op, x, h_prev, cs_prev, before_op=None):
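    # Emits the MIL ops for a single LSTM cell step (the pseudocode in the
    # expand_tf_lstm docstring above) and returns [i, cs, f, o, ci, co, h].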
b = op.bias # [4*hidden_dim]
forget_bias = op.forget_bias.val # python:float
# xh = [x, h_prev]
# xh shape: [b, input_dim+hidden_dim]
xh = mb.concat(values=[x, h_prev], axis=-1, before_op=before_op)
# w: [4*hidden_dim, input_dim + hidden_dim] (icfo layout)
w = np.transpose(op.weight.val)
# [i, ci, f, o] = xh * w + b. Shape is [b, 4*hidden_dim]
icfo = mb.linear(x=xh, weight=w, bias=b, before_op=before_op)
# i, ci, f, o shape: [b, hidden_dim]
i, ci, f, o = mb.split(x=icfo, num_splits=4, axis=-1, before_op=before_op)
if op.forget_bias.val != 0:
f = mb.add(x=f, y=forget_bias, before_op=before_op)
# i = sigmoid(cs_prev .* wci + i)
# f = sigmoid(cs_prev .* wcf + f)
if op.use_peephole.val:
wci = op.weight_peep_i.val # [hidden_dim]
wcf = op.weight_peep_f.val # [hidden_dim]
x = mb.mul(x=cs_prev, y=wci, before_op=before_op)
pre_i = mb.add(x=x, y=i, before_op=before_op)
x = mb.mul(x=cs_prev, y=wcf, before_op=before_op)
pre_f = mb.add(x=x, y=f, before_op=before_op)
else:
pre_i = i
pre_f = f
i = mb.sigmoid(x=pre_i, before_op=before_op)
f = mb.sigmoid(x=pre_f, before_op=before_op)
# ci = tanh(ci)
ci = mb.tanh(x=ci, before_op=before_op)
# cs = ci .* i + cs_prev .* f
x = mb.mul(x=ci, y=i, before_op=before_op)
y = mb.mul(x=cs_prev, y=f, before_op=before_op)
cs = mb.add(x=x, y=y, before_op=before_op)
# cs = clip(cs, cell_clip)
if op.cell_clip is not None:
clip_val = op.cell_clip.val
cs = mb.clip(x=cs, alpha=-clip_val, beta=clip_val, before_op=before_op)
# o = sigmoid(cs * wco + o)
if op.use_peephole.val:
wco = op.weight_peep_o.val
x = mb.mul(x=cs, y=wco, before_op=before_op)
pre_o = mb.add(x=x, y=o, before_op=before_op)
else:
pre_o = o
o = mb.sigmoid(x=pre_o, before_op=before_op)
# co = tanh(cs)
co = mb.tanh(x=cs, before_op=before_op)
# h = co .* o
h = mb.mul(x=co, y=o, before_op=before_op)
return [i, cs, f, o, ci, co, h]
def expand_tf_lstm_block_cell(op):
if op.op_type != "tf_lstm_block_cell":
raise ValueError()
with op.enclosing_block as block:
x = op.x # [b, input_dim]
h_prev = op.h_prev # [b, hidden_dim]
cs_prev = op.c_prev # [b, hidden_dim]
i, cs, f, o, ci, co, h = _lstm_cell_builder(
op, x, h_prev, cs_prev, before_op=op
)
# Replace all outputs
new_outputs = [i, cs, f, o, ci, co, h]
for old_v, new_v in zip(op.outputs, new_outputs):
block.replace_uses_of_var_after_op(
anchor_op=op, old_var=old_v, new_var=new_v
)
block.remove_ops([op])
def expand_tf_lstm_block(op):
if op.op_type != "tf_lstm_block":
raise ValueError()
with op.enclosing_block as block:
x = op.x # [s, b, input_dim]
h_prev = op.h_prev # [b, hidden_dim]
cs_prev = op.c_prev # [b, hidden_dim]
# Allocate two lists: cs & h
x_shape = mb.shape(x=x, before_op=op)
length = mb.slice_by_index(x=x_shape, begin=[0], end=[1], before_op=op)
h_shape = mb.shape(x=h_prev, before_op=op)
list_shape = mb.concat(values=[length, h_shape], axis=0, before_op=op)
cs_list = mb.fill(shape=list_shape, before_op=op)
h_list = mb.fill(shape=list_shape, before_op=op)
# append initial state at index 0
cs_prev = mb.expand_dims(x=cs_prev, axes=[0], before_op=op)
cs_list = mb.concat(values=[cs_prev, cs_list], axis=0, before_op=op)
h_prev = mb.expand_dims(x=h_prev, axes=[0], before_op=op)
h_list = mb.concat(values=[h_prev, h_list], axis=0, before_op=op)
def cond(i, cs_list, h_list):
return mb.less(x=i, y=length)
def body(i, cs_list, h_list):
xi = mb.gather(x=x, indices=i, axis=0)
h_prev = mb.gather(x=h_list, indices=i, axis=0)
cs_prev = mb.gather(x=cs_list, indices=i, axis=0)
ig, cs, fg, og, ci, co, h = _lstm_cell_builder(op, xi, h_prev, cs_prev)
counter = mb.add(x=i, y=1)
return (
counter,
mb.scatter(data=cs_list, indices=counter, updates=cs),
mb.scatter(data=h_list, indices=counter, updates=h),
)
_, cs_list, h_list = mb.while_loop(
_cond=cond, _body=body, loop_vars=([0], cs_list, h_list), before_op=op
)
# strip initial state or element at index 0
begin, end = [1, 0, 0], [0, 0, 0]
begin_mask = [False, True, True]
end_mask = [True, True, True]
cs = mb.slice_by_index(
x=cs_list,
begin=begin,
end=end,
begin_mask=begin_mask,
end_mask=end_mask,
before_op=op,
)
h = mb.slice_by_index(
x=h_list,
begin=begin,
end=end,
begin_mask=begin_mask,
end_mask=end_mask,
before_op=op,
)
# Replace all outputs
new_outputs = [cs, h]
for old_v, new_v in zip(
[ov for index, ov in enumerate(op.outputs) if index in [1, 6]], new_outputs
):
block.replace_uses_of_var_after_op(
anchor_op=op, old_var=old_v, new_var=new_v
)
block.remove_ops([op])
| [
"[email protected]"
] | |
5180bca50c9e7748a9f12aec748b56b3b43d2f5a | 0e0db8614de0dcfe9828c27e8e39e19493c32ffe | /sentamil/st_letters/tamil.py | 95a5fc7b9bb0cf6e57570303f4ad3815cda3716d | [] | no_license | ThaniThamizhAkarathiKalanjiyam/sentamil | a9d1ad61efe998e698e6714dd2c2bea2860be86b | a9cc2db1c5293bdd82877d15c4c87d1089163010 | refs/heads/master | 2020-05-07T12:16:10.059570 | 2019-04-13T08:53:13 | 2019-04-13T08:53:13 | 180,497,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | #-------------------------------------------------------------------------------
# Name:        tamil
# Purpose:     helpers for retrieving Tamil letters by encoding
#
# Author: muthukumaran
#
# Created: 13/04/2019
# Copyright: (c) muthukumaran 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import st_encode.types as encd
def getTamilLetters(encod_type = encd.UTF8):
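    # Stub: currently only reports the requested encoding (UTF8 vs TACE16);
    # the actual letter list is not built yet.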
    if encod_type == encd.UTF8:
print "UTF8"
else:
print "TACE16"
return
| [
"[email protected]"
] | |
fea8a58ab7cfc7c12913b7468cac25c0b22b66fa | 372cf5a2df14f580e49687493eef28021bc55b11 | /backend/main.py | 6677926de6e461447e36e41766095eeae3ab3c42 | [] | no_license | Yousefkh97/currency_swap | 949c00c3c03d229ad6365bf93536a937b0c1002d | 6f4dd932616ac6ea559524e0dd39f5981a5091b1 | refs/heads/main | 2023-02-21T04:12:07.155765 | 2021-01-22T17:34:49 | 2021-01-22T17:34:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | import flask
from flask import Flask, request
import requests
from datetime import datetime
from werkzeug.utils import redirect
# Fixer.io endpoint used to fetch the latest exchange rates (free tier quotes
# all rates against an EUR base)
api_key = "85aa5a4fb3533fbae7223f74ccb1befb"
url = "http://data.fixer.io/api/latest?access_key=" + api_key
app = Flask(__name__)
info_list = []  # in-memory record of completed transactions
@app.route("/", methods=["POST", "GET"])
def index():
    # look up the EC2 instance's public IP from the instance metadata service
ip_address = (requests.get("http://169.254.169.254/latest/meta-data/public-ipv4").content).decode('utf-8')
fd = 'http://' + ip_address + ':8000/'
    # on POST from the front end: read the form, compute the conversion,
    # record it, and forward the result to the front-end service
if request.method == "POST":
        firstCurrency = request.form.get("firstCurrency")
secondCurrency = request.form.get("secondCurrency")
amount = request.form.get("amount")
response = requests.get(url)
app.logger.info(response)
infos = response.json()
        firstValue = infos["rates"][firstCurrency]
secondValue = infos["rates"][secondCurrency]
        # both rates are quoted against the same (EUR) base, so their ratio
        # is the first->second cross rate
        result = (secondValue / firstValue) * float(amount)
currencyInfo = dict()
        currencyInfo["firstCurrency"] = firstCurrency
currencyInfo["secondCurrency"] = secondCurrency
currencyInfo["amount"] = amount
currencyInfo["result"] = result
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
        l = [current_time, amount, firstCurrency, secondCurrency, secondValue, result]
info_list.append(l)
res = requests.post(fd, json=currencyInfo)
return redirect(fd, code=302) # return to the frontend page
else:
return redirect(fd, code=302)
@app.route("/Auti/", methods=["POST", "GET"])
def Auti():
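    # forward the accumulated transaction history to the audit front end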
ip_address2 = (requests.get("http://169.254.169.254/latest/meta-data/public-ipv4").content).decode('utf-8')
fd2 = 'http://' + ip_address2 + ':7000/'
info_dict = dict()
info_dict["info"] = info_list
res = requests.post(fd2, json=info_dict)
return redirect(fd2, code=302)
if __name__ == "__main__":
app.run(host = "0.0.0.0", debug=True)
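# Example form post against a running instance (host is illustrative; Flask's
# development server defaults to port 5000):
#   curl -X POST -d "firstCurrency=USD&secondCurrency=EUR&amount=10" http://<ec2-ip>:5000/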
| [
"[email protected]"
] | |
696156339b2b19e269fdf76ca9a328bb30f18bd9 | 72b1d38b3ed41fdfe8559e36e202e04fee11e488 | /learn-doc/python学习/python_code/gevent_01.py | fe5a2bd7c5bdc311467803f40310ce7f2e5d1ccd | [] | no_license | tinyjjlin/learn_repo | d71b8b546e4021dd4f79f4d925a8b6f0d1e6bf23 | 7fd9e3b851ea2bc26c210d1ec4813d5df08c7458 | refs/heads/master | 2023-06-21T06:44:26.543676 | 2019-08-19T02:18:07 | 2019-08-19T02:18:07 | 155,843,556 | 0 | 0 | null | 2023-06-13T22:49:51 | 2018-11-02T09:28:02 | Java | UTF-8 | Python | false | false | 236 | py | import gevent
'''
Key points of Python/gevent syntax: greenlets and cooperative yielding
'''
def test1():
print(1, 2)
gevent.sleep(0)
print(3, 4)
def test2():
print(5, 6)
gevent.sleep(0)
print(7, 8)
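# Run both greenlets to completion. gevent.sleep(0) yields control, so the
# output interleaves: "1 2", "5 6", "3 4", "7 8".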
gevent.joinall([gevent.spawn(test1), gevent.spawn(test2)])
| [
"[email protected]"
] | |
3c20ba9a793c6ee0255c31afea623e1f38b6ff90 | 62f0134d18b956076312a66c364389a0d70284ad | /tools/libari/setup.py | bc23ec24f23d7a0c42eb671284080a9f29726490 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | tomputer22/aristoteles | d145420b116eed8753e0eb09e465cff6b878c06c | ecf66cbfde1f019adfc5ed8b29e9a90ee11c9018 | refs/heads/master | 2023-07-14T19:33:09.479131 | 2021-09-06T16:26:28 | 2021-09-06T16:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from setuptools import setup
import os
VERSION = "0.1"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="libari",
description="Create custom ARI packets.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Tobias Kröll",
license="MIT",
version=VERSION,
packages=["libari"],
install_requires=[],
extras_require={"test": ["pytest"]},
tests_require=["libari[test]"],
python_requires=">=3.6",
) | [
"[email protected]"
] |