blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-283) | content_id (string, len 40) | detected_licenses (sequence, len 0-41) | license_type (string, 2 classes) | repo_name (string, len 7-96) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (string, 11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 43 classes) | src_encoding (string, 9 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (string, 30 classes) | content (string, len 7-5.88M) | authors (sequence, len 1) | author (string, len 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9013aa7988405a9b4e9b670a3c8d11f8d6c89835 | 64b3b667e05455f2c0a69c93dfdf2dbc0d7a75fe | /xbot_tools/scripts/odom2yaw.py | c3dfc52aa4f27e588363064643604bc9bf84f25e | [] | no_license | krovanh/xbot | 82b6af056d5bf2afb220cfdaba596fab23621f01 | 06e8b90099e81098945976b7c4e8f1c08b311692 | refs/heads/master | 2022-11-21T18:22:10.599664 | 2020-07-07T03:25:49 | 2020-07-07T03:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | #!/usr/bin/env python
# coding=utf-8
######################################################################################
#> File Name: odom2yaw.py
#> Author: Rocwang
#> Mail: [email protected];
#> Github: https://github.com/yowlings
#> Created Time: 2018-06-14 (Thursday) 16:14:27
######################################################################################
import rospy
import tf
from std_msgs.msg import Float64
from nav_msgs.msg import Odometry

# Tuning constants; currently unused by this node.
alfa = 0.1
beta = 0.31415926 / 5


class Odom2Yaw(object):
    """Subscribes to /odom and republishes the yaw angle (in degrees) on /yaw."""

    def __init__(self):
        self.help = """
        """
        # if rospy.has_param("~cascPath"):
        #     self.cascPath = rospy.get_param("~cascPath")
        # else:
        #     rospy.set_param("~cascPath", "../scripts/haarcascade_frontalface_default.xml")
        self.yaw_pub = rospy.Publisher('/yaw', Float64, queue_size=1)
        rospy.Subscriber("/odom", Odometry, self.odom_callback)
        rospy.spin()

    def odom_callback(self, odom):
        yaw = Float64()
        orie = odom.pose.pose.orientation
        o = [orie.x, orie.y, orie.z, orie.w]
        rpy = tf.transformations.euler_from_quaternion(o)
        yaw.data = rpy[2] * 180 / 3.1415926
        self.yaw_pub.publish(yaw)
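
# Example (editor's addition, not part of the original file): a quick sanity
# check of the conversion above; the identity quaternion has zero yaw:
#   tf.transformations.euler_from_quaternion([0, 0, 0, 1])  # ~ (0.0, 0.0, 0.0)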
if __name__ == '__main__':
    rospy.init_node('odom2yaw')
    try:
        rospy.loginfo("initialization system")
        Odom2Yaw()
        print("process done and quit")
    except rospy.ROSInterruptException:
        rospy.loginfo("node terminated.") | [
"[email protected]"
] | |
c05d069f25f766339c40f654028345f19be7bb15 | 28cac5b23602862ec56d973e702c517c12326fd6 | /money/admin.py | e293ff23f8579e1735cf15bcb8bf6dc2b99b0e9f | [] | no_license | mamedxanli/magazin | 0283b7e9066e0175ae006a6c3f70109794e74136 | 7cfa890b2931bd7bab297d3d7f59c368b8165c44 | refs/heads/master | 2021-06-20T06:34:07.182935 | 2019-06-30T22:13:21 | 2019-06-30T22:13:21 | 156,974,176 | 0 | 0 | null | 2021-06-10T20:57:47 | 2018-11-10T11:22:04 | JavaScript | UTF-8 | Python | false | false | 93 | py | from django.contrib import admin
from money.models import Money
admin.site.register(Money)
| [
"[email protected]"
] | |
581ea5796b807d563d1d60430cc52a42e26951b8 | 528cd83a81535a1cf55994a16710ade88d3a5449 | /tests/test_home.py | c0c1c582f090546bbc26d1d27997e5c9ea978b74 | [] | no_license | Scot3004/django-quilla-web | f8a3ed9bcec3d1ce4c7f1208266c40047e6a2f16 | 9aca2e8c2f4be7f973a81ff1c943e9f45e2300e2 | refs/heads/master | 2023-08-19T19:40:12.048894 | 2023-08-15T14:56:03 | 2023-08-15T14:56:03 | 58,422,810 | 1 | 0 | null | 2023-09-13T21:58:43 | 2016-05-10T02:17:29 | HTML | UTF-8 | Python | false | false | 400 | py | """
Homepage test using plain Selenium (no helper patterns such as POM or the like).
"""
from selenium.webdriver.common.by import By


def test_title(base_url, driver):
    """Test of the home page."""
    driver.get(base_url)
    assert driver.title == "Python Barranquilla"
    assert driver.find_element(by=By.TAG_NAME, value='h1').text == "Python Barranquilla."
    driver.quit()
| [
"[email protected]"
] | |
ef5eee58ada2c24a5e1aa1cb4a8f6116aee40cb3 | 441ee65564e7135c8136d382dacd6e548cd9fcb8 | /sahara/plugins/mapr/base/base_cluster_validator.py | 9c7473bd03b0861002c173279914df6b4cea3dae | [
"Apache-2.0"
] | permissive | stackhpc/sahara | bccae95d0b8963dab1d6c1add2f6fe31fa80e8c1 | 83c7506076ee6913a381d1fda26361d9eb466e68 | refs/heads/master | 2021-01-25T09:32:55.555734 | 2017-06-08T22:21:46 | 2017-06-08T22:21:46 | 93,850,995 | 0 | 0 | Apache-2.0 | 2022-01-20T19:09:48 | 2017-06-09T11:09:22 | Python | UTF-8 | Python | false | false | 1,507 | py | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.abstract.cluster_validator as v
import sahara.plugins.mapr.util.validation_utils as vu
import sahara.plugins.mapr.versions.version_handler_factory as vhf


class BaseValidator(v.AbstractValidator):

    def validate(self, cluster_context):
        for service in cluster_context.required_services:
            vu.assert_present(service, cluster_context)
        for service in cluster_context.cluster_services:
            for rule in service.validation_rules:
                rule(cluster_context)

    def validate_scaling(self, cluster_context, existing, additional):
        cluster = cluster_context.cluster
        version = cluster.hadoop_version
        handler = vhf.VersionHandlerFactory.get().get_handler(version)
        cluster = vu.create_fake_cluster(cluster, existing, additional)
        cluster_context = handler.get_context(cluster)
        self.validate(cluster_context)
| [
"[email protected]"
] | |
618f7bf3ad5363970ac8447c9c1afcb17a35d486 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/1595.py | d01faf25443576147a95bda0f8e955249ad02345 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | import sys
def tidy_count(N):
    """Counts the tidy numbers (digits non-decreasing) from 1 to N."""
    number_of_digits = len(str(N))
    if number_of_digits == 1:
        return N
    else:
        total_count = 0
        count_table = [[0 for j in range(1, 10)] for i in range(0, number_of_digits)]
        count_table[0] = [1, 1, 1, 1, 1, 1, 1, 1, 1]
        total_count += sum(count_table[0])
        for i in range(1, number_of_digits):
            for j in range(0, 9):
                count_table[i][j] = sum(count_table[i - 1][j:])
            if i == number_of_digits - 1:
                number_as_string = str(N)
                len_ind = i
                prev_number = 0
                N_tidy_bool = True
                for d in number_as_string:
                    val = sum(count_table[len_ind][prev_number:int(d) - 1])
                    total_count += val
                    if prev_number > int(d):
                        N_tidy_bool = False
                    prev_number = int(d) - 1
                    len_ind -= 1
                if N_tidy_bool:
                    total_count += 1
            else:
                total_count += sum(count_table[i])
        return total_count


def tidy_lowest(N):
    """Returns the largest tidy number not exceeding N."""
    digits_reversed = str(N)[::-1]
    num_arr = []
    for r in digits_reversed:
        num_arr.append(int(r))
    prev_num = 9
    for ind, n in enumerate(num_arr):
        if prev_num < n:
            # A digit exceeds its right neighbour: decrement it and set all
            # less-significant digits to 9.
            for j in range(0, ind):
                num_arr[j] = 9
            num_arr[ind] -= 1
        prev_num = num_arr[ind]
    string_return = ''
    for n in num_arr:
        string_return += str(n)
    return int(string_return[::-1])


f = open(sys.argv[1], 'r')
input_file = f.read().splitlines()
f.close()
output_file = open('output_large_tidy_numbers.out', 'w')
t = int(input_file[0])
for i in range(1, t + 1):
    n = input_file[i]
    output = tidy_lowest(int(n))
    out_string = "Case #" + str(i) + ": " + str(output)
    print(out_string)
    output_file.write(out_string)
    output_file.write('\n')
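
# Worked example (editor's addition): 65 is not tidy since 6 > 5; the largest
# tidy number not exceeding it is 59, i.e. tidy_lowest(65) == 59.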
# print(tidy_lowest(65)) | [
"[email protected]"
] | |
411720f3def8edac72d63cae616f745db2c0b311 | 35d62f3ccf1c422b13b313c4e519a5ce335e934d | /PythonIII/Python_III/DemoProgs/sortkey.py | 86ca77ffe375df949cf29a77580f2e63cfdb4273 | [] | no_license | malaybiswal/python | 357a074889299effe6a5fa2f1cd9c50ca35652d0 | 684d24d719b785725e736671faf2681232ecc394 | refs/heads/master | 2020-05-17T22:25:43.043929 | 2019-05-08T23:41:19 | 2019-05-08T23:41:19 | 183,999,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | """
This program deals with various ways to sort one- and two-dimensional
lists. With key= you should think of the function as receiving one
element of the list at a time. In the case of the two-dimensional list,
each element is, itself, a list. The data being transformed must follow
all of the pertinent rules. For example, if you add a string to lst1,
using key=abs will fail on a TypeError.
"""
lst1 = [12, -1, 18, -6, -14, 8]
lst2 = ['Smith', 'sally', 'sadie', 'Sam']
lst3 = [['sally', -12, 'm'], ['Sam', -10, 'F'], ['sadie', 10, 'f']]
z = sorted(lst1, key=abs) # sorts by the absolute value of each element
print(z)
z = sorted(lst2, key=str.lower) # sorts by the lower-case version of
print(z) # each element
z = sorted(lst3, key=lambda y: abs(y[1])) # sorts by the absolute value
print(z) # of the second element in each sublist
# This lambda creates a tuple (a list would work too) containing the
# absolute value of the second element and then the lower-case version
# of the first element in each sublist.
z = sorted(lst3, key=lambda y: (abs(y[1]), y[0].lower()))
print(z)
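
# Editor's addition (not in the original file): for simple positional lookups,
# operator.itemgetter can replace the lambdas above.
import operator
z = sorted(lst3, key=operator.itemgetter(1))  # sorts by the raw second element
print(z)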
| [
"[email protected]"
] | |
700b5124c8595e09088fab3c3c7132575291b8d6 | f2b138710d3d32a7f02326f65714164855810f37 | /blog/.~c9_invoke_AndTR.py | 9ed3f32605a353273e71845ebbabde25291cc9e3 | [] | no_license | watanta/portfolio | 720f82663b5ee24b81bdc64174df3f70f51dd9c0 | 29e160ff39b7a98a4b0950b825dfb111d200faab | refs/heads/master | 2020-03-22T13:00:21.552145 | 2018-07-07T11:27:01 | 2018-07-07T11:27:01 | 140,076,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py |
from django.urls import path, include
from . import views
urlpatterns = [
path
]
| [
"[email protected]"
] | |
6a07549cbf1c12c6a1f056529de46e95dbe28f89 | 49b073c87b2bc405ed0e1ec50751ab9cd247d9ea | /venv/bin/pip3 | 06fef993c588d5e64a2039f7dc896df9d43f2d36 | [] | no_license | KarlPineau/P13-donneesculturelles-algo-movie | 6f4235190a391d5a9cc8202188b885d63026e048 | c6e880b09a21dac0adc36bc2eb41e6cb47be47f9 | refs/heads/master | 2020-04-06T18:25:32.808638 | 2018-11-15T11:04:40 | 2018-11-15T11:04:40 | 157,697,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | #!/Users/karlpineau/PycharmProjects/P13-dataculture/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
589916a3fe64cb4fb3046df64f26dc4024335ea7 | 42100ccfb1ebca6c0cf3eef17acd1f6b0a38e15c | /sqlcode.py | 141ae209b2e7dd48140a6ad973950b8fba1bf794 | [] | no_license | Sanyam2112/WhatsAppChatBot | a7631e271e49357dcbad1e48e71e5485aed4be0b | eb0f5bca3b808ed28b7e91eb26025772dfed27f2 | refs/heads/master | 2020-06-10T12:38:35.581691 | 2020-03-25T09:12:52 | 2020-03-25T09:12:52 | 193,646,345 | 0 | 0 | null | 2019-06-25T06:17:01 | 2019-06-25T06:17:00 | null | UTF-8 | Python | false | false | 2,450 | py | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome(r'C:\ChromeDriver\chromedriver.exe')
driver.get('https://web.whatsapp.com/')
input("Press enter")


def replymsg(v):
    v = v.lower()  # str.lower() returns a new string; keep the lowered copy
    if "schemes" in v:
        reply = "These are the following schemes available"
    elif "card" in v:
        reply = "Do you want to make a card"
    elif "hello" in v:
        reply = "Hey, how can i help you"
    elif "yes" in v:
        reply = "Okay here's the link to make your new Kissan card"
    elif "crop" in v:
        reply = "Here's information about crops"
    elif "quit" in v:
        reply = "Thank you for using the chatbot"
    else:
        reply = "I'm sorry, I do not understand"
    msg_box = driver.find_element_by_class_name('_3u328')
    msg_box.send_keys(reply)
    msg_box.send_keys(Keys.ENTER)


def InputOutput(unread, recipient):
    names = [recipient]
    for name in names:
        # find = name
        person = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
        person.click()
        for i in range(1, 4):
            driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
        msg_got = driver.find_elements_by_css_selector("span.selectable-text.invisible-space.copyable-text")
        msg = [message.text for message in msg_got]
        for i in range(-unread, 0):
            print(msg[i])
            replymsg(msg[i])


def loop():
    count = 0
    for i in range(1, 16):
        try:
            c = "//*[@id=\"pane-side\"]/div[1]/div/div/div[" + str(i) + "]/div/div/div[2]/div[2]/div[2]/span[1]/div"
            d = driver.find_element_by_xpath(c)
            x = d.text
            try:
                a = "//*[@id=\"pane-side\"]/div[1]/div/div/div[" + str(i) + "]/div/div/div[2]/div[1]/div[1]/span/span"
                b = driver.find_element_by_xpath(a)
                y = b.text
                print(y)
                print(x)
                InputOutput(int(x), y)
                time.sleep(2)
            except Exception as e1:
                print('error')
                continue
        except Exception as e:
            print('error')
            count = count + 1
            continue
    return count


for a in range(100):
    if loop() == 15:
        driver.refresh()
        time.sleep(15)

print("chat bot ended") | [
"[email protected]"
] | |
3271073143057a6f5c10c32a9663947c7ade845c | 18f7aa2a416da68c1e38587b60cfff32fbb06a63 | /Random/merge_sort.py | 6908d247d5757beee50c89fe12a05e4d22d34f1d | [] | no_license | kumaravinashm/Python-Question | d0bc837b373cb979da676f8d98faee3fb531c6d1 | 4d46a968a5aec38f494d2cc65a285c161c150c0c | refs/heads/master | 2020-06-17T19:33:52.186800 | 2019-10-04T02:50:31 | 2019-10-04T02:50:31 | 196,027,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | def mergeSort(arr):
    if len(arr) > 1:
        mid = len(arr) // 2
        L = arr[:mid]
        R = arr[mid:]
        mergeSort(L)
        mergeSort(R)
        i = j = k = 0
        while i < len(L) and j < len(R):
            if L[i] < R[j]:
                arr[k] = L[i]
                i += 1
            else:
                arr[k] = R[j]
                j += 1
            k += 1
        while i < len(L):
            arr[k] = L[i]
            i += 1
            k += 1
        while j < len(R):
            arr[k] = R[j]
            j += 1
            k += 1


arr = [34, 456, 45, 3, 2, 34, 243, 6, 457, 56, 5, 434135345, 7, 5, 34, 467]
mergeSort(arr)
print(arr)
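
# Editor's addition (not part of the original file): a quick property test
# against Python's built-in sort on random data.
import random
data = [random.randint(-1000, 1000) for _ in range(100)]
expected = sorted(data)
mergeSort(data)
assert data == expected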
| [
"[email protected]"
] | |
31f90e38fc0633e8bdf039de832130e8d11a1685 | 3588ff6731cddef10eedb06d3a1500e43ec981e8 | /排序算法/insert_sort.py | c6d24dddecc08456813383691ed2c518daf18cce | [] | no_license | anitazhaochen/nowcoder | 4e301354ccd4d54f622b0dd78c96c32c5b28e02a | 5df9a4208d6de969350ff7405a291c7b2c64e082 | refs/heads/master | 2020-04-02T20:40:30.584985 | 2019-11-26T08:09:36 | 2019-11-26T08:09:36 | 154,775,322 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from myutils import getRandom
from myutils import compare
@compare
def insertSort(arr):
    if arr is None or len(arr) == 0:
        return
    length = len(arr)
    for i in range(1, length):
        # Bubble arr[i] leftwards until it reaches its sorted position.
        for j in range(i, 0, -1):
            if arr[j] < arr[j - 1]:
                arr[j], arr[j - 1] = arr[j - 1], arr[j]


if __name__ == "__main__":
    arr = getRandom(-999, 999, 10)
    print(insertSort(arr))
| [
"[email protected]"
] | |
312dade636eb5f118e4e499650b9e03c76e77e24 | 2d860475a273e4fc5906cd74a4a9a98d0e684483 | /oarias2710-iic2233-2020-2-master/Actividades/AS01/jugadores.py | 8fb395bd62358086f657c6b51312515d09482397 | [] | no_license | oarias2710/IIC2233_2020_2sem | af41501a778242661805452e77c7f4624b45c76c | a770d7b803ea23f9ebe198deabe2f4f8c3b717da | refs/heads/main | 2023-02-09T05:31:29.274385 | 2020-12-30T16:13:18 | 2020-12-30T16:13:18 | 325,592,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,860 | py | from estudiantes import Programago
import random
import parametros as p
from abc import ABC, abstractmethod


class Jugador(Programago, ABC):
    def __init__(self, nombre, saludo, numero_polera):
        super().__init__(nombre, saludo)
        self.nerviosismo = None
        self.velocidad = None
        self.equilibrio = None
        self.numero_polera = numero_polera
        # self.saludo = saludo
        # self.nombre = nombre

    @abstractmethod
    def asignar_cualidades(self):
        pass

    @abstractmethod
    def competir(self):
        pass

    def celebrar(self):
        print(f"{self.nombre}: I did it!")


class Buscador(Jugador):
    def __init__(self, nombre, saludo, numero_polera):
        super().__init__(nombre, saludo, numero_polera)
        self.asignar_cualidades()

    def asignar_cualidades(self):
        self.nerviosismo = random.uniform(p.NERVIOSISMO_BUSCADOR_MIN, p.NERVIOSISMO_BUSCADOR_MAX)
        self.velocidad = random.uniform(p.VELOCIDAD_BUSCADOR_MIN, p.VELOCIDAD_BUSCADOR_MAX)
        self.equilibrio = random.uniform(p.EQUILIBRIO_BUSCADOR_MIN, p.EQUILIBRIO_BUSCADOR_MAX)

    def competir(self):
        v1 = self.velocidad * p.PONDERADOR_VELOCIDAD_BUSCADOR
        v2 = self.equilibrio * p.PONDERADOR_EQUILIBRIO_BUSCADOR
        v3 = self.nerviosismo * p.PONDERADOR_NERVIOSISMO_BUSCADOR
        return float(v1 + v2 - v3)


class Golpeador(Jugador):
    def __init__(self, nombre, saludo, numero_polera):
        super().__init__(nombre, saludo, numero_polera)
        self.asignar_cualidades()

    def asignar_cualidades(self):
        self.nerviosismo = random.uniform(p.NERVIOSISMO_GOLPEADOR_MIN, p.NERVIOSISMO_GOLPEADOR_MAX)
        self.velocidad = random.uniform(p.VELOCIDAD_GOLPEADOR_MIN, p.VELOCIDAD_GOLPEADOR_MAX)
        self.equilibrio = random.uniform(p.EQUILIBRIO_GOLPEADOR_MIN, p.EQUILIBRIO_GOLPEADOR_MAX)

    def competir(self):
        v1 = self.velocidad * p.PONDERADOR_VELOCIDAD_GOLPEADOR
        v2 = self.equilibrio * p.PONDERADOR_EQUILIBRIO_GOLPEADOR
        v3 = self.nerviosismo * p.PONDERADOR_NERVIOSISMO_GOLPEADOR
        return float(v1 + v2 - v3)


class Cazador(Jugador):
    def __init__(self, nombre, saludo, numero_polera):
        super().__init__(nombre, saludo, numero_polera)
        self.asignar_cualidades()

    def asignar_cualidades(self):
        self.nerviosismo = random.uniform(p.NERVIOSISMO_CAZADOR_MIN, p.NERVIOSISMO_CAZADOR_MAX)
        self.velocidad = random.uniform(p.VELOCIDAD_CAZADOR_MIN, p.VELOCIDAD_CAZADOR_MAX)
        self.equilibrio = random.uniform(p.EQUILIBRIO_CAZADOR_MIN, p.EQUILIBRIO_CAZADOR_MAX)

    def competir(self):
        v1 = self.velocidad * p.PONDERADOR_VELOCIDAD_CAZADOR
        v2 = self.equilibrio * p.PONDERADOR_EQUILIBRIO_CAZADOR
        v3 = self.nerviosismo * p.PONDERADOR_NERVIOSISMO_CAZADOR
        return float(v1 + v2 - v3)


if __name__ == '__main__':
    # Test instances
    buscador = Buscador('Pruebinelda', 'testing the Buscador class', '42')
    golpeador = Golpeador('Pruebardo', 'testing the Golpeador class', 'Pi')
    cazador = Cazador('Pruebina', 'testing the Cazador class', 'e')

    # Attribute checks
    print('I am ' + buscador.nombre + ' and I am ' + buscador.saludo + ', my jersey number is ' + buscador.numero_polera)
    print('I am ' + golpeador.nombre + ' and I am ' + golpeador.saludo + ', my jersey number is ' + golpeador.numero_polera)
    print('I am ' + cazador.nombre + ' and I am ' + cazador.saludo + ', my jersey number is ' + cazador.numero_polera)

    # Class/subclass checks
    if isinstance(buscador, Jugador):
        print('Buscador correctly inherits from Jugador!')
    if isinstance(golpeador, Jugador):
        print('Golpeador correctly inherits from Jugador!')
    if isinstance(cazador, Jugador):
        print('Cazador correctly inherits from Jugador!')
| [
"[email protected]"
] | |
d02475a37e1fd28c5fcbc270cde64d594cefbc67 | 0a6b70c7139de6793b866c2e852528f751dfa6b5 | /link_pred.py | 7d5fb66cc1d4c84b82844c88a44489eb2401742a | [] | no_license | fatemehsrz/JAME | 3a398d19fc6dc4d7aa57bd5adc74d14caa8c0240 | 2150764074f2039257b593ff9549335a4c68af9d | refs/heads/master | 2021-07-16T13:35:49.689021 | 2021-03-15T12:18:20 | 2021-03-15T12:18:20 | 243,335,860 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,355 | py |
import networkx as nx
from sklearn import pipeline
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import average_precision_score
class Graph():
def __init__(self,
nx_G=None, is_directed=False,
prop_pos=0.5, prop_neg=0.5,
workers=1,
random_seed=None):
self.G = nx_G
self.is_directed = is_directed
self.prop_pos = prop_pos
self.prop_neg = prop_neg
self.wvecs = None
self.workers = workers
self._rnd = np.random.RandomState(seed=random_seed)
def read_graph(self, input):
G = nx.read_edgelist(input, nodetype=int, create_using=nx.DiGraph())
for edge in G.edges():
G.adj[edge[0]][edge[1]]['weight'] = 1
print("Read graph, nodes: %d, edges: %d" % (G.number_of_nodes(), G.number_of_edges()))
G1= G.to_undirected()
self.G = G1
def generate_pos_neg_links(self):
# Select n edges at random (positive samples)
n_edges = self.G.number_of_edges()
n_nodes = self.G.number_of_nodes()
npos = int(self.prop_pos * n_edges)
nneg = int(self.prop_neg * n_edges)
n_neighbors = [len(list(self.G.neighbors(v))) for v in self.G.nodes()] ##
n_non_edges = n_nodes - 1 - np.array(n_neighbors)
non_edges = [e for e in nx.non_edges(self.G)]
print("Finding %d of %d non-edges" % (nneg, len(non_edges)))
# Select m pairs of non-edges (negative samples)
rnd_inx = self._rnd.choice(len(non_edges), nneg, replace=False)
neg_edge_list = [non_edges[ii] for ii in rnd_inx]
if len(neg_edge_list) < nneg:
raise RuntimeWarning(
"Only %d negative edges found" % (len(neg_edge_list))
)
print("Finding %d positive edges of %d total edges" % (npos, n_edges))
# Find positive edges, and remove them.
edges = self.G.edges()
edges=list(edges)
pos_edge_list = []
n_count = 0
n_ignored_count = 0
rnd_inx = self._rnd.permutation(n_edges)
for eii in rnd_inx.tolist():
edge = edges[eii]
# Remove edge from graph
data = self.G[edge[0]][edge[1]]
self.G.remove_edge(*edge)
reachable_from_v1 = nx.connected._plain_bfs(self.G, edge[0])
if edge[1] not in reachable_from_v1:
self.G.add_edge(*edge, **data)
n_ignored_count += 1
else:
pos_edge_list.append(edge)
print("Found: %d edges " % (n_count), end="\r")
n_count += 1
if n_count >= npos:
break
edges_num= len(pos_edge_list)
self._pos_edge_list = pos_edge_list
self._neg_edge_list = neg_edge_list
print('pos_edge_list',len(self._pos_edge_list))
print('neg_edge_list',len(self._neg_edge_list ))
def get_selected_edges(self):
edges = self._pos_edge_list + self._neg_edge_list
labels = np.zeros(len(edges))
labels[:len(self._pos_edge_list)] = 1
return edges, labels
def edges_to_features(self, edge_list, edge_function, emb_size, model):
n_tot = len(edge_list)
feature_vec = np.empty((n_tot, emb_size), dtype='f')
for ii in range(n_tot):
v1, v2 = edge_list[ii]
# Edge-node features
emb1 = np.asarray(model[v1])
emb2 = np.asarray(model[v2])
# Calculate edge feature
feature_vec[ii] = edge_function(emb1, emb2)
return feature_vec
class Link_Prediction(object):
def __init__(self, embeddings, edge_file):
self.embeddings = embeddings
self.edge_file = edge_file
def create_train_test_graphs(self, input= 'Facebook.edges',regen='regen',workers=8 ):
default_params = {
'edge_function': "hadamard",
"prop_pos": 0.5, # Proportion of edges to remove nad use as positive samples
"prop_neg": 0.5, # Number of non-edges to use as negative samples
}
# Remove half the edges, and the same number of "negative" edges
prop_pos = default_params['prop_pos']
prop_neg = default_params['prop_neg']
print("Regenerating link prediction graphs")
# Train graph embeddings on graph with random links
Gtrain = Graph(is_directed=False,
prop_pos=prop_pos,
prop_neg=prop_neg,
workers=workers)
Gtrain.read_graph(input)
Gtrain.generate_pos_neg_links()
# Generate a different random graph for testing
Gtest = Graph(is_directed=False,
prop_pos=prop_pos,
prop_neg=prop_neg,
workers = workers)
Gtest.read_graph(input)
Gtest.generate_pos_neg_links()
return Gtrain, Gtest
def test_edge_functions(self ,num_experiments=2, emb_size=128, model=None,edges_train=None , edges_test=None, Gtrain=None, Gtest=None, labels_train=None, labels_test=None):
edge_functions = {
"hadamard": lambda a, b: a * b,
"average": lambda a, b: 0.5 * (a + b),
"l1": lambda a, b: np.abs(a - b),
"l2": lambda a, b: np.abs(a - b) ** 2,
}
aucs = {func: [] for func in edge_functions}
aps = {func: [] for func in edge_functions}
for iter in range(num_experiments):
print("Iteration %d of %d" % (iter, num_experiments))
for edge_fn_name, edge_fn in edge_functions.items():
#print(edge_fn_name, edge_fn)
# Calculate edge embeddings using binary function
edge_features_train = Gtrain.edges_to_features(edges_train, edge_fn, emb_size, model)
edge_features_test = Gtest.edges_to_features(edges_test, edge_fn, emb_size, model)
# Linear classifier
scaler = StandardScaler()
lin_clf = LogisticRegression(C=1.0)
clf = pipeline.make_pipeline(scaler, lin_clf)
# Train classifier
clf.fit(edge_features_train, labels_train)
AUC= metrics.scorer.roc_auc_scorer(clf, edge_features_test, labels_test)
aucs[edge_fn_name].append(AUC)
#AP = average_precision_score(labels_test, clf.predict(edge_features_test))
#aps[edge_fn_name].append(AP)
return aucs
def predict(self):
Gtrain, Gtest = self.create_train_test_graphs(self.edge_file, regen='regen', workers=2)
# Train and test graphs, with different edges
edges_train, labels_train = Gtrain.get_selected_edges()
edges_test, labels_test = Gtest.get_selected_edges()
auc= self.test_edge_functions(2, 128, self.embeddings, edges_train , edges_test, Gtrain, Gtest, labels_train ,labels_test)
return auc
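
# Usage sketch (editor's addition; the embedding source is an illustrative
# assumption, not part of the original script):
#   emb = ...  # dict-like mapping node id -> 128-dim embedding vector
#   lp = Link_Prediction(emb, 'Facebook.edges')
#   aucs = lp.predict()  # maps edge-function name ("hadamard", ...) to AUC scores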
| [
"[email protected]"
] | |
e5802422f5e1586de5fc03a8643b8f91e03eab64 | 95023f309f0061a9139c82b62ecc91e90bf70e76 | /Library/migrations/0011_auto_20190126_0041.py | 03db185e270c29ec93252ace2b4c5295fd5cbbcb | [] | no_license | rachanaahire/Django-CollegeDepartmentWebsite | 63f3b8e55aca9c7cb4aa4d09a60562a0c338bc49 | df19a8f2eb4a408c62b9adb7ac70a2737bfa8e27 | refs/heads/master | 2021-12-14T19:43:37.411630 | 2021-12-05T18:28:46 | 2021-12-05T18:28:46 | 224,184,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # Generated by Django 2.1.4 on 2019-01-25 19:11
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('Library', '0010_auto_20190126_0036'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='course',
            field=models.CharField(blank=True, choices=[('BSC', 'BSC'), ('MSC', 'MSC')], max_length=100, verbose_name='Select Course'),
        ),
    ]
| [
"[email protected]"
] | |
76f7520ec348276ea03ea5fc6b1410b0cb6b3960 | 067bd682010d7fe495ac35c601a31d37a985aa89 | /adoc/test4.py | 5755c92f071ee8ef06579a9a51bf7dfdd49306cc | [] | no_license | framr/pytudes | 17c80dc26ea5fa5ab8e08c0ae47c1cc82e452f85 | f7893c50a5301b410cade1a5f0ace34f658743fa | refs/heads/master | 2023-02-09T10:40:50.224531 | 2020-12-25T16:25:36 | 2020-12-25T16:25:36 | 225,833,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | #!/usr/bin/env python3
from itertools import groupby


def valid_number2(num):
    nums = [int(dd) for dd in str(num)]
    collapsed = []
    for dd, it in groupby(nums):
        collapsed.append((dd, len(list(it))))
    prev = -1
    have_same = False
    for digit, cnt in collapsed:
        if digit < prev:
            return False
        if cnt == 2:
            have_same = True
        prev = digit
    if not have_same:
        return False
    return True


def check(min_val, max_val):
    cnt = 0
    for num in range(min_val, max_val + 1):
        if valid_number2(num):
            cnt += 1
    return cnt


if __name__ == "__main__":
    # tests = [(111111, True), (223450, False), (123789, False)]
    tests = [(112233, True), (123444, False), (111122, True)]
    for num, res in tests:
        print(num, res, valid_number2(num))
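
    # Editor's note (not in the original file): groupby collapses runs of equal
    # digits, e.g. 112233 -> [(1, 2), (2, 2), (3, 2)]; a run of exactly length 2
    # plus non-decreasing digits makes valid_number2 return True.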
    print(check(356261, 846303)) | [
"[email protected]"
] | |
f12813c2503db148a1eaacc7bde00ded0464d25b | 51aa2894c317f60726fe9a778999eb7851b6be3e | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/005_Lists and tables/001_Drop-down list/358_addItem.py | 49266f8f8700998f77644b2f498178ac850918ca | [] | no_license | pranaymate/Python_Topics | dd7b288ab0f5bbee71d57080179d6481aae17304 | 33d29e0a5bf4cde104f9c7f0693cf9897f3f2101 | refs/heads/master | 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding: utf-8 -*-
from PyQt4 import QtGui
import sys


def on_clicked():
    print("Text:", comboBox.currentText())
    print("Data:", comboBox.itemData(comboBox.currentIndex()))


app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
window.setWindowTitle("The QComboBox class")
window.resize(300, 90)
comboBox = QtGui.QComboBox()
for i in range(1, 11):
    comboBox.addItem("Item {0}".format(i), i)
ico = window.style().standardIcon(QtGui.QStyle.SP_MessageBoxCritical)
comboBox.addItem(ico, "Item 11", 11)
button = QtGui.QPushButton("Get value")
button.clicked.connect(on_clicked)
box = QtGui.QVBoxLayout()
box.addWidget(comboBox)
box.addWidget(button)
window.setLayout(box)
window.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
6fe45e6ab46a1d837bc8f439c510e91a4fa929c5 | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/data/region_CL.py | 3d4d764e24767605296bdc23b9705e51691eac0a | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,409 | py | """Auto-generated file, do not edit by hand. CL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CL = PhoneMetadata(id='CL', country_code=56, international_prefix='(?:0|1(?:1[0-69]|2[0-57]|5[13-58]|69|7[0167]|8[018]))0',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[2-9]|600|123)\\d{7,8}', possible_number_pattern='\\d{7,11}', possible_length=(9, 10, 11), possible_length_local_only=(7, 8)),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1962\\d{4}|2\\d{7}|32[0-2467]\\d{5})|(?:3[2-5]|[47][1-35]|5[1-3578]|6[13-57])\\d{7}', possible_number_pattern='\\d{7,9}', example_number='221234567', possible_length=(9,), possible_length_local_only=(7, 8)),
mobile=PhoneNumberDesc(national_number_pattern='9[3-9]\\d{7}', possible_number_pattern='\\d{8,9}', example_number='961234567', possible_length=(9,), possible_length_local_only=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}|1230\\d{7}', possible_number_pattern='\\d{9,11}', example_number='800123456', possible_length=(9, 11)),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(national_number_pattern='600\\d{7,8}', possible_number_pattern='\\d{10,11}', example_number='6001234567', possible_length=(10, 11)),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='44\\d{7}', possible_number_pattern='\\d{9}', example_number='441234567', possible_length=(9,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(national_number_pattern='600\\d{7,8}', possible_number_pattern='\\d{10,11}', example_number='6001234567', possible_length=(10, 11)),
national_prefix='0',
national_prefix_for_parsing='0|(1(?:1[0-69]|2[0-57]|5[13-58]|69|7[0167]|8[018]))',
number_format=[NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[23]'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[357]|4[1-35]|6[13-57]'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(9)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(44)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['44'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([68]00)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['60|8'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(600)(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['60'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(1230)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(\\d{5})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['219'], national_prefix_formatting_rule='(\\1)', domestic_carrier_code_formatting_rule='$CC (\\1)'),
NumberFormat(pattern='(\\d{4,5})', format='\\1', leading_digits_pattern=['[1-9]'], national_prefix_formatting_rule='\\1')],
intl_number_format=[NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[23]']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[357]|4[1-35]|6[13-57]']),
NumberFormat(pattern='(9)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['9']),
NumberFormat(pattern='(44)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['44']),
NumberFormat(pattern='([68]00)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['60|8']),
NumberFormat(pattern='(600)(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['60']),
NumberFormat(pattern='(1230)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1']),
NumberFormat(pattern='(\\d{5})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['219']),
NumberFormat(pattern='(\\d{4,5})', format='NA', leading_digits_pattern=['[1-9]'])],
mobile_number_portable_region=True)
| [
"[email protected]"
] | |
da3b88b2885c7cc9168a4839ac71c0bb2c7686fb | 60120043bf7964a1b185a13c3211c51f425caad6 | /AcademicProjects/COLONELS3 copy/cnn.py | 583ea46bbcb6b265430a9e69dbe540b0e5dc82fa | [] | no_license | prg007/AcademicProjectsSoFar | 15a360dea7559c133bb12acb282d660dc0d833b3 | c27c8ced06483dbfb2c2c3a9872de025a75b2e20 | refs/heads/master | 2022-11-15T16:27:38.838207 | 2020-06-26T09:04:59 | 2020-06-26T09:04:59 | 109,450,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,701 | py | import torch
import torch.optim as optim
import json
from datamanager import retrieve_image, DataPartition, DataManager
from training import Dense, ReLU, nlog_softmax_loss, minibatch_training
import math
import numpy as np
# IMPORTANT: DO NOT IMPORT OR USE ANYTHING FROM torch.nn
# BESIDES THESE THINGS
from torch.nn import Parameter, Conv2d, MaxPool2d, Module, init, Sequential


def create_kernel_row_matrix(kernels):
    """
    Creates a kernel-row matrix (as described in the notes on
    "Computing Convolutions"). See the unit tests for example input
    and output.
    """
    a, b, c, d = kernels.shape
    return kernels.reshape(a, b * c * d)
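
# Example (editor's addition, not in the original file): two 1-channel 2x2
# kernels flatten to one row each.
# >>> create_kernel_row_matrix(torch.arange(8.).reshape(2, 1, 2, 2))
# tensor([[0., 1., 2., 3.],
#         [4., 5., 6., 7.]])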


def create_window_column_matrix(images, window_width, stride):
    """
    Creates a window-column matrix (as described in the notes on
    "Computing Convolutions"). See the unit tests for example input
    and output.
    """
    # Approach 2
    num_images, num_channels, height, width = images.shape
    windows = []
    ctr = 0
    outer = torch.tensor([])
    for i in range(0, height - window_width + 1, stride):
        for j in range(0, width - window_width + 1, stride):
            xs = images[:, :, i:i + window_width, j:j + window_width]
            xs = xs.reshape(num_images, window_width * window_width * num_channels)
            if ctr == 0:
                outer = xs
                ctr += 1
            else:
                outer = torch.cat([outer, xs])
                outer = outer.reshape(num_images * (ctr + 1), window_width * window_width * num_channels)
                ctr += 1
    # Regroup rows so that all windows of one image are contiguous.
    timothy = torch.tensor([])
    for i in range(num_images):
        xsg = outer[i::num_images, ::]
        if i == 0:
            timothy = xsg
        else:
            timothy = torch.cat([timothy, xsg])
    return timothy.t()


def pad(images, padding):
    """
    Adds padding to a tensor of images.
    The tensor is assumed to have shape (B, C, W, W), where B is the batch
    size, C is the number of input channels, and W is the width of each
    (square) image.
    Padding is the number of zeroes we want to add as a border to each
    image in the tensor.
    """
    num_images, num_channels, height, width = images.shape
    a = torch.zeros(num_images, num_channels, height, padding)  # these 3 lines create the horizontal padding
    ee = torch.cat([images, a], dim=3)
    ff = torch.cat([a, ee], dim=3)
    b = torch.zeros(num_images, num_channels, padding, width + 2 * padding)  # these 3 lines create the vertical padding
    hh = torch.cat([b, ff], dim=2)
    ii = torch.cat([hh, b], dim=2)
    return ii
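
# Shape illustration (editor's addition, not in the original file): padding a
# batch of one 1-channel 2x2 image by 1 gives a 4x4 image with a zero border.
# >>> pad(torch.ones(1, 1, 2, 2), 1).shape
# torch.Size([1, 1, 4, 4])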


def convolve(kernels, images, stride, padding):
    """
    Convolves a kernel tensor with an image tensor, as described in the
    notes on "Computing Convolutions." See the unit tests for example input
    and output.
    """
    kp = create_kernel_row_matrix(kernels)
    num_kernels, y = kp.shape
    num_images1, num_channels1, height1, width1 = kernels.shape
    num_images, num_channels, height, width = images.shape
    padup = pad(images, padding)
    img = create_window_column_matrix(padup, width1, stride)
    mp = torch.mm(kp, img)
    mp1, mp2 = mp.shape
    rp = width
    starting_index = 0
    ending_index = 2 ** width1
    rm = num_kernels
    outer = torch.tensor([])
    ctr1 = 0
    width = ((mp1 * mp2) // num_images) // num_kernels
    width2 = int(math.sqrt(width))
    # Note (editor): the chunking below steps through mp in blocks of
    # 2 ** width1 columns, which is only valid when each image yields exactly
    # 2 ** width1 windows.
    while ending_index < mp2 + 1:
        lm1 = mp[:, starting_index:ending_index]
        lm3 = lm1.reshape((2 ** width1) * num_kernels)
        if starting_index == 0:
            outer = lm3
        else:
            outer = torch.cat([outer, lm3])
        starting_index = starting_index + 2 ** width1
        ending_index = ending_index + 2 ** width1
    xs = outer.reshape(num_images, num_kernels, width2, width2)
    return xs


class ConvLayer(Module):
    """A convolutional layer for images."""

    def __init__(self, input_channels, num_kernels,
                 kernel_size, stride, padding):
        super(ConvLayer, self).__init__()
        self.stride = stride
        self.weight = Parameter(torch.empty(num_kernels, input_channels,
                                            kernel_size, kernel_size))
        self.offset = Parameter(torch.empty(num_kernels, 1, 1))
        self.padding = padding
        # randomly initializes the parameters
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        init.kaiming_uniform_(self.offset, a=math.sqrt(5))

    def forward(self, x):
        """This will only work after you've implemented convolve (above)."""
        return self.offset + convolve(self.weight, x,
                                      self.stride, self.padding)


class Flatten(Module):
    """
    Flattens a tensor into a matrix. The first dimension of the input
    tensor and the output tensor should agree.
    For instance, a 3x4x5x2 tensor would be flattened into a 3x40 matrix.
    See the unit tests for example input and output.
    """

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, images):
        num_images, num_channels, height, width = images.shape
        flatten = images.reshape(num_images, num_channels * height * width)
        return flatten
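
# Example (editor's addition, not in the original file), matching the docstring:
# >>> Flatten()(torch.zeros(3, 4, 5, 2)).shape
# torch.Size([3, 40])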


def create_cnn(num_kernels, kernel_size,
               output_classes, dense_hidden_size,
               image_width, is_grayscale=True,
               use_torch_conv_layer=True,
               use_maxpool=True):
    """
    Builds a CNN with two convolutional layers and two feedforward layers.
    Maxpool is added by default, but can be disabled.
    """
    if use_torch_conv_layer:
        Conv = Conv2d
    else:
        Conv = ConvLayer
    padding = kernel_size // 2
    output_width = image_width
    if use_maxpool:
        output_width = output_width // 16
    model = Sequential()
    if is_grayscale:
        num_input_channels = 1
    else:
        num_input_channels = 3
    model.add_module("conv1", Conv(num_input_channels, num_kernels,
                                   kernel_size=kernel_size,
                                   stride=1, padding=padding))
    model.add_module("relu1", ReLU())
    if use_maxpool:
        model.add_module("pool1", MaxPool2d(kernel_size=4, stride=4, padding=0))
    model.add_module("conv2", Conv(num_kernels, num_kernels,
                                   kernel_size=kernel_size,
                                   stride=1, padding=padding))
    model.add_module("relu2", ReLU())
    if use_maxpool:
        model.add_module("pool2", MaxPool2d(kernel_size=4, stride=4, padding=0))
    model.add_module("flatten", Flatten())
    model.add_module("dense1", Dense(num_kernels * output_width**2,
                                     dense_hidden_size,
                                     init_bound=0.1632993161855452))
    model.add_module("relu3", ReLU())
    model.add_module("dense2", Dense(dense_hidden_size, output_classes,
                                     init_bound=0.2992528008322899))
    return model


class Classifier:
    """
    Allows the trained CNN to be saved to disk and loaded back in.
    You can call a Classifier instance as a function on an image filename
    to obtain a probability distribution over whether it is a zebra.
    """

    def __init__(self, net, num_kernels, kernel_size,
                 dense_hidden_size, categories, image_width):
        self.net = net
        self.num_kernels = num_kernels
        self.kernel_size = kernel_size
        self.dense_hidden_size = dense_hidden_size
        self.image_width = image_width
        self.categories = categories

    def __call__(self, img_filename):
        self.net.eval()
        image = retrieve_image(img_filename, self.image_width)
        inputs = image.float().unsqueeze(dim=0)
        outputs = torch.softmax(self.net(inputs), dim=1)
        result = dict()
        for i, category in enumerate(self.categories):
            result[category] = outputs[0][i].item()
        return result

    def save(self, filename):
        model_file = filename + '.model'
        with torch.no_grad():
            torch.save(self.net.state_dict(), model_file)
        config = {'dense_hidden_size': self.dense_hidden_size,
                  'num_kernels': self.num_kernels,
                  'kernel_size': self.kernel_size,
                  'image_width': self.image_width,
                  'categories': self.categories,
                  'model_file': model_file}
        with open(filename + '.json', 'w') as outfile:
            json.dump(config, outfile)

    @staticmethod
    def load(config_file):
        with open(config_file) as f:
            data = json.load(f)
        net = create_cnn(data['num_kernels'],
                         data['kernel_size'],
                         len(data['categories']),
                         data['dense_hidden_size'],
                         data['image_width'])
        net.load_state_dict(torch.load(data['model_file']))
        return Classifier(net,
                          data['num_kernels'],
                          data['kernel_size'],
                          data['dense_hidden_size'],
                          data['categories'],
                          data['image_width'])


def run(data_config, n_epochs, num_kernels,
        kernel_size, dense_hidden_size,
        use_maxpool, use_torch_conv_layer):
    """
    Runs a training regime for a CNN.
    """
    train_set = DataPartition(data_config, './data', 'train')
    test_set = DataPartition(data_config, './data', 'test')
    manager = DataManager(train_set, test_set)
    loss = nlog_softmax_loss
    learning_rate = .001
    image_width = 64
    net = create_cnn(num_kernels=num_kernels, kernel_size=kernel_size,
                     output_classes=2, image_width=image_width,
                     dense_hidden_size=dense_hidden_size,
                     use_maxpool=use_maxpool,
                     use_torch_conv_layer=use_torch_conv_layer)
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    best_net, monitor = minibatch_training(net, manager,
                                           batch_size=32, n_epochs=n_epochs,
                                           optimizer=optimizer, loss=loss)
    classifier = Classifier(best_net, num_kernels, kernel_size,
                            dense_hidden_size, manager.categories, image_width)
    return classifier, monitor


def experiment1():
    return run('stripes.data.json',
               n_epochs=20,
               num_kernels=20,
               kernel_size=3,
               dense_hidden_size=64,
               use_maxpool=True,
               use_torch_conv_layer=False)


def experiment2():
    return run('stripes.data.json',
               n_epochs=20,
               num_kernels=20,
               kernel_size=3,
               dense_hidden_size=64,
               use_maxpool=True,
               use_torch_conv_layer=True)


def experiment3():
    return run('stripes.data.json',
               n_epochs=20,
               num_kernels=20,
               kernel_size=3,
               dense_hidden_size=64,
               use_maxpool=False,
               use_torch_conv_layer=True)


def experiment4():
    return run('zebra.data.json',
               n_epochs=8,
               num_kernels=20,
               kernel_size=7,
               dense_hidden_size=64,
               use_maxpool=True,
               use_torch_conv_layer=True)
| [
"[email protected]"
] | |
7f068be0f3ed88f297013a5af90764c373606ab8 | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/test/data_structures/list/listcomprehensionall.py | db343a2519e8ef02335e917ba4bce51bc4d34be9 | [] | no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py |
one = [x.strip() for x in ('foo\n', 'bar\n', 'baz\n')]
print(one)
two = [int(x) for x in ('1', '2', '3')]
print(two)
#using a dictionary with list comprehension
d = {'foo': '10', 'bar': '20', 'baz': '30'}
three = [d[x] for x in ['foo', 'baz']]
print(three)
d = {'foo': '10', 'bar': '20', 'baz': '30'}
four = [int(d[x].rstrip('0')) for x in ['foo', 'baz']]
| [
"[email protected]"
] | |
7edca7e1cdbd28ec2360213c9f9a879bf37cf904 | 17c03102b5ea124e072c168d3f6e91b032f3de77 | /buildsettings.py | 34fcb57881f739574d5077d0df8717ee2cf0a97b | [
"MIT"
] | permissive | randomizax/export-portals-list | 44d8221e208f9aecb21c232b1712e669186a00eb | 7ee9d8bcd822b79fd71020382889676519b74c08 | refs/heads/master | 2021-01-15T16:17:31.572095 | 2019-03-14T16:00:24 | 2019-03-14T16:00:24 | 27,567,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,333 | py | # settings file for builds.
# if you want to have custom builds, copy this file to "localbuildsettings.py" and make changes there.
# possible fields:
# resourceBaseUrl - optional - the URL base for external resources (all resources embedded in standard IITC)
# distUrlBase - optional - the base URL to use for update checks
# buildMobile - optional - if set, mobile builds are built with 'ant'. requires the Android SDK and appropriate mobile/local.properties file configured
# preBuild - optional - an array of strings to run as commands, via os.system, before building the scripts
# postBuild - optional - an array of string to run as commands, via os.system, after all builds are complete
buildSettings = {
    # local: use this build if you're not modifying external resources
    # no external resources allowed - they're not needed any more
    'randomizax': {
        'resourceUrlBase': None,
        'distUrlBase': 'https://randomizax.github.io/export-portals-list',
    },

    # local8000: if you need to modify external resources, this build will load them from
    # the web server at http://0.0.0.0:8000/dist
    # (This shouldn't be required any more - all resources are embedded. but, it remains just in case some new feature
    # needs external resources)
    'local8000': {
        'resourceUrlBase': 'http://0.0.0.0:8000/dist',
        'distUrlBase': None,
    },

    # mobile: default entry that also builds the mobile .apk
    # you will need to have the android-sdk installed, and the file mobile/local.properties created as required
    'mobile': {
        'resourceUrlBase': None,
        'distUrlBase': None,
        'buildMobile': 'debug',
    },

    # if you want to publish your own fork of the project, and host it on your own web site
    # create a localbuildsettings.py file containing something similar to this
    # note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on regular "http" URLs
    # 'example': {
    #     'resourceBaseUrl': 'http://www.example.com/iitc/dist',
    #     'distUrlBase': 'https://secure.example.com/iitc/dist',
    # },
}
# defaultBuild - the name of the default build to use if none is specified on the build.py command line
# (in here as an example - it only works in localbuildsettings.py)
#defaultBuild = 'local'
| [
"[email protected]"
] | |
2fbdc36b2bbf594eb7cc85af996201d84ff990f7 | 211b5f06332a00edc39abdcf91640729d827bb11 | /pyramidpooling.py | d10967427db3123efca2efa60079665452c33ec5 | [
"MIT"
] | permissive | revidee/pytorch-pyramid-pooling | 00d107bfc2b6ac2b428ad21a2ba194bcd876d091 | d814eacc81bbc5d1826104b2046b9344b2a9c45c | refs/heads/master | 2020-05-26T11:13:35.514608 | 2019-06-21T11:22:09 | 2019-06-21T11:22:09 | 188,212,383 | 48 | 9 | null | null | null | null | UTF-8 | Python | false | false | 9,848 | py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class PyramidPooling(nn.Module):
    def __init__(self, levels, mode="max"):
        """
        General Pyramid Pooling class which uses Spatial Pyramid Pooling by default and holds the static methods for both spatial and temporal pooling.
        :param levels defines the different divisions to be made in the width and (spatial) height dimension
        :param mode defines the underlying pooling mode to be used, can either be "max" or "avg"
        :returns a tensor vector with shape [batch x 1 x n], where n: sum(filter_amount*level*level) for each level in levels (spatial) or
                 n: sum(filter_amount*level) for each level in levels (temporal)
                 which is the concentration of multi-level pooling
        """
        super(PyramidPooling, self).__init__()
        self.levels = levels
        self.mode = mode

    def forward(self, x):
        return self.spatial_pyramid_pool(x, self.levels, self.mode)

    def get_output_size(self, filters):
        out = 0
        for level in self.levels:
            out += filters * level * level
        return out

    @staticmethod
    def spatial_pyramid_pool(previous_conv, levels, mode):
        """
        Static Spatial Pyramid Pooling method, which divides the input Tensor vertically and horizontally
        (last 2 dimensions) according to each level in the given levels and pools its value according to the given mode.
        :param previous_conv input tensor of the previous convolutional layer
        :param levels defines the different divisions to be made in the width and height dimension
        :param mode defines the underlying pooling mode to be used, can either be "max" or "avg"
        :returns a tensor vector with shape [batch x 1 x n],
                 where n: sum(filter_amount*level*level) for each level in levels
                 which is the concentration of multi-level pooling
        """
        num_sample = previous_conv.size(0)
        previous_conv_size = [int(previous_conv.size(2)), int(previous_conv.size(3))]
        for i in range(len(levels)):
            h_kernel = int(math.ceil(previous_conv_size[0] / levels[i]))
            w_kernel = int(math.ceil(previous_conv_size[1] / levels[i]))
            w_pad1 = int(math.floor((w_kernel * levels[i] - previous_conv_size[1]) / 2))
            w_pad2 = int(math.ceil((w_kernel * levels[i] - previous_conv_size[1]) / 2))
            h_pad1 = int(math.floor((h_kernel * levels[i] - previous_conv_size[0]) / 2))
            h_pad2 = int(math.ceil((h_kernel * levels[i] - previous_conv_size[0]) / 2))
            assert w_pad1 + w_pad2 == (w_kernel * levels[i] - previous_conv_size[1]) and \
                   h_pad1 + h_pad2 == (h_kernel * levels[i] - previous_conv_size[0])
            padded_input = F.pad(input=previous_conv, pad=[w_pad1, w_pad2, h_pad1, h_pad2],
                                 mode='constant', value=0)
            if mode == "max":
                pool = nn.MaxPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
            elif mode == "avg":
                pool = nn.AvgPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
            else:
                raise RuntimeError("Unknown pooling type: %s, please use \"max\" or \"avg\"." % mode)
            x = pool(padded_input)
            if i == 0:
                spp = x.view(num_sample, -1)
            else:
                spp = torch.cat((spp, x.view(num_sample, -1)), 1)
        return spp

    @staticmethod
    def temporal_pyramid_pool(previous_conv, out_pool_size, mode):
        """
        Static Temporal Pyramid Pooling method, which divides the input Tensor horizontally (last dimensions)
        according to each level in the given levels and pools its value according to the given mode.
        In other words: It divides the Input Tensor in "level" horizontal stripes with width of roughly (previous_conv.size(3) / level)
        and the original height and pools the values inside this stripe
        :param previous_conv input tensor of the previous convolutional layer
        :param levels defines the different divisions to be made in the width dimension
        :param mode defines the underlying pooling mode to be used, can either be "max" or "avg"
        :returns a tensor vector with shape [batch x 1 x n],
                 where n: sum(filter_amount*level) for each level in levels
                 which is the concentration of multi-level pooling
        """
        num_sample = previous_conv.size(0)
        previous_conv_size = [int(previous_conv.size(2)), int(previous_conv.size(3))]
        for i in range(len(out_pool_size)):
            # print(previous_conv_size)
            #
            h_kernel = previous_conv_size[0]
            w_kernel = int(math.ceil(previous_conv_size[1] / out_pool_size[i]))
            w_pad1 = int(math.floor((w_kernel * out_pool_size[i] - previous_conv_size[1]) / 2))
            w_pad2 = int(math.ceil((w_kernel * out_pool_size[i] - previous_conv_size[1]) / 2))
            assert w_pad1 + w_pad2 == (w_kernel * out_pool_size[i] - previous_conv_size[1])
            padded_input = F.pad(input=previous_conv, pad=[w_pad1, w_pad2],
                                 mode='constant', value=0)
            if mode == "max":
                pool = nn.MaxPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
            elif mode == "avg":
                pool = nn.AvgPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
            else:
                raise RuntimeError("Unknown pooling type: %s, please use \"max\" or \"avg\"." % mode)
            x = pool(padded_input)
            if i == 0:
                tpp = x.view(num_sample, -1)
            else:
                tpp = torch.cat((tpp, x.view(num_sample, -1)), 1)
        return tpp


class SpatialPyramidPooling(PyramidPooling):
    def __init__(self, levels, mode="max"):
        """
        Spatial Pyramid Pooling Module, which divides the input Tensor vertically and horizontally
        (last 2 dimensions) according to each level in the given levels and pools its value according to the given mode.
        Can be used as every other pytorch Module and has no learnable parameters since it's a static pooling.
        In other words: It divides the Input Tensor in level*level rectangles width of roughly (previous_conv.size(3) / level)
        and height of roughly (previous_conv.size(2) / level) and pools its value. (pads input to fit)
        :param levels defines the different divisions to be made in the width dimension
        :param mode defines the underlying pooling mode to be used, can either be "max" or "avg"
        :returns (forward) a tensor vector with shape [batch x 1 x n],
                 where n: sum(filter_amount*level*level) for each level in levels
                 which is the concentration of multi-level pooling
        """
        super(SpatialPyramidPooling, self).__init__(levels, mode=mode)

    def forward(self, x):
        return self.spatial_pyramid_pool(x, self.levels, self.mode)

    def get_output_size(self, filters):
        """
        Calculates the output shape given a filter_amount: sum(filter_amount*level*level) for each level in levels
        Can be used to x.view(-1, spp.get_output_size(filter_amount)) for the fully-connected layers
        :param filters: the amount of filter of output fed into the spatial pyramid pooling
        :return: sum(filter_amount*level*level)
        """
        out = 0
        for level in self.levels:
            out += filters * level * level
        return out


class TemporalPyramidPooling(PyramidPooling):
    def __init__(self, levels, mode="max"):
        """
        Temporal Pyramid Pooling Module, which divides the input Tensor horizontally (last dimensions)
        according to each level in the given levels and pools its value according to the given mode.
        Can be used as every other pytorch Module and has no learnable parameters since it's a static pooling.
        In other words: It divides the Input Tensor in "level" horizontal stripes with width of roughly (previous_conv.size(3) / level)
        and the original height and pools the values inside this stripe
        :param levels defines the different divisions to be made in the width dimension
        :param mode defines the underlying pooling mode to be used, can either be "max" or "avg"
        :returns (forward) a tensor vector with shape [batch x 1 x n],
                 where n: sum(filter_amount*level) for each level in levels
                 which is the concentration of multi-level pooling
        """
        super(TemporalPyramidPooling, self).__init__(levels, mode=mode)

    def forward(self, x):
        return self.temporal_pyramid_pool(x, self.levels, self.mode)

    def get_output_size(self, filters):
        """
        Calculates the output shape given a filter_amount: sum(filter_amount*level) for each level in levels
        Can be used to x.view(-1, tpp.get_output_size(filter_amount)) for the fully-connected layers
        :param filters: the amount of filter of output fed into the temporal pyramid pooling
        :return: sum(filter_amount*level)
        """
        out = 0
        for level in self.levels:
            out += filters * level
        return out
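
# Usage sketch (editor's addition, not in the original file): pooling a batch
# of arbitrarily sized feature maps into a fixed-length vector.
if __name__ == "__main__":
    spp = SpatialPyramidPooling(levels=[1, 2, 4], mode="max")
    x = torch.randn(4, 3, 37, 53)  # batch of 4 three-channel feature maps
    out = spp(x)  # shape: (4, 3 * (1 + 4 + 16)) = (4, 63)
    assert out.shape == (4, spp.get_output_size(3))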
| [
"b4b39543vb8"
] | b4b39543vb8 |
608e11389f0ecee548b0c0ee175cb0ef1bc7a32a | d7d2f984f20ea483156b9fc686dc923c792e3e7c | /scripts/test_search_input.py | 38470e8b19034f7f9b82e14600af1abf8b965801 | [] | no_license | liyonqiang/test_zj | 0a04bcc276c6dcc224968791991b93fd164160b4 | 4ba16afcaabfcd593adb5ebb667d56c1c263ef79 | refs/heads/main | 2023-04-06T08:33:33.814618 | 2021-04-24T12:20:35 | 2021-04-24T12:20:35 | 361,153,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py |
# Import allure for report annotations
import allure
import pytest


class TestSearchInput:
    def setup(self):
        pass

    def teardown(self):
        pass

    # 1. Add a test step: @allure.step(title="test step 001")
    @allure.step(title="Test step 001")
    def test_login_001(self):
        # 2. Add a step description
        allure.attach("Step 001 description", "This step prints 001")
        print("001")
        assert 1

    # Severity: severity levels are (BLOCKER, CRITICAL, NORMAL, MINOR, TRIVIAL)
    # 3. Add a severity level: @pytest.allure.severity(Severity)
    # @pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
    @allure.step(title="Test step 002")
    def test_login_002(self):
        allure.attach("Step 002 description", "This step prints 002")
        print("002")
        assert 0 | [
"[email protected]"
] | |
9123c18ddd9a16fdafcb165d54957ca128f40662 | a7d020c8df05d4980876a7b230d7702fe1a9b196 | /tests/functional/test_jobseeker.py | 2b4d1c56b22d5be61526345ccabe9b10052d05f2 | [] | no_license | temon/rpush | 7720387e265d9e5cdb70f59d67bcbf6dc4dcd7ca | dbf0f98b3e473aefebf6313c38313fcf95a81cde | refs/heads/master | 2021-01-18T05:07:58.450940 | 2017-10-31T14:26:55 | 2017-10-31T14:26:55 | 8,747,627 | 0 | 1 | null | 2017-10-31T14:26:56 | 2013-03-13T08:03:37 | Python | UTF-8 | Python | false | false | 19 | py | ## @TODO: code here | [
"[email protected]"
] | |
73c9a3a1a9272aad188de26218cf91eb7d8d6fb3 | 063ae19b4f36ec2895dc481d33f6e43c3852e173 | /bin/flask | 78925439609ff7f9a55901c90c6280de0cae2900 | [] | no_license | arifams/lokaferi | ddb64c7146bbdb68b32f3295f87abb9773388764 | 4eec2da4c82e9cfb3428d3fd1e16eea4989899a1 | refs/heads/master | 2020-03-27T23:40:27.515476 | 2018-09-04T13:13:13 | 2018-09-04T13:13:13 | 147,337,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/Users/bang/Documents/belajar/python/lokaferi/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
47d12e8660fbea5dee75365da4029327fe5a06a3 | 21e26337bc1b482d74c74316528e08dc058856d3 | /src/day-06/solution-1.py | be48a1e60d97d179af44db065eaceceb412ba635 | [] | no_license | FudgeMunkey/adventofcode-2020 | 393117157aed9050c0fb2b1e61121b825cf97306 | 6e7eb57b2d560e4378c206f49efca513f1712d3a | refs/heads/main | 2023-02-02T16:27:30.264297 | 2020-12-22T08:00:53 | 2020-12-22T08:00:53 | 317,835,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | def read_input():
# Read input
f = open("puzzle_input.txt", "r")
raw_data = f.readlines()
clean_data = []
# Clean Data
for rd in raw_data:
clean_data.append(rd.strip())
return clean_data
if __name__ == "__main__":
data = read_input()
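    # Groups are separated by blank lines; within a group, `alphabet` marks
    # which of the 26 questions *anyone* answered "yes" to, and each group's
    # unique-question count is added to the running total.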
question_sum = 0
alphabet = [0] * 26
for d in data:
if d == '':
question_sum += sum(alphabet)
alphabet = [0] * 26
for c in d:
alphabet_index = ord(c) - 97
if alphabet[alphabet_index] == 0:
alphabet[alphabet_index] += 1
question_sum += sum(alphabet)
print(question_sum)
| [
"[email protected]"
] | |
75c901600cb02b2cab164badb64387af31b908e5 | 54060fe766a7ce368fb65b40c718320529aedc37 | /db_tools/data/product_data.py | 4f66daff268f835a54fa6babf37df33c1d1bc095 | [] | no_license | zzsen/DjangoTest | b13d839e44d536fddff660947dfa2f933da66048 | 4ce65227ed11aa44416e96960eaa71c976303959 | refs/heads/master | 2022-12-12T15:47:08.873561 | 2021-05-27T07:47:59 | 2021-05-27T07:47:59 | 206,477,417 | 0 | 0 | null | 2022-12-08T06:20:06 | 2019-09-05T04:54:04 | JavaScript | UTF-8 | Python | false | false | 36,825 | py | #!/usr/bin/env python
# encoding: utf-8
row_data = [
{
'images': [
'books/images/1_P_1449024889889.jpg',
'books/images/1_P_1449024889264.jpg',
'books/images/1_P_1449024889726.jpg',
'books/images/1_P_1449024889018.jpg',
'books/images/1_P_1449024889287.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥232元',
'name': '新鲜水果甜蜜香脆单果约800克',
'desc': '食用百香果可以增加胃部饱腹感,减少余热量的摄入,还可以吸附胆固醇和胆汁之类有机分子,抑制人体对脂肪的吸收。因此,长期食用有利于改善人体营养吸收结构,降低体内脂肪,塑造健康优美体态。',
'sale_price': '¥156元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/2_P_1448945810202.jpg',
'books/images/2_P_1448945810814.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥106元',
'name': '田然牛肉大黄瓜条生鲜牛肉冷冻真空黄牛',
'desc': '前腿+后腿+羊排共8斤,原生态大山放牧羊羔,曾经的皇室贡品,央视推荐,2005年北京招待全球财金首脑。五层专用包装箱+真空包装+冰袋+保鲜箱+顺丰冷链发货,路途保质期8天',
'sale_price': '¥88元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/7_P_1448945104883.jpg',
'books/images/7_P_1448945104734.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'叶菜类'
],
'market_price': '¥286元',
'name': '酣畅家庭菲力牛排10片澳洲生鲜牛肉团购套餐',
'desc': None,
'sale_price': '¥238元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/47_P_1448946213263.jpg',
'books/images/47_P_1448946213157.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥156元',
'name': '日本蒜蓉粉丝扇贝270克6只装',
'desc': None,
'sale_price': '¥108元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/10_P_1448944572085.jpg',
'books/images/10_P_1448944572532.jpg',
'books/images/10_P_1448944572872.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥106元',
'name': '内蒙新鲜牛肉1斤清真生鲜牛肉火锅食材',
'desc': None,
'sale_price': '¥88元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/4_P_1448945381985.jpg',
'books/images/4_P_1448945381013.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'茄果类'
],
'market_price': '¥90元',
'name': '乌拉圭进口牛肉卷特级肥牛卷',
'desc': None,
'sale_price': '¥75元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/8_P_1448945032810.jpg',
'books/images/8_P_1448945032646.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'进口生鲜'
],
'market_price': '¥150元',
'name': '五星眼肉牛排套餐8片装原味原切生鲜牛肉',
'desc': None,
'sale_price': '¥125元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/11_P_1448944388277.jpg',
'books/images/11_P_1448944388034.jpg',
'books/images/11_P_1448944388201.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥31元',
'name': '澳洲进口120天谷饲牛仔骨4份原味生鲜',
'desc': None,
'sale_price': '¥26元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/6_P_1448945167279.jpg',
'books/images/6_P_1448945167015.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'菌菇类'
],
'market_price': '¥239元',
'name': '潮香村澳洲进口牛排家庭团购套餐20片',
'desc': None,
'sale_price': '¥199元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/9_P_1448944791617.jpg',
'books/images/9_P_1448944791129.jpg',
'books/images/9_P_1448944791077.jpg',
'books/images/9_P_1448944791229.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥202元',
'name': '爱食派内蒙古呼伦贝尔冷冻生鲜牛腱子肉1000g',
'desc': None,
'sale_price': '¥168元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/3_P_1448945490837.jpg',
'books/images/3_P_1448945490084.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'精品肉类'
],
'market_price': '¥306元',
'name': '澳洲进口牛尾巴300g新鲜肥牛肉',
'desc': '新鲜羊羔肉整只共15斤,原生态大山放牧羊羔,曾经的皇室贡品,央视推荐,2005年北京招待全球财金首脑。五层专用包装箱+真空包装+冰袋+保鲜箱+顺丰冷链发货,路途保质期8天',
'sale_price': '¥255元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/48_P_1448943988970.jpg',
'books/images/48_P_1448943988898.jpg',
'books/images/48_P_1448943988439.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥126元',
'name': '新疆巴尔鲁克生鲜牛排眼肉牛扒1200g',
'desc': None,
'sale_price': '¥88元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/5_P_1448945270390.jpg',
'books/images/5_P_1448945270067.jpg',
'books/images/5_P_1448945270432.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'蛋制品'
],
'market_price': '¥144元',
'name': '澳洲进口安格斯牛切片上脑牛排1000g',
'desc': '澳大利亚是国际公认的没有疯牛病和口蹄疫的国家。为了保持澳大利亚产品的高标准,澳大利亚牛肉业和各级政府共同努力简历了严格的标准和体系,以保证生产的整体化和产品的可追溯性',
'sale_price': '¥120元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'images/201705/books_img/53_P_1495068879687.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'茄果类'
],
'market_price': '¥120元',
'name': '帐篷出租',
'desc': None,
'sale_price': '¥100元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/16_P_1448947194687.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'红酒'
],
'market_price': '¥23元',
'name': '52度茅台集团国隆双喜酒500mlx6',
'desc': '贵州茅台酒厂(集团)保健酒业有限公司生产,是以“龙”字打头的酒水。中国龙文化上下8000年,源远而流长,龙的形象是一种符号、一种意绪、一种血肉相联的情感。',
'sale_price': '¥19元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/14_P_1448947354031.jpg',
'books/images/14_P_1448947354433.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥43元',
'name': '52度水井坊臻酿八號500ml',
'desc': None,
'sale_price': '¥36元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/12_P_1448947547989.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'其他酒品'
],
'market_price': '¥190元',
'name': '53度茅台仁酒500ml',
'desc': None,
'sale_price': '¥158元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/46_P_1448946598711.jpg',
'books/images/46_P_1448946598301.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥38元',
'name': '双响炮洋酒JimBeamwhiskey美国白占边',
'desc': None,
'sale_price': '¥28元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/21_P_1448946793276.jpg',
'books/images/21_P_1448946793153.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥55元',
'name': '西夫拉姆进口洋酒小酒版',
'desc': None,
'sale_price': '¥46元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/15_P_1448947257324.jpg',
'books/images/15_P_1448947257580.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥22元',
'name': '茅台53度飞天茅台500ml',
'desc': None,
'sale_price': '¥18元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/13_P_1448947460386.jpg',
'books/images/13_P_1448947460276.jpg',
'books/images/13_P_1448947460353.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥42元',
'name': '52度兰陵·紫气东来1600mL山东名酒',
'desc': None,
'sale_price': '¥35元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/50_P_1448946543091.jpg',
'books/images/50_P_1448946542182.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥24元',
'name': 'JohnnieWalker尊尼获加黑牌威士忌',
'desc': None,
'sale_price': '¥20元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/51_P_1448946466595.jpg',
'books/images/51_P_1448946466208.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥31元',
'name': '人头马CLUB特优香槟干邑350ml',
'desc': None,
'sale_price': '¥26元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/17_P_1448947102246.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'啤酒'
],
'market_price': '¥54元',
'name': '张裕干红葡萄酒750ml*6支',
'desc': None,
'sale_price': '¥45元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/20_P_1448946850602.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥46元',
'name': '原瓶原装进口洋酒烈酒法国云鹿XO白兰地',
'desc': None,
'sale_price': '¥38元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/19_P_1448946951581.jpg',
'books/images/19_P_1448946951726.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'白酒'
],
'market_price': '¥82元',
'name': '法国原装进口圣贝克干红葡萄酒750ml',
'desc': None,
'sale_price': '¥68元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/18_P_1448947011435.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'白酒'
],
'market_price': '¥67元',
'name': '法国百利威干红葡萄酒AOP级6支装',
'desc': None,
'sale_price': '¥56元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/22_P_1448946729629.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥71元',
'name': '芝华士12年苏格兰威士忌700ml',
'desc': None,
'sale_price': '¥59元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/45_P_1448946661303.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥31元',
'name': '深蓝伏特加巴维兰利口酒送预调酒',
'desc': None,
'sale_price': '¥18元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/32_P_1448948525620.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'精选蔬菜'
],
'market_price': '¥43元',
'name': '赣南脐橙特级果10斤装',
'desc': None,
'sale_price': '¥36元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/30_P_1448948663450.jpg',
'books/images/30_P_1448948662571.jpg',
'books/images/30_P_1448948663221.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'进口水果'
],
'market_price': '¥11元',
'name': '泰国菠萝蜜16-18斤1个装',
'desc': '【懒人吃法】菠萝蜜果肉,冰袋保鲜,收货就吃,冰爽Q脆甜,2斤装,全国顺丰空运包邮,发出后48小时内可达,一线城市基本隔天可达',
'sale_price': '¥9元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/31_P_1448948598947.jpg',
'books/images/31_P_1448948598475.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'国产水果'
],
'market_price': '¥22元',
'name': '四川双流草莓新鲜水果礼盒2盒',
'desc': None,
'sale_price': '¥18元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/35_P_1448948333610.jpg',
'books/images/35_P_1448948333313.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'有机蔬菜'
],
'market_price': '¥67元',
'name': '新鲜头茬非洲冰草冰菜',
'desc': None,
'sale_price': '¥56元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/36_P_1448948234405.jpg',
'books/images/36_P_1448948234250.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'有机蔬菜'
],
'market_price': '¥6元',
'name': '仿真蔬菜水果果蔬菜模型',
'desc': None,
'sale_price': '¥5元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/33_P_1448948479966.jpg',
'books/images/33_P_1448948479886.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'精选蔬菜'
],
'market_price': '¥28元',
'name': '现摘芭乐番石榴台湾珍珠芭乐',
'desc': '''海南产精品释迦果,
释迦是水果中的贵族,
产量少,
味道很甜,
奶香十足,
非常可口,
果裹果园顺丰空运,
保证新鲜.果子个大,
一斤1-2个左右,
大个头的果子更尽兴!
''',
'sale_price': '¥23元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/34_P_1448948399009.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'国产水果'
],
'market_price': '¥46元',
'name': '潍坊萝卜5斤/箱礼盒',
'desc': '脐橙规格是65-90MM左右(标准果果径平均70MM左右,精品果果径平均80MM左右),一斤大概有2-4个左右,脐橙产自江西省赣州市信丰县安西镇,全过程都是采用农家有机肥种植,生态天然',
'sale_price': '¥38元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/43_P_1448948762645.jpg'
],
'categorys': [
'首页',
'休闲食品'
],
'market_price': '¥154元',
'name': '休闲零食膨化食品焦糖/奶油/椒麻味',
'desc': None,
'sale_price': '¥99元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/38_P_1448949220255.jpg'
],
'categorys': [
'首页',
'奶类食品',
'奶粉'
],
'market_price': '¥84元',
'name': '蒙牛未来星儿童成长牛奶骨力型190ml*15盒',
'desc': None,
'sale_price': '¥70元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/44_P_1448948850187.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥70元',
'name': '蒙牛特仑苏有机奶250ml×12盒',
'desc': None,
'sale_price': '¥32元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'images/201511/books_img/49_P_1448162819889.jpg'
],
'categorys': [
'首页',
'奶类食品'
],
'market_price': '¥1元',
'name': '1元支付测试商品',
'desc': None,
'sale_price': '¥1元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/40_P_1448949038702.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥70元',
'name': '德运全脂新鲜纯牛奶1L*10盒装整箱',
'desc': None,
'sale_price': '¥58元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/39_P_1448949115481.jpg'
],
'categorys': [
'首页',
'奶类食品',
'有机奶'
],
'market_price': '¥38元',
'name': '木糖醇红枣早餐奶即食豆奶粉538g',
'desc': None,
'sale_price': '¥32元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/41_P_1448948980358.jpg'
],
'categorys': [
'首页',
'奶类食品',
'原料奶'
],
'market_price': '¥26元',
'name': '高钙液体奶200ml*24盒',
'desc': None,
'sale_price': '¥22元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/37_P_1448949284365.jpg'
],
'categorys': [
'首页',
'奶类食品',
'国产奶品'
],
'market_price': '¥720元',
'name': '新西兰进口全脂奶粉900g',
'desc': None,
'sale_price': '¥600元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'books/images/42_P_1448948895193.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥43元',
'name': '伊利官方直营全脂营养舒化奶250ml*12盒*2提',
'desc': None,
'sale_price': '¥36元',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥156元',
'images': [
'books/images/27_P_1448947771805.jpg'
],
'market_price': '¥187元',
'categorys': [
'首页',
'粮油副食',
'厨房调料'
],
'desc': None,
'name': '维纳斯橄榄菜籽油5L/桶',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥15元',
'images': [
'books/images/23_P_1448948070348.jpg'
],
'market_price': '¥18元',
'categorys': [
'首页',
'粮油副食',
'食用油'
],
'desc': None,
'name': '糙米450gx3包粮油米面',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥45元',
'images': [
'books/images/26_P_1448947825754.jpg'
],
'market_price': '¥54元',
'categorys': [
'首页',
'粮油副食',
'调味品'
],
'desc': None,
'name': '精炼一级大豆油5L色拉油粮油食用油',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥26元',
'images': [
'books/images/28_P_1448947699948.jpg',
'books/images/28_P_1448947699777.jpg'
],
'market_price': '¥31元',
'categorys': [
'首页',
'粮油副食',
'南北干货'
],
'desc': None,
'name': '橄榄玉米油5L*2桶',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥9元',
'images': [
'books/images/24_P_1448948023823.jpg',
'books/images/24_P_1448948023977.jpg'
],
'market_price': '¥11元',
'categorys': [
'首页',
'粮油副食',
'方便速食'
],
'desc': None,
'name': '山西黑米农家黑米4斤',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥12元',
'images': [
'books/images/25_P_1448947875346.jpg'
],
'market_price': '¥14元',
'categorys': [
'首页',
'粮油副食',
'米面杂粮'
],
'desc': None,
'name': '稻园牌稻米油粮油米糠油绿色植物油',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥12元',
'images': [
'books/images/29_P_1448947631994.jpg'
],
'market_price': '¥14元',
'categorys': [
'首页',
'粮油副食',
'食用油'
],
'desc': None,
'name': '融氏纯玉米胚芽油5l桶',
'books_desc':'<p><img src="/media/books/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/books/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
}
]
pass | [
"[email protected]"
] | |
5d949b19ac66b7471688398e07a855dfa02336dd | f1b420b0e3f44c9df9ba4e537a780de215572552 | /regex.py | d32d14ebb9457073a953cdef979d4d53c5154c44 | [] | no_license | sociopath00/PythonBasics | 9049720e8d0a3e6ea86290d26aa97a7dcd3bdd61 | 0720444042e9d67127939742021497a6f4aec97e | refs/heads/master | 2021-07-13T00:09:33.217740 | 2020-11-21T18:01:43 | 2020-11-21T18:01:43 | 219,669,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | """
@author: Akshay Nevrekar
@created_on: 6th November,2019
@last_updated_on: 6th November,2019
"""
# import module
import re
"""
Identifiers
\d -> any digit
\D -> anything but a digit
\s -> any whitespace character
\S -> anything but whitespace
\w -> any word character (letter, digit or underscore)
\W -> anything but a word character
. -> any character except new line
\b -> a word boundary (matches the empty string at the edge of a word)
\. -> a literal .
Modifiers
{1,3} match between 1 and 3 repetitions
+ match 1 or more
? match 0 or 1
* match 0 or more
$ match the end of a string
^ match the beginning of a string
| alternation (either/or)
[] a character set/range
"""
statement = "PYTHON is a programming language but cat and rat are playing hide and seek. Is python popular?"
statement1 = "The earth is flat"
statement2 = "is it free?"
# find python in above statement
word = re.search(r"python", statement, re.IGNORECASE)
print(word)
print(word.group())
# # find all occurance
words = re.findall(r"python", statement, re.IGNORECASE)
print(words)
# find statements starts with `The`
pattern = "^The"
words = re.search(pattern, statement1, re.IGNORECASE)
print(words)
words = re.search(pattern, statement2)
print(words)
# check if str ends with `?`
pattern = ".*\?$"
words = re.search(pattern, statement1)
print(words)
words = re.search(pattern, statement)
print(words)
print(words.group())
# words = re.search(pattern, statement2)
# print(words)
## More
statement = "Raj is 17 years old and born in 2002 whereas Sameer is born in 1999 and 20. Jas is 7 years old where"
# find numbers
numbers = re.findall(r"\d", statement)
print(numbers)
numbers = re.findall(r"\d+", statement)
print(numbers)
years = re.findall(r"\d{2,4}", statement)
print(years)
names = re.findall(r"[A-Z][a-z]+", statement)
print(names)
print(re.findall(r"\bwhere\b", statement))
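# Extra illustration (not in the original script): precompile a pattern that
# is reused, and use capturing groups to pull (name, age) pairs out of the
# statement above.
age_re = re.compile(r"([A-Z][a-z]+) is (\d+)")
print(age_re.findall(statement))  # [('Raj', '17'), ('Jas', '7')]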
| [
"[email protected]"
] | |
985ba7540d4a52c6e2fc483f7bf744a12568939d | e9c3e8f6ae05b0144237d01671f9a02404c43154 | /hw_timer_demo.py | 4a5fedda73c77fd651b7c016d6e9bbaa1870fc76 | [] | no_license | vtt-info/micropython-stm32-examples | b836fe8a54119fcfdd95046d4edae043a156b695 | b6bbcb98a23b615914a015c7cbdedd550f5807ed | refs/heads/master | 2022-07-27T17:38:31.202661 | 2020-05-13T10:57:20 | 2020-05-13T10:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # File: hw_timer_demo.py
# Date: 2020-05-12
# MicroPython on the STM32F411CE Black Pill
import utime as time
from machine import Pin
import pyb
led = pyb.LED(1) # on-board LED (blue)
# create Timer (select TIM1..TIM11)
tim = pyb.Timer( 1, freq=10 ) # 10 Hz (fast LED blink)
tim.callback( lambda t: led.toggle() )
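# Note: pyb.Timer callbacks run in interrupt context, so keep them short and
# avoid allocating memory inside them (led.toggle() is safe on that count).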
try:
while True:
pass
except KeyboardInterrupt:
pass
finally:
tim.callback(None)
tim.deinit()
print('Done')
| [
"[email protected]"
] | |
6b61385d0fdf99629f23627dd07d90d5bf712491 | 14e3728f61b89ee20155d497bad8b15753ce5d77 | /gui.py | ff9b1f683d9abe9467556c62721303fe55bba249 | [] | no_license | jonathanvanschenck/Platformer2 | d6f121be71a10eae96cbedfd0c683c0c8ce89add | 9cf80d6bf272f4fb3fb3ea708f36ce934262cb62 | refs/heads/master | 2020-04-18T00:42:35.839136 | 2019-01-26T05:24:38 | 2019-01-26T05:24:38 | 167,087,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | """
Main gui for game.
"""
import pygame as pg
from pygame.locals import *
import varbs
import classes
import engine
import initialize
def main():
    # Initialize the screen
pg.init()
screen = pg.display.set_mode((varbs.screenW,varbs.screenH))
pg.display.set_caption("The Judges")
    # Initialize a background
background = pg.Surface(screen.get_size())
background = background.convert()
background.fill((250,250,250))
# Populate background
# Initialize player
global player
global sword
sword = initialize.initSword()
player = initialize.initPlayer(sword)
# Initialize mobs
global mobs
mobs = initialize.initMobs()
# Initialize platforms
global platforms
platforms = initialize.initPlatforms()
# Initialize Lava and Goal
global lava
lava = initialize.initLava()
global goal
goal = initialize.initGoal()
# Initialize sprite groups
playerSprite = pg.sprite.Group(player)
itemSprites = pg.sprite.Group(sword)
platformSprites = pg.sprite.Group(platforms)
scrollingSprites = classes.RenderUpdatesSneaky([lava]+[player,sword]+mobs+platforms+[goal])
physicsSprites = pg.sprite.Group([player]+mobs)
mobSprites = pg.sprite.Group(mobs)
# Blit everything to the screen
screen.blit(background, (0,0))
pg.display.update()
# Initalize killcounter
global killcounter
killcounter = 0
# Initialize the clock
clock = pg.time.Clock()
while 1:
clock.tick(varbs.framerate)
for event in pg.event.get():
if event.type == QUIT:
return
if event.type == KEYDOWN:
if event.key == K_a:
player.setAccel(left=-1.0)
if event.key == K_d:
player.setAccel(right=1.0)
if event.key == K_w:
player.jumpAttempt()
player.jumpActive = False
if event.key == K_SPACE:
player.setAttack(True)
if event.type == KEYUP:
if event.key == K_a:
player.setAccel(left=0.0)
if event.key == K_d:
player.setAccel(right=0.0)
if event.key == K_w:
player.jumpActive = True
#if event.key == K_SPACE:
# player.setAttack(False)
# Erase Player and platform location
scrollingSprites.clear(screen,background)
# Step physics
engine.stepPhysics(physicsSprites)
engine.stepMobAI(playerSprite,mobSprites)
engine.stepPlayer(playerSprite)
for sprite in physicsSprites.sprites():
sprite.moveFromV()
engine.collisionPlatform(sprite,platformSprites)
# Kill mobs in the lava
pg.sprite.spritecollide(lava, mobSprites,1)
# Kill mobs hit by sword
if sword.visible:
killcounter += len(pg.sprite.spritecollide(sword, mobSprites, 1))
# Check for collisions with mobs
dead, kc = engine.collisionMob(player,mobSprites)
killcounter += kc
# Scroll Screen
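        # Keep the player anchored near 40% of the screen width by shifting
        # every scrolling sprite horizontally, instead of moving a camera.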
charx = player.rect.centerx
if charx < 0.39*varbs.screenW or charx > 0.41*varbs.screenW:
dx = 0.4*varbs.screenW - charx
for sprite in scrollingSprites.sprites():
sprite.rect.move_ip(dx,0)
if len(mobSprites.sprites()) < 2:
newMob = classes.Mob(x = 1.3*varbs.screenW)
mobSprites.add(newMob)
physicsSprites.add(newMob)
scrollingSprites.add(newMob)
# Update all sprites
scrollingSprites.update()
# Draw all sprites (via groups)
dirty_rect = scrollingSprites.draw(screen)
# Update display with new screen
pg.display.update(dirty_rect)
# Check for victory
if pg.sprite.collide_rect(player,goal):
print("YOU WIN!")
return
# Check for death
if pg.sprite.collide_rect(player,lava) or dead:
print("YOU DIED!")
return
print("Purifications: ",killcounter)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c62750ec08e06522ae9aff12220e62eac5024980 | c5a7bf957611a5f825577cddd21ed1a6474e27b0 | /learning_logs/migrations/0002_entry.py | 894d779c211fc9ed330458ed4a9e5080c0769d57 | [] | no_license | o-shmidke/LearningLog | 05cd1929bb84e1a379750526eddde1e9ac00c29f | 848d84203a35029bae146e0aff5519600f6b41bd | refs/heads/main | 2023-01-31T10:26:14.057815 | 2020-12-09T12:02:41 | 2020-12-09T12:02:41 | 319,942,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # Generated by Django 3.1.4 on 2020-12-08 05:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('learning_logs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('date_added', models.DateTimeField(auto_now_add=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='learning_logs.topic')),
],
options={
'verbose_name_plural': 'Entries',
},
),
]
| [
"[email protected]"
] | |
b7ab17516bfe48f16e4d23abe58a871006b093b2 | ff768c02e02064459249df9190555d2e32a9ef37 | /admin/products/migrations/0001_initial.py | 384949c9126372488b0c37671a9f3ce366c66c6b | [] | no_license | Raja-mishra1/microservices_with_python | de0b25fee984084a5b03de47684c98a8bbf442ea | 5b6295a591568fa22f379c87ef579e449c697a90 | refs/heads/main | 2023-02-04T13:27:33.707466 | 2020-12-30T18:27:14 | 2020-12-30T18:27:14 | 324,816,832 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Generated by Django 3.1.3 on 2020-12-21 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('image', models.CharField(max_length=200)),
('likes', models.PositiveIntegerField(default=0)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"[email protected]"
] | |
0ef962f1ba33aef4daecd4b987ab4fb573293a3a | a9f38bb28ff9bd04b151d86c653cde9f46768c7c | /medium/linkedListCycle.py | d8b96b07fc2ad820cad79b087c355a056bd0bd37 | [] | no_license | Xynoclafe/leetcode | 02388516b10b8ee6bec6ee1b91ab5681c3254d33 | 4a80f02683e7fc14cb49c07170651ea3eeb280ac | refs/heads/master | 2020-12-01T21:05:44.656581 | 2020-02-02T09:05:32 | 2020-02-02T09:05:32 | 230,770,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from collections import defaultdict
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
seen = defaultdict(lambda: False)
node = head
while node != None:
if seen[node]:
return node
else:
seen[node] = True
node = node.next
return None | [
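# Alternative sketch (not the submitted solution above): Floyd's
# tortoise-and-hare algorithm finds the cycle entry in O(1) extra space
# instead of an O(n) hash map.
class SolutionFloyd:
    def detectCycle(self, head: ListNode) -> ListNode:
        slow = fast = head
        while fast and fast.next:
            slow, fast = slow.next, fast.next.next
            if slow is fast:                  # pointers met inside the cycle
                ptr = head
                while ptr is not slow:        # advance both one step at a time
                    ptr, slow = ptr.next, slow.next
                return ptr                    # first node of the cycle
        return None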
"[email protected]"
] | |
3677d8a92b150fe0027aa08c9cab0b9074543e10 | c5b9e959c02397b6adb5de773eebd4cc877530da | /models.py | 96d4b736ec6db17bf855a1195c69d3dc19f95024 | [] | no_license | andriyor/soc-counter | 0335970210b0bc9549be3ec417baeca79569cc4f | 8ac74f367a7f7fa8ec5f76762be4955cac3cf641 | refs/heads/master | 2021-08-07T20:35:54.044002 | 2017-04-25T11:13:41 | 2017-04-25T11:13:41 | 83,723,726 | 0 | 0 | null | 2022-04-11T17:36:34 | 2017-03-02T20:52:53 | JavaScript | UTF-8 | Python | false | false | 1,272 | py | from flask_login import UserMixin
from flask_mongoengine import MongoEngine
from app import app, login_manager, flask_bcrypt
db = MongoEngine(app)
handle = 'rozetked'
class Links(db.EmbeddedDocument):
youtube = db.StringField(max_length=200)
twitter = db.StringField(max_length=200)
instagram = db.StringField(max_length=200)
facebook = db.StringField(max_length=200)
class User(UserMixin, db.Document):
email = db.StringField(max_length=50, unique=True)
username = db.StringField(max_length=50, unique=True)
password_hash = db.StringField(max_length=200)
links = db.EmbeddedDocumentField(Links)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = flask_bcrypt.generate_password_hash(password).decode("utf-8")
def verify_password(self, password):
return flask_bcrypt.check_password_hash(self.password_hash, password)
@staticmethod
def get_by_email(email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None
@login_manager.user_loader
def load_user(user_id):
return User.objects.with_id(user_id)
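# Usage sketch (illustrative; assumes a configured MongoDB connection):
#
#     user = User(email='[email protected]', username='alice')
#     user.password = 'hunter2'          # stored as a bcrypt hash
#     user.save()
#     user.verify_password('hunter2')    # -> True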
| [
"[email protected]"
] | |
5991c89b63335624c6ba7129d7deebee22fd5e98 | 66e0ab971c9509e8ce79df6e0c5a52905c830be8 | /prob067/prob067.py | 438f596fb54fe2df9d2e878319140420543f256d | [] | no_license | davidized/euler | 0e171d6ede19d3286f282df26632b90babca7fc9 | 7c3a14febf1dedb5051dc5fd92469660f0f888f3 | refs/heads/master | 2021-06-11T15:53:05.367189 | 2021-05-29T14:45:25 | 2021-05-29T14:45:25 | 8,613,285 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 754 | py |
# Retrieve the triangle file and create lists
with open('triangle.txt') as infile:  # avoids shadowing the builtin `input`
    filelines = infile.readlines()
triangle = []
for line in filelines:
line = line.rstrip()
triangle.append(line.rsplit())
rows = len(triangle)-1 #We need the number of rows so we can start one row up
on_row = rows - 1
working_row = triangle[rows] #Row below the one we're working on (sums)
while on_row >= 0: #Step through rows
item_pos = 0
new_row = []
for item in triangle[on_row]:
adj1 = int(working_row[item_pos]) + int(item)
adj2 = int(working_row[item_pos+1]) + int(item)
new_row.append(max(adj1, adj2))
item_pos += 1
#print(on_row, ':\t', new_row)
working_row = new_row
on_row -= 1
print(working_row)
| [
"[email protected]"
] | |
5999842176ecc8a4ae17f350b9572baf683a41cf | e17752012d3b9d65a2b444e9f73195fa89c71a00 | /repData.py | cda43a5dd0b0f03367f010144431dd3cebb1d6d8 | [] | no_license | jadenbh13/brain-text | ea5746d932671824c4e9e1771b57ef2f29f2d96a | f55ee6e750550a8cf15a70f458ee2156d2c2633a | refs/heads/main | 2023-03-30T05:39:14.390966 | 2021-04-07T16:06:37 | 2021-04-07T16:06:37 | 355,544,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,396 | py | import datetime
import random
import serial
import time
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from twilio.rest import Client
from pynput.keyboard import Key, Controller
keyboard = Controller()
# Your Account SID from twilio.com/console
account_sid = "ACCNT_SID"
# Your Auth Token from twilio.com/console
auth_token = "ACCNT_TOKEN"
# /dev/tty.SLAB_USBtoUART
# /dev/tty.usbmodem1411
client = Client(account_sid, auth_token)
toStr = 'RECIPIENT_PHONE_NUMBER'
COM = '/dev/tty.SLAB_USBtoUART'# /dev/tty.SLAB_USBtoUART
BAUD = 9600
ser = serial.Serial(COM, BAUD, timeout = .1)
print('Waiting for device');
time.sleep(3)
print(ser.name)
char = 0
charList = [0]
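# Selection scheme: `char` is a cursor that cycles through the digits 0-9 as
# readings arrive; when the wearer focuses (averaged alpha waves > 9000) the
# current digit is appended to charList, and pairs of collected digits are
# later decoded as ASCII character codes to build the outgoing message.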
def read_data():
global char
global charList
m = 0
n = 0
#Get serial output from nodemcu
val = str(ser.readline().decode().strip('\r\n'))
print(val)
e = val.split(',')
#If brain data is sent
if len(e) > 4:
#Get high and low alpha waves
l = int(e[6]) + int(e[7])
#Average them for single alpha wave value
num = l / 2
nuu = int(e[7])
print(num)
#If alpha waves are greater than 9000(user is focusing)
if num > 9000:
#Append current char to charList
charList.append(char)
print("Added")
#If not
else:
#Add 1 to char
char += 1
#If char is greater than 9
if char > 9:
#Reset to 0(start from top)
char = 0
"""if len(charList) % 3 == 0:
charList.append(0)"""
print(" ")
print(char)
print(charList)
print(" ")
m = num
n = nuu
#Return alpha wave values
return m, n
def animate(frame, xs, ys):
# Read data
dat = read_data()
#Correspond x axis with seconds elapsed
xs.append(datetime.datetime.now().strftime('%S'))
#Correspond y axis with alpha wave values
ys.append(dat[0])
#Set limits for graph
size_limit = 30
xs = xs[-size_limit:]
ys = ys[-size_limit:]
#Clear axes and plot x and y
ax.clear()
ax.plot(xs, ys)
#Set values for plot
plt.grid()
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('Brain wave data')
plt.ylabel('Alpha wave amplitude')
plt.xlabel('Time')
if __name__ == '__main__':
e = 0
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
x_data = []
y_data = []
#Show plot
ani = animation.FuncAnimation(fig, animate, fargs=(x_data, y_data), interval=200)
plt.show()
#Once exited
print(" ")
h = 0
lst = []
mnLst = []
sts = ""
#Loop through charList
while h < len(charList):
print(" ")
print(h)
print(charList[h])
#Append current index to lst
lst.append(charList[h])
print(lst)
#For every 2 values:
if h % 2 == 0:
if len(lst) == 3:
vb = [lst[1], lst[2]]
mnLst.append(vb)
#Append lst to mnLst
mnLst.append(lst)
#Reset lst(keep to 2 values)
lst = []
print(" ")
h += 1
print(charList)
#Loop through mnLst
for b in mnLst:
#If length of current index is greater than 1(there exists a 2 number ASCII code at current index)
if len(b) > 1:
#Create empty string
st = ""
#Loop through each number
for j in b:
#Add string of j to string(creates a string of numbers)
st += str(j)
#Convert st to integer
chars = int(st)
print(chars)
#Convert from ASCII code to actual letter
print(chr(chars))
#Add letter to string of all letters
sts += chr(chars)
print(mnLst)
print(sts)
print(toStr)
#Call to twilio messages(create sms message)
#Use string of ASCII letters to fill body
#Use previously defined recipient string to fill to value
message = client.messages \
.create(
body=sts,
from_='TWILIO_PHONE_NUMBER',
to=toStr
)
#Print message SID to confirm sending
print(message.sid)
print(" ")
| [
"[email protected]"
] | |
d755d565180edb43da8505893ff541726e9d7e91 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_syrians.py | 2ea97ed156d2f2172951b08f48c5d732cd91cdcb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _SYRIANS():
def __init__(self,):
self.name = "SYRIANS"
self.definitions = syrian
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['syrian']
| [
"[email protected]"
] | |
faebb5179ffd852e252b6d8110e6dd1532dbae1a | 8a51f71741f5ec37ea5d0b1f51050f68e432241c | /src/utils.py | b74fdb434d6211ab295368c88388e467b7404418 | [] | no_license | jakm/VideoConvertor | d93c306a6f51c1a5e7b170189ab5188e0158bcaa | 5b3e7d6de2b204b0d6a9abdb14e484227a0ba993 | refs/heads/master | 2021-01-19T08:11:31.888852 | 2014-01-02T08:35:54 | 2014-01-02T08:35:54 | 3,355,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,887 | py | # -*- coding: utf8 -*-
"""
Provides miscellaneous useful functions and classes.
"""
import functools
from twisted.internet import protocol
def encode(string):
"""
Encode string with preferred encoding.
@param string str, String to encode
@return str
"""
import locale
encoding = locale.getpreferredencoding()
return string.encode(encoding)
def decode(string):
"""
Decode string with preferred encoding.
@param string str, String to decode
@return str
"""
import locale
encoding = locale.getpreferredencoding()
return string.decode(encoding)
def singleton(cls):
"""
Decorator. Create singleton from decorated class.
@param cls type, Singleton class
@return object, Instance of class
"""
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
def get_version():
"""
Return application's version. Use module _pkgdata generated by setup.py.
If _pkgdata doesn't exist return None.
@return str
"""
try:
from _pkgdata import version
return version
except ImportError:
return None
def get_install_dir():
"""
Return path where application is installed. Use module _pkgdata generated
by setup.py. If _pkgdata doesn't exist get directory from first command
argument (path to executable).
@return str
"""
try:
from _pkgdata import install_path
return install_path
except ImportError:
import os.path
import sys
return os.path.dirname(sys.argv[0])
def get_app_dir():
"""
Return application's directory in user's home directory. Create new if
doesn't exist.
@return str
"""
import os
import os.path
user_home = os.path.expanduser('~')
app_dir = os.path.join(user_home, '.videoconvertor')
if not os.path.exists(app_dir):
os.mkdir(app_dir)
return app_dir
def setup_logging():
"""
Set up logging module according to options in application's configuration
file.
"""
import logging
import os.path
from twisted.python import log
from config import Configuration
config = Configuration()
levels_map = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR,
'WARNING': logging.WARNING, 'INFO': logging.INFO,
'DEBUG': logging.DEBUG}
level_str = config.get('logging', 'level')
filename = config.get('logging', 'filename')
try:
level = levels_map[level_str]
except KeyError:
default = logging.INFO
print ('Unknown logging level %s, using default %s'
% (level_str, logging.getLevelName(default)))
level = default
if filename is None or filename == '':
filename = 'stdout'
if filename == 'stdout':
filepath = None
else:
filepath = os.path.join(get_app_dir(), filename)
# http://twistedmatrix.com/documents/current/core/howto/logging.html#auto3
observer = log.PythonLoggingObserver()
observer.start()
print ("Openning log '%s' with level %s"
% (filepath if filepath else filename, logging.getLevelName(level)))
logging.basicConfig(level=level, filename=filepath)
def async_function(fnc):
"""
Decorator. Decorated function will by executed in standalone thread and
will return t.i.d.Deferred.
@param fnc callable, Function or method to decorate
@return function
"""
from twisted.internet import threads
@functools.wraps(fnc)
def wrapper(*args, **kwargs):
return threads.deferToThread(fnc, *args, **kwargs)
return wrapper
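# Usage sketch (illustrative, not from this module):
#
#     @async_function
#     def probe_file(path):
#         ...                             # long-running, blocking work
#
#     d = probe_file('/tmp/clip.avi')     # returns a t.i.d.Deferred at once
#     d.addCallback(handle_result)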
# see http://code.activestate.com/recipes/576563-cached-property/
def cached_property(f):
"""returns a cached property that is calculated by function f"""
def get(self):
try:
return self._property_cache[f]
except AttributeError:
self._property_cache = {}
x = self._property_cache[f] = f(self)
return x
except KeyError:
x = self._property_cache[f] = f(self)
return x
return property(get)
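# Example (illustrative):
#
#     class MediaInfo(object):
#         @cached_property
#         def duration(self):
#             return expensive_probe()    # computed on first access, cached after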
class WatchingProcessProtocol(protocol.ProcessProtocol):
"""
ProcessProtocol that is watching termination of process and callbacks owns
deferred.
"""
def __init__(self, deferred):
"""
Save deferred.
@param deferred t.i.d.Deferred
"""
self.deferred = deferred
def processExited(self, status):
"""
        Fire the errback of the protocol's deferred after process termination,
        passing t.i.e.ProcessDone or t.i.e.ProcessTerminated, which carries the
        return value or the number of the signal that killed the process.
"""
self.deferred.errback(status)
| [
"[email protected]"
] | |
5322af7f07ad32bc337c8b32e40381f72879c7a9 | 2e9d6aa04ab53ad4d9ff5c83d253d4b58e151420 | /daily_image_scraper.py | b3eaf169dffe5c7204d724242847c4123159cf6d | [
"MIT"
] | permissive | meisty/Daily-Image-Scraper | c94958c03bf9e74cc34fd634ffe69c588bf26817 | 1fa8b26e14db33e903e46d839d6138d8795347bb | refs/heads/master | 2020-04-03T10:06:44.245666 | 2019-12-02T22:36:24 | 2019-12-02T22:36:24 | 95,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import requests, bs4, os
def getDailyImage(imageUrl):
url=imageUrl
res = requests.get(imageUrl)
    res.raise_for_status()  # note the (): the bare attribute access was a no-op
soup = bs4.BeautifulSoup(res.text, 'html.parser')
imageElem = soup.select('html body center p a img')
if imageElem == []:
print("Could not find the image")
return False
else:
imgUrl = imageElem[0].get('src')
imgUrl = url+imgUrl
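        # Suggestion: urllib.parse.urljoin(url, imgUrl) would also handle
        # absolute src attributes; this concatenation assumes a relative path.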
#download the image
print("Downloading the image %s..." % (imgUrl))
res = requests.get(imgUrl)
        res.raise_for_status()
print(res)
with open('background.jpg', 'wb') as fo:
for chunk in res.iter_content(4096):
fo.write(chunk)
os.system("/usr/bin/gsettings set org.gnome.desktop.background picture-uri file:/home/shaun/python/webscraping/daily_image_scraper/background.jpg")
result = getDailyImage('https://apod.nasa.gov/apod/')
if result == False:
print("Image failed to download")
else:
print("Your image has been downloaded and has been set as your desktop background")
| [
"[email protected]"
] | |
0c5bcc232151d3d0cfc29a8d4879ddf6f1b4e2b6 | e9ea875168e2025f10b9b103b6b1a39a038c2e60 | /python solvers/2048/GreedySearchNoRandom.py | dcb1c399e0d617c223c479d9a3167e86b48b926d | [] | no_license | greengatz/senior_project | 0dda6ef93c878aec9731ce6e25a1c37fa5f18535 | 6ab42e48583ddd4d7a13959b33f7044f63beda69 | refs/heads/master | 2021-01-15T15:36:27.085469 | 2016-08-30T03:24:34 | 2016-08-30T03:24:34 | 51,871,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,581 | py | from GameState import GameState
from random import randint
from ValueCalculator import value
from Directions import *
class GreedySearchNoRandom(object):
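    """Depth-limited greedy solver for 2048: it scores the four moves
    recursively, but (as the name says) ignores where new tiles spawn,
    keeping the branching factor at four moves per ply."""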
def __init__(self, inDepth):
self.game = GameState()
self.numMoves = 0
self.depth = inDepth
pass
'keeps searching for moves until the game is complete'
def playGame(self):
self.game.printState(self.game.gameArray)
count = 0
while (self.game.isGoing()):
testBoard = self.game.copyArr()
bestMove = self.search(testBoard, self.depth)
print(bestMove[0])
# when at the end, all decisions might lead to an inevitable failure
if (not self.game.isValid(bestMove)):
pass
#self.game.printState(self.game.gameArray)
self.game.takeMove(bestMove[0])
self.game.printState(self.game.gameArray)
pass
'returns best move and the value of that move'
'best move is only useful for the top-level call'
def search(self, board, depth):
if (depth == 0):
return (Move.up, 0)
bestMove = Move.up
bestValue = -1
move = Move.up
moveValue = self.searchDirection(board, depth, move)
if (moveValue > bestValue):
bestMove = move
bestValue = moveValue
move = Move.left
moveValue = self.searchDirection(board, depth, move)
if (moveValue > bestValue):
bestMove = move
bestValue = moveValue
move = Move.right
moveValue = self.searchDirection(board, depth, move)
if (moveValue > bestValue):
bestMove = move
bestValue = moveValue
move = Move.down
moveValue = self.searchDirection(board, depth, move)
if (moveValue > bestValue):
bestMove = move
bestValue = moveValue
return (bestMove, bestValue)
'returns the number of matches that a given move would make'
'this only determines value of one move and no further searching'
def valueOfMove(self, board, move):
return value(self.game.preRotate(move, board), self.game, move)
'returns the expected value of a given move searching with the given depth'
'this ignores the new tiles appearing, which saves tons on complexity'
def searchDirection(self, board, depth, move):
testGame = GameState()
testGame.setBoard(board)
testGame.setBoard(testGame.copyArr())
# if the move isn't valid, don't consider it
if (not testGame.isValid(move)):
return -1
# determine the value for making the move at this level
ourValue = self.valueOfMove(testGame.gameArray, move)
# using that as the starting board, check the child's options
afterMove = testGame.executeMove(move)
searchValue = self.search(afterMove, depth - 1)[1]
return ourValue + searchValue
'generic methods of every solver'
def getScore(self):
return self.game.getScore()
def getMaxTile(self):
return self.game.getMaxTile()
def getMoves(self):
return self.numMoves
def printGame(self):
self.game.printState()
pass | [
"[email protected]"
] | |
ca4b35dcc9254c770b8fbcf280e5d949ec61f07e | f6cc05e5b93321e54845665464b4411612105487 | /venv/lib/python3.9/site-packages/hungarian_algorithm/tests/test_graph.py | 3eeefbe0208064fd61df3574656289951ccfee5f | [] | no_license | gostephen/Data-Structures-and-Algorithms | f46fc68e9f69e0e36fdd30ec314f5da0415686bc | 48e72e2fd595ea0a02bcb0efbdefae89b030aa6f | refs/heads/master | 2023-07-09T02:44:31.685903 | 2021-08-22T07:55:36 | 2021-08-22T07:55:36 | 392,228,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,432 | py | '''
File name: test_graph.py
Description: Tests for Graph methods.
Author: Ben Chaplin
GitHub: https://github.com/benchaplin/hungarian-algorithm
Package: hungarian_algorithm
Python Version: 3.7.5
License: MIT License Copyright (c) 2020 Ben Chaplin
'''
from ..algorithm import *
import unittest
ex_G = {
'a': {'b': 2, 'c': 7, 'e': 1},
'b': {'a': 2, 'd': 5},
'c': {'a': 7},
'd': {'b': 5},
'e': {'a': 1}
}
ex_H = {
'x1': {'y1': 1, 'y2': 6},
'x2': {'y2': 8, 'y3': 6},
'x3': {'y1': 4, 'y3': 1}
}
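# ex_H: a weighted bipartite graph on {x1, x2, x3} and {y1, y2, y3}.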
ex_X = {
'x': {'y', 'z'},
'y': {'x', 'z'},
'z': {'x', 'y'}
}
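# ex_X: a triangle (odd cycle), hence not bipartite.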
ex_Y = {
'x1': {'y1': 1, 'y2': 6},
'x2': {'y2': 8, 'y3': 6},
'x3': {'y1': 4, 'y3': 6},
'y1': {'y2': 2}
}
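# ex_Y: the extra y1-y2 edge closes an odd cycle (x1-y1-y2), breaking the
# bipartition of an otherwise bipartite graph.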
class TestGraphMethods(unittest.TestCase):
def test_is_bipartite_empty(self):
self.assertTrue(Graph({}).is_bipartite(None))
def test_is_bipartite_single(self):
self.assertTrue(Graph({'a': {'b'}}).is_bipartite('a'))
def test_is_bipartite_pass1(self):
self.assertTrue(Graph(ex_G).is_bipartite('a'))
def test_is_bipartite_pass2(self):
self.assertTrue(Graph(ex_H).is_bipartite('x1'))
def test_is_bipartite_fail1(self):
self.assertFalse(Graph(ex_X).is_bipartite('x'))
def test_is_bipartite_fail2(self):
self.assertFalse(Graph(ex_Y).is_bipartite('x1'))
def test_make_complete_bipartite_single(self):
G = Graph({'a': {'b', 'd'}, 'c': {'d'}})
G.make_complete_bipartite('a')
self.assertTrue('b' in G.vertices['c'].neighbors)
def test_make_complete_bipartite1(self):
G = Graph(ex_G)
G.make_complete_bipartite('a')
self.assertTrue({'b', 'c', 'e'} == G.vertices['a'].neighbors
and {'b', 'c', 'e'} == G.vertices['d'].neighbors
and G.vertices['d'].get_edge('c').weight == 0
and G.vertices['d'].get_edge('e').weight == 0)
    def test_make_complete_bipartite2(self):
G = Graph(ex_H)
G.make_complete_bipartite('x1')
self.assertTrue({'y1', 'y2', 'y3'} == G.vertices['x1'].neighbors
and {'y1', 'y2', 'y3'} == G.vertices['x2'].neighbors
and {'y1', 'y2', 'y3'} == G.vertices['x3'].neighbors
and G.vertices['x1'].get_edge('y3').weight == 0
and G.vertices['x2'].get_edge('y1').weight == 0
and G.vertices['x3'].get_edge('y2').weight == 0)
def test_feasibly_label_single(self):
G = Graph({'a': {'b'}})
G.feasibly_label('a')
self.assertEqual(G.vertices['a'].label, 1)
def test_feasibly_label1(self):
G = Graph(ex_G)
G.feasibly_label('a')
self.assertEqual(G.vertices['a'].label, 7)
def test_feasibly_label2(self):
G = Graph(ex_G)
G.feasibly_label('d')
self.assertEqual(G.vertices['d'].label, 5)
def test_feasibly_label3(self):
G = Graph(ex_H)
G.feasibly_label('x1')
self.assertEqual(G.vertices['x1'].label, 6)
def test_feasibly_label4(self):
G = Graph(ex_H)
G.feasibly_label('x2')
self.assertEqual(G.vertices['x2'].label, 8)
def test_feasibly_label5(self):
G = Graph(ex_H)
G.feasibly_label('x3')
self.assertEqual(G.vertices['x3'].label, 4)
def test_generate_feasible_labeling_pass1(self):
G = Graph(ex_G)
self.assertTrue(G.generate_feasible_labeling('a'))
def test_generate_feasible_labeling_pass2(self):
G = Graph(ex_H)
self.assertTrue(G.generate_feasible_labeling('x1'))
def test_generate_feasible_labeling_fail1(self):
G = Graph(ex_X)
self.assertFalse(G.generate_feasible_labeling('x'))
def test_generate_feasible_labeling_fail2(self):
G = Graph(ex_Y)
self.assertFalse(G.generate_feasible_labeling('x1'))
def test_generate_feasible_labeling_single1(self):
G = Graph({'a': {'b': 1}})
G.generate_feasible_labeling('a')
self.assertEqual((G.vertices['a'].label,
G.vertices['b'].label),
(1, 0))
def test_generate_feasible_labeling_single2(self):
G = Graph({'a': {'b': 1}})
G.generate_feasible_labeling('b')
self.assertEqual((G.vertices['a'].label,
G.vertices['b'].label),
(0, 1))
def test_generate_feasible_labeling1(self):
G = Graph(ex_G)
G.generate_feasible_labeling('a')
self.assertEqual((G.vertices['a'].label,
G.vertices['d'].label,
G.vertices['b'].label,
G.vertices['c'].label,
G.vertices['e'].label),
(7, 5, 0, 0, 0))
def test_generate_feasible_labeling2(self):
G = Graph(ex_H)
G.generate_feasible_labeling('x1')
self.assertEqual((G.vertices['x1'].label,
G.vertices['x2'].label,
G.vertices['x3'].label,
G.vertices['y1'].label,
G.vertices['y2'].label,
G.vertices['y3'].label),
(6, 8, 4, 0, 0, 0))
def test_edge_in_equality_subgraph_single(self):
G = Graph({'a': {'b': 1}})
G.generate_feasible_labeling('a')
e = G.vertices['a'].get_edge('b')
self.assertTrue(G.edge_in_equality_subgraph(e))
def test_edge_in_equality_subgraph_pass1(self):
G = Graph(ex_G)
G.generate_feasible_labeling('a')
e = G.vertices['a'].get_edge('c')
self.assertTrue(G.edge_in_equality_subgraph(e))
def test_edge_in_equality_subgraph_pass2(self):
G = Graph(ex_H)
G.generate_feasible_labeling('x1')
e = G.vertices['x1'].get_edge('y2')
self.assertTrue(G.edge_in_equality_subgraph(e))
def test_edge_in_equality_subgraph_fail1(self):
G = Graph(ex_G)
G.generate_feasible_labeling('a')
e = G.vertices['a'].get_edge('e')
self.assertFalse(G.edge_in_equality_subgraph(e))
def test_edge_in_equality_subgraph_fail2(self):
G = Graph(ex_H)
G.generate_feasible_labeling('x1')
e = G.vertices['x1'].get_edge('y1')
self.assertFalse(G.edge_in_equality_subgraph(e))
def test_equality_subgraph_single(self):
G = Graph({'a': {'b': 1}})
G.generate_feasible_labeling('a')
eq_G = G.equality_subgraph()
self.assertTrue(eq_G.vertices['a'].get_edge('b'))
def test_equality_subgraph1(self):
G = Graph(ex_G)
G.generate_feasible_labeling('a')
eq_G = G.equality_subgraph()
self.assertTrue(eq_G.vertices['a'].get_edge('c')
and eq_G.vertices['b'].get_edge('d')
and not eq_G.vertices['a'].get_edge('b')
and not eq_G.vertices['a'].get_edge('e'))
def test_equality_subgraph2(self):
G = Graph(ex_H)
G.generate_feasible_labeling('x1')
eq_G = G.equality_subgraph()
self.assertTrue(eq_G.vertices['x1'].get_edge('y2')
and eq_G.vertices['x2'].get_edge('y2')
and eq_G.vertices['x3'].get_edge('y1')
and not eq_G.vertices['x1'].get_edge('y1')
and not eq_G.vertices['x2'].get_edge('y3')
and not eq_G.vertices['x3'].get_edge('y3'))
if __name__ == '__main__':
unittest.main()
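# Note: the relative import at the top means this file must be run as part of
# its package, e.g. `python -m unittest hungarian_algorithm.tests.test_graph`
# (module path assumed from the header above), not as a standalone script.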
| [
"[email protected]"
] | |
a50897633e5c496269182fceeef7b6dbda62131f | 88772525590667104bd070f46281694051b9bca3 | /Manager/Policies/old/wealthredistrev.py | df73e967e5d5b6e0a15ae425bcb7f24b813b8271 | [] | no_license | iFlex/SociableJavascript | 425c39d256c557207f3f620769570e6e28a3288c | ce2c600b1074d432eeb596c2f7a7a3b2cc56cdef | refs/heads/master | 2021-01-10T10:12:42.452546 | 2016-03-28T16:36:56 | 2016-03-28T16:36:56 | 43,653,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,993 | py | #Wealth Redistribution - ThDrop
#Author@ Milorad Liviu Felix
import math
#inspect and fix with extra care
MAX_THPT = 200.0
context = {}
def init(context):
context["poor_treshold"] = 0
if "csvlog" in context:
context["csvlog"].commitSchema(["totalAvailableMemory","redistributable","RichD","PoorD"])
def throughputRescale(t):
t = max(0.00001,t)
    return -math.log10(t/MAX_THPT)
def getGini(isolates):
sumOfDifferences = 0
sumOfThroughputs = 0
for x in isolates:
for y in isolates:
sumOfDifferences += math.fabs(throughputRescale(x["throughput"])-throughputRescale(y["throughput"]))
sumOfThroughputs += throughputRescale(x["throughput"])
giniIndex = sumOfDifferences / (2*sumOfThroughputs)
    return giniIndex
def calcPoorTreshold(isolates):
sm = 0.0
for i in isolates:
sm += throughputRescale(i["throughput"])
#sm += i["throughput"]
    sm = float(sm)/len(isolates)
return math.pow(10,-sm)
#return sm
def calcThroughputDrops(isolates):
for i in isolates:
if "old_tp" in i:
i["tdrop"] = max(0.0,throughputRescale(i["throughput"]) - i["old_tp"])
else:
i["tdrop"] = 0.0
i["old_tp"] = throughputRescale(i["throughput"])
def calcRedistribution(isolates,poor_treshold):
RichContrib = 0.0
PoorContrib = 0.0
Redistributable = 0.0
totalUsed = 0.0
for i in isolates:
totalUsed += i["hardHeapLimit"]
if i["throughput"] <= poor_treshold:
PoorContrib += i["tdrop"]
else:
RichContrib += i["hardHeapLimit"]
Redistributable += i["hardHeapLimit"]
return (RichContrib,PoorContrib,Redistributable,totalUsed)
def redistribute(isolates,rc,pc,ac,redistribute,allocatable,poor_treshold):
richd = []
poord = []
    allocatable += redistribute
if rc == 0 and pc == 0:
        return richd, poord  # bare "return" broke tuple unpacking in calculate(); return the (empty) lists
#poor and rich exist
if rc != 0 and pc != 0:
for i in isolates:
if i["throughput"] <= poor_treshold:
            coef = i["tdrop"]/pc
            poord.append(coef)
i["hardHeapLimit"] += coef*allocatable
else:
richd.append((i["hardHeapLimit"]/rc))
i["hardHeapLimit"] -= (i["hardHeapLimit"]/rc)*redistribute
#everyone is rich
if pc == 0:
allocatable -= redistribute
for i in isolates:
richd.append((i["hardHeapLimit"]/rc))
i["hardHeapLimit"] += (i["hardHeapLimit"]/rc)*allocatable
#everyone is poor
if rc == 0:
allocatable -= redistribute
for i in isolates:
poord.append((i["hardHeapLimit"]/ac))
i["hardHeapLimit"] += (i["hardHeapLimit"]/ac)*allocatable
return (richd,poord)
def markIsolates(isolates,totalAvailableMemory):
memlim = totalAvailableMemory / len(isolates)
    hasNewIsolates = False
for isolate in isolates:
if "pMark" not in isolate:
isolate["pMark"] = True;
isolate["average"] = 0;
isolate["avindex"] = 0;
hasNewIsolates = True
if hasNewIsolates:
for isolate in isolates:
isolate["hardHeapLimit"] = memlim;
return hasNewIsolates
def calculate(totalAvailableMemory,isolates,ctx):
global context
context = ctx
if markIsolates(isolates,totalAvailableMemory):
        return isolates
old = []
new = []
for i in isolates:
old.append(i["hardHeapLimit"])
poor_treshold = 0.99 #calcPoorTreshold(isolates);
    calcThroughputDrops(isolates)
gini = getGini(isolates)
rc,pc,redist,totalUsed = calcRedistribution(isolates,poor_treshold)
    allocatable = totalAvailableMemory - totalUsed
allocatable = max(0,allocatable)
redist /= 3
    redist = min(redist,totalAvailableMemory*gini)
    redist -= allocatable
redist = max(0,redist)
richd,poord = redistribute(isolates,rc,pc,totalUsed,redist,allocatable,poor_treshold)
for i in isolates:
new.append(i["hardHeapLimit"])
if "csvlog" in ctx:
richds = "("+str(len(richd))+")"
poords = "("+str(len(poord))+")"
for i in richd:
richds += str(i)+"|"
for i in poord:
poords += str(i)+"|"
ctx["csvlog"].commitLine(["gini:"+str(gini)+" alc:"+str(allocatable)+" rdst:"+str(redist)]);
return isolates
def name():
return "Wealth Redistribution v1.2"
def stats():
return "No stats available" | [
"[email protected]"
] | |
d11bc35e80312bd9d5b08d3091ee5098da77f10d | 91b8a4c7278e25012733de1af7c3f14d5d3c6688 | /reservation/migrations/0001_initial.py | bac817b9235c8d01cbaea9910699cb3cf26dcb01 | [] | no_license | FernandoSka/dogger | ab35d5730f579b5798a6f1db9e9c134a4eedb440 | b2a8a550936f4d9947518f71acf643d651c99f2d | refs/heads/master | 2020-04-18T16:19:03.458129 | 2019-01-28T18:15:26 | 2019-01-28T18:15:26 | 167,631,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | # Generated by Django 2.1.5 on 2019-01-24 07:21
import customer.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DogItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Dog')),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('duration', models.DurationField()),
('status', models.CharField(choices=[('pending', 'pending'), ('acepted', 'acepted'), ('concluded', 'concluded'), ('refused', 'refused'), ('canceled', 'canceled')], default='pending', max_length=9)),
('reward', models.FloatField(validators=[customer.validators.min_validator])),
('dogs', models.ManyToManyField(through='reservation.DogItems', to='customer.Dog')),
('walker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Walker')),
],
),
migrations.AddField(
model_name='dogitems',
name='reservation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reservation.Reservation'),
),
]
| [
"[email protected]"
] | |
7b51afeec4b4b08650f1da993bea865407253bed | 03a210fac12f08bf66eedaa6c39431df0e980fb2 | /0x05-python-exceptions/0-safe_print_list.py | af6a0965648801e2476feab18a3fb58038d557ea | [] | no_license | jhosep7/holbertonschool-higher_level_programming | 9107daee8c6642dcbfc7d7a3957c5fdd97b3f8e2 | c16437413b6b6995d6631851a9e1c0aeb24e18e4 | refs/heads/master | 2020-09-29T04:00:11.969366 | 2020-05-15T13:37:11 | 2020-05-15T13:37:11 | 226,944,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | #!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
counter = 0
try:
for i in range(x):
print("{}".format(my_list[i]), end="")
counter += 1
except IndexError:
pass
print()
return counter
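# e.g. safe_print_list([1, 2, 3], 5) prints "123" plus a newline and returns 3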
| [
"[email protected]"
] | |
9c4c6f65428497d682a9810ee5fcf8eca28e6d13 | 8159412a6301c7ae76242180618ec0623f6a8e99 | /Aula2/flask_templates.py | b297686f0316c12d1b3d1cc41004d6250766fa34 | [] | no_license | yama1102/python521 | bba4faf3c81d74c1b380207f787ba4e3a83883e2 | 1d84b4c44797541fc0c53fd15ee8166b25f0a88c | refs/heads/master | 2020-12-21T22:39:58.826052 | 2020-01-31T19:32:46 | 2020-01-31T19:32:46 | 236,586,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/env python3
import flask
app = flask.Flask(__name__)
@app.route('/')
def index():
dados = {
'title': 'Titulo jinja',
'word': 'Eu sei programar',
'botao': True
}
# return flask.render_template('index.html', title = 'Titulo Jinja', index=dados)
return flask.render_template('principal.html')
app.run(debug=True)
| [
"[email protected]"
] | |
bf834121013df4c7d43567494dd7c59522486142 | ac8dc0d68941644c5abb75620ea6fe093e0c9c5e | /advanced/threadingtest.py | 98a57d1bbce0e93935fc009b45f24dca4cf412cd | [] | no_license | Languomao/PythonProject | 23a04710141afadda60966d76af201574519d5a1 | c6d7103c444ea990585520a16965fad3f6b41ffa | refs/heads/master | 2022-12-19T02:08:21.209941 | 2020-09-22T10:55:10 | 2020-09-22T10:55:10 | 290,992,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | import threading
import time
exitFlag = 0
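# Set exitFlag to a truthy value (e.g. from the main thread) to make print_time bail out early.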
class MyThread(threading.Thread):
def __init__(self, threadName, threadID, counter):
threading.Thread.__init__(self)
self.threadName = threadName
self.threadID = threadID
self.counter = counter
def run(self):
print("开始线程:" + self.threadName)
print_time(self.threadName, self.counter, 5)
print("退出线程:" + self.threadName)
def print_time(threadName, counter, delay):
while counter:
if exitFlag:
            return  # 'threadName' is a str and has no exit(); stop this thread's printing instead
time.sleep(delay)
print("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
mythread1 = MyThread("Thread-1", 1, 5)
mythread2 = MyThread("Thread-2", 2, 4)
mythread1.start()
mythread2.start()
mythread1.join()
mythread2.join()
| [
"[email protected]"
] | |
a08f6064088a1e6734363005161d93dfea63de55 | 9c92b6ce98167e33cbe5535b1502c7e6c444cd7d | /constructors.py | 94ff1ee0b3b72f27debd68fd0f43572a894a87e4 | [] | no_license | seblee97/f1_fantasy | d7bf454c2c48986db0c02cd320ea7d2f209ec8ed | 231bd967f5ebffee7553ab71e85414e3781803fe | refs/heads/master | 2023-04-17T12:02:38.359698 | 2021-04-18T20:06:52 | 2021-04-18T20:06:52 | 352,050,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | import constants
from f1_objects import Constructor
mercedes = Constructor(
name=constants.MERCEDES, price=37.8, drivers=[constants.HAMILTON, constants.BOTTAS]
)
red_bull = Constructor(
name=constants.RED_BULL, price=26.0, drivers=[constants.VERSTAPPEN, constants.PEREZ]
)
mclaren = Constructor(
name=constants.MCLAREN, price=18.8, drivers=[constants.NORRIS, constants.RICCIARDO]
)
ferrari = Constructor(
name=constants.FERRARI, price=18.5, drivers=[constants.LECLERC, constants.SAINZ]
)
alpha_tauri = Constructor(
name=constants.ALPHA_TAURI, price=13.1, drivers=[constants.TSUNODA, constants.GASLY]
)
aston_martin = Constructor(
name=constants.ASTON_MARTIN,
price=17.0,
drivers=[constants.STROLL, constants.VETTEL],
)
alpine = Constructor(
name=constants.ALPINE, price=15.1, drivers=[constants.ALONSO, constants.OCON]
)
alfa_romeo = Constructor(
name=constants.ALFA_ROMEO,
price=8.9,
drivers=[constants.RAIKKONEN, constants.GIOVINAZZI],
)
haas = Constructor(
name=constants.HAAS, price=6.1, drivers=[constants.SCHUMACHER, constants.MAZEPIN]
)
williams = Constructor(
name=constants.WILLIAMS, price=6.3, drivers=[constants.RUSSELL, constants.LATIFI]
)
all_constructors = [
mercedes,
red_bull,
mclaren,
ferrari,
alpha_tauri,
aston_martin,
alpine,
alfa_romeo,
haas,
williams,
]
| [
"[email protected]"
] | |
13a4cfdc9105077329bdddd1b212b3f638efe040 | 3db5e39d9bbe1c86229a26e7d19e3ceb37f902e3 | /algorithm/10week/workshop1_최적경로.py | 527bc797632c491ccd80c8f088ab9cf46ba3bc3d | [] | no_license | sweetrain096/rain-s_python | 5ca2fe5e7f97a681b6e75e64264687a723be1976 | eb285eb50eeebfaa2b4a4d7816314e2073faab00 | refs/heads/master | 2021-07-19T16:06:01.389283 | 2020-05-29T14:56:16 | 2020-05-29T14:56:16 | 162,240,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | import sys
sys.stdin = open("workshop1_input.txt")
def perm(n, k, sum_data):
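    # Brute-force search over visit orders: permute data[k:] in place,
    # accumulate Manhattan distance in sum_data, and prune any branch whose
    # partial distance already exceeds the best complete route (result).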
global result
if result < sum_data:
return
if n == k:
last_check = sum_data + abs(data[k - 1][0] - er) + abs(data[k - 1][1] - ec)
result = min(last_check, result)
# print(data)
else:
for i in range(k, n):
data[i], data[k] = data[k], data[i]
if not k:
tmp_sum = abs(sr - data[k][0]) + abs(sc - data[k][1])
perm(n, k + 1, sum_data + tmp_sum)
else:
tmp_sum = abs(data[k - 1][0] - data[k][0]) + abs(data[k - 1][1] - data[k][1])
perm(n, k + 1, sum_data + tmp_sum)
data[i], data[k] = data[k], data[i]
t = int(input())
for tc in range(t):
n = int(input())
tmp = list(map(int, input().split()))
sr, sc, er, ec = tmp[0], tmp[1], tmp[2], tmp[3]
data = []
a = []
for i in range(4, 4 + (n * 2)):
a.append(tmp[i])
if len(a) == 2:
data.append(a)
a = []
# print(sr, sc, er, ec)
# print(data)
result = 99999999999999999999999999999
perm(n, 0, 0)
print("#{} {}".format(tc + 1, result))
| [
"[email protected]"
] | |
50cb9be03e7a90465adb9cc4c291309a1c456643 | 2c13f599cc54be77ed202edc82422127ac52e9a5 | /led/views.py | e86d399db2d2663a65dec37287effe51560b959d | [] | no_license | Alexander3/raspberry-leds | f83129f8c6350616ecc3e28d107fe974d68d1e58 | 3970798bcd9a3d79fce289ae6878a73ff968aa9e | refs/heads/master | 2023-01-10T15:33:11.763194 | 2020-12-26T21:22:45 | 2020-12-26T21:22:45 | 170,722,411 | 0 | 0 | null | 2023-01-01T08:28:45 | 2019-02-14T16:30:02 | TypeScript | UTF-8 | Python | false | false | 1,556 | py | import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from pi_led.celery import app
from .domain.leds import led_strip
from .tasks import wake_me_up, display_wave
from django_celery_beat.models import PeriodicTask, CrontabSchedule
@csrf_exempt
def set_color(request):
data = json.loads(request.body.decode())
hexcolor = data['color'].lstrip('#')
rgb = tuple(int(hexcolor[i:i + 2], 16) for i in (0, 2, 4))
# set_led_colors(*rgb)
led_strip.lerp(rgb, 1)
return HttpResponse()
@csrf_exempt
def set_alarm(request):
data = json.loads(request.body.decode())
schedule = CrontabSchedule.objects.get(id=1)
schedule.hour = data['hour']
schedule.minute = data['minute']
schedule.save()
PeriodicTask.objects.update_or_create(
crontab=schedule,
name='Wake me',
defaults=dict(
task='led.tasks.wake_me_up',
kwargs=json.dumps({'speed': 15 * 60}),
)
)
return HttpResponse()
@csrf_exempt
def wake_me_slowly(request):
wake_me_up.delay(60 * 10)
return HttpResponse()
@csrf_exempt
def wave(request):
display_wave.delay()
return HttpResponse()
@csrf_exempt
def stop_celery(request):
app.control.purge()
i = app.control.inspect()
for tasks in i.active().values():
for task in tasks:
app.control.revoke(task['id'], terminate=True)
app.control.purge()
return HttpResponse()
@csrf_exempt
def me(request):
    return HttpResponse('This is RaspberryPi')
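# Hedged wiring sketch (hypothetical urls.py for this app):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('color/', views.set_color), path('alarm/', views.set_alarm)]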
| [
"[email protected]"
] | |
7b2521891b78babd941f5180ba79de7d9488c4e8 | ee55a6ce2197d690d5b642bf54918b1de6ad0633 | /cash/apps/cards/migrations/0001_initial.py | 4c6de5bfe7acf83cbfcb85202267d07f48845ff1 | [] | no_license | ATNC/cash | d4152dd122310ca3cbca112ef2c12cddfaa1f2c9 | 56298fab5bd9b9115fa559b357fb4d7ecf496d35 | refs/heads/master | 2021-01-11T11:17:28.499012 | 2016-02-11T22:53:21 | 2016-02-11T22:53:21 | 51,240,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 09:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cards',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cards_num', models.CharField(max_length=16, verbose_name='Card id')),
('cards_pin', models.PositiveIntegerField(max_length=4, verbose_name='Card pin')),
('cards_success', models.PositiveIntegerField(default=4, max_length=1, verbose_name='Card success')),
('cards_balance', models.IntegerField(default=0, verbose_name='Card balance')),
],
options={
'ordering': ['cards_success'],
},
),
migrations.CreateModel(
name='Transactions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transactions_code', models.CharField(choices=[('CB', 'Check balance'), ('GM', 'Get money'), ('SS', 'Success operation')], max_length=2)),
('transaction_timestamp', models.TimeField(auto_now_add=True)),
('transaction_sum', models.CharField(default='0', max_length=255, verbose_name='Transaction sum')),
('transactions_link', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='card', to='cards.Cards', verbose_name='Card')),
],
options={
'ordering': ['transaction_timestamp'],
},
),
]
| [
"[email protected]"
] | |
32b642d2a024996ca327534823b55c601921ace7 | 279069128ad65c94c0cbd6fe5f79184602724ba3 | /App_assist.py | 027f160c27ec44a8d04351dcf8e766c1e85fbe8f | [] | no_license | Maxhendo/refactored-eureka | 28e2a22d8f98a41b0368599ad8571abbabe62c1f | 7ecd650eaf4b1c6872f341ea35fd17a00ae1db5c | refs/heads/main | 2023-03-15T08:49:00.281230 | 2021-02-23T04:37:04 | 2021-02-23T04:37:04 | 341,406,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import os
import requests
from flask import redirect, render_template, request, session
from functools import wraps
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
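    Usage sketch (hypothetical route name):
        @app.route("/history")
        @login_required
        def history():
            ...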
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_ID") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function | [
"[email protected]"
] | |
f18ce412fbc8b06df919a3ca53bba5efec7859e7 | 2ab0791967ae3bbf52053ef82d9ca4ab14f55aac | /algorithm/python/NumberofSubarraysofSizeKandAverageGreaterthanorEqualtoThreshold/test_solution.py | 280afc783a7ccc7b1bdb8aa073f2c31688c67a7d | [] | no_license | verybada/leetcode | 8f01dde93f1cd3b0b9698029424cae262d776a48 | 1f0fc27b635bd6c3d6b0f8baad628d1a254bc143 | refs/heads/master | 2022-10-13T13:47:21.506053 | 2022-09-15T15:08:22 | 2022-09-15T15:08:40 | 41,243,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import pytest
from .solution import Solution
@pytest.mark.parametrize(
"arr, k, threshold, expected",
[
[[2, 2, 2, 2, 5, 5, 5, 8], 3, 4, 3],
[[11, 13, 17, 23, 29, 31, 7, 5, 2, 3], 3, 5, 6],
],
)
def test_num_of_subarrays(arr, k, threshold, expected):
assert Solution().numOfSubarrays(arr, k, threshold) == expected
| [
"[email protected]"
] | |
03192564be49ec52d56ec49d63bc7b541888ec09 | bf5804ecc4f1730b9248974dd04a3abd4e705316 | /bitutil.py | feab605bdeea8f3221d4d6748ecbe48bd113ae44 | [] | no_license | Haretidm/Matrix | cec50cf87a7062ba6e623175fd2f7b2f03d42de1 | f0c1f4fdd9a36fddb2dc37041711aad4792a72a1 | refs/heads/master | 2021-01-23T07:16:33.572780 | 2013-08-27T10:20:29 | 2013-08-27T10:20:29 | 12,402,919 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | """
Implements several convenience operations for use with the ECC lab.
Author: Landon Judkins (ljudkins)
Date: Spring 2009
Updated by Nick Gaya, Spring 2013
Requires: fields matutil
"""
from GF2 import zero, one
import mat
import random
def str2bits(inp):
"""
Convert a string into a list of bits, with each character's bits in order
of increasing significance.
"""
bs = [1<<i for i in range(8)]
return [one if ord(s)&b else zero for s in inp for b in bs]
def bits2str(inp):
"""
Convert a list of bits into a string. If the number of bits is not a
multiple of 8, the last group of bits will be padded with zeros.
"""
bs = [1<<i for i in range(8)]
return ''.join(chr(sum(bv if bit else 0 for bv,bit in zip(bs, inp[i:i+8]))) for i in range(0, len(inp), 8))
def bits2mat(bits,nrows=4,trans=False):
"""
Convert a list of bits into a matrix with nrows rows.
The matrix is populated by bits column by column
Keyword arguments:
nrows -- number of rows in the matrix (default 4)
trans -- whether to reverse rows and columns of the matrix (default False)
"""
ncols = len(bits)//nrows
f = {(i,j):one for j in range(ncols) for i in range(nrows) if bits[nrows*j+i]}
A = mat.Mat((set(range(nrows)), set(range(ncols))), f)
if trans: A = mat.transpose(A)
return A
def mat2bits(A, trans=False):
"""
Convert a matrix into a list of bits.
The bits are taken from the matrix column by column with keys in sorted order
Keyword arguments:
trans -- whether to reverse rows and columns of the matrix (default False)
"""
if trans:
return [A[i,j] for i in sorted(A.D[0]) for j in sorted(A.D[1])]
else:
return [A[i,j] for j in sorted(A.D[1]) for i in sorted(A.D[0])]
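# Hedged usage sketch (interactive; relies on GF2.zero being falsy, exactly as
# bits2str above already assumes):
#   >>> A = bits2mat(str2bits('ECC'), nrows=4)
#   >>> bits2str(mat2bits(A))
#   'ECC'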
def noise(A,freq):
"""
return a random noise matrix with the same domain as A.
The probability for 1 in any entry of the matrix is freq.
"""
f = {(i,j):one for i in A.D[0] for j in A.D[1] if random.random() < freq}
return mat.Mat(A.D, f) | [
"[email protected]"
] | |
fa997f82b13e2be1aa2a078894efc7faea5c6144 | ca6fcc6697d08cc30ed9ce4577db42b0991b16cd | /1Scripts/TouchObjs.py | 84a917622557dabed5354df390b0cd1017fc12a2 | [] | no_license | brnold/LDBInteractiveModelProject | 139e3fb35cb99733b277223169f2979e383d0ec1 | cd7abb30c1ff480b4c5327810e5535b87643d07c | refs/heads/master | 2021-01-17T11:17:09.245811 | 2016-06-07T13:53:52 | 2016-06-07T13:53:52 | 52,618,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,354 | py | #************************************************************************************************************
# ECE 491 Senior Design
# Winter 2016
# Team 6
#
# Python Script : TouchObjs.py
# Supporting Scripts : ObjTouchCtrl.py,
#
# Purpose:
# Touch objects script. This python script module contains 32 objects of type TouchCtrl.TouchObj()
# There are 4 touch objects (an address space of for 4 MPR121 IC chips) in
# sub-network (an I2C channel) by 8 sub-networks (the number in I2C channels allowable by the
# TCA9548A 1-to-8 I2C Multiplexer.
#
# Functions:
# ConFigController(): - configure every MPR121 IC chip in the system.
#
# GetStatus(): - Update the touch status of every touch object.
#
#
# Primary coder Benjamin
# Secondary coder Bret
# Code contributor Katrina
# Code contributor Ryan
#************************************************************************************************************
# Revision
# 0 Supporting script creation for touch sensing management. Bret 2016-03-20
#
# x Edits. TBD 2016-0x-xx
#
#************************************************************************************************************
import TouchCtrl
Status = 0
TOs = [
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 0
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 1
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 2
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 3
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 4
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 5
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), # net 6
TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj(), TouchCtrl.TouchObj() # net 7
]
StatusOfTOs = [
Status, Status, Status, Status, # net 0
Status, Status, Status, Status, # net 1
Status, Status, Status, Status, # net 2
Status, Status, Status, Status, # net 3
Status, Status, Status, Status, # net 4
Status, Status, Status, Status, # net 5
Status, Status, Status, Status, # net 6
Status, Status, Status, Status # net 7
]
FiltValsOfTOs = [[] for _ in range(32)]
BaseLineValsOfTOs = [[] for _ in range(32)]
def ConFigController(): # Configure every touch sensing IC
    for idx in range(0, 32):  # idx will range from 0 to 31
TOs[idx].ConFigController()
def GetStatus(): # Update touch status from all ICs.
    for idx in range(0, 32):  # idx will range from 0 to 31
StatusOfTOs[idx] = TOs[idx].displayStatus()
FiltValsOfTOs[idx] = TOs[idx].filtVal
BaseLineValsOfTOs[idx] = TOs[idx].baseLines
# print FiltValsOfTOs[idx]
# print StatusOfTOs # Feedback during development.
return StatusOfTOs
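# Hedged usage sketch (requires the MPR121/TCA9548A hardware behind TouchCtrl):
#   ConFigController()           # configure all 32 controllers once at startup
#   while True:
#       touched = GetStatus()    # 32 per-object touch status words
#       ...                      # react to touched pads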
# End of TouchObjs.py
| [
"[email protected]"
] | |
dc746b9692d6ba2d3d49f9f8721cc97b63ddb939 | be8112d14884ce4e50aeb0cdc15cac4e459a04a1 | /backend/face-crop.py | ce9a1e7bbd57fae5b0896e7280786fe00fe0b186 | [] | no_license | apalmk/SLAC | b0a348bd7b26e2388dcdc8b89cc5abe389a778c2 | 66a3e9091368f0b5e49a6f4fb5f0f083394d4e39 | refs/heads/master | 2020-12-09T03:05:39.657209 | 2020-01-29T22:46:12 | 2020-01-29T22:46:12 | 233,172,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import os
import cv2
image_path = "images"
def save_faces(cascade, imgname):
img = cv2.imread(os.path.join(image_path, imgname))
for i, face in enumerate(cascade.detectMultiScale(img)):
x, y, w, h = face
sub_face = img[y:y + h, x:x + w]
cv2.imwrite(os.path.join("faces", "{}_{}.jpg".format(imgname, i)), sub_face)
if __name__ == '__main__':
face_cascade = "haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(face_cascade)
# Iterate through files
for f in [f for f in os.listdir(image_path) if os.path.isfile(os.path.join(image_path, f))]:
save_faces(cascade, f) | [
"[email protected]"
] | |
1097053a79b90164857c8dc4bcb6fad625bceb8e | 1c321da055cecc5dad2fc00fa327178671bf19a2 | /50/动动手0.py | 866974e850374f95fc27e98d81ed08400e4a81d8 | [] | no_license | Qiren-Wise/Python-FishC-learning | da498a10d9c501d738cf74d90fce9c0d5f41de80 | be5b6ac2cc4395dd361d51446cd266a9a49b047a | refs/heads/master | 2023-05-18T11:45:18.232922 | 2021-06-04T14:58:28 | 2021-06-04T14:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | # 0. 要求实现一个功能与 reversed() 相同(内置函数 reversed(seq) 是返回一个迭代器,是序列 seq 的逆序显示)的生成器。
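# (English translation of the exercise above: implement a generator equivalent
# to reversed(); the built-in reversed(seq) returns an iterator that presents
# the sequence seq in reverse order.)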
def myRev(string):
index = len(string)
for i in range(index-1,-1,-1):
yield string[i]
for j in myRev("fishc"):
print(j,end='')
| [
"[email protected]"
] | |
0c1aa9e8deee657c8629444b2a96f99eef1029ab | 2bb9776f275c3c9bfb4aade6dc5dea4b8747b4eb | /search/search.py | f4ed00315a03a93d3ceaa66a46967d09c4e33a2e | [] | no_license | silverjosh90/pacmanAI | 831272d61d333fe14340a7729b4d157ab5ee5712 | ae9649dd5e6c732b1ede1ae52ce7e778832928ad | refs/heads/master | 2021-01-20T07:56:45.469992 | 2017-05-02T19:29:46 | 2017-05-02T19:29:46 | 90,070,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,218 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
    # Graph-search DFS: the fringe holds (state, actions-to-reach-state) pairs
    # and a visited set prevents re-expanding states already explored.
    fringe = util.Stack()
    fringe.push((problem.getStartState(), []))
    visited = set()
    while not fringe.isEmpty():
        node, actions = fringe.pop()
        if problem.isGoalState(node):
            return actions
        if node not in visited:
            visited.add(node)
            for successor, action, stepCost in problem.getSuccessors(node):
                fringe.push((successor, actions + [action]))
    return []
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"[email protected]"
] | |
de1929d44d98ae36272a6cc91df53af4a41c83a7 | 579d09f04d7f1adade8213bf72439f9c9474f91a | /DoG Weighting/gridConstructionSphere2.py | 115a4ca3ec3e3dcdae6a8169a220c1cba69b7699 | [] | no_license | rmachavariani/ae2223 | b96857aa1deb22d2520d49ef2f6e88ee99c528bb | ec7110588630341620a1f4998088f1957d926676 | refs/heads/master | 2023-05-13T06:33:05.415909 | 2021-06-10T20:55:10 | 2021-06-10T20:55:10 | 346,340,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,864 | py | import numpy as np
from class_def2 import *
from class_def3 import *
from math import floor, sqrt
import time
def loadData():
''' Loads data from carMirrorData.dat
:return: nrVectors x 6 np array
'''
# load the data
t1 = time.time()
data = np.loadtxt("carMirrorData.dat", max_rows=500)
t2 = time.time()
print("Loading done in ", "{:.2f}".format(t2 - t1), " s")
return data
def determineMaxMin(data):
    '''Determines the minimum and maximum value for every dimension
:return: xMin, xMax, yMin, yMax, zMin, zMax
'''
t1 = time.time()
# determine min and max
xMin = np.amin(data[:, 0])
xMax = np.amax(data[:, 0])
yMin = np.amin(data[:, 1])
yMax = np.amax(data[:, 1])
zMin = np.amin(data[:, 2])
zMax = np.amax(data[:, 2])
# report to user
t2 = time.time()
print("Max and min found in ", "{:.2f}".format(t2 - t1), " s")
return xMin, xMax, yMin, yMax, zMin, zMax
def createVectorObjects(data):
''' Creates objects from the particle/vector data
:param data: raw data in numpy array
:return: 1D numpy array with vectors as vector objects
'''
t1 = time.time()
# create empty numpy array
dataPoints = np.empty(np.size(data, axis=0), dtype=object)
# loop over data and create vector object for each particle row
for i in range(np.size(data, axis=0)):
dataPoints[i] = vector(data[i, :])
# report to user
t2 = time.time()
print("Objects created in ", "{:.2f}".format(t2 - t1), " s")
return dataPoints
def createGridPitchAndRadius(pitch, radius, xMin, xMax, yMin, yMax, zMin, zMax):
t1 = time.time()
# calculate amount of bins in every direction
nrBinsX = floor((xMax - xMin) / pitch) + 2
nrBinsY = floor((yMax - yMin) / pitch) + 2
nrBinsZ = floor((zMax - zMin) / pitch) + 2
# create empty 3D array
grid = np.empty((nrBinsX, nrBinsY, nrBinsZ), dtype=object)
    # set radius of bins and the bin counts as class attributes
gridBin.radius = radius
gridBin.nrBinsX = nrBinsX
gridBin.nrBinsY = nrBinsY
gridBin.nrBinsZ = nrBinsZ
# define x, y and z coordinates of center bin
x = np.array([(xMin + i * pitch) for i in range(nrBinsX)])
y = np.array([(yMin + i * pitch) for i in range(nrBinsY)])
z = np.array([(zMin + i * pitch) for i in range(nrBinsZ)])
# fill matrix with bin objects by looping over matrix
for i in range(nrBinsX):
for j in range(nrBinsY):
for k in range(nrBinsZ):
grid[i, j, k] = gridBin(x[i], y[j], z[k])
# report to user
t2 = time.time()
print('Grid created in ', "{:.2f}".format(t2 - t1), " s")
return grid, nrBinsX, nrBinsY, nrBinsZ
def createGrid(nrBinsX, nrBinsY, nrBinsZ, radius, xMin, xMax, yMin, yMax, zMin, zMax):
'''Creates the grid by generating a 3D numpy array filled with
objects of class gridbin
:return: nrX x nrY x nrZ numpy array
'''
t1 = time.time()
# create empty 3D array
grid = np.empty((nrBinsX, nrBinsY, nrBinsZ), dtype=object)
# calculate width of bins in all directions
widthX = (xMax - xMin) / nrBinsX
widthY = (yMax - yMin) / nrBinsY
widthZ = (zMax - zMin) / nrBinsZ
# set widths of bin to class static members
gridBin.widthX = widthX
    gridBin.widthY = widthY
gridBin.widthZ = widthZ
# define x, y and z coordinates of center bin
x = np.linspace(xMin, xMax - widthX, nrBinsX) + widthX / 2
y = np.linspace(yMin, yMax - widthY, nrBinsY) + widthY / 2
z = np.linspace(zMin, zMax - widthZ, nrBinsZ) + widthZ / 2
# fill matrix with bin objects by looping over matrix
for i in range(nrBinsX):
for j in range(nrBinsY):
for k in range(nrBinsZ):
grid[i, j, k] = gridBin(x[i], y[j], z[k])
# report to user
t2 = time.time()
print('Grid created in ', "{:.2f}".format(t2 - t1), " s")
return grid
def assignVectorsToGrid(vectors, grid):
t1 = time.time()
# get bin radius and amount of bins in each direction
radius = gridBin.radius
nrBinsX = gridBin.nrBinsX
nrBinsY = gridBin.nrBinsY
nrBinsZ = gridBin.nrBinsZ
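    # Brute-force assignment: every vector is tested against every bin centre,
    # so cost grows as O(n_bins * n_vectors) - acceptable for the 500-row sample
    # loaded above, but worth vectorising for full datasets.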
# loop through all bins
for i in range(nrBinsX):
for j in range(nrBinsY):
for k in range(nrBinsZ):
# get bin object
aBin = grid[i][j][k]
# get coordinates
x = aBin.x
y = aBin.y
z = aBin.z
# loop over all vectors
for vector in vectors:
# get coordinates
xx = vector.x
yy = vector.y
zz = vector.z
if sqrt((xx-x)**2 + (yy-y)**2 + (zz-z)**2) <= radius:
aBin.addVector(vector)
# report to user
t2 = time.time()
print("Assigning of vectors to bins completed in ", "{:.2f}".format(t2 - t1), " s")
return grid
#-------------------------------MAIN--------------------------------#
def getSphericalGridWithVectors(pitch,radius):
t1 = time.time()
# load the data
data = loadData()
# determine max and min of data in every dimension
minMax = determineMaxMin(data)
# set parameters for bins
xMin = minMax[0]
xMax = minMax[1]
yMin = minMax[2]
yMax = minMax[3]
zMin = minMax[4]
zMax = minMax[5]
# transform raw data into vector objects
dataPoints = createVectorObjects(data)
# create bins in grid
grid, nrBinsX, nrBinsY, nrBinsZ = createGridPitchAndRadius(pitch,radius,xMin,xMax,yMin,yMax,zMin,zMax)
# assign vector objects to correct bins
# grid is the 3D array filled with gridBin objects containing
# the correct vector objects
grid = assignVectorsToGrid(dataPoints,grid)
# report to user
t2 = time.time()
print("Total time: ","{:.2f}".format(t2-t1)," s")
return grid, nrBinsX, nrBinsY, nrBinsZ
grid, nrBinsX, nrBinsY, nrBinsZ = getSphericalGridWithVectors(50,50)
biggrid, bignrBinsX, bignrBinsY, bignrBinsZ = getSphericalGridWithVectors(50,75)
for i in range(nrBinsX):
for j in range(nrBinsY):
for k in range(nrBinsZ):
# normal averaging method
grid[i][j][k].calculateNormalAverage()
# Gaussian averaging method
grid[i][j][k].calculateStandardDeviation()
datavarU, datavarV, datavarW = grid[i][j][k].calculateVariance()
biggrid[i][j][k].calculateNormalAverage()
# Gaussian averaging method
biggrid[i][j][k].calculateStandardDeviation()
bigdatavarU, bigdatavarV, bigdatavarW = biggrid[i][j][k].calculateVariance()
grid[i][j][k].calculateNewGaussianAverage(datavarU, datavarV, datavarW, bigdatavarU, bigdatavarV, bigdatavarW)
print(grid[1][1][1].newGaussianAverage)
| [
"[email protected]"
] | |
56a0ac27d45c3d9350de507c52e868018bb99da5 | a0517ded9387212c385d5c6a2d5a921213b00c36 | /miller_ecog_tools/SubjectLevel/Analyses/subject_bri_novelty.py | b564cf595eab20fe9e57deb1cfcdfd0f3f930c8a | [] | no_license | jayfmil/miller_ecog_tools | 10e7b96090aec4f6ee906df89061494ed2b80358 | a2b7cd2b9c8ff311fd2d60916acd1959e3b07306 | refs/heads/master | 2021-06-27T23:23:07.170567 | 2019-05-10T15:46:25 | 2019-05-10T15:46:25 | 60,645,025 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 40,496 | py | """
"""
import os
import pycircstat
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray
import h5py
from tqdm import tqdm
from joblib import Parallel, delayed
from scipy import signal
from scipy.stats import zscore, ttest_ind, sem
from scipy.signal import hilbert
from ptsa.data.timeseries import TimeSeries
from ptsa.data.filters import MorletWaveletFilter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from miller_ecog_tools.SubjectLevel.subject_analysis import SubjectAnalysisBase
from miller_ecog_tools.SubjectLevel.subject_BRI_data import SubjectBRIData
from miller_ecog_tools.Utils import neurtex_bri_helpers as bri
# figure out the number of cores available for a parallel pool. Will use half
import multiprocessing
NUM_CORES = multiprocessing.cpu_count()
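# Hedged usage sketch (hypothetical task/subject IDs; the real loader expects
# valid BRI identifiers):
#   subj = SubjectNoveltyAnalysis(task='example_task', subject='example_subject')
#   subj.load_data()    # populates subj.subject_data
#   subj.analysis()     # writes <...>/SubjectNoveltyAnalysis_res/novelty.hdf5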
class SubjectNoveltyAnalysis(SubjectAnalysisBase, SubjectBRIData):
"""
"""
def __init__(self, task=None, subject=None, montage=0):
super(SubjectNoveltyAnalysis, self).__init__(task=task, subject=subject, montage=montage)
        # this needs to be an event-locked analysis
self.do_event_locked = True
# frequencies at which to compute power using wavelets
self.power_freqs = np.logspace(np.log10(1), np.log10(100), 50)
# also compute power/phase at frequencies in specific bands using hilbert transform, if desired
self.hilbert_bands = np.array([[1, 4], [4, 9]])
# how much time (in s) to remove from each end of the data after wavelet convolution
self.buffer = 1.5
        # settings for gaussian kernel used to smooth spike trains
# enter .kern_width in milliseconds
self.kern_width = 150
self.kern_sd = 10
# window to use when computing spike phase
self.phase_bin_start = 0.0
self.phase_bin_stop = 1.0
        # do we want to only include neurons and trials where the neuron actually modulates its firing rate in response
# to the item coming on the screen
self.z_responsive_thresh = 3
# set to True to only include hits and correct rejections
# self.only_correct_items = False
self.max_lag = 8
# string to use when saving results files
self.res_str = 'novelty.hdf5'
def _generate_res_save_path(self):
self.res_save_dir = os.path.join(os.path.split(self.save_dir)[0], self.__class__.__name__+'_res')
def load_res_data(self):
"""
Load results if they exist and modify self.res to hold them.
"""
if self.res_save_file is None:
self._make_res_dir()
if os.path.exists(self.res_save_file):
print('%s: loading results.' % self.subject)
self.res = h5py.File(self.res_save_file, 'r')
else:
print('%s: No results to load.' % self.subject)
def save_res_data(self):
"""
"""
pass
def unload_res_data(self):
"""
        Close the open results HDF5 file.
"""
self.res.close()
def analysis(self):
"""
        For each session, channel, and spike cluster: compute power/phase novelty
        effects and firing-rate statistics, and write them to the results file.
"""
if self.subject_data is None:
print('%s: compute or load data first with .load_data()!' % self.subject)
return
# create the file
res_file = h5py.File(self.res_save_file, 'w')
# arguments to pass into the event filtering function
event_filters = {'all_events': {'only_correct': False},
# 'all_events_correct': {'only_correct': True},
'resp_events': {'filter_events': 'one', 'do_inverse': False, 'only_correct': False},
# 'resp_events_only_correct': {'filter_events': 'one', 'do_inverse': False, 'only_correct': True},
'resp_items': {'filter_events': 'both', 'do_inverse': False, 'only_correct': False},
# 'resp_items_only_correct': {'filter_events': 'both', 'do_inverse': False, 'only_correct': True},
'resp_events_inv': {'filter_events': 'one', 'do_inverse': True, 'only_correct': False},
'resp_items_inv': {'filter_events': 'both', 'do_inverse': True, 'only_correct': False}}
# open a parallel pool using joblib
# with Parallel(n_jobs=int(NUM_CORES/2) if NUM_CORES != 1 else 1, verbose=5) as parallel:
parallel = None
# loop over sessions
for session_name, session_grp in self.subject_data.items():
print('{} processing.'.format(session_grp.name))
# and channels
for channel_num, channel_grp in tqdm(session_grp.items()):
res_channel_grp = res_file.create_group(channel_grp.name)
# self.res[channel_grp.name] = {}
# self.res[channel_grp.name]['firing_rates'] = {}
# load behavioral events
events = pd.read_hdf(self.subject_data.filename, channel_grp.name + '/event')
events['item_name'] = events.name.apply(lambda x: x.split('_')[1])
# load eeg for this channel
eeg_channel = self._create_eeg_timeseries(channel_grp, events)
# length of buffer in samples. Used below for extracting smoothed spikes
samples = int(np.ceil(float(eeg_channel['samplerate']) * self.buffer))
# next we want to compute the power at all the frequencies in self.power_freqs and at
                # all the timepoints in eeg. This function saves the results to file and returns phase data
# for later use.
# Compute for wavelet frequencies
phase_data = run_novelty_effect(eeg_channel, self.power_freqs, self.buffer, res_channel_grp,
parallel, '_wavelet', save_to_file=False)
# and for hilbert bands
phase_data_hilbert = run_novelty_effect(eeg_channel, self.hilbert_bands, self.buffer,
res_channel_grp, parallel, '_hilbert', save_to_file=False)
# also store region and hemisphere for easy reference
res_channel_grp.attrs['region'] = eeg_channel.event.data['region'][0]
res_channel_grp.attrs['hemi'] = eeg_channel.event.data['hemi'][0]
# and clusters
for cluster_num, cluster_grp in channel_grp['spike_times'].items():
clust_str = cluster_grp.name.split('/')[-1]
res_cluster_grp = res_channel_grp.create_group(clust_str)
# find number of spikes at each timepoint and the time in samples when each occurred
spike_counts, spike_rel_times = self._create_spiking_counts(cluster_grp, events,
eeg_channel.shape[1])
# smooth the spike train. Also remove the buffer
kern_width_samples = int(eeg_channel.samplerate.data / (1000 / self.kern_width))
kern = signal.gaussian(kern_width_samples, self.kern_sd)
kern /= kern.sum()
smoothed_spike_counts = np.stack([signal.convolve(x, kern, mode='same')[samples:-samples]
for x in spike_counts * eeg_channel.samplerate.data], 0)
smoothed_spike_counts = self._create_spike_timeseries(smoothed_spike_counts,
eeg_channel.time.data[
samples:-samples],
channel_grp.attrs['samplerate'],
events)
# get the phases at which the spikes occurred and bin into novel and repeated items
# 1. for each freq in power_freqs
spike_phases = _compute_spike_phase_by_freq(np.array(spike_rel_times),
self.phase_bin_start,
self.phase_bin_stop,
phase_data,
events)
# 2: for each hilbert band
spike_phases_hilbert = _compute_spike_phase_by_freq(np.array(spike_rel_times),
self.phase_bin_start,
self.phase_bin_stop,
phase_data_hilbert,
events)
# and finally loop over event conditions
for this_event_cond, event_filter_kwargs in event_filters.items():
# figure out which events to use
if 'all_events' in this_event_cond:
events_to_keep = np.array([True] * events.shape[0])
else:
events_to_keep = self._filter_to_event_condition(eeg_channel, spike_counts, events,
**event_filter_kwargs)
if event_filter_kwargs['only_correct']:
events_to_keep = self._filter_to_correct_items(events, events_to_keep)
if self.max_lag is not None:
events_to_keep = events_to_keep & (events.lag.values <= self.max_lag)
# do the same computations on the wavelet derived spikes and hilbert
for phase_data_list in zip([spike_phases, spike_phases_hilbert],
['_wavelet', '_hilbert'],
[self.power_freqs, self.hilbert_bands]):
event_filter_grp = res_cluster_grp.create_group(this_event_cond+phase_data_list[1])
event_filter_grp.create_dataset('events_to_keep', data=events_to_keep)
do_compute_mem_effects = run_phase_stats(phase_data_list[0], events, events_to_keep,
event_filter_grp)
# also compute the power effects for these filtered event conditions
if do_compute_mem_effects:
_ = run_novelty_effect(eeg_channel[events_to_keep], phase_data_list[2], self.buffer,
event_filter_grp, parallel, '',
save_to_file=True)
# finally, compute stats based on normalizing from the pre-stimulus interval
spike_res_zs = compute_novelty_stats_without_contrast(smoothed_spike_counts[events_to_keep])
event_filter_grp.create_dataset('zdata_novel_mean',
data=spike_res_zs[0])
event_filter_grp.create_dataset('zdata_repeated_mean',
data=spike_res_zs[1])
event_filter_grp.create_dataset('zdata_novel_sem',
data=spike_res_zs[2])
event_filter_grp.create_dataset('zdata_repeated_sem',
data=spike_res_zs[3])
event_filter_grp.create_dataset('zdata_ts',
data=spike_res_zs[4])
event_filter_grp.create_dataset('zdata_ps',
data=spike_res_zs[5])
res_file.close()
self.res = h5py.File(self.res_save_file, 'r')
def _filter_to_correct_items(self, events, to_keep_bool):
# get boolean of correct responses
novel_items = events['isFirst'].values
pressed_old_key = events['oldKey'].values
hits = pressed_old_key & ~novel_items
correct_rejections = ~pressed_old_key & novel_items
correct = hits | correct_rejections
return to_keep_bool & correct
def _filter_to_event_condition(self, eeg_channel, spike_counts, events, filter_events='', do_inverse=False,
only_correct=False):
# normalize the presentation interval based on the mean and standard deviation of a pre-stim interval
baseline_bool = (eeg_channel.time.data > -1) & (eeg_channel.time.data < -.2)
baseline_spiking = np.sum(spike_counts[:, baseline_bool], axis=1) / .8
baseline_mean = np.mean(baseline_spiking)
baseline_std = np.std(baseline_spiking)
# get the firing rate of the presentation interval now and zscore it
presentation_bool = (eeg_channel.time.data > 0) & (eeg_channel.time.data <= 1)
presentation_spiking = np.sum(spike_counts[:, presentation_bool], axis=1) / 1.
        z_firing = (presentation_spiking - baseline_mean) / baseline_std  # divide (not multiply) by the SD to z-score
# Here, keep all events where the firing is above our threshold
if filter_events == 'one':
responsive_items = np.unique(events['item_name'][z_firing > self.z_responsive_thresh])
# make sure no "filler" items are present
responsive_items = np.array([s for s in responsive_items if 'filler' not in s])
# Here, only keep events if both presentations of the item are above threshold
else:
responsive_items_all = events['item_name'][z_firing > self.z_responsive_thresh]
responsive_items = []
for this_item in responsive_items_all:
if np.sum(responsive_items_all == this_item) == 2:
responsive_items.append(this_item)
responsive_items = np.unique(responsive_items)
# make a boolean of the items to keep
to_keep_bool = np.in1d(events['item_name'], responsive_items)
if do_inverse:
to_keep_bool = ~to_keep_bool
return to_keep_bool
def _compute_item_pair_diff(self, smoothed_spike_counts):
data = smoothed_spike_counts[~((smoothed_spike_counts.event.data['isFirst']) & (smoothed_spike_counts.event.data['lag'] == 0))]
item_names = data.event.data['item_name']
novel_rep_diffs = []
mean_item_frs = []
novel_mean = []
rep_mean = []
for this_item in np.unique(item_names):
data_item = data[item_names == this_item]
if data_item.shape[0] == 2:
novel_data_item = data_item[data_item.event.data['isFirst']].values
rep_data_item = data_item[~data_item.event.data['isFirst']].values
diff_due_to_cond = novel_data_item - rep_data_item
novel_rep_diffs.append(diff_due_to_cond)
novel_mean.append(novel_data_item)
rep_mean.append(rep_data_item)
mean_item_frs.append(np.mean(data_item.data))
novel_mean = np.squeeze(np.stack(novel_mean))
novel_sem = sem(novel_mean, axis=0)
novel_trial_means = np.mean(novel_mean, axis=1)
novel_mean = np.mean(novel_mean, axis=0)
rep_mean = np.squeeze(np.stack(rep_mean))
rep_sem = sem(rep_mean, axis=0)
rep_trial_means = np.mean(rep_mean, axis=1)
rep_mean = np.mean(rep_mean, axis=0)
return np.squeeze(np.stack(novel_rep_diffs)), np.stack(mean_item_frs), novel_mean, rep_mean, novel_sem, \
rep_sem, novel_trial_means, rep_trial_means
def _create_spiking_counts(self, cluster_grp, events, n):
spike_counts = []
spike_ts = []
# loop over each event
for index, e in events.iterrows():
# load the spike times for this cluster
spike_times = np.array(cluster_grp[str(index)])
# interpolate the timestamps for this event
start = e.stTime + self.start_ms * 1000
stop = e.stTime + self.stop_ms * 1000
timestamps = np.linspace(start, stop, n, endpoint=True)
# find the closest timestamp to each spike (technically, the closest timestamp following a spike, but I
# think this level of accuracy is fine). This is the searchsorted command. Then count the number of spikes
# that occurred at each timepoint with histogram
spike_bins = np.searchsorted(timestamps, spike_times)
bin_counts, _ = np.histogram(spike_bins, np.arange(len(timestamps) + 1))
spike_counts.append(bin_counts)
spike_ts.append(spike_bins)
return np.stack(spike_counts, 0), spike_ts
def _create_eeg_timeseries(self, grp, events):
data = np.array(grp['ev_eeg'])
time = grp.attrs['time']
channel = grp.attrs['channel']
sr = grp.attrs['samplerate']
        # create a TimeSeries object (in order to make use of their wavelet calculation)
dims = ('event', 'time', 'channel')
coords = {'event': events[events.columns[events.columns != 'index']].to_records(),
'time': time,
'channel': [channel]}
return TimeSeries.create(data, samplerate=sr, dims=dims, coords=coords)
def _create_spike_timeseries(self, spike_data, time, sr, events):
        # create a TimeSeries object
dims = ('event', 'time')
coords = {'event': events[events.columns[events.columns != 'index']].to_records(),
'time': time}
return TimeSeries.create(spike_data, samplerate=sr, dims=dims, coords=coords)
def aggregate_ensemble_phases_by_condition(self):
# labels of each band
bands = np.array(['{}-{}'.format(*x) for x in self.hilbert_bands])
# will hold long dataframes, one for novel and one for repeated spiking phases
novel_dfs = []
rep_dfs = []
# loop over each channel
for k, v in self.res.items():
# hemi and region is the same for all clusters on this channel
hemi = v['hemi']
region = v['region']
# loop over each cluster in channel
for k_clust, v_clust in v['firing_rates'].items():
novel_phases = v_clust['novel_phases_hilbert']
rep_phases = v_clust['rep_phases_hilbert']
# make a dataframe for novel and for repeated spiking phases
for i, data in enumerate([novel_phases, rep_phases]):
df = pd.DataFrame(data=data.T)
df['hemi'] = hemi
df['region'] = region
df['bands'] = bands
df['label'] = k + '-' + k_clust
df = df.melt(id_vars=['label', 'hemi', 'region', 'bands'], var_name='spike', value_name='phase')
# and store it
if i == 0:
novel_dfs.append(df)
else:
rep_dfs.append(df)
        # combine into one larger dataframe for each condition
novel_dfs = pd.concat(novel_dfs).reset_index(drop=True)
rep_dfs = pd.concat(rep_dfs).reset_index(drop=True)
return novel_dfs, rep_dfs
def plot_channel_res(self, channel_str, savedir=None, do_t_not_z=False):
"""
Plot time x freq heatmap, firing rates, and phase results for a given channel.
"""
# results for this channel only
channel_res = self.res[channel_str]
# pull out the specific results
if do_t_not_z:
lfp_data = channel_res['delta_t']
spike_data_key = 'delta_spike_t'
cbar_label = 't-stat'
else:
lfp_data = channel_res['delta_z']
spike_data_key = 'delta_spike_z'
cbar_label = 'z-score'
time = lfp_data.columns.values
clim = np.max(np.abs(lfp_data.values))
hemi = channel_res['hemi']
region = channel_res['region']
# how many units were recorded on this channel
cluster_keys = channel_res['firing_rates']
num_clusters = len(cluster_keys)
with plt.style.context('seaborn-white'):
with mpl.rc_context({'ytick.labelsize': 22,
'xtick.labelsize': 22}):
# make the initial figure
# top left, heatmap
ax1 = plt.subplot2grid((6, 6), (0, 0), colspan=3, rowspan=3)
# below heatmap, up to 3 cluster firing rates
ax2 = plt.subplot2grid((6, 6), (3, 0), colspan=3)
ax2.axis('off')
ax3 = plt.subplot2grid((6, 6), (4, 0), colspan=3)
ax3.axis('off')
ax4 = plt.subplot2grid((6, 6), (5, 0), colspan=3)
ax4.axis('off')
# to the right of heatmap, up to 3 phase by freq
ax5 = plt.subplot2grid((6, 6), (0, 3), rowspan=3)
ax5.axis('off')
ax6 = plt.subplot2grid((6, 6), (0, 4), rowspan=3)
ax6.axis('off')
ax7 = plt.subplot2grid((6, 6), (0, 5), rowspan=3)
ax7.axis('off')
fig = plt.gcf()
fig.set_size_inches(30, 20)
# make heatmap
im = ax1.imshow(lfp_data.values,
aspect='auto', interpolation='bicubic', cmap='RdBu_r', vmax=clim, vmin=-clim)
ax1.invert_yaxis()
# set the x values to be specific timepoints
x_vals = np.array([-500, -250, 0, 250, 500, 750, 1000]) / 1000
new_xticks = np.round(np.interp(x_vals, time, np.arange(len(time))))
ax1.set_xticks(new_xticks)
ax1.set_xticklabels([x for x in x_vals], fontsize=22)
ax1.set_xlabel('Time (s)', fontsize=24)
# now the y
new_y = np.interp(np.log10(np.power(2, range(7))), np.log10(self.power_freqs),
np.arange(len(self.power_freqs)))
ax1.set_yticks(new_y)
ax1.set_yticklabels(np.power(2, range(7)), fontsize=20)
ax1.set_ylabel('Frequency (Hz)', fontsize=24)
# add colorbar
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.1)
fig.colorbar(im, cax=cax, orientation='vertical')
# add a title
title_str = '{} - {} {}'.format(channel_str, hemi, region)
ax1.set_title(title_str, fontsize=20)
# firing rate plots
for i, this_cluster in enumerate(zip([ax2, ax3, ax4], list(cluster_keys))):
this_cluster_ax = this_cluster[0]
this_cluster_ax.axis('on')
divider = make_axes_locatable(this_cluster_ax)
dummy_ax = divider.append_axes('right', size='5%', pad=0.1)
dummy_ax.axis('off')
this_cluster_data = channel_res['firing_rates'][this_cluster[1]][spike_data_key]
zdata_novel = channel_res['firing_rates'][this_cluster[1]]['zdata_novel_mean']
zdata_novel_sem = channel_res['firing_rates'][this_cluster[1]]['zdata_novel_sem']
zdata_repeated = channel_res['firing_rates'][this_cluster[1]]['zdata_repeated_mean']
zdata_repeated_sem = channel_res['firing_rates'][this_cluster[1]]['zdata_repeated_sem']
zdata_ps = channel_res['firing_rates'][this_cluster[1]]['zdata_ps']
novel_c = [0.6922722029988465, 0.0922722029988466, 0.1677047289504037]
rep_c = [0.023913879277201077, 0.19653979238754324, 0.3919261822376009]
this_cluster_ax.plot(this_cluster_data.columns.values,
zdata_novel, lw=3, label='Novel', c=novel_c)
this_cluster_ax.fill_between(this_cluster_data.columns.values,
zdata_novel - zdata_novel_sem,
zdata_novel + zdata_novel_sem, alpha=.6, color=novel_c)
this_cluster_ax.plot(this_cluster_data.columns.values,
zdata_repeated, lw=3, label='Repeated', c=rep_c)
this_cluster_ax.fill_between(this_cluster_data.columns.values,
zdata_repeated - zdata_repeated_sem,
zdata_repeated + zdata_repeated_sem, alpha=.6, color=rep_c)
this_cluster_ax.legend(loc='best')
x = np.array([-500, -250, 0, 250, 500, 750, 1000]) / 1000
this_cluster_ax.set_xticks(x)
this_cluster_ax.set_xticklabels(['{0:.2}'.format(xstr) for xstr in x])
this_cluster_ax.set_ylabel('Z(Firing Rate)', fontsize=22)
this_cluster_ax.plot([-.5, 1], [0, 0], '--k', zorder=-2, lw=1.5, c=[.7, .7, .7])
this_cluster_ax.set_xlim(-.5, 1)
this_cluster_ax.set_title(this_cluster[1], fontsize=16)
if (i + 1) == num_clusters:
this_cluster_ax.set_xlabel('Time (s)', fontsize=22)
# phase_plots
for i, this_cluster in enumerate(zip([ax5, ax6, ax7], list(cluster_keys))):
this_cluster_ax_left = this_cluster[0]
this_cluster_ax_left.axis('on')
z_novel = channel_res['firing_rates'][this_cluster[1]]['z_novel']
z_rep = channel_res['firing_rates'][this_cluster[1]]['z_rep']
z_delta = z_novel - z_rep
this_cluster_ax_left.plot(z_delta,
np.log10(self.power_freqs), '-k', lw=3)
yticks = np.power(2, range(1, 7))
this_cluster_ax_left.set_yticks(np.log10(yticks))
this_cluster_ax_left.set_yticklabels(yticks)
this_cluster_ax_left.set_ylim(np.log10(1), np.log10(100))
this_cluster_ax_left.set_xlabel(r'$\Delta$(Z)', fontsize=22)
this_cluster_ax_left.plot([0, 0], this_cluster_ax_left.get_ylim(), '--k', zorder=-2, lw=1.5,
c=[.7, .7, .7])
xlim = np.max(np.abs(this_cluster_ax_left.get_xlim()))
this_cluster_ax_left.set_xlim(-xlim, xlim)
this_cluster_ax_left.set_xticks([-xlim, xlim])
divider = make_axes_locatable(this_cluster_ax_left)
this_cluster_ax_right = divider.append_axes('right', size='95%', pad=0.05)
data = -np.log10(channel_res['firing_rates'][this_cluster[1]]['med_pvals'])
this_cluster_ax_right.plot(data,
np.log10(self.power_freqs), '-k', lw=3)
yticks = np.power(2, range(1, 7))
this_cluster_ax_right.set_yticks(np.log10(yticks))
this_cluster_ax_right.set_yticklabels([])
this_cluster_ax_right.set_ylim(np.log10(1), np.log10(100))
this_cluster_ax_right.xaxis.set_label_position("top")
this_cluster_ax_right.xaxis.tick_top()
this_cluster_ax_right.set_xlabel('-log(p)', fontsize=22)
this_cluster_ax_right.plot([-np.log10(0.05), -np.log10(0.05)], this_cluster_ax_right.get_ylim(),
'--', zorder=-2, lw=1.5, c=[.4, .0, .0])
this_cluster_ax_right.set_title(this_cluster[1], color='k', rotation=-90, x=1.2, y=0.55,
fontsize=20)
plt.subplots_adjust(wspace=0.8, hspace=1.)
# plt.tight_layout()
if savedir is not None:
fname = '{}_{}_time_x_freq_grid.pdf'.format(self.subject, channel_str.replace('/', '-'))
fname = os.path.join(savedir, fname)
fig.savefig(fname, bbox_inches='tight')
return fname
def compute_hilbert_at_single_band(eeg, freq_band, buffer_len):
# band pass eeg
# makes sure to pass in a list not an array because wtf PTSA
band_eeg = bri.band_pass_eeg(eeg, freq_band.tolist() if isinstance(freq_band, np.ndarray) else freq_band).squeeze()
# run hilbert to get the complexed valued result
complex_hilbert_res = hilbert(band_eeg.data, N=band_eeg.shape[-1], axis=-1)
# get phase at each timepoint
phase_data = band_eeg.copy()
phase_data.data = np.angle(complex_hilbert_res)
# phase_data = phase_data.remove_buffer(buffer_len)
phase_data.coords['frequency'] = np.mean(freq_band)
# and power
power_data = band_eeg.copy()
power_data.data = np.log10(np.abs(complex_hilbert_res) ** 2)
# power_data = power_data.remove_buffer(buffer_len)
power_data.coords['frequency'] = np.mean(freq_band)
return power_data, phase_data
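# Example usage (hypothetical band edges; any (low, high) pair in Hz works):
#     power, phase = compute_hilbert_at_single_band(eeg, [4., 8.], buffer_len)
# power carries log10 band power, phase the instantaneous angle, and both are
# tagged with the band's mean frequency as a coordinate.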
def compute_wavelet_at_single_freq(eeg, freq, buffer_len):
# compute phase
data = MorletWaveletFilter(eeg,
np.array([freq]),
output=['power', 'phase'],
width=5,
cpus=12,
verbose=False).filter()
# remove the buffer from each end
# data = data.remove_buffer(buffer_len)
return data.squeeze()
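# The buffer is deliberately left attached here (note the commented-out
# remove_buffer call); downstream stats strip it after any further filtering.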
def _compute_spike_phase_by_freq(spike_rel_times, phase_bin_start, phase_bin_stop, phase_data, events):
# only will count samples the occurred within window defined by phase_bin_start and _stop
valid_samps = np.where((phase_data.time > phase_bin_start) & (phase_data.time < phase_bin_stop))[0]
# throw out novel items that were never repeated
good_events = events[~((events['isFirst']) & (events['lag'] == 0))].index.values
# will grow as we iterate over spikes in each condition.
phases = []
for (index, e), spikes, phase_data_event in zip(events.iterrows(), spike_rel_times, phase_data):
phases_event = []
if index in good_events:
if len(spikes) > 0:
valid_spikes = spikes[np.in1d(spikes, valid_samps)]
if len(valid_spikes) > 0:
phases_event = phase_data_event[valid_spikes].data
phases.append(phases_event)
# will be number of spikes x frequencies
# if len(novel_phases) > 0:
# novel_phases = np.vstack(novel_phases)
# else:
# novel_phases = np.array(novel_phases)
# if len(rep_phases) > 0:
# rep_phases = np.vstack(rep_phases)
# else:
# rep_phases = np.array(rep_phases)
return np.array(phases)
def run_phase_stats(spike_phases, events, events_to_keep, event_filter_grp):
novel_phases = np.array([])
rep_phases = np.array([])
# get the novel and repeated spike phases for this event condition. Some events have no
# spikes, so filter those out
spike_phase_cond = spike_phases[events_to_keep]
if np.any(events[events_to_keep].isFirst):
novel_phases = spike_phase_cond[events[events_to_keep].isFirst]
novel_phases = novel_phases[np.array([len(x) > 0 for x in novel_phases])]
if novel_phases.shape[0] == 0:
novel_phases = []
else:
novel_phases = np.vstack(novel_phases)
if np.any(~events[events_to_keep].isFirst):
rep_phases = spike_phase_cond[~events[events_to_keep].isFirst]
rep_phases = rep_phases[np.array([len(x) > 0 for x in rep_phases])]
if rep_phases.shape[0] == 0:
rep_phases = []
else:
rep_phases = np.vstack(rep_phases)
if (len(novel_phases) > 0) & (len(rep_phases) > 0):
p_novel, z_novel, p_rep, z_rep, ww_pvals, ww_fstat, med_pvals, med_stat, p_kuiper, \
stat_kuiper = _compute_novel_rep_spike_stats(novel_phases, rep_phases)
else:
p_novel = z_novel = p_rep = z_rep = ww_pvals = ww_fstat = med_pvals \
= med_stat = p_kuiper = stat_kuiper = np.array([np.nan])
event_filter_grp.create_dataset('p_novel', data=p_novel)
event_filter_grp.create_dataset('z_novel', data=z_novel)
event_filter_grp.create_dataset('p_rep', data=p_rep)
event_filter_grp.create_dataset('z_rep', data=z_rep)
event_filter_grp.create_dataset('ww_pvals', data=ww_pvals)
event_filter_grp.create_dataset('ww_fstat', data=ww_fstat)
    event_filter_grp.create_dataset('med_pvals', data=med_pvals)
    event_filter_grp.create_dataset('med_stat', data=med_stat)
event_filter_grp.create_dataset('p_kuiper', data=p_kuiper)
event_filter_grp.create_dataset('stat_kuiper', data=stat_kuiper)
event_filter_grp.create_dataset('rep_phases', data=rep_phases)
event_filter_grp.create_dataset('novel_phases', data=novel_phases)
return (len(novel_phases) > 0) & (len(rep_phases) > 0)
def _compute_novel_rep_spike_stats(novel_phases, rep_phases):
# compute rayleigh test for each condition
p_novel, z_novel = pycircstat.rayleigh(novel_phases, axis=0)
p_rep, z_rep = pycircstat.rayleigh(rep_phases, axis=0)
# test whether the means are different
ww_pvals, ww_tables = pycircstat.watson_williams(novel_phases, rep_phases, axis=0)
ww_fstat = np.array([x.loc['Columns'].F for x in ww_tables])
# test whether the medians are different
med_pvals, med_stat = pycircstat.cmtest(novel_phases, rep_phases, axis=0)
    # finally run kuiper test for difference in mean and/or dispersion
p_kuiper, stat_kuiper = pycircstat.kuiper(novel_phases, rep_phases, axis=0)
return p_novel, z_novel, p_rep, z_rep, ww_pvals, ww_fstat, med_pvals, med_stat, p_kuiper, stat_kuiper
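# Every test above runs along axis 0 (across spikes), so each returned array
# holds one value per frequency/band column of the input phase matrices.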
def run_novelty_effect(eeg_channel, power_freqs, buffer, grp, parallel=None, key_suffix='', save_to_file=False):
f = compute_lfp_novelty_effect
if parallel is None:
memory_effect_channel = []
for freq in power_freqs:
memory_effect_channel.append(f(eeg_channel, freq, buffer))
else:
memory_effect_channel = parallel((delayed(f)(eeg_channel, freq, buffer) for freq in power_freqs))
# phase_data = xarray.concat([x[4] for x in memory_effect_channel], dim='frequency').transpose('event', 'time', 'frequency')
phase_data = xarray.concat([x[2] for x in memory_effect_channel], dim='frequency').transpose('event', 'time',
'frequency')
if save_to_file:
fname = grp.file.filename
pd.concat([x[0] for x in memory_effect_channel]).to_hdf(fname, grp.name + '/delta_z'+key_suffix)
pd.concat([x[1] for x in memory_effect_channel]).to_hdf(fname, grp.name + '/delta_t'+key_suffix)
# pd.concat([x[2] for x in memory_effect_channel]).to_hdf(fname, grp.name + '/delta_z_lag'+key_suffix)
# pd.concat([x[3] for x in memory_effect_channel]).to_hdf(fname, grp.name + '/delta_t_lag'+key_suffix)
return phase_data
def compute_novelty_stats(data_timeseries, buffer_len):
def compute_z_diff_lag(df):
novel = df[df.isFirst]
repeated = df[~df.isFirst]
cols = df.columns[~np.in1d(df.columns, ['lag', 'isFirst'])]
return novel[cols].mean() - repeated[cols].mean()
def compute_t_stat_lag(df):
novel = df[df.isFirst]
repeated = df[~df.isFirst]
cols = df.columns[~np.in1d(df.columns, ['lag', 'isFirst'])]
ts, ps = ttest_ind(novel[cols], repeated[cols], axis=0)
return pd.Series(ts.data, index=cols)
# remove the filler novel items (they were never repeated)
data = data_timeseries[~((data_timeseries.event.data['isFirst']) & (data_timeseries.event.data['lag'] == 0))]
# remove buffer
data = data.remove_buffer(buffer_len)
# log it
data.data = np.log10(data.data)
# then zscore across events
zdata = zscore(data, axis=0)
# split events into conditions of novel and repeated items
novel_items = data.event.data['isFirst']
# compute mean difference in zpower at each timepoint
zpower_diff = zdata[novel_items].mean(axis=0) - zdata[~novel_items].mean(axis=0)
df_zpower_diff = pd.DataFrame(pd.Series(zpower_diff, index=data.time)).T
# also compute t-stat at each timepoint
ts, ps = ttest_ind(zdata[novel_items], zdata[~novel_items], axis=0)
df_tstat_diff = pd.DataFrame(pd.Series(ts, index=data.time)).T
# create dataframe of results for easier manipulation for computing difference by lag
df = pd.DataFrame(data=zdata, columns=data.time)
df['lag'] = data.event.data['lag']
df['isFirst'] = novel_items
# df_lag_zpower_diff = df.groupby(['lag']).apply(compute_z_diff_lag)
# df_lag_tstat_diff = df.groupby(['lag']).apply(compute_t_stat_lag)
return df_zpower_diff, df_tstat_diff#, df_lag_zpower_diff, df_lag_tstat_diff
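# Zscoring across events (axis 0) puts every timepoint on a common scale, so
# the novel-minus-repeated differences above are expressed in SD units.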
def compute_lfp_novelty_effect(eeg, freq, buffer_len):
# compute the power first
if isinstance(freq, float):
power_data, phase_data = compute_wavelet_at_single_freq(eeg, freq, buffer_len)
else:
power_data, phase_data = compute_hilbert_at_single_band(eeg, freq, buffer_len)
freq = np.mean(freq)
# compute the novelty statistics
# df_zpower_diff, df_tstat_diff, df_lag_zpower_diff, df_lag_tstat_diff = compute_novelty_stats(power_data)
df_zpower_diff, df_tstat_diff = compute_novelty_stats(power_data, buffer_len)
# add the current frequency to the dataframe index
df_zpower_diff.set_index(pd.Series(freq), inplace=True)
df_zpower_diff.index.rename('frequency', inplace=True)
df_tstat_diff.set_index(pd.Series(freq), inplace=True)
df_tstat_diff.index.rename('frequency', inplace=True)
# n_rows = df_lag_tstat_diff.shape[0]
# index = pd.MultiIndex.from_arrays([df_lag_zpower_diff.index, np.array([freq] * n_rows)], names=['lag', 'frequency'])
# df_lag_zpower_diff.index = index
# index = pd.MultiIndex.from_arrays([df_lag_tstat_diff.index, np.array([freq] * n_rows)], names=['lag', 'frequency'])
# df_lag_tstat_diff.index = index
# return df_zpower_diff, df_tstat_diff, df_lag_zpower_diff, df_lag_tstat_diff, phase_data
return df_zpower_diff, df_tstat_diff, phase_data
def compute_novelty_stats_without_contrast(data_timeseries, baseline_bool=None):
# remove the filler novel items (they were never repeated)
data = data_timeseries[~((data_timeseries.event.data['isFirst']) & (data_timeseries.event.data['lag'] == 0))]
# determine the mean and std of the baseline period for normalization
# if baseline bool is not given, use all timepoints before 0
if baseline_bool is None:
baseline_bool = data.time.values < 0
baseline_data = data[:, baseline_bool].mean(dim='time')
m = np.mean(baseline_data)
s = np.std(baseline_data)
# compute the zscored data
zdata = (data - m) / s
# pull out the data for each condition
novel_items = data.event.data['isFirst']
zdata_novel = zdata[novel_items]
zdata_repeated = zdata[~novel_items]
# run stats at each timepoint
ts, ps = ttest_ind(zdata_novel, zdata_repeated, axis=0)
# return the statistics and the mean of each condition
zdata_novel_mean = np.mean(zdata_novel, axis=0)
zdata_novel_sem = sem(zdata_novel, axis=0)
zdata_repeated_mean = np.mean(zdata_repeated, axis=0)
zdata_repeated_sem = sem(zdata_repeated, axis=0)
return zdata_novel_mean, zdata_repeated_mean, zdata_novel_sem, zdata_repeated_sem, ts, ps
| [
"[email protected]"
] | |
17739918d8f1c5cc8f413494bccfd308568c32fe | 79df1aa5db9fc14f8c9a84a99605c554995dae90 | /ML_builders/keywords_extraction_update_db.py | eec8ebfa73f7ad16d5c3827f5c7bf0fb9ce162eb | [] | no_license | COVID-19-Text-Mining/DBProcessingScripts | 920dc225ff19352e4c9626fb6125398b102e49ef | 6a81ce4bf89ab6a0e6e5501b514d564a560f3ba1 | refs/heads/master | 2022-10-10T14:36:41.470078 | 2021-04-01T20:44:52 | 2021-04-01T20:44:52 | 249,071,379 | 0 | 4 | null | 2022-09-30T20:12:37 | 2020-03-21T22:23:59 | Python | UTF-8 | Python | false | false | 1,900 | py | import json
from pprint import pprint
from IndependentScripts.common_utils import get_mongo_db
from keywords_extraction import KeywordsExtractorBase
from keywords_extraction import KeywordsExtractorNN
def extract_keywords_in_entries(mongo_db):
processed_ids = set()
extractor = KeywordsExtractorNN(
only_extractive=True,
use_longest_phrase=True,
)
col_name = 'entries'
col = mongo_db[col_name]
query = col.find(
{
"doi": {"$exists": True},
"abstract": {"$exists": True},
},
{
'_id': True,
'doi': True,
'abstract': True,
},
no_cursor_timeout=True
)
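    # no_cursor_timeout keeps the server-side cursor alive for the long scan;
    # the projection restricts each returned document to _id, doi, and abstract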
total_num = query.count()
print('query.count()', total_num)
for i, doc in enumerate(query):
if i%1000 == 0:
print('extract_keywords_in_entries: {} out {}'.format(i, total_num))
if str(doc['_id']) in processed_ids:
continue
if doc['abstract']:
abstract = KeywordsExtractorBase().clean_html_tag(doc['abstract'])
            try:
                keywords = extractor.process(abstract)
            except Exception:
                print('Error')
                print('doi:', doc.get('doi'))
                print(abstract)
                continue
# update in db
col.find_one_and_update(
{"_id": doc['_id']},
{
"$set": {
'keywords_ML': keywords,
},
}
)
processed_ids.add(str(doc['_id']))
if len(processed_ids) % 1000 == 0:
with open('../scratch/processed_ids.json', 'w') as fw:
json.dump(list(processed_ids), fw, indent=2)
if __name__ == '__main__':
db = get_mongo_db('../config.json')
print(db.collection_names())
extract_keywords_in_entries(db) | [
"[email protected]"
] | |
17e7cf7e865a2081c5c483f80afa767141dfae12 | e85e697cadabafca4c5853a30ba7851ce8925d75 | /RandomForest/eval.py | 161df5d1923b0aa13cb201b521d6637a4e370697 | [
"MIT"
] | permissive | euirim/fundraising-propensity | 811c919a381ca23610ec1c66a41e22034569742e | 33b01b04e4ef8cae10a3224b98792049e6a5fcd7 | refs/heads/master | 2022-11-25T12:26:49.366530 | 2019-12-14T03:16:22 | 2019-12-14T03:16:22 | 216,437,345 | 0 | 0 | MIT | 2022-11-22T04:51:31 | 2019-10-20T22:45:28 | Jupyter Notebook | UTF-8 | Python | false | false | 213 | py | import pickle
pickle_off = open('data/y_pred.pkl', "rb")
y_pred = pickle.load(pickle_off)
pickle_off = open('data/y_test.pkl', "rb")
y_test = pickle.load(pickle_off)
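# y_pred and y_test were pickled earlier (presumably by the training step);
# this script just prints the held-out labels for a quick manual check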
for i in y_test:
print(i)
print("Done") | [
"[email protected]"
] | |
d04fc23f52eaaeb5f6834499c8a6eee5855907f6 | 3e39c0ae9ea2a438a6353e10aaf24afd794278b5 | /team4.py | 46ef7f0b24fac36608738705223d1fd74ad6ad66 | [] | no_license | prathyakshun/Extreme_TIc_Tac_Toe_Bot | 724f2d4c641bd24155362a92a43ddf462f05bd5f | f07bc479ee44c3b99db3ad701fc7d103279a9b58 | refs/heads/master | 2020-03-18T15:53:17.418913 | 2018-05-26T06:23:38 | 2018-05-26T06:23:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,597 | py | import random
from time import time
import copy
INF = 1e10
class Team4:
def __init__(self):
self.time_limit = 15
self.max_depth = 100
self.time_out = 0
self.depth = 6
self.cell_weight = [6, 4, 4, 6, 4, 3, 3, 4, 4, 3, 3, 4, 6, 4, 4, 6]
self.mapping = {'x':1, 'o':-1, 'd':0, '-':0}
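        # cell_weight scores the 16 cells of a 4x4 block (corners weighted
        # highest); mapping turns board symbols into +1 ('x'), -1 ('o'), or 0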
def move(self, board, old_move, flag):
self.time_start = time()
self.time_out = 0
ret = random.choice(board.find_valid_move_cells(old_move))
for i in xrange(3,self.max_depth+1):
self.depth = i
ret = self.alpha_beta(board, -INF, INF, 0, old_move, flag)
if self.time_out == 1:
break
print board.print_board()
print ret
return ret[1][0], ret[1][1]
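    # move() runs iterative deepening: each pass re-searches one ply deeper
    # until the 15s time budget runs out, keeping the most recent result.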
def alpha_beta(self, board, alpha, beta, depth, old_move, flag):
available_cells = board.find_valid_move_cells(old_move)
random.shuffle(available_cells)
# print available_cells
if (flag == 'x'):
tmp = copy.deepcopy(board.block_status)
ans = -INF, available_cells[0]
for cell in available_cells:
if (time() - self.time_start >= self.time_limit):
self.time_out = 1
break
# print cell
board.update(old_move, cell, flag)
# print "--------------------------------------------------BEFORE-----------------------------------------"
# print board.print_board()
if board.find_terminal_state()[0] == 'o':
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
continue;
elif board.find_terminal_state()[0] == 'x':
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
ans = INF, cell
return ans
elif (depth >= self.depth and len(board.find_valid_move_cells(old_move)) > 0):
ret = self.heuristic(board, old_move)
elif (depth < self.depth):
ret = self.alpha_beta(board, alpha, beta, depth+1, cell, 'o')
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
# print "--------------------------------------------------AFTER-----------------------------------------"
# print board.print_board()
if (ret > ans[0]):
ans = ret, cell
if (ans[0] >= beta):
break
alpha = max(alpha, ans[0])
return ans
elif (flag == 'o'):
tmp = copy.deepcopy(board.block_status)
ans = INF, available_cells[0]
for cell in available_cells:
if (time() - self.time_start >= self.time_limit):
self.time_out = 1
break
# print cell
board.update(old_move, cell, flag)
# print "--------------------------------------------------BEFORE-----------------------------------------"
# print board.print_board()
if board.find_terminal_state()[0] == 'x':
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
continue;
elif board.find_terminal_state()[0] == 'o':
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
                ans = -INF, cell
return ans
elif (depth >= self.depth and len(board.find_valid_move_cells(old_move)) > 0):
ret = self.heuristic(board, old_move)
elif (depth < self.depth):
ret = self.alpha_beta(board, alpha, beta, depth+1, cell, 'x')
board.board_status[cell[0]][cell[1]] = '-'
board.block_status = copy.deepcopy(tmp)
# print "--------------------------------------------------AFTER-----------------------------------------"
# print board.print_board()
if (ret < ans[0]):
ans = ret, cell
if (ans[0] <= alpha):
break
beta = min(beta, ans[0])
return ans
def heuristic(self, board, old_move):
goodness = 0
goodness += self.calc_single_blocks(board, old_move)
        goodness += self.calc_as_whole(board)
return goodness
def calc_single_blocks(self, board, old_move):
block_goodness = 0
for i in xrange(4):
for j in xrange(4):
                block_goodness += self.calc_per_block(board, old_move, i, j)
        return block_goodness
def calc_per_block(self, board, old_move, block_x, block_y):
# For checking how good a row/col is
        row_weight = [10, 10, 10, 10]
        col_weight = [10, 10, 10, 10]
for i in xrange(4):
            for j in xrange(4):
mapping_val = self.mapping[board.board_status[4*block_x+i][4*block_y+j]]
# row_weight += mapping_val * self.cell_weight probably will only help in case of overall block
if (mapping_val != 0):
                if (row_weight[i] != 0):
                    row_weight[i] += mapping_val * 10
                if (col_weight[j] != 0):
                    col_weight[j] += mapping_val * 10
if (mapping_val == -1):
row_weight[i] = 0
col_weight[j] = 0
        row_weight = [3 * w for w in row_weight]
        col_weight = [3 * w for w in col_weight]
# For checking how good diamond state is
        diamond1 = diamond2 = diamond3 = diamond4 = 0
if board.board_status[4*block_x+1][4*block_y] == 1 and board.board_status[4*block_x][4*block_y+1] == 1:
if (board.board_status[4*block_x+1][4*block_y+2] == 1 and board.board_status[4*block_x+2][4*block_y+1] == 1):
diamond1 =
def calc_as_whole(self, board):
# For checking how good a row/col is
        row_weight = [10, 10, 10, 10]
        col_weight = [10, 10, 10, 10]
for i in xrange(4):
            for j in xrange(4):
                mapping_val = self.mapping[board.block_status[i][j]]
if (mapping_val != 0):
                if (row_weight[i] != 0):
                    row_weight[i] += mapping_val * self.cell_weight[4*i+j] # probably will only help in case of overall block
                    row_weight[i] += mapping_val * 10
                if (col_weight[j] != 0):
                    col_weight[j] += mapping_val * self.cell_weight[4*i+j]
                    col_weight[j] += mapping_val * 10
if (mapping_val == -1):
row_weight[i] = 0
col_weight[j] = 0
        row_weight = [3 * w for w in row_weight]
        col_weight = [3 * w for w in col_weight]
# For checking how good diamond state is | [
"[email protected]"
] | |
47c291c064e807e68a7f1ef58a24677a9fdc71c4 | a9a86dd97ac152825657571f1e94ffe01811fffe | /assets/migrations/0001_initial.py | 9e6e6ed56620e4f03efb4811e32e3f724c4a12ee | [] | no_license | Dawson0x00/AssetManage | 359ee269f2b9d88639c486093e81a19cd4696532 | 777ee250addc6465f655a6a4b9041701f6235268 | refs/heads/master | 2022-04-03T06:06:49.067648 | 2020-02-14T14:11:50 | 2020-02-14T14:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,943 | py | # Generated by Django 2.2 on 2020-01-13 13:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CSP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('csp_type', models.CharField(choices=[('AliCloud', '阿里云'), ('AWS', '亚马逊云'), ('Azure', '微软云')], default='aliCloud', max_length=20, verbose_name='云服务供应商')),
],
options={
'verbose_name': '云服务供应商',
'verbose_name_plural': '云服务供应商',
},
),
migrations.CreateModel(
name='OSType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('OSType', models.CharField(choices=[('Windows', 'Windows'), ('Linux', 'Linux')], default='Linux', max_length=20, verbose_name='操作系统类型')),
('OSVersion', models.CharField(blank=True, max_length=64, null=True, unique=True, verbose_name='操作系统版本')),
],
options={
'verbose_name': '操作系统类型',
'verbose_name_plural': '操作系统类型',
},
),
migrations.CreateModel(
name='Owner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('OwnerName', models.CharField(max_length=30, verbose_name='Owner_Name')),
('OwnerNum', models.CharField(max_length=7, verbose_name='Owner_Num')),
],
options={
'verbose_name': '负责人',
'verbose_name_plural': '负责人',
},
),
migrations.CreateModel(
name='Port',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PortNum', models.IntegerField(verbose_name='端口号')),
],
options={
'verbose_name': '端口列表',
'verbose_name_plural': '端口列表',
},
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ServerName', models.CharField(blank=True, max_length=64, null=True, verbose_name='服务器名称')),
('PublicIP', models.GenericIPAddressField(blank=True, null=True, protocol='IPv4', verbose_name='公网IPV4地址')),
('PrivateIP', models.GenericIPAddressField(blank=True, null=True, protocol='IPv4', verbose_name='私网IPV4地址')),
('CSPID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.CSP', verbose_name='云服务供应商')),
('OSTID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.OSType', verbose_name='操作系统类型')),
('OwnerID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Owner', verbose_name='负责人')),
],
options={
'verbose_name': '服务器列表',
'verbose_name_plural': '服务器列表',
},
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ServiceName', models.CharField(max_length=64, verbose_name='服务名称')),
],
options={
'verbose_name': '服务列表',
'verbose_name_plural': '服务列表',
},
),
migrations.CreateModel(
name='ServerPort',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PID', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Port', verbose_name='端口')),
('SCID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Service', verbose_name='Service')),
('SID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='assets.Server', verbose_name='Server')),
],
options={
'verbose_name': '服务器端口',
'verbose_name_plural': '服务器端口',
},
),
]
| [
"[email protected]"
] | |
c6fa9206cc5ff0c2d394edb90d2809935d172d50 | cefdf82a0f9b802e006c19c85f1157bc1012dec3 | /src/model_dispatcher.py | a18824f7ab365e8483623be88f0f7194139ba354 | [] | no_license | hazim111/Categorical-Feature-Encoding-Challenge | 465829b256c648f8f33f1b13f36af880b5eabb47 | 26482165ee576516c8b924714b2259d37e1115cb | refs/heads/main | 2023-01-23T10:31:43.872954 | 2020-11-10T10:07:57 | 2020-11-10T10:07:57 | 311,287,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from sklearn import ensemble
MODELS = {
"randomforest": ensemble.RandomForestClassifier(n_estimators=100, n_jobs=-1, verbose=2),
"extratrees": ensemble.ExtraTreesClassifier(n_estimators=200, n_jobs=-1, verbose=2)
} | [
"[email protected]"
] | |
62018629232c1e237cabe676ac0d9f85c5b74caf | 83d4de4a66526891728f77cfcb31f3bbb1b70757 | /Training/5.python/DAY3/tik_tok/tik_menu.py | cff9a8494c8eb4b6119489a926af044ab740b4d4 | [] | no_license | sd-karthik/karthik-repo | dd2a0689bf75d90121e0d27c56a1a11b6325f470 | 5c37b943f8d1bc44a18a1d78cdbeaf22df0fae38 | refs/heads/master | 2021-01-09T06:22:50.568636 | 2017-03-19T06:54:51 | 2017-03-19T06:54:51 | 80,975,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # FILE Name : tik_menu.py
# Function : tik-tok game implementation
import os
lst1 = [['*','*','*'],['*','*','*'],['*', '*','*']]
#choice = input("Choose the player\n 1. Player_1 (0)\n2. Player_2 (1)\n")
#if choice == 1:
def matrix():
# os.system("clear")
for i in lst1:
print "\t", i[0]," ",i[1]," ",i[2],"\n"
def compare(pos):
f_row = pos/3
print f_row
for i in lst1[f_row]:
f_pos = pos%3
if i[f_pos] == '*':
i[pos] = 0
else:
print "Already entered"
def valid(pos):
if pos < 0 and pos > 9:
print pos
print "Enter valid input"
pos = input("PLAYER1 : Enter the position:\n")
valid(pos)
matrix()
pos = input("PLAYER1 : Enter the position:\n")
valid(pos)
#compare(pos)
matrix()
| [
"[email protected]"
] | |
9393de465090e8eca76d6d35fe72c3f91592711c | e61d07975a64d95eb41e9aef88eff631be2d1e9b | /projects/flask_blueprint/jobcn/company/__init__.py | b00674aa8cf350cd8eca19a50acb27818185d0a7 | [] | no_license | yuhaoboris/LearnPythonTheHardWay | 4375c916fc1d10b9d73c2f3e7aa608c5912f05fa | 771d7f5c533109d14049f88bc6ecc03e5e1eeeca | refs/heads/master | 2021-05-31T09:02:45.983210 | 2016-04-22T09:33:32 | 2016-04-22T09:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from flask import Blueprint
company = Blueprint(
'company',
__name__,
template_folder='company_templates'
)
from . import views
| [
"[email protected]"
] | |
652f47df5507cc851df7d349a9387a2aaa905013 | 36bc466d22e9eaf2056b3dd0d44fc307046d792e | /HW5/config.py | b7ba2469fee655804e3005064a90bfec2e5d38b7 | [] | no_license | jtanwk/capp30254 | 05ff8a168187fb968e02d8222801400f7a756a21 | 3e9cea4d167a4a49a5dbc3934a6fd4fddd2107f3 | refs/heads/master | 2022-07-10T19:42:09.399914 | 2019-06-02T07:59:55 | 2019-06-02T07:59:55 | 179,363,081 | 0 | 0 | null | 2022-06-21T22:02:34 | 2019-04-03T20:13:22 | Jupyter Notebook | UTF-8 | Python | false | false | 3,869 | py | # CAPP 30254 Machine Learning for Public Policy
# Homework 5 - Improving the Pipeline
# Pipeline Configuration file
######################
# 1. READ/WRITE DATA #
######################
# Filepath where input data is stored
DATA_PATH = 'data/projects_2012_2013.csv'
# Filepath where trained classifiers are stored
CLASSIFIER_PATH = 'output/trained_classifiers.pkl'
# Filepath where cleaned test/train data are stored
TEST_TRAIN_PATH = 'output/test_train_clean.pkl'
# Identifying column of interest
LABEL = 'not_funded_60_days'
DATE_COL = 'date_posted'
########################
# 3. TEST/TRAIN SPLITS #
########################
# Dates for temporal test/train splits
TEMPORAL_SPLITS = [
{
'train_start': '1/1/2012',
'train_end': '6/30/2012',
'test_start': '7/1/2012',
'test_end': '12/31/2012'
},
{
'train_start': '1/1/2012',
'train_end': '12/31/2012',
'test_start': '1/1/2013',
'test_end': '6/30/2013'
},
{
'train_start': '1/1/2012',
'train_end': '6/30/2013',
'test_start': '7/1/2013',
'test_end': '12/31/2013'
},
]
#######################
# 5. BUILD CLASSIFIER #
#######################
# Large grid - most exhaustive option
GRID_MAIN = {
'classifiers': ['LogisticRegression', 'KNeighborsClassifier',
'DecisionTreeClassifier', 'LinearSVC',
'RandomForestClassifier', 'AdaBoostClassifier',
'BaggingClassifier'],
'thresholds': [0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 0.5, 1],
'LogisticRegression': [
{'penalty': x, 'C': y, 'solver': 'liblinear', 'random_state': 0} \
for x in ('l1', 'l2') \
for y in (0.01, 0.1, 1, 10, 100) \
],
'KNeighborsClassifier': [
{'n_neighbors': x, 'weights': y, 'algorithm': z, 'n_jobs': -1} \
for x in (5, 10, 50) \
for y in ('uniform', 'distance') \
for z in ('auto', 'ball_tree', 'kd_tree')
],
'DecisionTreeClassifier': [
{'max_depth': x, 'max_features': y, 'min_samples_leaf': z,
'random_state': 0} \
for x in (5, 10, 50) \
for y in ('sqrt', 'log2', None) \
for z in (5, 10)
],
'LinearSVC': [
{'penalty': 'l2', 'C': x, 'random_state': 0} \
for x in (0.01, 0.1, 1, 10, 100)
],
'RandomForestClassifier': [
{'n_estimators': x, 'max_depth': y, 'max_features': z,
'random_state': 0, 'n_jobs': -1} \
for x in (10, 100, 1000) \
for y in (5, 10, 50) \
for z in ('sqrt', 'log2')
],
'AdaBoostClassifier': [
{'n_estimators': x, 'algorithm': y, 'random_state': 0} \
for x in (10, 100, 1000) \
for y in ('SAMME', 'SAMME.R')
],
'BaggingClassifier': [
{'n_estimators': x, 'random_state': 0, 'n_jobs': -1} \
for x in (10, 100, 1000)
]
}
# Test grid to make sure everything works - 1 model per classifier
GRID_TEST = {
'classifiers': ['LogisticRegression', 'DecisionTreeClassifier'],
'thresholds': [0.5],
'LogisticRegression': [
{'penalty': 'l2', 'C': 1, 'solver': 'liblinear', 'random_state': 0}
],
'KNeighborsClassifier': [
{'n_neighbors': 1, 'weights': 'uniform', 'algorithm': 'auto',
'n_jobs': -1}
],
'DecisionTreeClassifier': [
{'max_depth': 1, 'max_features': 'sqrt', 'min_samples_leaf': 1,
'random_state': 0}
],
'LinearSVC': [
{'penalty': 'l2', 'C': 1, 'random_state': 0}
],
'RandomForestClassifier': [
{'n_estimators': 10, 'max_depth': 1, 'max_features': 10,
'random_state': 0, 'n_jobs': -1}
],
'AdaBoostClassifier': [
{'n_estimators': 10, 'algorithm': 'SAMME.R', 'random_state': 0}
],
'BaggingClassifier': [
{'n_estimators': 10, 'random_state': 0, 'n_jobs': -1}
]
}
| [
"[email protected]"
] | |
2bd7aacef0b5f06ea9fbbaf4f6b6fc36ab09b27c | 49bfa101bbb5ed35f9c8282ddb352ebbbfcc47cc | /DeepLearning_NeuralNetwork/ConvNets/Block.py | 523f0cfe1ee42dd2b193f3660d5ca31808720a4c | [] | no_license | sudhirkk/ML_Projects | de422ff39b7c69e54fd0a7c78f41a9628826d602 | 8acfd430cf219818fe33c08456411d9d6954c95b | refs/heads/master | 2023-04-22T14:05:43.548925 | 2021-05-04T18:27:03 | 2021-05-04T18:27:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | class Block(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size=3, stride=1):
super(Block, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.kernel_size = kernel_size
self.stride = stride
# This block contains a convolutional layer
# then a batch normalization layer
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride)
self.bn = nn.BatchNorm2d(out_channel)
return
def forward(self, x):
# passes the input image through a convolutional layer
# followed by a batch normalization layer and relu transition
out = F.relu(self.bn(self.conv(x)))
return out
# Create a block that is made up of 10 filters size 3x3 and stride 1
# The block takes in a image of 1 channel
Block1 = Block(1, 10, 3, 1)
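# assumes the usual imports at the top of the module, e.g.:
#   import torch
#   from torch import nn
#   import torch.nn.functional as F
# and that X is a batch of 1-channel images, e.g. X = torch.randn(64, 1, 28, 28)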
out = Block1(X)
# After passing our image through block 1
# we get the following tensor
print(out.shape)
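# e.g. torch.Size([64, 10, 26, 26]) for 28x28 inputs: a 3x3 kernel with
# stride 1 and no padding trims one pixel from each border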
| [
"[email protected]"
] | |
7186bad46805eb47e6211c9419da1e7e919c2d4c | 887f2e664c6d92f17e784f57022333a2fb859d06 | /analysis/movement/simulations/topo.py | 1061723ff603246df35486ce122f83a75a02f3f3 | [] | no_license | ctorney/dolphinUnion | 1968e258c6045060b2c921bd723d0ef0daea0147 | 9d7212d172a8a48a36fc4870fcdb04d66130bb76 | refs/heads/master | 2021-01-19T04:40:57.286526 | 2017-08-17T20:44:58 | 2017-08-17T20:44:58 | 46,424,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import numpy as np
from math import pi, sin, cos, atan2
from math import *
import math
import matplotlib.pyplot as plt
import pandas as pd
outTracks = pd.DataFrame(columns= ['frame','x','y','heading','c_id'])
trackName = 'topo.csv'
# number of individuals
N=100
# set to random initial conditions on (0,1)x(0,1)
xpos = np.random.uniform(0.5,1,N)
ypos = np.random.uniform(0.5,1,N)
xpos = np.random.uniform(50,51,N)
ypos = np.random.uniform(50,51,N)
# set to random initial headings
heading = np.random.uniform(0,2*pi,N)
randvar = 0.1
# set speed individuals move
speed = 1
# run for this many time steps
TIMESTEPS = 2000
mvp = 3 # mean reversion parameter
mean_heading = 0;
sig = 0.5 # noise
dt = 1e-2 # time step
#t = np.arange(0,dt*2,dt) # % Time vector
#x0 = 0; #% Set initial condition
##rng(1); #% Set random seed
#W = np.zeros((len(t))); #% Allocate integrated W vector
#np.random.seed(0)
#for i in range(len(t)-1):
# W[i+1] = sqrt(exp(2*th*dt)-1)*np.random.normal()
#
#ex = np.exp(-th*t);
#x = x0*ex+mu*(1-ex)+sig*ex*W/sqrt(2*th);
#
#np.random.seed(0)
exp_mr = math.exp(-mvp*dt)
add_noise = sig*math.sqrt((math.exp(2*mvp*dt)-1)/(2*mvp))
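# exp_mr and add_noise are the exact one-step terms of an Ornstein-Uhlenbeck
# update for the heading: exponential decay toward the mean direction plus
# Gaussian noise with the matching one-step standard deviation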
repRad = 0.1
attTop = 2
align=0.25
socWeight = 2.5
def getHeading(i):
c_angle = heading[i] #xpos[i]*2*pi + ypos[i]*2*pi
socx = 0
socy = 0
asocx = 0
asocy = 0
xdiffs = xpos - xpos[i]
ydiffs = ypos - ypos[i]
dists = np.sqrt(xdiffs**2+ydiffs**2)
sortedJ = np.argsort(dists)
count= 0
for j in sortedJ:
if i==j:
continue
distij = dists[j]
if distij < repRad:
thisAngle = atan2(-ydiffs[j],-xdiffs[j])
socx = socx + cos(thisAngle)
socy = socy + sin(thisAngle)
else:
if count < attTop:
anglej = atan2(ydiffs[j],xdiffs[j])
anglej = atan2(sin(anglej - c_angle),cos(anglej - c_angle))
if anglej <0.7854 and anglej> -0.7854:
count=count+1
headj = heading[j]-c_angle
relx = cos(anglej)+align*cos(headj)
rely = sin(anglej)+align*sin(headj)
angle2 = atan2(rely,relx)
asocx = asocx + cos(angle2+c_angle)
asocy = asocy + sin(angle2+c_angle)
angle = 0
if socx!=0 or socy!=0:
soc_angle = atan2(socy,socx)
angle = atan2(sin(soc_angle),cos(soc_angle))
else:
if asocx!=0 or asocy!=0:
soc_angle = atan2(asocy,asocx)
angle = atan2(sin(soc_angle),cos(soc_angle))
return math.atan2(math.sin(angle),math.cos(angle))
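# getHeading: repulsion from any neighbour closer than repRad dominates;
# otherwise attraction toward (and alignment with) up to attTop nearest
# neighbours inside a ~+/-45 degree (0.7854 rad) field of view sets the angle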
# simulate individual movement
for t in range(TIMESTEPS):
print(t)
for i in range(N):
mean_heading = getHeading(i)
new_angle = math.atan2(math.sin(heading[i]-mean_heading),math.cos(heading[i]-mean_heading))
new_angle = mean_heading + exp_mr*(new_angle+add_noise*np.random.normal())
heading[i] =math.atan2(math.sin(new_angle),math.cos(new_angle))
#heading = heading + np.random.normal(0,randvar,N)
# individuals move in direction defined by heading with fixed speed
xpos = xpos + dt*speed*np.cos(heading)
ypos = ypos + dt*speed*np.sin(heading)
# boundary conditions are periodic
# xpos[xpos<0]=xpos[xpos<0]+1
# xpos[xpos>1]=xpos[xpos>1]-1
# ypos[ypos<0]=ypos[ypos<0]+1
# ypos[ypos>1]=ypos[ypos>1]-1
xpos[xpos<0]=xpos[xpos<0]+100
xpos[xpos>100]=xpos[xpos>100]-100
ypos[ypos<0]=ypos[ypos<0]+100
ypos[ypos>100]=ypos[ypos>100]-100
# plot the positions of all individuals
# plt.clf()
# plt.plot(xpos, ypos,'k.')
# plt.xlim([50,60])
# plt.ylim([45,55])
# plt.axes().set_aspect('equal')
# plt.draw()
# plt.pause(0.01)
if t>1000:
newcpos = pd.DataFrame(np.column_stack((np.full(N,t,dtype='int64'),xpos,ypos,heading,np.arange(0,N))), columns= ['frame','x','y','heading','c_id'])
outTracks = outTracks.append(newcpos,ignore_index=True )
outTracks.to_csv(trackName, index=False) | [
"[email protected]"
] | |
eb54622d41e0bc71b02a07090a87baf2db7404d7 | 73989f945d779ecb83c8aaf7b854e1f6239fdd39 | /fuzzy/adaptation-iris/main.py | 1b08e9690d9752b15a2d0d28f932832ac349b812 | [] | no_license | wayter95/neural-network-exercises | c364b71b9131dde650b2a8753fd79006ce622337 | 99b97d2d2aeb1ef45505e3b054b43a33a00d5933 | refs/heads/main | 2023-05-07T22:12:08.048943 | 2021-06-08T17:49:01 | 2021-06-08T17:49:01 | 375,084,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
cs = ctrl.Antecedent(np.arange(4,8), 'CS')
ls = ctrl.Antecedent(np.arange(2,5), 'LS')
cp = ctrl.Antecedent(np.arange(1,7), 'CP')
lp = ctrl.Antecedent(np.arange(0,3), 'LP')
classe = ctrl.Consequent(np.arange(0, 1.01, 0.01), 'classe')
cs['baixa'] = fuzz.trimf(cs.universe, [0, 0.25, 0.5])
cs['media'] = fuzz.trimf(cs.universe, [0.3,0.5,0.7])
cs['alta'] = fuzz.trimf(cs.universe, [0.6,0.8,1])
ls['baixa'] = fuzz.trimf(ls.universe, [0, 0.25, 0.5])
ls['media'] = fuzz.trimf(ls.universe, [0.3,0.5,0.7])
ls['alta'] = fuzz.trimf(ls.universe, [0.6,0.8,1])
cp['baixa'] = fuzz.trimf(cp.universe, [0, 0.25, 0.5])
cp['media'] = fuzz.trimf(cp.universe, [0.3,0.5,0.7])
cp['alta'] = fuzz.trimf(cp.universe, [0.6,0.8,1])
lp['baixa'] = fuzz.trimf(lp.universe, [0, 0.25, 0.5])
lp['media'] = fuzz.trimf(lp.universe, [0.3,0.5,0.7])
lp['alta'] = fuzz.trimf(lp.universe, [0.6,0.8,1])
classe['íris-versicolor'] = fuzz.trimf(classe.universe, [0, 0.25, 0.5])
classe['íris-setosa'] = fuzz.trimf(classe.universe, [0.3,0.5,0.7])
classe['íris-virginica'] = fuzz.trimf(classe.universe, [0.6,0.8,1])
cs.view()
ls.view()
cp.view()
lp.view()
classe.view()
# regras
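# skfuzzy combines antecedent terms with '&' (fuzzy AND) into a single premise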
regra1 = ctrl.Rule(cs['media'] & ls['alta'] & cp['media'] & lp['media'], classe['íris-versicolor'])
regra2 = ctrl.Rule(cs['baixa'] & ls['media'] & cp['baixa'] & lp['baixa'], classe['íris-setosa'])
regra3 = ctrl.Rule(cs['alta'] & ls['media'] & cp['alta'] & lp['media'], classe['íris-virginica'])
iris_ctrl = ctrl.ControlSystem([regra1, regra2, regra3])
engine1 = ctrl.ControlSystemSimulation(iris_ctrl)
engine2 = ctrl.ControlSystemSimulation(iris_ctrl)
# modelo 1
engine1.input['CS'] = 5.8
engine1.input['LS'] = 4.1
engine1.input['CP'] = 3.5
engine1.input['LP'] = 1.1
# modelo 2
engine2.input['CS'] = 7.8
engine2.input['LS'] = 4
engine2.input['CP'] = 6.7
engine2.input['LP'] = 2
engine1.compute()
engine2.compute()
print(engine1.output['classe'])
print(engine2.output['classe'])
classe.view(sim=engine1)
classe.view(sim=engine2) | [
"[email protected]"
] | |
242116a8e3514f4012a07dd058710361e5fd3378 | c1f56a01707563cbbd9784d803c015611e93aee8 | /kolibri/core/content/test/test_tasks.py | 718db2db875864bdb7fe37452fac56d21285e876 | [
"MIT"
] | permissive | swiftugandan/kolibri | 4b9a42232c27d83e1979ffc5a0172b1ccfb934db | c87008905afa785dce06e63e5189358abd5113cc | refs/heads/develop | 2022-08-03T22:50:23.008609 | 2022-07-22T09:04:54 | 2022-07-22T09:04:54 | 183,503,563 | 0 | 0 | MIT | 2019-04-25T20:14:53 | 2019-04-25T20:14:52 | null | UTF-8 | Python | false | false | 8,988 | py | import uuid
import mock
from django.test import TestCase
from rest_framework import serializers
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.tasks import ChannelResourcesValidator
from kolibri.core.content.tasks import ChannelValidator
from kolibri.core.content.tasks import LocalChannelImportValidator
from kolibri.core.content.tasks import RemoteChannelImportValidator
from kolibri.core.discovery.models import NetworkLocation
from kolibri.utils import conf
class ValidateContentTaskTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.channel_id = uuid.uuid4().hex
root = ContentNode.objects.create(
id=uuid.uuid4().hex,
title="kolibri_le_root",
channel_id=cls.channel_id,
content_id=uuid.uuid4().hex,
)
ChannelMetadata.objects.create(id=cls.channel_id, name="kolibri_le", root=root)
def test_missing_channel_id(self):
with self.assertRaises(serializers.ValidationError):
ChannelValidator(
data={
"type": "kolibri.core.content.tasks.remotecontentimport",
"channel_name": "test",
}
).is_valid(raise_exception=True)
def test_invalid_channel_id(self):
with self.assertRaises(serializers.ValidationError):
ChannelValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": "test",
"channel_name": "test",
}
).is_valid(raise_exception=True)
def test_missing_channel_name(self):
with self.assertRaises(serializers.ValidationError):
ChannelValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": self.channel_id,
}
).is_valid(raise_exception=True)
def test_wrong_node_ids_type(self):
with self.assertRaises(serializers.ValidationError):
ChannelResourcesValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": self.channel_id,
"channel_name": "test",
"node_ids": ["test"],
}
).is_valid(raise_exception=True)
def test_wrong_exclude_node_ids_type(self):
with self.assertRaises(serializers.ValidationError):
ChannelResourcesValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": self.channel_id,
"channel_name": "test",
"exclude_node_ids": ["test"],
}
).is_valid(raise_exception=True)
def test_returns_right_data(self):
include_id = uuid.uuid4().hex
exclude_id = uuid.uuid4().hex
validator = ChannelResourcesValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": self.channel_id,
"channel_name": "test",
"node_ids": [include_id],
"exclude_node_ids": [exclude_id],
}
)
validator.is_valid(raise_exception=True)
# The `task_data` is already correct so no changes should've been made.
self.assertEqual(
validator.validated_data,
{
"args": [self.channel_id],
"kwargs": {
"exclude_node_ids": [exclude_id],
"node_ids": [include_id],
},
"extra_metadata": {
"channel_id": self.channel_id,
"channel_name": "test",
},
},
)
class ValidateRemoteImportTaskTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create(name="pytest_facility")
cls.facility_user = FacilityUser.objects.create(
username="pytest_user", facility=cls.facility
)
cls.network_location = NetworkLocation.objects.create(
base_url="http://test.org"
)
def test_wrong_peer_id(self):
with self.assertRaises(serializers.ValidationError):
RemoteChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": uuid.uuid4().hex,
"channel_name": "test",
"peer": "test",
}
).is_valid(raise_exception=True)
@mock.patch("kolibri.core.content.tasks.NetworkClient")
def test_no_peer_id(self, network_client_mock):
channel_id = uuid.uuid4().hex
validator = RemoteChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": channel_id,
"channel_name": "test",
}
)
network_client_mock.return_value.base_url = conf.OPTIONS["Urls"][
"CENTRAL_CONTENT_BASE_URL"
]
validator.is_valid(raise_exception=True)
self.assertEqual(
validator.validated_data,
{
"args": [channel_id],
"extra_metadata": {
"channel_id": channel_id,
"channel_name": "test",
"peer_id": None,
},
"kwargs": {
"baseurl": conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"],
"peer_id": None,
},
},
)
@mock.patch("kolibri.core.content.tasks.NetworkClient")
def test_correct_peer_id(self, network_client_mock):
channel_id = uuid.uuid4().hex
validator = RemoteChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.remotechannelimport",
"channel_id": channel_id,
"channel_name": "test",
"peer": self.network_location.id,
}
)
network_client_mock.return_value.base_url = self.network_location.base_url
validator.is_valid(raise_exception=True)
self.assertEqual(
validator.validated_data,
{
"args": [channel_id],
"extra_metadata": {
"channel_id": channel_id,
"channel_name": "test",
"peer_id": self.network_location.id,
},
"kwargs": {
"baseurl": self.network_location.base_url,
"peer_id": self.network_location.id,
},
},
)
class ValidateLocalImportTaskTestCase(TestCase):
def test_wrong_drive_id(self):
with self.assertRaises(serializers.ValidationError):
LocalChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.localchannelimport",
"channel_id": uuid.uuid4().hex,
"channel_name": "test",
"drive_id": "test",
}
).is_valid(raise_exception=True)
def test_no_drive_id(self):
with self.assertRaises(serializers.ValidationError):
LocalChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.localchannelimport",
"channel_id": uuid.uuid4().hex,
"channel_name": "test",
}
).is_valid(raise_exception=True)
@mock.patch("kolibri.core.content.tasks.get_mounted_drive_by_id")
def test_correct_peer_id(self, mock_get_mounted_drive_by_id):
channel_id = uuid.uuid4().hex
drive_id = "test_id"
validator = LocalChannelImportValidator(
data={
"type": "kolibri.core.content.tasks.localchannelimport",
"channel_id": channel_id,
"channel_name": "test",
"drive_id": drive_id,
}
)
class drive(object):
datafolder = "kolibri"
mock_get_mounted_drive_by_id.return_value = drive
validator.is_valid(raise_exception=True)
self.assertEqual(
validator.validated_data,
{
"args": [channel_id, drive_id],
"extra_metadata": {
"channel_id": channel_id,
"channel_name": "test",
"drive_id": drive_id,
},
"kwargs": {},
},
)
| [
"[email protected]"
] | |
907377f7ea2c199b9511095ba160addc731a03bc | 3e900cb2d3cbb949b3050646ca6531f2bff86a84 | /core/Lib/codecs.py | cf82ccb4e0305b45d7291928fdd97fafe066088a | [
"Apache-2.0"
] | permissive | tuankien2601/python222 | e8043367d73d785c4281895f26487cb45b0ab812 | 205414c33fba8166167fd8a6a03eda1a68f16316 | refs/heads/master | 2021-06-21T04:47:38.784778 | 2017-08-14T10:02:33 | 2017-08-14T10:02:33 | 100,253,763 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,262 | py | # Portions Copyright (c) 2005 Nokia Corporation
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import struct, __builtin__
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError,\
'Failed to load the builtin codecs: %s' % why
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE"]
### Constants
#
# Byte Order Mark (BOM) and its possible values (BOM_BE, BOM_LE)
#
BOM = struct.pack('=H', 0xFEFF)
#
BOM_BE = BOM32_BE = '\376\377'
# corresponds to Unicode U+FEFF in UTF-16 on big endian
# platforms == ZERO WIDTH NO-BREAK SPACE
BOM_LE = BOM32_LE = '\377\376'
# corresponds to Unicode U+FFFE in UTF-16 on little endian
# platforms == defined as being an illegal Unicode character
#
# 64-bit Byte Order Marks
#
BOM64_BE = '\000\000\376\377'
# corresponds to Unicode U+0000FEFF in UCS-4
BOM64_LE = '\377\376\000\000'
# corresponds to Unicode U+0000FFFE in UCS-4
### Codec base classes (defining the API)
class Codec:
def encode(self, input, errors='strict'):
raise NotImplementedError
def decode(self, input, errors='strict'):
raise NotImplementedError
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
self.stream = stream
self.errors = errors
def write(self, object):
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
self.write(''.join(list))
def reset(self):
pass
def __getattr__(self, name,
getattr=getattr):
return getattr(self.stream, name)
###
class StreamReader(Codec):
def __init__(self, stream, errors='strict'):
self.stream = stream
self.errors = errors
def read(self, size=-1):
# Unsliced reading:
if size < 0:
return self.decode(self.stream.read(), self.errors)[0]
# Sliced reading:
read = self.stream.read
decode = self.decode
data = read(size)
i = 0
while 1:
try:
object, decodedbytes = decode(data, self.errors)
except ValueError, why:
# This method is slow but should work under pretty much
# all conditions; at most 10 tries are made
i = i + 1
newdata = read(1)
if not newdata or i > 10:
raise
data = data + newdata
else:
return object
def readline(self, size=None):
if size is None:
line = self.stream.readline()
else:
line = self.stream.readline(size)
return self.decode(line, self.errors)[0]
def readlines(self, sizehint=None):
if sizehint is None:
data = self.stream.read()
else:
data = self.stream.read(sizehint)
return self.decode(data, self.errors)[0].splitlines(1)
def reset(self):
pass
def __getattr__(self, name,
getattr=getattr):
return getattr(self.stream, name)
###
class StreamReaderWriter:
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
return getattr(self.stream, name)
###
class StreamRecoder:
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
if sizehint is None:
data = self.reader.read()
else:
data = self.reader.read(sizehint)
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
return getattr(self.stream, name)
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = __builtin__.open(filename, mode, buffering)
if encoding is None:
return file
(e, d, sr, sw) = lookup(encoding)
srw = StreamReaderWriter(file, sr, sw, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
if file_encoding is None:
file_encoding = data_encoding
encode, decode = lookup(data_encoding)[:2]
Reader, Writer = lookup(file_encoding)[2:]
sr = StreamRecoder(file,
encode, decode, Reader, Writer,
errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
### Helpers for codec lookup
def getencoder(encoding):
return lookup(encoding)[0]
def getdecoder(encoding):
return lookup(encoding)[1]
def getreader(encoding):
return lookup(encoding)[2]
def getwriter(encoding):
return lookup(encoding)[3]
### Helpers for charmap-based codecs
def make_identity_dict(rng):
res = {}
for i in rng:
res[i]=i
return res
def make_encoding_map(decoding_map):
m = {}
for k,v in decoding_map.items():
if not m.has_key(v):
m[v] = k
else:
m[v] = None
return m
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
import sys
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| [
"[email protected]"
] | |
e53a5359a67d02e0656b36d63bcba3b3e44b2b56 | cd23b0457bc02a60b89f1f52783e56cc36d85b5e | /thread/thread_condition.py | 6d246780edd0b81a190b55004966c83037b22cc5 | [] | no_license | cluo/learingPython | 65c7068613e1a2ae0178e23770503043d9278c45 | 54609288e489047d4dd1dead5ac142f490905f0e | refs/heads/master | 2020-04-01T13:04:15.981758 | 2015-02-23T13:21:31 | 2015-02-23T13:21:31 | 28,440,969 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by iFantastic on 15-2-14
__author__ = 'cluo'
import logging
import threading
import time
#consumer() must wait on the Condition before it can continue
#producer() sets up the condition and notifies the other threads to continue
logging.basicConfig(
level = logging.DEBUG,
format = '%(asctime)s (%(threadName)-2s) %(message)s'
)
def consumer(cond):
"""wait fro condition and use the resource"""
logging.debug('Starting consumer thread')
t = threading.currentThread()
    with cond: # the Condition uses a Lock internally
cond.wait()
        logging.debug('Resource is available to consumer')
def producer(cond):
"""set up the resource to be used by the consumer"""
logging.debug('Starting producer thread')
with cond:
logging.debug('Making resource available')
cond.notifyAll()
condition = threading.Condition()
c1 = threading.Thread(name = 'c1', target = consumer, args = (condition,))
c2 = threading.Thread(name = 'c2', target = consumer, args = (condition,))
p = threading.Thread(name = 'p', target = producer, args = (condition,))
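# both consumers start first and block in wait(); the producer then takes the
# condition's lock and notifyAll() wakes them together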
if __name__ == '__main__':
c1.start()
time.sleep(2)
c2.start()
time.sleep(2)
p.start()
| [
"[email protected]"
] | |
94e5e6c39be326578004189d03989bf0d64c771f | b038d58ff77367c52f1f9288a9c1ed3bc70846a7 | /bot.py | d549c737bc95b269bacd8b8055c07f1b6f795a2c | [] | no_license | TheTrueProblematic/TradingBot | f5a26a5fe6ae59658fc100305ee68290a454ffa2 | 9d9f157259e3c0a942a38e172a752ef3db1a0e4d | refs/heads/main | 2023-03-03T21:23:36.271194 | 2021-02-14T11:58:52 | 2021-02-14T11:58:52 | 337,211,885 | 0 | 1 | null | 2021-02-12T17:33:42 | 2021-02-08T21:16:18 | Python | UTF-8 | Python | false | false | 4,431 | py | import shrimpy
import time
from os import name
from subprocess import call
import sys
class bot:
def error_handler(self, err):
print(err)
class printBot(bot):
def __init__(self):
self.stream = False
self.clearLog()
def clear(self):
_ = call('clear' if name == 'posix' else 'cls')
self.logWrite('output.txt', '\n\n\n')
self.logWrite('log.txt', '\n\n\n')
def error_handler(self, err):
self.print("Error", err)
def titleCenter(self, text):
l = len(text)
sp = 36 - (l / 2)
ret = text
for i in range(int(sp)):
ret = "-" + ret + "-"
ret = "<" + ret + ">"
return ret
def center(self, text):
l = len(text)
sp = 37 - (l / 2)
ret = text
for i in range(int(sp)):
ret = " " + ret
return ret
def wrap(self, text):
l = len(text)
ret = ""
if l <= 37:
return self.center(text)
else:
while l > 0:
l = l - 37
tmp = text[:37]
text = text[37:]
ret = ret + self.center(tmp) + "\n"
return ret
def clearLog(self):
file = open("output.txt", "r+")
file.truncate(0)
file.close()
def logWrite(self, filename, content):
        # Note: mode 'w' truncates, so each call rewrites the file rather
        # than appending to it.
        original_stdout = sys.stdout  # save a reference to the original stdout
        with open(str(filename), 'w') as f:
            sys.stdout = f  # redirect stdout into the file
            print(str(content))  # this message is written to the file
            sys.stdout = original_stdout  # restore the original stdout
def fullPrint(self, content):
self.logWrite('output.txt', content)
self.logWrite('log.txt', content)
print(str(content))
def print(self, title, content):
if self.stream:
self.fullPrint(str(title)+": "+str(content))
else:
self.clear()
time.sleep(1)
full = self.titleCenter(str(title)) + "\n\n" + self.wrap(str(content))
self.fullPrint(full)
def startPrintStream(self):
self.clear()
time.sleep(1)
self.stream = True
def endPrintStream(self):
self.clear()
time.sleep(1)
self.stream = False
class pumpBot(printBot):
def __init__(self, exchange_public_key,exchange_secret_key, shrimpy_public_key, shrimpy_secret_key, user_id, account_id):
super().__init__()
self.ex_pub = exchange_public_key
self.ex_sec = exchange_secret_key
self.smp_pub = shrimpy_public_key
self.smp_sec = shrimpy_secret_key
self.user_id = user_id
self.account_id = account_id
self.client = shrimpy.ShrimpyApiClient(shrimpy_public_key, shrimpy_secret_key)
self.balance = self.client.get_balance(user_id, account_id)
self.coin = ''
def getBalance(self):
return self.balance
def setBalance(self):
self.balance = self.client.get_balance(self.user_id, self.account_id)
def getRawToken(self):
return self.client.get_token()
def setCoin(self, coin):
self.coin = coin.upper()
def getCoin(self):
return self.coin.upper()
def coinPrompt(self):
self.print('Enter Coin', ' ')
coin = input("Coin: ")
self.setCoin(coin)
return coin.upper()
def condense(self, coinName):
self.setBalance()
holdings = self.balance['balances']
consolidation_symbol = str(coinName)
for asset in holdings:
asset_symbol = asset['symbol']
asset_amount = asset['nativeValue']
if asset_symbol != consolidation_symbol:
create_trade_response = self.client.create_trade(
self.user_id,
self.account_id,
asset_symbol,
consolidation_symbol,
asset_amount
)
def limitSell(self, coinFrom, coinTo, limit):
        # TODO: not implemented -- currently a stub that just echoes coinFrom.
        return coinFrom
def fileCoin(self):
f = open("coin.txt", "r")
content = f.read()
l = len(content)
if l>0:
return content
else:
return None
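# Editor's usage sketch (illustrative; every key/id below is a placeholder,
# and constructing pumpBot performs a live balance request via shrimpy):
#   bot = pumpBot('EX_PUB', 'EX_SEC', 'SMP_PUB', 'SMP_SEC', 'user-id', 'acct-id')
#   coin = bot.coinPrompt()   # ask the operator for the target coin
#   bot.condense(coin)        # trade every other holding into that coin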
| [
"[email protected]"
] | |
806443a52fc5cc7e2da8af70220f0e6289e87733 | baa1811841825b321f281112afcac1d9204509b9 | /donphan/abc.py | 746487fea16e00ecd69d09519e8c85d7a6bd689d | [
"MIT"
] | permissive | Gobot1234/Donphan | d3fcb523d33a1496a09a39d2998b11cf5cf69cfb | 1f430e98148420060f6915713e8cd25cf7be14b2 | refs/heads/master | 2023-05-06T09:38:03.219882 | 2020-09-28T16:29:21 | 2020-09-28T16:29:21 | 299,368,495 | 0 | 0 | null | 2020-09-28T16:30:53 | 2020-09-28T16:30:52 | null | UTF-8 | Python | false | false | 22,471 | py | from .connection import Connection, MaybeAcquire, Record
from .column import Column
from .sqltype import SQLType
import abc
import inspect
from typing import Any, Iterable, List, Optional, Tuple, Union
_DEFAULT_SCHEMA = 'public'
_DEFAULT_OPERATORS = {
'eq': '=',
'ne': '!=',
'lt': '<',
'gt': '>',
'le': '<=',
'ge': '>='
}
class Creatable(metaclass=abc.ABCMeta):
@classmethod
def _query_create_schema(cls, if_not_exists: bool = True) -> str:
"""Generates a CREATE SCHEMA stub."""
builder = ['CREATE SCHEMA']
if if_not_exists:
builder.append('IF NOT EXISTS')
builder.append(cls.schema) # type: ignore
return ' '.join(builder)
@classmethod
@abc.abstractmethod
def _query_create(cls, drop_if_exists: bool = True, if_not_exists: bool = True) -> str:
"""Generates a CREATE stub."""
raise NotImplementedError
@classmethod
def _base_query_drop(cls, type: str, if_exists: bool = True, cascade: bool = False) -> str:
"""Generates a DROP stub."""
builder = ['DROP']
builder.append(type)
if if_exists:
builder.append('IF EXISTS')
builder.append(cls._name) # type: ignore
if cascade:
builder.append('CASCADE')
return ' '.join(builder)
@classmethod
@abc.abstractmethod
def _query_drop(cls, if_exists: bool = True, cascade: bool = False) -> str:
"""Generates a DROP stub."""
raise NotImplementedError
@classmethod
async def create(cls, *, connection=None, drop_if_exists=True, if_not_exists=True):
"""Creates this object in the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
if_not_exists (bool, optional): TODO
"""
async with MaybeAcquire(connection) as connection:
if if_not_exists:
await connection.execute(cls._query_create_schema())
await connection.execute(cls._query_create(drop_if_exists, if_not_exists))
@classmethod
async def drop(cls, *, connection=None, if_exists: bool = False, cascade: bool = False):
"""Drops this object from the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
if_exists (bool, optional): TODO
cascade (bool, optional): TODO
"""
async with MaybeAcquire(connection) as connection:
await connection.execute(cls._query_drop(if_exists, cascade))
class ObjectMeta(abc.ABCMeta):
def __new__(cls, name, bases, attrs, **kwargs):
attrs.update({
'schema': kwargs.get('schema', _DEFAULT_SCHEMA),
'_columns': {}
})
obj = super().__new__(cls, name, bases, attrs)
for _name, _type in attrs.get('__annotations__', {}).items():
# If the input type is an array
is_array = False
while isinstance(_type, list):
is_array = True
_type = _type[0]
if inspect.ismethod(_type) and _type.__self__ is SQLType:
_type = _type()
elif not isinstance(_type, SQLType):
_type = SQLType._from_python_type(_type)
column = attrs.get(_name, Column())._update(obj, _name, _type, is_array)
obj._columns[_name] = column
return obj
def __getattr__(cls, key):
if key == '__name__':
return f'{cls.__name__.lower()}'
if key == '_name':
return f'{cls.schema}.{cls.__name__.lower()}'
if key in cls._columns:
return cls._columns[key]
raise AttributeError(f'\'{cls.__name__}\' has no attribute \'{key}\'')
class Fetchable(Creatable, metaclass=ObjectMeta):
@classmethod
def _validate_kwargs(cls, primary_keys_only=False, **kwargs) -> List[Tuple[str, Any]]:
"""Validates passed kwargs against table"""
verified = list()
for kwarg, value in kwargs.items():
# Strip Extra operators
if kwarg.startswith('or_'):
kwarg = kwarg[3:]
if kwarg[-4:-2] == '__':
kwarg = kwarg[:-4]
# Check column is in Object
if kwarg not in cls._columns:
raise AttributeError(
f'Could not find column with name {kwarg} in table {cls._name}')
column = cls._columns[kwarg]
# Skip non primary when relevant
if primary_keys_only and not column.primary_key:
continue
# Check passing null into a non nullable column
if not column.nullable and value is None:
raise TypeError(
f'Cannot pass None into non-nullable column {column.name}')
def check_type(element):
return isinstance(element, (column.type.python, type(None)))
# If column is an array
if column.is_array:
def check_array(element):
# If not at the deepest level check elements in array
if isinstance(element, (List, Tuple)):
for item in element:
check_array(item)
# Otherwise check the type of the element
else:
if not check_type(element):
raise TypeError(
f'Column {column.name}; expected {column.type.__name__ }[], received {type(element).__name__}[]')
# Check array depth is expected.
check_array(value)
# Otherwise check type of element
elif not check_type(value):
raise TypeError(
f'Column {column.name}; expected {column.type.__name__}, received {type(value).__name__}')
verified.append((column.name, value))
return verified
@classmethod
def _query_fetch(cls, order_by: Optional[str], limit: Optional[int], **kwargs) -> Tuple[str, Iterable]:
"""Generates a SELECT FROM stub"""
verified = cls._validate_kwargs(**kwargs)
# AND / OR statement check
statements = ['AND ' for _ in verified]
# =, <, >, != check
operators = ['=' for _ in verified]
# Determine operators
for i, (_key, (key, _)) in enumerate(zip(kwargs, verified)):
# First statement has no boolean operator
if i == 0:
statements[i] = ''
elif _key[:3] == 'or_':
statements[i] = 'OR '
if _key[-4:-2] == '__':
try:
operators[i] = _DEFAULT_OPERATORS[_key[-2:]]
except KeyError:
raise AttributeError(f'Unknown operator type {_key[-2:]}')
builder = [f'SELECT * FROM {cls._name}']
# Set the WHERE clause
if verified:
builder.append('WHERE')
checks = []
for i, (key, _) in enumerate(verified):
checks.append(f'{statements[i]}{key} {operators[i]} ${i+1}')
builder.append(' '.join(checks))
if order_by is not None:
builder.append(f'ORDER BY {order_by}')
if limit is not None:
builder.append(f'LIMIT {limit}')
return (" ".join(builder), (value for (_, value) in verified))
@classmethod
def _query_fetch_where(cls, query: str, order_by: Optional[str], limit: Optional[int]) -> str:
"""Generates a SELECT FROM stub"""
builder = [f'SELECT * FROM {cls._name} WHERE']
builder.append(query)
if order_by is not None:
builder.append(f'ORDER BY {order_by}')
if limit is not None:
builder.append(f'LIMIT {limit}')
return " ".join(builder)
@classmethod
async def fetch(cls, *, connection: Optional[Connection] = None, order_by: Optional[str] = None, limit: Optional[int] = None, **kwargs) -> List[Record]:
"""Fetches a list of records from the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
order_by (str, optional): Sets the `ORDER BY` constraint.
limit (int, optional): Sets the maximum number of records to fetch.
**kwargs (any): Database :class:`Column` values to search for
Returns:
list(Record): A list of database records.
"""
query, values = cls._query_fetch(order_by, limit, **kwargs)
async with MaybeAcquire(connection) as connection:
return await connection.fetch(query, *values)
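    # Editor's note (illustrative): the kwarg grammar parsed above supports
    # comparison suffixes and an OR prefix, e.g.
    #     await MyTable.fetch(age__gt=18, or_name='bob')
    # renders: SELECT * FROM <schema>.mytable WHERE age > $1 OR name = $2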
@classmethod
async def fetchall(cls, *, connection: Optional[Connection] = None, order_by: Optional[str] = None, limit: Optional[int] = None) -> List[Record]:
"""Fetches a list of all records from the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool
order_by (str, optional): Sets the `ORDER BY` constraint
limit (int, optional): Sets the maximum number of records to fetch
Returns:
list(Record): A list of database records.
"""
query, values = cls._query_fetch(order_by, limit)
async with MaybeAcquire(connection) as connection:
return await connection.fetch(query, *values)
@classmethod
async def fetchrow(cls, *, connection: Optional[Connection] = None, order_by: Optional[str] = None, **kwargs) -> Optional[Record]:
"""Fetches a record from the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
order_by (str, optional): Sets the `ORDER BY` constraint.
**kwargs (any): Database :class:`Column` values to search for
Returns:
Record: A record from the database.
"""
query, values = cls._query_fetch(order_by, 1, **kwargs)
async with MaybeAcquire(connection) as connection:
return await connection.fetchrow(query, *values)
@classmethod
async def fetch_where(cls, where: str, *values, connection: Optional[Connection] = None,
order_by: Optional[str] = None, limit: Optional[int] = None) -> List[Record]:
"""Fetches a list of records from the database.
Args:
where (str): An SQL Query to pass
values (tuple, optional): A tuple containing accompanying values.
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
order_by (str, optional): Sets the `ORDER BY` constraint.
limit (int, optional): Sets the maximum number of records to fetch.
Returns:
list(Record): A list of database records.
"""
query = cls._query_fetch_where(where, order_by, limit)
async with MaybeAcquire(connection) as connection:
return await connection.fetch(query, *values)
@classmethod
async def fetchrow_where(cls, where: str, *values, connection: Optional[Connection] = None, order_by: Optional[str] = None) -> List[Record]:
"""Fetches a record from the database.
Args:
where (str): An SQL Query to pass
values (tuple, optional): A tuple containing accompanying values.
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
order_by (str, optional): Sets the `ORDER BY` constraint.
Returns:
Record: A record from the database.
"""
query = cls._query_fetch_where(where, order_by, 1)
async with MaybeAcquire(connection) as connection:
return await connection.fetchrow(query, *values)
class Insertable(Fetchable, metaclass=ObjectMeta):
@classmethod
def _query_insert(cls, returning: Optional[Union[str, Iterable[Column]]], **kwargs) -> Tuple[str, Iterable]:
"""Generates the INSERT INTO stub."""
verified = cls._validate_kwargs(**kwargs)
builder = [f'INSERT INTO {cls._name}']
builder.append(f'({", ".join(key for (key, _) in verified)})')
builder.append('VALUES')
values = []
for i, _ in enumerate(verified, 1):
values.append(f'${i}')
builder.append(f'({", ".join(values)})')
if returning:
builder.append('RETURNING')
if returning == '*':
builder.append('*')
else:
# Convert to tuple if object is not iter
if not isinstance(returning, Iterable):
returning = (returning,)
returning_builder = []
for value in returning:
if not isinstance(value, Column):
raise TypeError(
                            f'Expected a Column for the returning value, received {type(value).__name__}')
returning_builder.append(value.name)
builder.append(', '.join(returning_builder))
return (" ".join(builder), (value for (_, value) in verified))
@classmethod
def _query_insert_many(cls, columns) -> str:
"""Generates the INSERT INTO stub."""
builder = [f'INSERT INTO {cls._name}']
builder.append(f'({", ".join(column.name for column in columns)})')
builder.append('VALUES')
builder.append(
f'({", ".join(f"${n+1}" for n in range(len(columns)))})')
return " ".join(builder)
@classmethod
def _query_update_record(cls, record, **kwargs) -> Tuple[str, List[Any]]:
'''Generates the UPDATE stub'''
verified = cls._validate_kwargs(**kwargs)
builder = [f'UPDATE {cls._name} SET']
# Set the values
        sets = []
        offset = 0  # guard for empty kwargs (the original re-used the loop variable `i` below)
        for offset, (key, _) in enumerate(verified, 1):
            sets.append(f'{key} = ${offset}')
        builder.append(', '.join(sets))
        # Set the QUERY
        record_keys = cls._validate_kwargs(primary_keys_only=True, **record)
        builder.append('WHERE')
        checks = []
        for i, (key, _) in enumerate(record_keys, offset + 1):
            checks.append(f'{key} = ${i}')
builder.append(' AND '.join(checks))
return (" ".join(builder), list((value for (_, value) in verified)) + list((value for (_, value) in record_keys)))
@classmethod
def _query_update_where(cls, query, values, **kwargs) -> Tuple[str, List[Any]]:
'''Generates the UPDATE stub'''
verified = cls._validate_kwargs(**kwargs)
builder = [f'UPDATE {cls._name} SET']
# Set the values
sets = []
for i, (key, _) in enumerate(verified, len(values) + 1):
sets.append(f'{key} = ${i}')
builder.append(', '.join(sets))
# Set the QUERY
builder.append('WHERE')
builder.append(query)
return (" ".join(builder), values + tuple(value for (_, value) in verified))
@classmethod
def _query_delete(cls, **kwargs) -> Tuple[str, List[Any]]:
'''Generates the DELETE stub'''
verified = cls._validate_kwargs(**kwargs)
# AND / OR statement check
statements = ['AND ' for _ in verified]
# =, <, >, != check
operators = ['=' for _ in verified]
# Determine operators
for i, (_key, (key, _)) in enumerate(zip(kwargs, verified)):
# First statement has no boolean operator
if i == 0:
statements[i] = ''
elif _key[:3] == 'or_':
statements[i] = 'OR '
if _key[-4:-2] == '__':
try:
operators[i] = _DEFAULT_OPERATORS[_key[-2:]]
except KeyError:
raise AttributeError(f'Unknown operator type {_key[-2:]}')
builder = [f'DELETE FROM {cls._name}']
# Set the WHERE clause
if verified:
builder.append('WHERE')
checks = []
for i, (key, _) in enumerate(verified):
checks.append(f'{statements[i]}{key} {operators[i]} ${i+1}')
builder.append(' '.join(checks))
return (" ".join(builder), list(value for (_, value) in verified))
@classmethod
def _query_delete_record(cls, record) -> Tuple[str, List[Any]]:
'''Generates the DELETE stub'''
builder = [f'DELETE FROM {cls._name}']
# Set the QUERY
record_keys = cls._validate_kwargs(primary_keys_only=True, **record)
builder.append('WHERE')
checks = []
for i, (key, _) in enumerate(record_keys, 1):
checks.append(f'{key} = ${i}')
builder.append(' AND '.join(checks))
return (" ".join(builder), list(value for (_, value) in record_keys))
@classmethod
def _query_delete_where(cls, query) -> str:
'''Generates the UPDATE stub'''
builder = [f'DELETE FROM {cls._name}']
# Set the QUERY
builder.append('WHERE')
builder.append(query)
return " ".join(builder)
@classmethod
async def insert(cls, *, connection: Connection = None, returning: Iterable[Column] = None, **kwargs) -> Optional[Record]:
"""Inserts a new record into the database.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
returning (list(Column), optional): A list of columns from this record to return
**kwargs (any): The records column values.
Returns:
(Record, optional): The record inserted into the database
"""
query, values = cls._query_insert(returning, **kwargs)
async with MaybeAcquire(connection) as connection:
if returning:
return await connection.fetchrow(query, *values)
await connection.execute(query, *values)
return None
@classmethod
async def insert_many(cls, columns: Iterable[Column], *values: Iterable[Iterable[Any]], connection: Connection = None):
"""Inserts multiple records into the database.
Args:
columns (list(Column)): The list of columns to insert based on.
values (list(list)): The list of values to insert into the database.
connection (asyncpg.Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
"""
query = cls._query_insert_many(columns)
async with MaybeAcquire(connection) as connection:
await connection.executemany(query, values)
@classmethod
async def update_record(cls, record: Record, *, connection: Connection = None, **kwargs):
"""Updates a record in the database.
Args:
record (Record): The database record to update
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool
**kwargs: Values to update
"""
query, values = cls._query_update_record(record, **kwargs)
async with MaybeAcquire(connection) as connection:
await connection.execute(query, *values)
@classmethod
async def update_where(cls, where: str, *values: Any, connection: Connection = None, **kwargs):
"""Updates any record in the database which satisfies the query.
Args:
where (str): An SQL Query to pass
values (tuple, optional): A tuple containing accompanying values.
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool
**kwargs: Values to update
"""
query, values = cls._query_update_where(where, values, **kwargs) # type: ignore
async with MaybeAcquire(connection) as connection:
await connection.execute(query, *values)
@classmethod
async def delete(cls, *, connection: Connection = None, **kwargs):
"""Deletes any records in the database which satisfy the supplied kwargs.
Args:
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool.
**kwargs (any): Database :class:`Column` values to filter by when deleting.
"""
query, values = cls._query_delete(**kwargs)
async with MaybeAcquire(connection) as connection:
await connection.execute(query, *values)
@classmethod
async def delete_record(cls, record: Record, *, connection: Connection = None):
"""Deletes a record in the database.
Args:
record (Record): The database record to delete
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool
"""
query, values = cls._query_delete_record(record)
async with MaybeAcquire(connection) as connection:
await connection.execute(query, *values)
@classmethod
async def delete_where(cls, where: str, *values: Optional[Tuple[Any]], connection: Connection = None):
"""Deletes any record in the database which satisfies the query.
Args:
where (str): An SQL Query to pass
values (tuple, optional): A tuple containing accompanying values.
connection (Connection, optional): A database connection to use.
If none is supplied a connection will be acquired from the pool
"""
query = cls._query_delete_where(where)
async with MaybeAcquire(connection) as connection:
await connection.execute(query, *values)
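# Editor's usage sketch (illustrative; a hypothetical table model -- the real
# library's table classes build on Insertable in the same way):
#   class Accounts(Insertable, schema='public'):
#       id: int
#       name: str
#   async def demo():
#       await Accounts.create(if_not_exists=True)
#       await Accounts.insert(name='eron', returning=Accounts.id)
#       rec = await Accounts.fetchrow(name='eron')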
| [
"[email protected]"
] | |
31b76da73e6a927630716e4ec2e7a7705a409be4 | 6eda9bbe242a461eaaf8da5b320eddab46bfcf82 | /tools/jiandan_img_crawler.py | 3df53e5bdd29a71376f03019a49cd35a74153012 | [] | no_license | BriceChou/TensorflowEx | f7dd4b3e6df39546ba6f02eaf55670cd777063d4 | 87b658c62691e13aaeea3e790ac3193eabdcf489 | refs/heads/master | 2020-03-24T10:05:54.157866 | 2018-11-28T10:56:44 | 2018-11-28T10:56:44 | 142,647,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | # -*- coding: utf-8 -*-
# author @redpig315
import requests
from bs4 import BeautifulSoup
import re
import base64
from urllib.parse import urlparse
import os
#url="http://jandan.net/ooxx/page-36#comments"
url="http://jandan.net/ooxx"
def _base64_decode(data):
return base64.b64decode(data)
def saveimage(url):
image=requests.get(url)
path=urlparse(url).path
filenames=re.split('/',path)
if os.path.exists("D:/src/xxoo") == False:
os.mkdir("D:/src/xxoo")
f = open("D:/src/xxoo/"+filenames[2], 'wb')
f.write(image.content)
f.flush()
f.close()
def get_urls(url):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0',
'Host': 'jandan.net'
}
html = requests.get(url, headers=headers).text
#print(html)
soup=BeautifulSoup(html,'html.parser')
xxoos=soup.find_all('span',{'class':'img-hash'})
#page=soup.find_all('span',{'class':'current-comment-page'})
#curpage=page[1].text
#curnum=curpage[1:3]
#print("curpage is -------"+curnum)
for xxoo in xxoos:
#print(xxoo.string)
print("img url:http:"+str(_base64_decode(xxoo.string),'utf-8'))
imgurl="http:"+str(_base64_decode(xxoo.string),'utf-8');
saveimage(imgurl)
def get_page(url):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0',
'Host': 'jandan.net'
}
html = requests.get(url, headers=headers).text
soup=BeautifulSoup(html,'html.parser')
pages=soup.find_all('span',{'class':'current-comment-page'})
# for page in(pages):
# print(page.text)
page=pages[1].text
return page[1:3]
num=int(get_page(url))
#print(num)
num1=num-1
num2=num-2
#get page one
get_urls(url)
#get page two
urlone=url+"/page-"+str(num1)+"#comments"
get_urls(urlone)
#get page three
urltwo=url+"/page-"+str(num2)+"#comments"
get_urls(urltwo)
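# Editor's sketch (hypothetical refactor): fetch the current page plus the
# previous n-1 pages in one loop instead of unrolling three copies.
def get_recent_pages(base_url, n=3):
    cur = int(get_page(base_url))
    get_urls(base_url)
    for p in range(cur - 1, cur - n, -1):
        get_urls(base_url + "/page-" + str(p) + "#comments")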
| [
"[email protected]"
] | |
1d35d836dd2e23513bc5bc3372b3f49c9a8a94d4 | d97d55e2e6437bf24e53d22e08c0178779e71b05 | /venv/bin/pip | cc7671217b4d72ab1e52b442af7d7db54170dc88 | [] | no_license | yhaim21/SampleApp | 3042d58c6ce29e9d7f49a2ba98ce9c369bffac45 | 7573e3bf4daa287a8a34ac03c2f295a69197831f | refs/heads/master | 2023-04-05T04:17:25.134586 | 2021-04-12T15:01:21 | 2021-04-12T15:01:21 | 348,729,245 | 0 | 0 | null | 2021-04-11T17:30:37 | 2021-03-17T13:57:07 | Python | UTF-8 | Python | false | false | 256 | #!/Users/yoavhaim/PycharmProjects/Try/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
b587aded56efa552452087bc2ee9ebcc21b71490 | 38c3dd02aa8176724e2854f2dc382a1226c2336b | /Python/module/module1.py | cc0c46344ab92f31274aabf64e303b8e704f2e9a | [] | no_license | rjh211/EncoreStudy | 6d46cae9130cbc8e194095ed845b3cb8cd30e118 | ae2abf69df7fac5b259f032e79b26018a32cd5a7 | refs/heads/master | 2022-12-17T13:37:03.498567 | 2020-09-24T02:25:08 | 2020-09-24T02:25:08 | 287,887,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | num = 10
msg = 'hello'
def cnt(x):
for i in range(1,x+1):
print(i, end=',')
print()
def makeList():
print("숫자 5개 입력")
return [int(input()) for i in range(0,5)]
def sumList(lst):
return sum(lst) | [
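if __name__ == '__main__':
    # Editor's demo (illustrative): exercise the non-interactive helpers.
    cnt(5)                      # prints: 1,2,3,4,5,
    print(sumList([1, 2, 3]))   # prints: 6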
"[email protected]"
] | |
2dc37446c02b5ae3c76b9184b021ed6aa58ad607 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/284/89249/submittedfiles/minha_bib.py | a4e49fe59fc73927471089959c4911c71b460a5f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | a# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def divisor(x):
for n in range(1,x,1):
if (i%n==0):
n=z
| [
"[email protected]"
] | |
5d9adf11ba61d1788e85e2a90d9ac9bd66148012 | 9db04711e1f79f0957c454d65b70f7564d662c92 | /abstrys/sphinx_ext/markdown_builder.py | 6be56cbda72b1caed06def8150bc84b2b3c28765 | [
"BSD-3-Clause"
] | permissive | acsr/rst2db | 2d738bd8a9c4cc1ce88bfbf2c9a5bb9bf108156f | 7bd341aa5f6867d87cab805c42bb5835913c1ee5 | refs/heads/master | 2020-04-06T13:30:40.280624 | 2016-11-16T05:41:15 | 2016-11-16T05:41:15 | 157,502,835 | 0 | 0 | null | 2018-11-14T06:37:39 | 2018-11-14T06:37:39 | null | UTF-8 | Python | false | false | 806 | py | # -*- coding: utf-8 -*-
#
# abstrys.sphinx_ext.markdown_builder
# -----------------------------------
#
# A Markdown builder for Sphinx, using rst2db's markdown writer.
#
# by Eron Hennessey
from abstrys.docutils_ext.markdown_writer import MarkdownWriter, MarkdownTranslator
from docutils.core import publish_from_doctree
from sphinx.builders.text import TextBuilder
import os, sys
class MarkdownBuilder(TextBuilder):
"""Build Markdown documents from a Sphinx doctree"""
name = 'markdown'
format = 'markdown'
def __init__(self, app):
TextBuilder.__init__(self, app)
self.out_suffix = '.md'
def prepare_writing(self, docnames):
self.writer = MarkdownWriter()
def setup(app):
global sphinx_app
sphinx_app = app
app.add_builder(MarkdownBuilder)
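# Editor's note (illustrative): with this module importable, enabling it in a
# Sphinx conf.py via  extensions = ['abstrys.sphinx_ext.markdown_builder']
# and running  sphinx-build -b markdown <srcdir> <outdir>  emits .md files
# through MarkdownWriter.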
| [
"[email protected]"
] | |
24602774257285f8b07411e318e6d8d028b70164 | fbe3a52d2dd02bec18f7f52b31e357aed192a308 | /fibonacci/fibonacci.py | 995a3f094f3311892ac15ecc3dd68895914058df | [] | no_license | lherrada/python | 8fc5bd5ceb6038479fa6347dd6c0bd6e17f92e98 | d8260f35ba91b89590ef8e489188fb80ca1aed4e | refs/heads/master | 2022-10-29T06:23:17.297554 | 2022-09-24T15:45:59 | 2022-09-24T15:45:59 | 23,411,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | #!/usr/bin/python
"""
Fibonacci numbers:
F(n)=F(n-1) + F(n-2)
with F(0)=0 and F(1)=1
This script measures execution time
for different implementation of Fibonacci
series
"""
#Iterative solution
def fib0(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
F=[]
F[0:2]=0,1
for i in range(2,n+1):
f=F[i-1]+F[i-2]
F.append(f)
return f
def fib1(n):
a,b=0,1
for i in range(n):
a,b=b,a+b
return a
#Recursive Solution
def fib2(n):
if n == 0 :
return 0
elif n == 1:
return 1
else:
return fib2(n-1)+ fib2(n-2)
fibohash={}
fibohash={0:0,1:1}
def fib3(n):
if not n in fibohash:
fibohash[n]=fib3(n-1) + fib3(n-2)
return fibohash[n]
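def _sanity_check(n=20):
    # Editor's sketch (illustrative): the four implementations must agree.
    assert fib0(n) == fib1(n) == fib2(n) == fib3(n)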
if __name__ == '__main__':
import timeit
for i in range(1,50):
s="fib0("+str(i)+")"
time1=timeit.timeit(s,setup="from __main__ import fib0")
s="fib1("+str(i)+")"
time2=timeit.timeit(s,setup="from __main__ import fib1")
s="fib3("+str(i)+")"
time3=timeit.timeit(s,setup="from __main__ import fib3")
print "n=%d,\tfib0: %7.6f\tfib1: %7.6f\tfib3: %7.6f\n" % (i,time1,time2,time3),
| [
"[email protected]"
] | |
fdc73516cb8953e628c2d2d68b8d1de962530865 | e49c427bf83085bfdef6382addae1f9f6b976bda | /semisupervised/codes/run_dataformat7.py | 5e206df407ec67da430aedaee31f35b897e37d4b | [] | no_license | SihongHo/GMNN | 955c6815ecaa7a9ecf80c70780da1bf9551fb9bd | 30549161625a36b3e4c27bbf2f3c9d1aa4765b95 | refs/heads/main | 2023-04-16T23:54:42.606867 | 2021-04-29T06:43:55 | 2021-04-29T06:43:55 | 357,055,327 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | '''
Author: your name
Date: 2021-04-09 21:57:45
LastEditTime: 2021-04-28 21:52:55
LastEditors: Please set LastEditors
Description: In User Settings Edit
'''
import sys
import os
import copy
import json
import datetime
opt = dict()
opt['dataset'] = '../data/data_format7'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'rmsprop'
opt['lr'] = 0.05
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 100
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.1
def generate_command(opt):
cmd = 'python3 train.py'
    for key, val in opt.items():  # renamed from `opt` to avoid shadowing the argument
        cmd += ' --' + key + ' ' + str(val)
return cmd
def run(opt):
opt_ = copy.deepcopy(opt)
os.system(generate_command(opt_))
from datetime import datetime
start = datetime.now()
start_time = start.strftime("%H:%M:%S")
print("Start Time =", start_time)
for k in range(100):
seed = k + 100
# e3_2 + 100
opt['seed'] = seed
run(opt)
end = datetime.now()
end_time = end.strftime("%H:%M:%S")
print("End Time =", end_time)
# elapsed = end - start
# print("Elapsed Time =", elapsed)  # note: timedelta has no strftime()
| [
"[email protected]"
] | |
7cf01f49dfc23d45d2d65cce50510aa6db352d77 | 925e088a2f1f2d9b8fcab65bd2c96cf4c4911c13 | /examples/rag/callbacks.py | 099cf2bbdfac82b0138cda3d6c4465c307ca5f5f | [
"Apache-2.0"
] | permissive | bdalal/transformers | 7e2b2920e8848a8606389fbe7346f2f470351734 | 36a19915ea4fc3dc337a310e4a1af43eb3c81c9a | refs/heads/master | 2023-01-19T18:02:18.837672 | 2020-11-17T15:35:38 | 2020-11-17T15:35:38 | 313,672,203 | 1 | 0 | Apache-2.0 | 2020-11-17T16:06:51 | 2020-11-17T16:06:50 | null | UTF-8 | Python | false | false | 4,424 | py | import logging
import os
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils import save_json
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
"""Saves the best model by validation EM score."""
if metric == "rouge2":
exp = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
exp = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
exp = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this function."
)
checkpoint_callback = ModelCheckpoint(
filepath=os.path.join(output_dir, exp),
monitor=f"val_{metric}",
mode="max",
save_top_k=3,
period=0, # maybe save a checkpoint every time val is run, not just end of epoch.
)
return checkpoint_callback
def get_early_stopping_callback(metric, patience):
return EarlyStopping(
monitor=f"val_{metric}", # does this need avg?
mode="min" if "loss" in metric else "max",
patience=patience,
verbose=True,
)
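# Editor's usage sketch (hypothetical wiring; argument names are assumptions):
#   trainer = pl.Trainer(
#       callbacks=[Seq2SeqLoggingCallback(),
#                  get_early_stopping_callback("bleu", patience=3)],
#       checkpoint_callback=get_checkpoint_callback(output_dir, "bleu"),
#   )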
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(
self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
od = Path(pl_module.hparams.output_dir)
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, "a+") as writer:
for key in sorted(metrics):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f"{key}: {val:.6f}\n"
writer.write(msg)
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"])
generations_file.open("w+").write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| [
"[email protected]"
] | |
c4c74f7c37ec7989964b84e8325dbb87bc0d6150 | df1c58facb4f0bbd0080c9cdbcbd93b580f6de60 | /litcity/wsgi.py | fed5ffe1b44e5478663f997b23e16bedf36feecb | [] | no_license | mailbackwards/litcity | 21f3f0e3817f09fca3c4900c240f38416f5a6002 | 777244b5f31e1ec8f7c842a7fb3a52f39f2bee6d | refs/heads/master | 2021-01-10T07:48:13.167925 | 2016-01-11T13:40:58 | 2016-01-11T13:40:58 | 49,340,892 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | """
WSGI config for litcity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "litcity.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"[email protected]"
] | |
6773fb851e960887e1c84755dab1372849fe0a51 | e8f2e7c175082934d00bb4896bd47c63108a094d | /build/lib/bin/modules/usbinfo.py | db7288116d87c5d88072013546aa87c035b41aae | [] | no_license | volneyrock/SlkFormat | d0440b49a169b7c82fd888eba8f216ae75a2c645 | d3474ece9341ff6b0b99247b18b5d3bc50bf0707 | refs/heads/master | 2016-08-04T12:23:45.428711 | 2015-09-16T16:30:41 | 2015-09-16T16:30:41 | 24,955,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | #!/usr/bin/env python
#
# Copyright (C) 2014 Joel W. Dafoe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from string import ascii_uppercase, ascii_lowercase
class USBdrv:
def __init__(self):
self.mounted_drvs = self.find_drives()
def find_drives(self):
mounts = []
for mount in self.get_drives():
if os.path.exists(mount):
mounts.append(mount)
return mounts
@staticmethod
def get_drives():
if os.name == "nt":
for letter in list(ascii_uppercase):
if letter != 'C':
yield letter+':'
if os.name == "posix":
for device in list(ascii_lowercase):
if device != 'a':
yield '/dev/sd'+device+'1'
if __name__ == '__main__':
print(USBdrv().mounted_drvs)
| [
"[email protected]"
] | |
e2ca2a97f00dc821223253c3166a745d83176193 | 9ca48d32e4020f2bcdb94f06d19c0b7e7e72b327 | /main.py | baaa2ff72711a27458828962ade4bddfb24a87fa | [
"MIT"
] | permissive | mdhiebert/curri | 9811c251d2f002e13853533e1e206c0c27f36d44 | 24583c53ff71285b5bfb39332d286b9e58073a91 | refs/heads/master | 2022-04-10T18:16:31.788495 | 2020-03-23T16:19:40 | 2020-03-23T16:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from curri.curri import Curri
import json
import subprocess
if __name__ == "__main__":
with open('real.json') as f:
j = json.load(f)
c = Curri(j)
with open('output.tex', 'w') as f:
f.write(c.latex())
subprocess.call('pdflatex output.tex', shell=True) | [
"[email protected]"
] | |
b1eee9cdf730bcb24e8f4f06700f11e81bfa261c | c48c4de9d221de538ac2a0e88430e4ad1c8a060c | /Python/Server_Client/grap_with_tcp.py | 320a131e6b6ae7a188b076649369b992c66d300a | [] | no_license | AlexBracamonte/Cris_Review | f95cc90f9fee819f1b4ea3c559723574e3820e91 | d7cdd3859a1e61ee212ccf975b2ec36678ff6adf | refs/heads/main | 2023-08-14T06:07:29.649469 | 2021-09-07T17:01:43 | 2021-09-07T17:01:43 | 404,043,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | import sys
import time
from PyQt5 import QtWidgets, uic
import threading
from Server_grap import Servidor
from entrada import Ui_MainWindow
class ChatWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, *args, obj=None, **kwargs):
super(ChatWindow, self).__init__(*args, **kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.button_send.clicked.connect(self.on_clicked_login)
self.tcp = Servidor(host='127.0.0.1', port=40001)
self.__init_deamons()
def __init_deamons(self):
procesar = threading.Thread(target=self.tcp.run)
procesar.daemon = True
procesar.start()
mostrar = threading.Thread(target=self.tcp2window)
mostrar.daemon = True
mostrar.start()
def tcp2window(self):
while True:
try:
msg = self.tcp.recv_msg()
mensaje = msg.decode('utf-8')
self.receiver_msg(mensaje)
except:
pass
def write_text(self, msg: str):
self.ui.text_revc.insertPlainText(msg)
def on_clicked_login(self):
text = self.ui.text_send.toPlainText()
self.tcp.msg_to_all(text)
localtime = time.asctime(time.localtime(time.time()))
self.write_text(msg=f"{localtime} >> {text}\n")
print(f"{localtime} >> {text}")
self.ui.text_send.clear()
def receiver_msg(self, msg: str):
localtime = time.asctime(time.localtime(time.time()))
self.write_text(msg=f"\t{localtime} << {msg}\n")
def keyPressEvent(self, event):
from PyQt5.QtCore import Qt
if event.key() == Qt.Key_Escape:
self.close()
if __name__ == "__main__":
try:
app = QtWidgets.QApplication(sys.argv)
window = ChatWindow()
window.show()
app.exec()
sys.exit(0)
except NameError:
print("Name Error:", sys.exc_info()[1])
except SystemExit:
window.tcp.exit()
print("Closing Window...")
except Exception:
print(sys.exc_info()[1]) | [
"[email protected]"
] | |
5cc63a29d17c2837080ef1c890b9707b076f2fb3 | 424213d62d7e88a907bfc30b52cef61f6d4b95b7 | /092 - Square digit chains/092.py | cb8be68584cd6592fdfcb4b798ef1779f1cb6661 | [] | no_license | David-Jackson/project-euler-solutions | ae1b3e88987c4e428be046cb6d9562995e0e0298 | 5ba9d9914f6d170110c4f9b533357c167b9f986d | refs/heads/master | 2021-01-19T10:43:04.444228 | 2017-03-15T18:29:51 | 2017-03-15T18:29:51 | 82,206,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | def sq_digit_sum(n):
sum = 0
while n > 0:
r = (n % 10)
sum += (r * r)
n /= 10
return sum
def sq_digit_chain(n):
while n != 1 and n != 89:
n = sq_digit_sum(n)
# yield n
return n
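# Editor's note (illustrative): every chain ends at 1 or 89, e.g.
#   44 -> 32 -> 13 -> 10 -> 1   so sq_digit_chain(44) == 1
#   85 -> 89                    so sq_digit_chain(85) == 89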
count = 0
i = 1
while i < 10000000:
if sq_digit_chain(i) == 89:
count += 1
i += 1
print "{0} - {1}\r".format(i, count),
print
print "Answer:", count
| [
"[email protected]"
] | |
ef402514ffa3750c3a5f4891c30b7bae071489cb | 794fb34490ad5d7e1fa596f05eeffca397b1e70a | /0x16-api_advanced/2-recurse.py | e8602ca3d3ca3416bd45a699cf7c79a3e93d4c70 | [] | no_license | Fermec28/holberton-system_engineering-devops | f31b5bfc9477c530e7f5dea9e8ac054ab90965b2 | 84038ff0a859d8a0bd899f69da96e1e0f81f8a0d | refs/heads/master | 2020-04-19T12:18:15.661872 | 2019-10-11T01:33:56 | 2019-10-11T01:33:56 | 168,188,483 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #!/usr/bin/python3
"""Recursively collect the titles of all hot posts in a subreddit."""
import requests
def recurse(subreddit, hot_list=[], after=None):
    """Return the list of hot-post titles, paging via Reddit's `after` cursor.
    Note: the mutable default `hot_list=[]` persists across top-level calls."""
url = "https://www.reddit.com/r/{}/hot.json".format(subreddit)
headers = {'User-Agent': 'Fermec28'}
payload = {"after": after}
try:
result = requests.get(url, headers=headers, params=payload,
allow_redirects=False).json()
except:
return(None)
if ("data" in result and "children" in result.get("data")):
for post in result.get("data").get("children"):
hot_list.append(post.get("data").get("title"))
if "after" in result.get("data") and result.get("data").get("after"):
return recurse(subreddit, hot_list,
result.get("data").get("after"))
else:
return hot_list
else:
return(None)
| [
"[email protected]"
] | |
59adda2a15017da21a6c2c57b5d17b7f569cd18b | b8a2ca045e25e6047e08ac599d6eb5bfda97cfe3 | /victimas/apps/beneficiario/migrations/0012_auto_20161120_1808.py | 208e47b2ebedd46c9a95fd0effa57e93c18b5c31 | [] | no_license | jhon-palma/vicitimas | 4c0d35766b1dd1a5700c46d1a54f2c6787638af9 | fce8d176c72108f86860217c9dead7274fccb113 | refs/heads/master | 2020-05-31T15:50:36.542537 | 2017-06-30T03:40:23 | 2017-06-30T03:40:23 | 94,037,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-20 18:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('beneficiario', '0011_auto_20161120_1554'),
]
operations = [
migrations.RenameField(
model_name='niveleducativo',
old_name='hecho',
new_name='nivel',
),
migrations.AlterField(
model_name='beneficiario_grupohogar',
name='grupoHogar',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='beneficiario.GrupoHogar'),
),
]
| [
"palma@Master"
] | palma@Master |
282409aca8ba67536ce6dde50f596e1447c28e90 | 2cf9f165cb4d6e8e9009d74b43020fe2d5c1964f | /chat/urls.py | 1d7d025d1f3df8c9aba0845cbb91749fc398eb6d | [] | no_license | jimy1824/chat | 29f5039c6284083b8328502932795bee586dec14 | 627ad4678c6215d37322737b38b3e5eb6d69696f | refs/heads/master | 2023-04-27T15:10:51.316824 | 2019-11-08T05:00:12 | 2019-11-08T05:00:12 | 220,081,959 | 1 | 0 | null | 2023-04-21T20:40:47 | 2019-11-06T20:17:20 | HTML | UTF-8 | Python | false | false | 224 | py | from django.urls import path
from chat import views
urlpatterns = [
path('chat/', views.ChatHome.as_view(), name='chat_home'),
path('chat_box/<int:selected_user_id>', views.ChatInbox.as_view(), name='chat_inbox'),
]
| [
"[email protected]"
] | |
70d7fb1e3790e61c41f4c36047fb59ae40639efe | b70126739f50af4d598f4ca59806cdf34e7d06dd | /product/migrations/0055_product_offer_of_min.py | aaa570239554f5e0e34a127291ab8b2c37edf8cd | [] | no_license | sharingsimplethoughts/onlineshopping | aa1b52828819d9ab21b0405463efa9995210ec0d | c48a32dd762dd2eef81935e1645b4304d9587cc7 | refs/heads/master | 2022-11-10T09:16:21.341703 | 2020-07-01T07:29:10 | 2020-07-01T07:29:10 | 276,291,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 2.1.5 on 2019-05-01 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0054_product_min_price'),
]
operations = [
migrations.AddField(
model_name='product',
name='offer_of_min',
field=models.CharField(blank=True, max_length=5, null=True),
),
]
| [
"[email protected]"
] | |
7eea7466a192f13e09aa8a8e771d0ab3d1619d30 | 3202f0a51374541fe570835d6ac5c7222a31f97c | /www/content/views.py | ed93d32561b22755dbcb6de04f25500aeda305b4 | [] | no_license | zsoobhan/fva | 435fa988210cc6e547b83485b75dddfb562584ef | e9a65d50e0608ebadff9451787f2028b01e52013 | refs/heads/master | 2021-01-10T01:51:53.487459 | 2019-05-18T23:07:48 | 2019-05-18T23:07:48 | 46,052,752 | 0 | 0 | null | 2018-08-02T15:44:21 | 2015-11-12T12:54:27 | Python | UTF-8 | Python | false | false | 1,125 | py | from django.views import generic
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse_lazy
from . import forms
class ContactFormView(generic.FormView):
form_class = forms.ContactForm
template_name = 'content/contact.html'
success_url = reverse_lazy('home')
success_message = _("Your message has been sent and I will be in "
"touch shortly.")
def form_valid(self, form):
form.save()
messages.success(self.request, self.success_message)
return super(ContactFormView, self).form_valid(form)
def get_form_kwargs(self):
# Adds answer from session to the form kwargs for human verification
kwargs = super(ContactFormView, self).get_form_kwargs()
kwargs['answer'] = self.request.session['q_and_a']['answer']
kwargs['request_meta'] = self.request.META
return kwargs
class HomeView(generic.TemplateView):
template_name = 'content/home.html'
class BiographyView(generic.TemplateView):
template_name = 'content/biography.html'
| [
"[email protected]"
] | |
c602149831a9530d2932450549686b4d7807094e | 858e97af89958e27e4f3686b023e8b9cd53dc14d | /utils/cv_utils.py | 0b7b867be91a64bb4f430b6c212f31d249fd09fd | [
"MIT"
] | permissive | XLEric/ganimation | f507c92e3b2bc067238b4016d62da3da5090a2d9 | 024fdc0adbeae582a139635a5141b2737969bc48 | refs/heads/master | 2022-12-25T04:56:15.143476 | 2020-09-28T17:55:26 | 2020-09-28T17:55:26 | 299,255,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import cv2
from matplotlib import pyplot as plt
import numpy as np
def read_cv2_img(path):
    '''
    Read a color image from disk.
    :param path: Path to the image file
    :return: RGB image array, or None if the file is missing or not 3-channel
    '''
    img = cv2.imread(path, -1)
if img is not None:
if len(img.shape) != 3:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def show_cv2_img(img, title='img'):
'''
Display cv2 image
:param img: cv::mat
:param title: title
:return: None
'''
plt.imshow(img)
plt.title(title)
plt.axis('off')
plt.show()
def show_images_row(imgs, titles, rows=1):
'''
Display grid of cv2 images image
:param img: list [cv::mat]
:param title: titles
:return: None
'''
assert ((titles is None) or (len(imgs) == len(titles)))
num_images = len(imgs)
if titles is None:
titles = ['Image (%d)' % i for i in range(1, num_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(imgs, titles)):
ax = fig.add_subplot(rows, np.ceil(num_images / float(rows)), n + 1)
if image.ndim == 2:
plt.gray()
plt.imshow(image)
ax.set_title(title)
plt.axis('off')
plt.show()
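# Editor's usage sketch (hypothetical file names):
#   img_a = read_cv2_img('face_a.jpg')
#   img_b = read_cv2_img('face_b.jpg')
#   show_images_row([img_a, img_b], ['source', 'animated'], rows=1)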
| [
"[email protected]"
] | |
5bf9c75838aa2939c12ca5bb9d42717ba989e2f3 | 691e48e96cb4d676701f9f62bfb5af936d6d9cd6 | /Archive_2020/M7/turtleData.py | 03c5468877b5c04e6917f2763132d72735fdf8cc | [] | no_license | mapossum/spa | 50d263ff1986dd73692e3d0866c2299ace180e59 | 90da0167aa6cbea067c88d30aea26440b366e6b3 | refs/heads/master | 2022-05-19T08:20:53.293065 | 2022-04-13T20:35:11 | 2022-04-13T20:35:11 | 29,207,514 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import numpy as np
import urllib.request as rq
import json
turtleID = 1
url = 'http://geo.conserveturtles.org/Tracking/GetTurtleLocationsByIdAsJson?id={}'.format(turtleID)
f = rq.urlopen(url)
turtledata = json.load(f)
#First Turtle Location
print(turtledata[0])
import arcpy
arcpy.env.overwriteOutput = True
out_path = r"G:\courses\spa\Archive_2020\M7"
out_name = "TutleTracks.shp"
fc = r"{}/{}".format(out_path,out_name)
spatial_reference = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(out_path, out_name, "Polyline", "", "", "", spatial_reference)
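# Editor's sketch (illustrative): append the fetched fixes as one polyline.
# The JSON field names below are assumptions about the feed's schema.
#   pts = arcpy.Array(arcpy.Point(p['Longitude'], p['Latitude'])
#                     for p in turtledata)
#   with arcpy.da.InsertCursor(fc, ['SHAPE@']) as cur:
#       cur.insertRow([arcpy.Polyline(pts, spatial_reference)])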
| [
"[email protected]"
] | |
343750c5461b0e9012d0135e09db96bc94d10167 | 8d489c686385bfcf507ac139f57d60a180e26ba3 | /eland/tests/series/test_sample_pytest.py | 8de43e382d26d3668a7faf679fe10a9eb4202d24 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | hyperparam-shafin/eland | d070b426e1e83667cf18aec8bfd2b2996511b7e9 | abc5ca927b10744de07f48865fb842f78b167aea | refs/heads/master | 2022-12-30T17:00:56.523262 | 2020-10-15T15:52:48 | 2020-10-15T15:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# File called _pytest for PyCharm compatibility
import eland as ed
from eland.tests import ES_TEST_CLIENT
from eland.tests import FLIGHTS_INDEX_NAME
from eland.tests.common import TestData
from eland.tests.common import assert_pandas_eland_series_equal
class TestSeriesSample(TestData):
SEED = 42
def build_from_index(self, ed_series):
ed2pd_series = ed_series.to_pandas()
return self.pd_flights()["Carrier"].iloc[ed2pd_series.index]
def test_sample(self):
ed_s = ed.Series(ES_TEST_CLIENT, FLIGHTS_INDEX_NAME, "Carrier")
pd_s = self.build_from_index(ed_s.sample(n=10, random_state=self.SEED))
ed_s_sample = ed_s.sample(n=10, random_state=self.SEED)
assert_pandas_eland_series_equal(pd_s, ed_s_sample)
| [
"[email protected]"
] | |
cd3d91e4e7ec6e2ecde520d78cc895f4c5b05916 | f8f12e4005b33ae72440d580b5ac6047b7336222 | /accounts/views.py | f24219bb1356380a63a6a7ee72ecdfa26ff874cc | [] | no_license | memadd/BT-Real-Estate | 64f3cb61cac34240d59db409fac2afa624a473b0 | 56ba30159b1a7243fe66bd7b06b1c6d4c8cff3f7 | refs/heads/master | 2023-01-01T23:36:20.909328 | 2020-10-26T19:10:35 | 2020-10-26T19:10:35 | 291,974,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
def register(request):
if request.method == 'POST':
# Get form values
first_name = request.POST['first_name']
last_name = request.POST['last_name']
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
password2 = request.POST['password2']
# Check if passwords match
if password == password2:
# Check username
if User.objects.filter(username=username).exists():
messages.error(request, 'That username is taken')
return redirect('register')
else:
# Check email
if User.objects.filter(email=email).exists():
messages.error(request, 'That email is being used')
return redirect('register')
else:
# Looks good
user = User.objects.create_user(username=username, password=password, email=email,
first_name=first_name, last_name=last_name)
# Login after register
# auth.login(request, user)
# messages.success(request, 'You are now logged in')
# return redirect('index')
user.save()
                messages.success(request, 'You are now registered and can log in')
return redirect('login')
else:
messages.error(request, 'Passwords do not match')
return redirect('register')
else:
return render(request, 'accounts/register.html')
def login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
messages.success(request, 'You are now logged in')
return redirect('dashboard')
else:
messages.error(request, 'Invalid credentials')
return redirect('login')
else:
return render(request, 'accounts/login.html')
def logout(request):
if request.method == 'POST':
auth.logout(request)
messages.success(request, 'You are now logged out')
return redirect('index')
def dashboard(request):
user_contacts = Contact.objects.order_by('-contact_date').filter(user_id=request.user.id)
context = {
'contacts':user_contacts,
}
return render(request, 'accounts/dashboard.html', context) | [
"[email protected]"
] | |
0908d0a0d998c84abfd9765e3f32b145b93c4dcc | dd826364c4140a4357a74d5e6692959cd6a7c1da | /radial_entropy.py | c4cae7495c1b019769205497aad80cc6123cac8d | [] | no_license | bizhouxuan/streetmaps | 175c234913aab496c904c985e35deea8fe6c6b1e | d517365ec70db4e68c41fba301dd2f4eb3a13b90 | refs/heads/master | 2021-07-09T21:01:41.774505 | 2020-12-27T16:47:15 | 2020-12-27T16:47:15 | 225,814,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | import sys
import math
import numpy as np
from scipy.stats import entropy
import argparse
def print_help():
    print('Usage: radial_entropy.py DELIMITER SKIPROWS FILE')
    print('Calculate the spatial frequency entropy of a city\'s street network.')
    print('The FILE argument is the path to the file containing the two-col k distribution data.')
    print('e.g., 0.12527823 0.012408118')
    print('where the first column is the wave vector k in 1/m and the second is the PSD (power spectral density).')
def calculate_entropy(k, psd):
bin_size = k[1]-k[0]
print('Bin size (1/m): ',bin_size)
#For radial distribution, may NOT need to subtract the background from the PSDF curve before calculating entropy.
orientation_entropy = entropy(psd)+math.log(bin_size)
print(orientation_entropy)
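# Editor's note (illustrative): scipy.stats.entropy normalizes `psd` into a
# probability vector p_i and returns -sum(p_i * log(p_i)); adding
# log(bin_size) turns that discrete entropy into an estimate of the
# differential entropy -integral p(k) log p(k) dk over the sampled k range.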
if __name__ == '__main__':
    if len(sys.argv) == 1:
        # Guard before parse_args(), which exits on missing arguments and
        # previously made this help branch unreachable.
        print_help()
        sys.exit(-1)
    parser = argparse.ArgumentParser()
    parser.add_argument("delimiter", help="delimiter separating data in a row")
    parser.add_argument("skiprows", type=int, help="number of rows to skip")
    parser.add_argument("filename", help="name of the data file")
    args = parser.parse_args()
delimiter = args.delimiter
skiprows = args.skiprows
filename = args.filename
try:
k, psd= np.loadtxt(filename, usecols = (0,1), unpack=True, delimiter=delimiter, skiprows=skiprows)
calculate_entropy(k, psd)
except Exception as e:
print(f'Error: {e}.')
| [
"[email protected]"
] | |
cbdf9d00e07cf760b6b58be619945082252fe343 | e1ee301e51b62c675f125a970390b1b4196bd59c | /hao_prj_hhl/hao_prj_hhl/settings.py | 90588db1999841dc269ec92c66582ae856a98607 | [
"MIT"
] | permissive | haohl-keepworking/hao_keep | ee96279dc4ff86b9f5ff7fe0d89bc9fc660ba3d3 | cb0186a3378b437a1f9a4cfd066a10870180996b | refs/heads/master | 2020-08-08T19:14:33.351872 | 2019-10-09T12:42:55 | 2019-10-09T12:42:55 | 213,897,321 | 0 | 0 | MIT | 2019-10-09T12:42:56 | 2019-10-09T11:14:32 | Python | UTF-8 | Python | false | false | 3,125 | py | """
Django settings for hao_prj_hhl project.
Generated by 'django-admin startproject' using Django 1.11.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd2yvzdddii=sythzfysi$(i=41rucqnw1i__95a!4=z#syv1q3'
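# One way to honor the warning above is to read the key from the environment
# with a development-only fallback (a sketch; the variable name
# DJANGO_SECRET_KEY is an assumption, not something this project defines):
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)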
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"app"
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hao_prj_hhl.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hao_prj_hhl.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
e2c67a4f04dc3ad8ffe2164bf040211a72fc7928 | 2a9934228f9ee46fea03d415262ef305de937dd9 | /Lesson2/Lesson3/cont5.py | 5c97f810a812911752726bd502ff0bb1252457f5 | [] | no_license | BayanbaevIlyas/Lesson1 | d938e1e87f2802a8d3b4a3343bbe88e1c36b943a | 24f979da615ce9ebd17daf485cd6c15531bd42cb | refs/heads/master | 2021-01-25T14:10:42.067571 | 2018-04-14T05:24:59 | 2018-04-14T05:24:59 | 123,663,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def mySum(a):
return a + 4
a = int(input('enter age'))
print(mySum(a))
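# e.g. entering 10 prints 14, since mySum adds 4 to its argument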
| [
"[email protected]"
] | |
d952b3d9f13cc2ab9d89bfec0fe06b5678e48bb1 | fab3a3f86e5f15f0390d2e6faa5772e387676082 | /pythonwebapp/flaskblog/routes.py | 48deb921d50e77acf12b3152bfea64134947ba18 | [] | no_license | HeinnerAmaro/bookstore | e7d7b949768db5696318faed015d5770766b1f71 | 0e8e850bdd09d5636b619ce0c556f17944ed086a | refs/heads/main | 2023-01-31T07:13:21.107664 | 2020-12-15T21:29:48 | 2020-12-15T21:29:48 | 321,792,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,305 | py | from flask import render_template, url_for, flash, redirect
from flaskblog import app, db, bcrypt
from flaskblog.forms import RegistrationForm, LoginForm,PostForm,InsertForm
from flaskblog.models import User, Post,Book
from flask_login import login_user,current_user,logout_user,login_required
from sqlalchemy.sql import func
@app.route("/")
@app.route("/home")
def home():
posts = Post.query.all()
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
    books = Book.query.all()
    return render_template('about.html', books=books)
# WTForms handles the validation of the form submissions below.
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
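        # generate_password_hash returns bytes; decoding to utf-8 stores a
        # plain string that check_password_hash can verify at login.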
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
        flash('Account created', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
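        # .first() returns None when no account matches the email, so the
        # combined check below guards both a missing user and a bad password.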
if user and bcrypt.check_password_hash(user.password,form.password.data):
login_user(user,remember=form.remember.data)
return redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check your email and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
@app.route("/post/new/",methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
        post = Post(title=form.title.data, content=form.content.data, author=current_user, rating=form.rating.data, book_id=form.book_id.data)
db.session.add(post)
db.session.commit()
flash('Your post has been created!','success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post',form = form)
@app.route("/insert_book",methods=['GET', 'POST'])
@login_required
def new_book():
form = InsertForm()
if form.validate_on_submit():
        book = Book(title=form.title.data, isbn=form.isbn.data, author=form.author.data, publicationyear=form.publicationyear.data, description=form.description.data, price=form.price.data, copies_sold=form.copies_sold.data, publisher=form.publisher.data, genre=form.genre.data)
db.session.add(book)
db.session.commit()
flash('Your Book has been created!','success')
return redirect(url_for('home'))
return render_template('insert_book.html', title='New Book',form = form)
@app.route("/books")
@login_required
def index():
    # Query objects have no `.func` attribute, so the aggregate chaining
    # attempted here fails; order the posts by rating instead.
    posts = Post.query.order_by(Post.rating.desc()).all()
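    # A per-book average rating, which the `func` import suggests was the
    # intent, could be sketched as (assuming Post.book_id links posts to
    # books):
    #   avg_ratings = (db.session.query(Post.book_id,
    #                                   func.avg(Post.rating).label('avg_rating'))
    #                  .group_by(Post.book_id)
    #                  .all())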
    return render_template('books.html', posts=posts)
| [
"[email protected]"
] |