# ===== File: /LinkedInCrawler/parameters.py (repo: benjamincarney/EECS-Faculty-Salary-Predictor) =====
'''
Contains user information to log in to LinkedIn
'''
# specifies the path to the chromedriver.exe
driverDirectory = 'C:/Users/Aaron/Desktop/ChromeDriver/chromedriver.exe'
# query = 'site:linkedin.com/in/ AND "University of Michigan" AND "Professor"'
query = 'site:linkedin.com/in/ AND "University of Michigan"'
# User log-in information
linked_in_email = "[email protected]"
linked_in_password = "password123"
# ===== File: 05-深入python的dict和set/04-dict_performance.py (repo: zhagyilig/AdvancePy) =====
# -*- encoding: utf-8 -*-
__author__ = 'ericzhang'
from random import randint
def load_list_data(total_nums, target_nums):
"""
    Read data from the file and return it as a list
    :param total_nums: number of records to read
    :param target_nums: number of records to look up
"""
all_data = []
target_data = []
file_name = "fbobject_idnew.txt"
with open(file_name, encoding="utf8", mode="r") as f_open:
for count, line in enumerate(f_open):
if count < total_nums:
all_data.append(line)
else:
break
for x in range(target_nums):
        random_index = randint(0, total_nums - 1)  # randint is inclusive; avoid IndexError
if all_data[random_index] not in target_data:
target_data.append(all_data[random_index])
if len(target_data) == target_nums:
break
return all_data, target_data
def load_dict_data(total_nums, target_nums):
"""
    Read data from the file and return it as a dict
    :param total_nums: number of records to read
    :param target_nums: number of records to look up
"""
all_data = {}
target_data = []
file_name = "fbobject_idnew.txt"
with open(file_name, encoding="utf8", mode="r") as f_open:
for count, line in enumerate(f_open):
if count < total_nums:
all_data[line] = 0
else:
break
all_data_list = list(all_data)
for x in range(target_nums):
random_index = randint(0, total_nums-1)
if all_data_list[random_index] not in target_data:
target_data.append(all_data_list[random_index])
if len(target_data) == target_nums:
break
return all_data, target_data
def find_test(all_data, target_data):
    # measure the average lookup time over test_times runs
test_times = 100
total_times = 0
import time
for i in range(test_times):
find = 0
start_time = time.time()
for data in target_data:
if data in all_data:
find += 1
last_time = time.time() - start_time
total_times += last_time
return total_times/test_times
if __name__ == "__main__":
# all_data, target_data = load_list_data(10000, 1000)
# all_data, target_data = load_list_data(100000, 1000)
# all_data, target_data = load_list_data(1000000, 1000)
# all_data, target_data = load_dict_data(10000, 1000)
# all_data, target_data = load_dict_data(100000, 1000)
# all_data, target_data = load_dict_data(1000000, 1000)
all_data, target_data = load_dict_data(2000000, 1000)
last_time = find_test(all_data, target_data)
    #dict lookup is far faster than list lookup
    #with a list, lookup time grows as the list grows
    #with a dict, lookup time does not grow with the size of the dict
print(last_time)
# 1. dict keys and set values must be hashable
#    immutable objects are hashable: str, frozenset, tuple, and custom classes that implement __hash__
# 2. a dict trades memory for speed; custom objects and many Python internals are backed by dicts
# 3. the storage order of a dict is related to the insertion order of its elements
# 4. adding data may change the order of existing elements
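
# A minimal sketch (added; not part of the original module) illustrating
# note 1 above -- only hashable objects may be used as dict keys or set values:
def hashability_demo():
    d = {}
    d[(1, 2)] = "tuples are hashable"
    d[frozenset([1, 2])] = "frozensets are hashable"
    try:
        d[[1, 2]] = "lists are mutable, hence unhashable"
    except TypeError as err:
        print("rejected unhashable key:", err)
    return d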
# ===== File: customerapp/customerapp/wsgi.py (repo: sujabshrestha/customerapp) =====
"""
WSGI config for customerapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'customerapp.settings')
application = get_wsgi_application()
# ===== File: lab1/task1.py (repo: 4jeR/wfiis-python-in-the-enterprise) =====
# Write a module that will simulate an autonomous car.
# The simulation is event based, an example:
# car1 = Car()
# car1.act(event)
# print(car1.wheel_angle, car1.speed)
# where event can be anything you want, i.e. :
# `('obstacle', 10)` where `10` is a duration (time) of the event.
##The program should:
# - act on the event
# - print out current steering wheel angle, and speed
# - run in infinite loop
# - until user breaks the loop
#The level of realism in simulation is of your choice, but more sophisticated solutions are better.
#If you can think of any other features, you can add them.
#Make intelligent use of Python's syntactic sugar (overloading, iterators, generators, etc)
#Most of all: CREATE GOOD, RELIABLE, READABLE CODE.
#The goal of this task is for you to SHOW YOUR BEST python programming skills.
#Impress everyone with your skills, show off with your code.
#
#Your program must be runnable with command "python task.py".
#Show some usecases of your library in the code (print some things)
#
#When you are done upload this code to github repository.
#
#Delete these comments before commit!
#Good luck.
import time
from random import randint
class Engine:
def __init__(self, acc_rate=5):
self.rate = acc_rate
class Brakes:
def __init__(self, dec_rate=5):
self.rate = dec_rate
class Car:
def __init__(self, engine, brakes, wheel_angle=0, speed=0):
self.engine = engine
self.brakes = brakes
        self.wheel_angle = wheel_angle
        self.speed = speed
def __str__(self):
return f'Car({self.wheel_angle} {self.speed})'
def accelerate(self):
print(f'Car is accelerating by {self.engine.rate}')
if self.speed < 70:
self.speed += self.engine.rate
self.wheel_angle += self.speed * 0.01
def decelerate(self):
self.speed -= self.brakes.rate
self.wheel_angle -= self.speed * 0.01
if self.speed < 0:
self.speed = 0
if self.wheel_angle < 0:
self.wheel_angle = 0
def act(self, event):
global total_time
print(f'Car has slowed, because of {event.name}')
for i in range(event.duration):
self.decelerate()
if self.wheel_angle < 0:
self.wheel_angle = 0
total_time += 1
class Event:
def __init__(self, name='Obstacle', duration=3, odds=10):
self.name = name
self.duration = duration
self.odds = odds
def __str__(self):
return f'An event occured: {self.name}, for {self.duration}'
def __repr__(self):
return f'An event occured: {self.name}, for {self.duration}'
if __name__=='__main__':
total_time = 0
engine = Engine(4)
brakes = Brakes(15)
car = Car(engine=engine, brakes=brakes)
while True:
total_time += 1
car.accelerate()
print(f'Car: {car.speed} km/h, {car.wheel_angle} rate')
print(f'[TIME {total_time}]')
event_odds = randint(0, 100)
if event_odds < 5:
evt = Event('Obstacle', 3)
car.act(evt)
time.sleep(1)
# ===== File: /ERROR:print odd num.py (repo: neelakandan1905/python) =====
n = input()
l=list(n)
c=0
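# (comment added) print every odd digit of n; c counts the even digits,
# and -1 is printed when all digits are even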
for i in l:
if int(i)%2!=0:
print(i,end=" ")
elif int(i)%2==0:
c=c+1
if len(l)==c:
print(-1)
# ===== File: roles/blueprint/templates/PushBlueprint.py.j2 (repo: borgified/ansible-hadoop) =====
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import requests
AMBARI_SERVER = "{{ hostvars[groups['ambari_server'][0]]['ansible_fqdn'] }}"
def main():
# Read blueprint file
f = open('/root/ambari-install/multi-nodes-hdp.json','r')
blueprint_data = f.read()
f.close()
# Push blueprint
ambari_url = "http://" + AMBARI_SERVER + ":8080/api/v1/blueprints/multi-nodes-hdp"
r = requests.post(ambari_url, blueprint_data, auth=("admin","admin"), headers={"X-Requested-By":"ambari-script"})
if r.status_code != 201:
print "return code is %s" % r.status_code
print r.text
# sys.exit(1)
    # Read host mapping file
f = open('/root/ambari-install/hosts-map.json','r')
maps_data = f.read()
f.close()
    # Create the cluster from the host mapping
ambari_url = "http://" + AMBARI_SERVER + ":8080/api/v1/clusters/hortonworks"
print ambari_url
r = requests.post(ambari_url, maps_data, auth=("admin","admin"), headers={"X-Requested-By":"ambari-script"})
if r.status_code != 202:
print "return code is %s" % r.status_code
print r.text
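    # Note (comment added): the GET below is a quick check that the clusters
    # endpoint responds; its response object is not used further.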
ambari_url = "http://" + AMBARI_SERVER + ":8080/api/v1/clusters"
r = requests.get(ambari_url, auth=("admin","admin"))
if __name__ == "__main__":
main()
# ===== File: counting_quad_sorts.py (repo: bzia/Complexity-Visualizer) =====
"""
This module contains four sorting algorithms which
can be used in other modules to generate empirical data.
"""
def bubbleSort(items):
"""
Orders the items from lowest to highest value
Parameters: items - list to be sorted
Returns: count - number of times the loop has been iterated
"""
swapped = True
count = 0
while swapped:
swapped = False
for i in range(1,len(items)):
if items[i-1] > items[i]:
items[i-1], items[i] = items[i], items[i-1] # Swap values
swapped = True
count += 1
return count
def optimizedBubbleSort(items):
"""
Optimized version of classic bubble sort which
orders the items from lowest to highest value
Parameters: items - list to be sorted
Returns: count - number of times the loop has been iterated
"""
count = 0
n = len(items)
swapped = True
while swapped:
count += 1
swapped = False
for i in range(1, n):
count += 1
if items[i-1] > items[i]:
items[i-1], items[i] = items[i], items[i-1]
swapped = True
n -= 1
return count
def selectionSort(items):
"""
Orders the items from lowest to highest value
Parameters: items - list to be sorted
Returns: count - number of times the loop has been iterated
"""
count = 0 # for analysis only
n = len(items)
for i in range(n-1):
count += 1
min = i
for j in range(i + 1,n):
if (items[j] < items[min]):
min = j
count += 1
if (min != i):
items[i], items[min] = items[min], items[i] # Swap Values
return count
def insertionSort(items):
"""
Orders the items from lowest to highest value
Parameters: items - list to be sorted
Returns: count - number of times the loop has been iterated
"""
count = 0
# Traverse from 1 to the length of the list.
for i in range(1, len(items)):
count += 1
key = items[i]
# Move elements of arr[0..i-1], that are
# greater than key, to one position ahead
# of their current position
j = i-1
while j >= 0 and key < items[j]:
count += 1
items[j + 1] = items[j]
j -= 1
items[j + 1] = key
return count
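# A hedged sketch (added; not in the original module) showing how these
# counters might generate the empirical data the docstring mentions; the
# sizes and the use of shuffled inputs are illustrative assumptions.
def empiricalCounts(sortFunction, sizes=(100, 200, 400, 800)):
    """Return a dict mapping input size n to the loop-iteration count."""
    import random
    results = {}
    for n in sizes:
        data = list(range(n))
        random.shuffle(data)
        results[n] = sortFunction(data)
    return results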
if __name__ == '__main__':
print(bubbleSort([9,8,7,6,5,4,3,2,1]))
print(optimizedBubbleSort([9,8,7,6,5,4,3,2,1]))
print(selectionSort([9,8,7,6,5,4,3,2,1]))
print(insertionSort([9,8,7,6,5,4,3,2,1]))
# The above code prints the loop iterations ('count')
# for each of the 4 sorting algorithms.
# ===== File: feature graphs.py (repo: Hudki/Noise-Pollution-Classification-) =====
import librosa
import matplotlib.pyplot as plt
import numpy as np
import librosa.display
y, sr = librosa.load('output.wav')
S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
                                   fmax=8000)
V = np.abs(librosa.stft(y))
a = librosa.effects.harmonic(y)
tonnetz = librosa.feature.tonnetz(y=a, sr=sr)
contrast = librosa.feature.spectral_contrast(S=V, sr=sr)
mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
chroma = librosa.feature.chroma_stft(S=V, sr=sr)
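# Note (comment added): each feature above is a 2-D numpy array of shape
# (n_features, n_frames); with these settings mfccs is (40, t), chroma is
# (12, t), tonnetz is (6, t) and contrast is (7, t).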
"""Mel-Frequency Spectrogram"""
plt.figure(figsize=(10, 4))
S_dB = librosa.power_to_db(S, ref=np.max)
librosa.display.specshow(S_dB, x_axis='time', y_axis='mel', sr=sr, fmax=8000)
plt.colorbar(format='%+2.0f dB')
plt.title('Mel-frequency spectrogram')
plt.tight_layout()
"""Mel-Frequency Cepstral Coefficients"""
plt.figure(figsize=(10, 4))
librosa.display.specshow(mfccs, x_axis='time')
plt.colorbar()
plt.title('MFCC')
plt.tight_layout()
"""Spectral Contract"""
plt.figure()
plt.subplot(2, 1, 1)
librosa.display.specshow(librosa.amplitude_to_db(V,ref=np.max), y_axis='log')
plt.colorbar(format='%+2.0f dB')
plt.title('Power spectrogram')
plt.subplot(2, 1, 2)
librosa.display.specshow(contrast, x_axis='time')
plt.colorbar()
plt.ylabel('Frequency bands')
plt.title('Spectral contrast')
plt.tight_layout()
"""Tonnetz"""
plt.figure()
plt.subplot(2, 1, 1)
librosa.display.specshow(tonnetz, y_axis='tonnetz')
plt.colorbar()
plt.title('Tonal Centroids (Tonnetz)')
plt.subplot(2, 1, 2)
librosa.display.specshow(librosa.feature.chroma_cqt(a, sr=sr), y_axis='chroma', x_axis='time')
plt.colorbar()
plt.title('Chroma')
plt.tight_layout()
"""Chromagram"""
plt.figure(figsize=(10, 4))
librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
plt.colorbar()
plt.title('Chromagram')
plt.tight_layout()
plt.show()
# ===== File: src/map_conf_cities.py (repo: kientz/If-you-host-it-will-they-come) =====
import folium
import pandas as pd
import numpy as np
from set_region_and_super_region import define_regions
# float_formatter = lambda x: "%.1f" % x
# np.set_printoptions(formatter={'float_kind':float_formatter})
superregions2, superregions, regions, regions2, state_to_code, code_to_state = define_regions()
srg=pd.read_csv('../data/superregions.csv') #Get Lat Long of Region Centers
rg=pd.read_csv('../data/regions.csv')
#Region Lat Long
latrg = list(rg["LAT"])
lonrg = list(rg["LON"])
rgid = list(rg["REGIONS"])
#Super Region Lat Long and names
latsrg = list(srg["LAT"])
lonsrg = list(srg["LON"])
srgid = list(srg["SUPER_REGIONS"])
df=pd.read_excel('../data/conference_lat_long_and_counts.xlsx')
colnames=list(df.columns)
#L_CityState_distance_cost_count.csv backup data
map = folium.Map(location=[38.58, -99.09], zoom_start=5, tiles="Mapbox Bright")
#one at a time
#map.add_child(folium.Marker(location=[38.2,-99.1]),popup="MARKER HERE",icon=folium.Icon(color='red'))
#Have map show base of super regions colored differently
if False:
fgSR = folium.FeatureGroup(name="SuperRegions")
#fgSR.add_child(folium.GeoJson(data=open('gz_2010_us_040_00_500k.json', 'r', encoding='utf-8-sig'),
#style_function = lambda feature: dict(color='red', weight=0.2, opacity=0.6))) #South
fgSR.add_child(folium.GeoJson(data=open('gz_2010_us_040_00_500k.json', 'r', encoding='utf-8-sig'),
style_function=lambda x: {'fillColor':
'cyan' if x['properties']['NAME'] in superregions2['Northeast']
else 'orange' if x['properties']['NAME'] in superregions2['Midwest']
else 'red' if x['properties']['NAME'] in superregions2['West']
else 'm','weight':0.1})) #South
# #fgsrc = folium.FeatureGroup(name="SuperRegionCenters") #feature group
# for lt, ln, id in zip(latsrg, lonsrg,srgid):
# fgSR.add_child(folium.CircleMarker(location=[lt, ln], radius = 6, popup=str(id),
# fill_color='red', color = 'grey', fill_opacity=0.9))
if True:
fgR = folium.FeatureGroup(name="Regions")
fgR.add_child(folium.GeoJson(data=open('gz_2010_us_040_00_500k.json', 'r', encoding='utf-8-sig'),
style_function=lambda x: {'fillColor':
'#ffff00' if x['properties']['NAME'] in regions2['Great Lakes']
else '#7fffd4' if x['properties']['NAME'] in regions2['Central Midwest']
else '#b03060' if x['properties']['NAME'] in regions2['Mid-Atlantic']
else '#cd853f' if x['properties']['NAME'] in regions2['Mountain States']
else '#b22222' if x['properties']['NAME'] in regions2['Northwest']
else '#ff0000' if x['properties']['NAME'] in regions2['South-Central']
else '#00ff00' if x['properties']['NAME'] in regions2['Southeast']
else '#0000ff' if x['properties']['NAME'] in regions2['Northeast']
else '#a020f0','weight':0.1})) #Southwest
# #Feature Group
# #fgrc = folium.FeatureGroup(name="RegionCenters") #or use fg
# ##fgv.add_child(folium.Marker(location=[38.2,-99.1]),popup="MARKER HERE",icon=folium.Icon(color='red'))
# for lt, ln, id in zip(latrg, lonrg,rgid):
# fgR.add_child(folium.CircleMarker(location=[lt, ln], radius = 6, popup=str(id),
# fill_color='red', color = 'grey', fill_opacity=0.9))
from folium.features import DivIcon
#m = folium.Map([45.0302, -105.22], zoom_start=13)
folium.map.Marker(
[52.0, -100.0],
icon=DivIcon(
icon_size=(500,36),
icon_anchor=(0,0),
html='<div style="font-size: 24pt">Conference Cities</div>',
)
).add_to(map)
''' cols in conference_lat_long_and_counts.xlsx
['L_CityState',
'L_City',
'L_Zip',
'L_lat',
'L_lon',
'L_State',
'L_Region',
'L_Super_Region',
'Total Conferences',
'Academic Administration',
'Advancement',
'Enrollment Management',
'Leadership',
'Physical Campus',
'Planning & Finance',
'Student Affairs',
'Teaching & Learning']
'''
num_cities=len(df)
#do not include domains with 0 counts.
fgCon = folium.FeatureGroup(name="Conferences")
for i in range(num_cities):
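    # Note (comment added): the "[textA, textB][condition]" indexing below is a
    # ternary trick -- when the count is 0 the condition is True (index 1) and
    # the empty string is chosen, so zero-count domains are left out of the popup.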
html = '<i>' + 'LOCATION: ' + df['L_CityState'][i] + '<br>'+\
'Total Num Conferences: ' + df['Total Conferences'][i].astype(str) + '<br>' +\
'Total Conference Attendees: ' + df['Total Attendance'][i].astype(str) + '<br>' +\
'Avg Conference Attendace: ' + str(round(float(df['Overall City Avg Attendance'][i]),1)) + '<br><br>' +\
['Academic Administration: ' + df['Academic Administration'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Academic Administration'][i]),1))+'<br>',''][df['Academic Administration'][i]==0] +\
['Advancement: ' + df['Advancement'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Advancement'][i]),1))+ '<br>',''][df['Advancement'][i]==0] +\
['Enrollment Management: ' + df['Enrollment Management'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Enrollment Management'][i]),1))+ '<br>',''][df['Enrollment Management'][i]==0] +\
['Leadership: ' + df['Leadership'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Leadership'][i]),1))+ '<br>',''][df['Leadership'][i]==0] +\
['Physical Campus: ' + df['Physical Campus'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Physical Campus'][i]),1))+ '<br>',''][df['Physical Campus'][i]==0]+\
['Planning & Finance: ' + df['Planning & Finance'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Planning & Finance'][i]),1))+ '<br>',''][df['Planning & Finance'][i]==0] +\
['Student Affairs: ' + df['Student Affairs'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Student Affairs'][i]),1))+ '<br>',''][df['Student Affairs'][i]==0] +\
['Teaching & Learning: ' + df['Teaching & Learning'][i].astype(str) + ' ' + str(round(float(df['Avg Atten Teaching & Learning'][i]),1)) + '</i>','</i>'][df['Teaching & Learning'][i]==0]
iframe = folium.IFrame(html=html, width=360, height=270)
popup = folium.Popup(iframe, max_width=2650)
#folium.CircleMarker(location=[df['L_lat'][i],df['L_lon'][i]],color='red',fill_color='red',radius=6,popup=popup).add_to(map)
if df['Total Conferences'][i]<8:
rad=6
else:
rad = df['Total Conferences'][i]
fgCon.add_child(folium.CircleMarker(location=[df['L_lat'][i],df['L_lon'][i]],color='red',fill_color='red',radius=rad,popup=popup))
# for lt, ln, id in zip(latrg, lonrg,rgid):
# fgR.add_child(folium.CircleMarker(location=[lt, ln], radius = 6, popup=str(id),
# fill_color='red', color = 'grey', fill_opacity=0.9))
# crime['text'] = '<i>' + 'Murder: ' + crime['Murder'].astype(str) + '<br>' +\
# 'Rape: ' + crime['Rape'].astype(str) + '<br>' +\
# 'Aggr. Aslt.: ' + crime['Aggravated Assault'].astype(str) + '</i>'
# df['L_CityState'][i] +
# str(df['Total Conferences'][i])+
# str(df['Academic Administration'][i])+
# str(df['Advancement'][i])+
# str(df['Enrollment Management'][i])+
# str(df['Leadership'][i])+
# str(df['Physical Campus'][i])+
# str(df['Planning & Finance'][i])+
# str(df['Student Affairs'][i])+
# str(df['Teaching & Learning'][i])
#
# df['L_CityState'][i] +str(df['Total Conferences'][i])+str(df['Academic Administration'][i])+str(df['Advancement'][i])+str(df['Enrollment Management'][i])+str(df['Leadership'][i])+str(df['Physical Campus'][i])+str(df['Planning & Finance'][i])+str(df['Student Affairs'][i])+str(df['Teaching & Learning'][i])
#map.add_child(fgrc)
#map.add_child(fgsrc)
#map.add_child(fgSR)
map.add_child(fgR)
map.add_child(fgCon)
map.add_child(folium.LayerControl())
map.save("Map_of_Conference_Cities.html")
# #Markers
# map_2 = folium.Map(location=[45.5236, -122.6750], tiles='Stamen Toner', zoom_start=13)
# folium.CircleMarker(location=[45.5215, -122.6261], radius=500,
# popup='Laurelhurst Park', color='#3186cc',
# fill_color='#3186cc').add_to(map_2)
#
# folium.Marker(location=[45.5244, -122.6699], popup='The Waterfront').add_to(map_2)
# map_2.save("MarkersExamp.html")
# ===== File: /Lib/site-packages/redis/connection.py (repo: LuisACC/test2) =====
from __future__ import unicode_literals
from distutils.version import StrictVersion
from itertools import chain
from time import time
import errno
import io
import os
import socket
import threading
import warnings
from redis._compat import (xrange, imap, unicode, long,
nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
recv, recv_into, unquote, BlockingIOError,
sendall, shutdown, ssl_wrap_socket)
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
ExecAbortError,
InvalidResponse,
NoPermissionError,
NoScriptError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
)
from redis.utils import HIREDIS_AVAILABLE
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
BlockingIOError: errno.EWOULDBLOCK,
}
if ssl_available:
if hasattr(ssl, 'SSLWantReadError'):
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2
else:
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2
# In Python 2.7 a socket.error is raised for a nonblocking read.
# The _compat module aliases BlockingIOError to socket.error to be
# Python 2/3 compatible.
# However this means that all socket.error exceptions need to be handled
# properly within these exception handlers.
# We need to make sure socket.error is included in these handlers and
# provide a dummy error number that will never match a real exception.
if socket.error not in NONBLOCKING_EXCEPTION_ERROR_NUMBERS:
NONBLOCKING_EXCEPTION_ERROR_NUMBERS[socket.error] = -999999
NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
if HIREDIS_AVAILABLE:
import hiredis
hiredis_version = StrictVersion(hiredis.__version__)
HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
hiredis_version >= StrictVersion('0.1.3')
HIREDIS_SUPPORTS_BYTE_BUFFER = \
hiredis_version >= StrictVersion('0.1.4')
HIREDIS_SUPPORTS_ENCODING_ERRORS = \
hiredis_version >= StrictVersion('1.0.0')
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
"hiredis %s. Please consider upgrading." % hiredis.__version__)
warnings.warn(msg)
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
HIREDIS_USE_BYTE_BUFFER = False
SYM_STAR = b'*'
SYM_DOLLAR = b'$'
SYM_CRLF = b'\r\n'
SYM_EMPTY = b''
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
SENTINEL = object()
class Encoder(object):
"Encode strings to bytes-like and decode bytes-like to strings"
def __init__(self, encoding, encoding_errors, decode_responses):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value):
"Return a bytestring or bytes-like representation of the value"
if isinstance(value, (bytes, memoryview)):
return value
elif isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError("Invalid input of type: 'bool'. Convert to a "
"bytes, string, int or float first.")
elif isinstance(value, float):
value = repr(value).encode()
elif isinstance(value, (int, long)):
# python 2 repr() on longs is '123L', so use str() instead
value = str(value).encode()
elif not isinstance(value, basestring):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
raise DataError("Invalid input of type: '%s'. Convert to a "
"bytes, string, int or float first." % typename)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value, force=False):
"Return a unicode string from the bytes-like representation"
if self.decode_responses or force:
if isinstance(value, memoryview):
value = value.tobytes()
if isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
return value
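    # Illustrative examples (comments added):
    #   Encoder('utf-8', 'strict', False).encode(1.5) returns b'1.5'
    #   Encoder('utf-8', 'strict', True).decode(b'OK') returns 'OK'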
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': {
'max number of clients reached': ConnectionError,
'Client sent AUTH, but no password is set': AuthenticationError,
'invalid password': AuthenticationError,
# some Redis server versions report invalid command syntax
# in lowercase
'wrong number of arguments for \'auth\' command':
AuthenticationWrongNumberOfArgsError,
# some Redis server versions report invalid command syntax
# in uppercase
'wrong number of arguments for \'AUTH\' command':
AuthenticationWrongNumberOfArgsError,
},
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError,
'NOAUTH': AuthenticationError,
'NOPERM': NoPermissionError,
}
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
exception_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(exception_class, dict):
exception_class = exception_class.get(response, ResponseError)
return exception_class(response)
return ResponseError(response)
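    # Example (comment added): parse_error('NOSCRIPT No matching script.')
    # returns NoScriptError('No matching script.').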
class SocketBuffer(object):
def __init__(self, socket, socket_read_size, socket_timeout):
self._sock = socket
self.socket_read_size = socket_read_size
self.socket_timeout = socket_timeout
self._buffer = io.BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None, timeout=SENTINEL,
raise_on_timeout=True):
sock = self._sock
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
custom_timeout = timeout is not SENTINEL
try:
if custom_timeout:
sock.settimeout(timeout)
while True:
data = recv(self._sock, socket_read_size)
# an empty string indicates the server shutdown the socket
if isinstance(data, bytes) and len(data) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
return True
except socket.timeout:
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError("Error while reading from socket: %s" %
(ex.args,))
finally:
if custom_timeout:
sock.settimeout(self.socket_timeout)
def can_read(self, timeout):
return bool(self.length) or \
self._read_from_socket(timeout=timeout,
raise_on_timeout=False)
def read(self, length):
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except Exception:
# issue #633 suggests the purge/close somehow raised a
# BadFileDescriptor error. Perhaps the client ran out of
# memory or something else? It's probably OK to ignore
# any error being raised from purge/close since we're
# removing the reference to the instance below.
pass
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
"Plain Python parsing class"
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self.encoder = None
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock,
self.socket_read_size,
connection.socket_timeout)
self.encoder = connection.encoder
def on_disconnect(self):
"Called when the socket disconnects"
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoder = None
def can_read(self, timeout):
return self._buffer and self._buffer.can_read(timeout)
def read_response(self):
raw = self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = raw[:1], raw[1:]
if byte not in (b'-', b'+', b':', b'$', b'*'):
raise InvalidResponse("Protocol Error: %r" % raw)
# server returned an error
if byte == b'-':
response = nativestr(response)
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b'+':
pass
# int value
elif byte == b':':
response = long(response)
# bulk response
elif byte == b'$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
# multi-bulk response
elif byte == b'*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for i in xrange(length)]
if isinstance(response, bytes):
response = self.encoder.decode(response)
return response
class HiredisParser(BaseParser):
"Parser class for connections using Hiredis"
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
self.socket_read_size = socket_read_size
if HIREDIS_USE_BYTE_BUFFER:
self._buffer = bytearray(socket_read_size)
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
self._socket_timeout = connection.socket_timeout
kwargs = {
'protocolError': InvalidResponse,
'replyError': self.parse_error,
}
# hiredis < 0.1.3 doesn't support functions that create exceptions
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
kwargs['replyError'] = ResponseError
if connection.encoder.decode_responses:
kwargs['encoding'] = connection.encoder.encoding
if HIREDIS_SUPPORTS_ENCODING_ERRORS:
kwargs['errors'] = connection.encoder.encoding_errors
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
def on_disconnect(self):
self._sock = None
self._reader = None
self._next_response = False
def can_read(self, timeout):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
if self._next_response is False:
return self.read_from_socket(timeout=timeout,
raise_on_timeout=False)
return True
def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
sock = self._sock
custom_timeout = timeout is not SENTINEL
try:
if custom_timeout:
sock.settimeout(timeout)
if HIREDIS_USE_BYTE_BUFFER:
bufflen = recv_into(self._sock, self._buffer)
if bufflen == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
self._reader.feed(self._buffer, 0, bufflen)
else:
buffer = recv(self._sock, self.socket_read_size)
# an empty string indicates the server shutdown the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
self._reader.feed(buffer)
# data was read from the socket and added to the buffer.
# return True to indicate that data was read.
return True
except socket.timeout:
if raise_on_timeout:
raise TimeoutError("Timeout reading from socket")
return False
except NONBLOCKING_EXCEPTIONS as ex:
# if we're in nonblocking mode and the recv raises a
# blocking error, simply return False indicating that
# there's no data to be read. otherwise raise the
# original exception.
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
raise ConnectionError("Error while reading from socket: %s" %
(ex.args,))
finally:
if custom_timeout:
sock.settimeout(self._socket_timeout)
def read_response(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
while response is False:
self.read_from_socket()
response = self._reader.gets()
# if an older version of hiredis is installed, we need to attempt
# to convert ResponseErrors to their appropriate types.
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
elif isinstance(response, list) and response and \
isinstance(response[0], ResponseError):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif isinstance(response, list) and response and \
isinstance(response[0], ConnectionError):
raise response[0]
return response
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
def __init__(self, host='localhost', port=5000, db=0, password=None,
socket_timeout=None, socket_connect_timeout=None,
socket_keepalive=False, socket_keepalive_options=None,
socket_type=0, retry_on_timeout=False, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536,
health_check_interval=0, client_name=None, username=None):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.socket_type = socket_type
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._connect_callbacks = []
self._buffer_cutoff = 6000
def __repr__(self):
repr_args = ','.join(['%s=%s' % (k, v) for k, v in self.repr_pieces()])
return '%s<%s>' % (self.__class__.__name__, repr_args)
def repr_pieces(self):
pieces = [
('host', self.host),
('port', self.port),
('db', self.db)
]
if self.client_name:
pieces.append(('client_name', self.client_name))
return pieces
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
except socket.error as e:
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, self.socket_type,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.IPPROTO_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if username and/or password are set, authenticate
if self.username or self.password:
if self.username:
auth_args = (self.username, self.password or '')
else:
auth_args = (self.password,)
# avoid checking health here -- PING will fail if we try
# to check the health prior to the AUTH
self.send_command('AUTH', *auth_args, check_health=False)
try:
auth_response = self.read_response()
except AuthenticationWrongNumberOfArgsError:
# a username and password were specified but the Redis
# server seems to be < 6.0.0 which expects a single password
# arg. retry auth with just the password.
# https://github.com/andymccurdy/redis-py/issues/1274
self.send_command('AUTH', self.password, check_health=False)
auth_response = self.read_response()
if nativestr(auth_response) != 'OK':
raise AuthenticationError('Invalid Username or Password')
# if a client_name is given, set it
if self.client_name:
self.send_command('CLIENT', 'SETNAME', self.client_name)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Error setting client name')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
if os.getpid() == self.pid:
shutdown(self._sock, socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def check_health(self):
"Check the health of the connection with a PING/PONG"
if self.health_check_interval and time() > self.next_health_check:
try:
self.send_command('PING', check_health=False)
if nativestr(self.read_response()) != 'PONG':
raise ConnectionError(
'Bad response from PING health check')
except (ConnectionError, TimeoutError):
self.disconnect()
self.send_command('PING', check_health=False)
if nativestr(self.read_response()) != 'PONG':
raise ConnectionError(
'Bad response from PING health check')
def send_packed_command(self, command, check_health=True):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
# guard against health check recursion
if check_health:
self.check_health()
try:
if isinstance(command, str):
command = [command]
for item in command:
sendall(self._sock, item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error as e:
self.disconnect()
if len(e.args) == 1:
errno, errmsg = 'UNKNOWN', e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
raise ConnectionError("Error %s while writing to socket. %s." %
(errno, errmsg))
except BaseException:
self.disconnect()
raise
def send_command(self, *args, **kwargs):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args),
check_health=kwargs.get('check_health', True))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read(timeout)
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout reading from %s:%s" %
(self.host, self.port))
except socket.error as e:
self.disconnect()
raise ConnectionError("Error while reading from %s:%s : %s" %
(self.host, self.port, e.args))
except BaseException:
self.disconnect()
raise
if self.health_check_interval:
self.next_health_check = time() + self.health_check_interval
if isinstance(response, ResponseError):
raise response
return response
def pack_command(self, *args):
"Pack a series of arguments into the Redis protocol"
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. These arguments should be bytestrings so that they are
# not encoded.
if isinstance(args[0], unicode):
args = tuple(args[0].encode().split()) + args[1:]
elif b' ' in args[0]:
args = tuple(args[0].split()) + args[1:]
buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
buffer_cutoff = self._buffer_cutoff
for arg in imap(self.encoder.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values or memoryviews
arg_length = len(arg)
if (len(buff) > buffer_cutoff or arg_length > buffer_cutoff
or isinstance(arg, memoryview)):
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF))
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, str(arg_length).encode(),
SYM_CRLF, arg, SYM_CRLF))
output.append(buff)
return output
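    # Illustrative example (comment added): pack_command('SET', 'foo', 'bar')
    # returns chunks that join to the RESP bytes
    # b'*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'.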
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
buffer_cutoff = self._buffer_cutoff
for cmd in commands:
for chunk in self.pack_command(*cmd):
chunklen = len(chunk)
if (buffer_length > buffer_cutoff or chunklen > buffer_cutoff
or isinstance(chunk, memoryview)):
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if chunklen > buffer_cutoff or isinstance(chunk, memoryview):
output.append(chunk)
else:
pieces.append(chunk)
buffer_length += chunklen
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
def __init__(self, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs='required', ssl_ca_certs=None,
ssl_check_hostname=False, **kwargs):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
super(SSLConnection, self).__init__(**kwargs)
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
if ssl_cert_reqs is None:
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, basestring):
CERT_REQS = {
'none': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" %
ssl_cert_reqs)
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
self.check_hostname = ssl_check_hostname
def _connect(self):
"Wrap the socket with SSL support"
sock = super(SSLConnection, self)._connect()
if hasattr(ssl, "create_default_context"):
context = ssl.create_default_context()
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.certfile and self.keyfile:
context.load_cert_chain(certfile=self.certfile,
keyfile=self.keyfile)
if self.ca_certs:
context.load_verify_locations(self.ca_certs)
sock = ssl_wrap_socket(context, sock, server_hostname=self.host)
else:
# In case this code runs in a version which is older than 2.7.9,
# we want to fall back to old code
sock = ssl_wrap_socket(ssl,
sock,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
return sock
class UnixDomainSocketConnection(Connection):
def __init__(self, path='', db=0, username=None, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
retry_on_timeout=False,
parser_class=DefaultParser, socket_read_size=65536,
health_check_interval=0, client_name=None):
self.pid = os.getpid()
self.path = path
self.db = db
self.username = username
self.client_name = client_name
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._connect_callbacks = []
self._buffer_cutoff = 6000
def repr_pieces(self):
pieces = [
('path', self.path),
('db', self.db),
]
if self.client_name:
pieces.append(('client_name', self.client_name))
return pieces
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
def to_bool(value):
if value is None or value == '':
return None
if isinstance(value, basestring) and value.upper() in FALSE_STRINGS:
return False
return bool(value)
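# Examples (comments added): to_bool('0') -> False, to_bool('yes') -> True,
# to_bool('') -> None.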
URL_QUERY_ARGUMENT_PARSERS = {
'socket_timeout': float,
'socket_connect_timeout': float,
'socket_keepalive': to_bool,
'retry_on_timeout': to_bool,
'max_connections': int,
'health_check_interval': int,
'ssl_check_hostname': to_bool,
}
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[[username]:[password]]@localhost:5000/0
rediss://[[username]:[password]]@localhost:5000/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- ```redis://``
<https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
normal TCP socket connection
- ```rediss://``
<https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
a SSL wrapped TCP socket connection
- ``unix://`` creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, ``username`` and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. The querystring
arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
are parsed as float values. The arguments ``socket_keepalive`` and
``retry_on_timeout`` are parsed to boolean values that accept
True/False, Yes/No values to indicate state. Invalid types cause a
``UserWarning`` to be raised. In the case of conflicting arguments,
querystring arguments always win.
"""
url = urlparse(url)
url_options = {}
for name, value in iteritems(parse_qs(url.query)):
if value and len(value) > 0:
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
url_options[name] = parser(value[0])
except (TypeError, ValueError):
warnings.warn(UserWarning(
"Invalid value for `%s` in connection URL." % name
))
else:
url_options[name] = value[0]
if decode_components:
username = unquote(url.username) if url.username else None
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
username = url.username or None
password = url.password or None
path = url.path
hostname = url.hostname
# We only support redis://, rediss:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'username': username,
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
elif url.scheme in ('redis', 'rediss'):
url_options.update({
'host': hostname,
'port': int(url.port or 5000),
'username': username,
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
else:
valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
raise ValueError('Redis URL must specify one of the following '
'schemes (%s)' % valid_schemes)
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
        # backwards compatibility
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
return cls(**kwargs)
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
By default, TCP connections are created unless connection_class is
specified. Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
# a lock to protect the critical section in _checkpid().
# this lock is acquired when the process id changes, such as
# after a fork. during this time, multiple threads in the child
# process could attempt to acquire this lock. the first thread
# to acquire the lock will reset the data structures and lock
# object of this pool. subsequent threads acquiring this lock
# will notice the first thread already did the work and simply
# release the lock.
self._fork_lock = threading.Lock()
self.reset()
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
repr(self.connection_class(**self.connection_kwargs)),
)
def reset(self):
self._lock = threading.Lock()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def _checkpid(self):
# _checkpid() attempts to keep ConnectionPool fork-safe on modern
# systems. this is called by all ConnectionPool methods that
# manipulate the pool's state such as get_connection() and release().
#
# _checkpid() determines whether the process has forked by comparing
# the current process id to the process id saved on the ConnectionPool
# instance. if these values are the same, _checkpid() simply returns.
#
# when the process ids differ, _checkpid() assumes that the process
# has forked and that we're now running in the child process. the child
# process cannot use the parent's file descriptors (e.g., sockets).
# therefore, when _checkpid() sees the process id change, it calls
# reset() in order to reinitialize the child's ConnectionPool. this
# will cause the child to make all new connection objects.
#
# _checkpid() is protected by self._fork_lock to ensure that multiple
# threads in the child process do not call reset() multiple times.
#
# there is an extremely small chance this could fail in the following
# scenario:
# 1. process A calls _checkpid() for the first time and acquires
# self._fork_lock.
# 2. while holding self._fork_lock, process A forks (the fork()
# could happen in a different thread owned by process A)
# 3. process B (the forked child process) inherits the
# ConnectionPool's state from the parent. that state includes
# a locked _fork_lock. process B will not be notified when
# process A releases the _fork_lock and will thus never be
# able to acquire the _fork_lock.
#
# to mitigate this possible deadlock, _checkpid() will only wait 5
# seconds to acquire _fork_lock. if _fork_lock cannot be acquired in
# that time it is assumed that the child is deadlocked and a
# redis.ChildDeadlockedError error is raised.
if self.pid != os.getpid():
# python 2.7 doesn't support a timeout option to lock.acquire()
# we have to mimic lock timeouts ourselves.
timeout_at = time() + 5
acquired = False
while time() < timeout_at:
acquired = self._fork_lock.acquire(False)
if acquired:
break
if not acquired:
raise ChildDeadlockedError
# reset() the instance for the new process if another thread
# hasn't already done so
try:
if self.pid != os.getpid():
self.reset()
finally:
self._fork_lock.release()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
with self._lock:
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
try:
# ensure this connection is connected to Redis
connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if connection.can_read():
raise ConnectionError('Connection has data')
except ConnectionError:
connection.disconnect()
connection.connect()
if connection.can_read():
raise ConnectionError('Connection not ready')
except BaseException:
# release the connection back to the pool so that we don't
# leak it
self.release(connection)
raise
return connection
def get_encoder(self):
"Return an encoder based on encoding settings"
kwargs = self.connection_kwargs
return Encoder(
encoding=kwargs.get('encoding', 'utf-8'),
encoding_errors=kwargs.get('encoding_errors', 'strict'),
decode_responses=kwargs.get('decode_responses', False)
)
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
with self._lock:
try:
self._in_use_connections.remove(connection)
except KeyError:
# Gracefully fail when a connection is returned to this pool
# that the pool doesn't actually own
pass
if self.owns_connection(connection):
self._available_connections.append(connection)
else:
# pool doesn't own this connection. do not add it back
# to the pool and decrement the count so that another
# connection can take its place if needed
self._created_connections -= 1
connection.disconnect()
return
def owns_connection(self, connection):
return connection.pid == self.pid
def disconnect(self, inuse_connections=True):
"""
Disconnects connections in the pool
If ``inuse_connections`` is True, disconnect connections that are
current in use, potentially by other threads. Otherwise only disconnect
connections that are idle in the pool.
"""
self._checkpid()
with self._lock:
if inuse_connections:
connections = chain(self._available_connections,
self._in_use_connections)
else:
connections = self._available_connections
for connection in connections:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather than
raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection, queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
def reset(self):
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
# this must be the last operation in this method. while reset() is
# called when holding _fork_lock, other threads in this process
# can call _checkpid() which compares self.pid and os.getpid() without
# holding any lock (for performance reasons). keeping this assignment
# as the last operation ensures that those other threads will also
# notice a pid difference and block waiting for the first thread to
# release _fork_lock. when each of these threads eventually acquire
# _fork_lock, they will notice that another thread already called
# reset() and they will immediately release _fork_lock and continue on.
self.pid = os.getpid()
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code. If you never want
            # to block, use a very small ``timeout`` and handle this error.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
try:
# ensure this connection is connected to Redis
connection.connect()
# connections that the pool provides should be ready to send
# a command. if not, the connection was either returned to the
# pool before all data has been read or the socket has been
# closed. either way, reconnect and verify everything is good.
try:
if connection.can_read():
raise ConnectionError('Connection has data')
except ConnectionError:
connection.disconnect()
connection.connect()
if connection.can_read():
raise ConnectionError('Connection not ready')
except BaseException:
# release the connection back to the pool so that we don't leak it
self.release(connection)
raise
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if not self.owns_connection(connection):
# pool doesn't own this connection. do not add it back
# to the pool. instead add a None value which is a placeholder
# that will cause the pool to recreate the connection if
            # it's needed.
connection.disconnect()
self.pool.put_nowait(None)
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
self._checkpid()
for connection in self._connections:
connection.disconnect()
| [
"[email protected]"
] | |
58bdf0804bc8b7ad982f2eb35cec240b9a395eb9 | cb4be2d145c529192cad597ebf6bba8aed0ec12e | /2014-x64/prefs/00_important/mec_shelf_loader/shelves/00_Trash/noske_marc_riggingtools_cri1_1405.py | f8c8ae94094ed2cfba76c6061d8544926d1870cf | [] | no_license | mclavan/Work-Maya-Folder | 63e791fdbd6f8ac1f4fda2d46015cd98df38825c | c56dbdb85a7b1a87ef6dd35296c56e0057254617 | refs/heads/master | 2020-05-20T07:21:15.891179 | 2014-10-17T14:28:45 | 2014-10-17T14:28:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,440 | py | '''
Lesson - Joint Renamer
noske_marc_riggingtools_cri1_1405.py
Description:
Practical use of loops.
Renaming joints based upon a naming convention.
How to Run:
import noske_marc_riggingtools_cri1_1405
reload(noske_marc_riggingtools_cri1_1405)
'''
print 'Rigging Tools Active'
import pymel.core as pm
def hierarchy():
'''
    Create a hierarchy based upon a given system.
    Select the root joint chain and execute the function.
import noske_marc_rigging_tools
noske_marc_rigging_tools.hierarchy()
'''
'''
Input
What are we working on?
The root joint.
'''
joint_system = pm.ls(selection=True, dag=True)
# print 'Joint System:', joint_system
    root_joint = joint_system[0]
joint_2 = joint_system[1]
joint_3 = joint_system[2]
'''
Padding Root Joint
'''
# Create empty group
root_pad = pm.group(empty=True)
    # Move group over to the target joint.
temp_constraint = pm.pointConstraint(root_joint, root_pad)
pm.delete(temp_constraint)
# Freeze Transforms on group
pm.makeIdentity(root_pad, apply=True, t=1, r=1, s=1, n=0)
# Parent root joint to the group
pm.parent(root_joint, root_pad)
'''
Local Controls
'''
'''
Control 1 - root_joint
'''
# Create a control.
# normal=[1, 0, 0], radius 2
control_icon_1 = pm.circle(normal=[1, 0, 0], radius=2)[0]
# Create a group.
# Grouping control during the process.
local_pad_1 = pm.group()
# Output control and pad.
print 'Control 1 Created:', control_icon_1
print 'Local Pad 1 Created:', local_pad_1
# Move group over to the target joint
    # Delete the constraint after snapping.
# Driver: joint
# Driven: group
temp_constraint = pm.parentConstraint(root_joint, local_pad_1)
pm.delete(temp_constraint)
# Orient Constrain the joint to the control.
# Driver -> Driven.
# Control -> Joint
pm.orientConstraint(control_icon_1, root_joint)
'''
Control 2
'''
# Create a control.
# normal=[1, 0, 0], radius 2
control_icon_2 = pm.circle(normal=[1, 0, 0], radius=2)[0]
# Create a group.
# Grouping control during the process.
local_pad_2 = pm.group()
# Output control and pad.
print 'Control 2 Created:', control_icon_2
print 'Local Pad 2 Created:', local_pad_2
# Move group over to the target joint
    # Delete the constraint after snapping.
# Driver: joint
# Driven: group
temp_constraint = pm.parentConstraint(joint_2, local_pad_2)
pm.delete(temp_constraint)
# Orient Constrain the joint to the control.
# Driver -> Driven.
# Control -> Joint
pm.orientConstraint(control_icon_2, joint_2)
'''
Control 3
'''
# Create a control.
# normal=[1, 0, 0], radius 2
control_icon_3 = pm.circle(normal=[1, 0, 0], radius=2)[0]
# Create a group.
# Grouping control during the process.
local_pad_3 = pm.group()
# Output control and pad.
print 'Control 3 Created:', control_icon_3
print 'Local Pad 3 Created:', local_pad_3
# Move group over to the target joint
    # Delete the constraint after snapping.
# Driver: joint
# Driven: group
temp_constraint = pm.parentConstraint(joint_3, local_pad_3)
pm.delete(temp_constraint)
# Orient Constrain the joint to the control.
# Driver -> Driven.
# Control -> Joint
pm.orientConstraint(control_icon_3, joint_3)
'''
Parent control together.
'''
# Pad 3 (Last) -> control 2
pm.parent(local_pad_3, control_icon_2)
pm.parent(local_pad_2, control_icon_1)
print 'Hierarchy Created.'
def padding_tool():
'''
This tool creates a world pad on the selected joint system.
Select the root and execute the command.
import noske_marc_riggingtools_cri1_1405
reload (noske_marc_riggingtools_cri1_1405)
noske_marc_riggingtools_cri1_1405.padding_tool()
'''
selected = pm.ls(selection=True)
# print 'Current Selected:', selected
root_joint = selected[0]
# Create empty group
    # empty=True will create an empty group
#
pad = pm.group(empty=True)
# Move group to joint.
# and delete constraint
temp_constraint = pm.pointConstraint(root_joint, pad)
pm.delete(temp_constraint)
# Freeze Transforms on the group
pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
# Parent joint to group
pm.parent(root_joint, pad)
# renaming
# ct_tail_01_bind
# ct_tail_00_pad
pad_name = root_joint.replace('01_bind', '00_pad')
pad.rename(pad_name)
print 'Padding group Created'
def joint_renamer():
'''
This tool renames all the joints in a joint chain.
Based upon the naming convention:
lt_arm_01_bind -> lt_arm_03_waste
    Select the root joint and execute the command.
import noske_marc_riggingtools_cri1_1405
reload (noske_marc_riggingtools_cri1_1405)
noske_marc_riggingtools_cri1_1405.joint_renamer()
'''
print 'Joint Renamer - Active'
import pymel.core as pm
    ori = raw_input('Orientation prefix (lt/rt/ct): ')
    system_name = raw_input('System name (e.g. arm): ')
count = 0
suffix = 'bind'
'''
Get Selected.
pm.ls(selected=True)
'''
joint_chain = pm.ls(selection=True, dag=True)
print 'Selected items:', joint_chain
'''
Figure out naming convetion.
'lt_arm_01_bind' -> 'lt_arm_03_waste
'ct_tail_01_bind', -> 'ct_tail_04_waste'
'''
'''
Loop through joint chain.
'''
    for current_joint in joint_chain:
        count = count + 1
        new_name = '{0}_{1}_{2:02d}_{3}'.format(ori, system_name, count, suffix)
        print 'New Name:', new_name
        # Rename joint
        current_joint.rename(new_name)
    # Only the last joint is an end joint, so rename just that one with the
    # 'waste' suffix (e.g. lt_arm_03_waste), rather than every joint in turn.
    waste_name = '{0}_{1}_{2:02d}_{3}'.format(ori, system_name, count, 'waste')
    joint_chain[-1].rename(waste_name)
def priming_tool():
'''
This tool creates a local oriented control and pad on the
selected joint system.
    Select the joints and execute the command.
import noske_marc_riggingtools_cri1_1405
reload (noske_marc_riggingtools_cri1_1405)
noske_marc_riggingtools_cri1_1405.priming_tool()
'''
# Get Selected.
selected = pm.ls(selection=True)
# print 'Joints Selected:', selected
target_joint = selected[0]
for target_joint in selected:
control_icon_name = target_joint.replace('_bind', '_icon')
local_pad_name = target_joint.replace('_bind', '_local')
# Create control
# normal set to x and radius is 1.8
control_icon = pm.circle(normal=[1, 0, 0], radius=1.8,
name=control_icon_name)[0]
# Group control (NOT an empty group)
local_pad = pm.group(name=local_pad_name)
print 'Control_Icon:', control_icon
print 'Pad Created:', local_pad
# Move group to target joint.
# and delete constraint
temp_constraint = pm.parentConstraint(target_joint, local_pad)
pm.delete(temp_constraint)
# Orient Constraint joint to control.
pm.orientConstraint(control_icon, target_joint)
print 'Local Oriented Controls Created'
| [
"[email protected]"
] | |
075dc5ac96d9959d8b37233b21e8f77234e9c826 | e9b5916013c2e3fc0ef91d505f4b8db3778e97e5 | /Where_is_the_ISS.py | 887e085004080f70fe5faf68b4f1535d67a8ccee | [] | no_license | Robin-van-Schendel/Stuff | 039ea55c5dbadfa6e0978d45d97de17afea475c5 | 9e2d74a98ffa5d79e7796e161542df3d7eaa5f04 | refs/heads/main | 2023-06-20T00:51:43.946230 | 2021-07-19T00:19:03 | 2021-07-19T00:19:03 | 380,754,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 19 01:04:32 2021
@author: Robin
"""
""" We are going to do a little python project here for fun. It's based on this
Youtube video (https://youtu.be/R6CCTuHast0) and involves pandas and plotly.
We are going to use an API and we are going to plot on a map the actual
current location of the International Space Station. This is literally just copied"""
import pandas as pd
import plotly.express as px
import plotly.io as pio
# Because spyder (the IDE I use) can't seem to deal with plotly figures
# we have to show them in browser
pio.renderers.default = "browser"
# Now we are going to read the location of the ISS using this API
url = "http://api.open-notify.org/iss-now.json"
dataframe = pd.read_json(url)
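# The API returns JSON shaped like (values illustrative):
#   {"iss_position": {"latitude": "24.1", "longitude": "-45.9"},
#    "message": "success", "timestamp": 1626651872}
# so read_json yields rows 'latitude'/'longitude' with the values living
# in the 'iss_position' column.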
#print(dataframe)
# Pull latitude and longitude out of the iss_position column into their own columns
dataframe['latitude'] = dataframe.loc['latitude','iss_position']
dataframe['longitude'] = dataframe.loc['longitude','iss_position']
dataframe.reset_index(inplace=True)
# print(" ")
# print(dataframe)
# Let's get rid of some of the superfluous columns
dataframe = dataframe.drop(['index','message'], axis=1)
# print(" ")
# print(dataframe)
# Now let's illustrate our data by plotting it
fig = px.scatter_geo(dataframe, lat = 'latitude', lon = 'longitude')
fig.show()
| [
"[email protected]"
] | |
1f5282f0900b7d72c28cfe9b137fc1ed048289da | dd17f48dbfd60a775730c9ceab4e38331f3ab04e | /main_bonus.py | d3ac9fb3108f436e04afe21060af32e541c3b1ff | [] | no_license | KPI-Sense/intern-take-home-assessment | 3bf03fe8cc2a69abdcc4c7e45b5dc4c3e0d2a40f | 99b4130ef7007b15ec95fa08aba1839490eccd3b | refs/heads/master | 2022-11-02T03:30:29.012566 | 2020-06-18T19:50:05 | 2020-06-18T19:50:05 | 269,168,515 | 0 | 0 | null | 2020-06-18T19:50:07 | 2020-06-03T18:49:12 | Python | UTF-8 | Python | false | false | 1,778 | py | import xlrd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import timedelta
import sys
xl = pd.read_excel('demo_model.xlsx', sheet_name= "Model P&L" , index_col = 14, usecols = "N:CG", skiprows=4, nrows = 7);
dates_x = xl.iloc[0, :]
sub_rev = xl.iloc[5, :]
serv_rev = xl.iloc[6, :]
count = 0
if len(sys.argv) == 2:
m,y = sys.argv[1].split('/')
for jj in range(0, len(dates_x._index)):
count = count + 1
if dates_x._index[jj].month == int(m):
if dates_x._index[jj].year == int(y):
sub_rev = xl.iloc[5, count:len(dates_x._index)]
serv_rev = xl.iloc[6, count:len(dates_x._index)]
dates_x = xl.iloc[0, count:len(dates_x._index)]
break
dt_arr = []
for jj in range(0, len(dates_x._index)):
    dt = dates_x._index[jj]
    # Snap each date to the last day of its month so the tick labels align.
    dt_diff = dt.days_in_month - dt.day
    if dt_diff != 0:
        dt = dt + timedelta(days=dt_diff)
    dt_arr.append(dt)
indx_x = np.arange(len(dates_x._index))
tickvalues = np.arange(0,len(dt_arr))
plt.figure(figsize=(25,7))
graphSub = plt.bar(x=indx_x, height=sub_rev, width=0.55)
graphServ = plt.bar(x=indx_x, height=serv_rev, width=0.55, bottom=sub_rev)
plt.gcf().autofmt_xdate()
plt.gca().tick_params(axis='x', which='major', labelsize = 6)
plt.xlabel ('Dates')
plt.ylabel ('Revenue')
plt.title('Demo Company - P&L Model')
plt.xticks(tickvalues, dt_arr)
plt.yticks()
plt.legend((graphSub[0], graphServ[0]), ('Subscription Revenue', 'Service Revenue'))
plt.tight_layout()
plt.show()
| [
"[email protected]"
] | |
045eaa1ba5008040aa5cfb9cf8fdb29a4824d58b | ba9f1dfad6df7946f1dff62f42b8cca465a9d6a8 | /PEuler_076.py | 4ae4dd35459e19dc9b76d58f1b9571325877ab12 | [] | no_license | Concarne2/ProjectEuler | 7148e54ec74683d7a6ff3aeb41f4dfbc31386fe3 | 8fa87f3c0e59b730edc11272ef04790173e6a831 | refs/heads/master | 2020-07-21T14:46:04.140951 | 2019-09-07T01:42:45 | 2019-09-07T01:42:45 | 206,899,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | results = [0,0,1,2]
def nextResult():
n = len(results)
ways = results[n-1] + 1
for i in range(2,n//2 + 1):
ways += results[n//i]
results.append(ways)
ways = [1]
for i in range(1,101):
ways.append(0)
for i in range(1,100):
for j in range(i,101):
ways[j] += ways[j-i]
| [
"[email protected]"
] | |
623c325e61835a7b871177b94263a55960e72177 | a9f517df3af78a18c79b92a25fce3f676e2597c0 | /ckanext/ytp/comments/logic/auth/delete.py | e14918d608183f1dac79de377ce7dadad2f71a20 | [] | no_license | nvgiambruno44/ckanext-ytp-comments | d3f70e261badb634013ed585444d240cd869c64f | 0dd9069ed564076b850366bb37136436be9079da | refs/heads/master | 2020-08-05T19:14:48.284385 | 2019-10-03T20:22:43 | 2019-10-03T20:22:43 | 212,672,045 | 0 | 0 | null | 2019-10-03T20:18:43 | 2019-10-03T20:18:43 | null | UTF-8 | Python | false | false | 622 | py | import logging
from pylons.i18n import _
from ckan import logic
import ckanext.ytp.comments.model as comment_model
log = logging.getLogger(__name__)
def comment_delete(context, data_dict):
model = context['model']
user = context['user']
userobj = model.User.get(user)
cid = logic.get_or_bust(data_dict, 'id')
comment = comment_model.Comment.get(cid)
if not comment:
return {'success': False, 'msg': _('Comment does not exist')}
    if comment.user_id != userobj.id:
return {'success': False, 'msg': _('User is not the author of the comment')}
return {'success': True}
| [
"[email protected]"
] | |
fb92dd9b2f76b302c782a0613c06b023634ee190 | f391e9b02730cf61c19d53423ab861b252691a07 | /manage.py | 04e547904b2845ddb906df1e889b2916ab9f2db2 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Bogdanp/django_dramatiq_example | 7eb7b6f92c73aacb93b7bf964e437faad35dc5fd | 07b00b70233b7fb7eb72c1766f0d73ba68f08d17 | refs/heads/master | 2022-11-05T00:05:54.944946 | 2018-01-06T10:54:28 | 2018-01-06T10:54:28 | 109,243,253 | 34 | 4 | NOASSERTION | 2020-06-23T09:59:59 | 2017-11-02T09:23:55 | Python | UTF-8 | Python | false | false | 821 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_dramatiq_example.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c19ba77da1e470b244cb2d31ae64f6e5753bc812 | 5b4529582c9e1c6d832387672cc295e775cdd39a | /day04/统计.py | 66008caaeaf6780c5aaa4f09e36c9d21f593a831 | [] | no_license | gitxf12/demotest | 8ccbc7f050eaefb96a6399171e103495371f403a | 5d4807d7e3a4e869038ceb1f08356d0caf40f6f5 | refs/heads/master | 2023-04-21T06:16:00.368012 | 2021-05-31T08:37:08 | 2021-05-31T08:37:08 | 359,756,091 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | List = [1,4,7,5,8,2,1,3,4,5,9,7,6,1,10]
seen = []
i = 0
while i < len(List):
    a = List.count(List[i])
    # Report each duplicated value once, not once per occurrence.
    if a > 1 and List[i] not in seen:
        print(List[i], "is repeated", a - 1, "extra time(s)")
        seen.append(List[i])
    i = i + 1
| [
"[email protected]"
] | |
53bc1a13640e400a6b5db1007807747461441931 | 7fffc39739869f259fe2d103efa05b87739778d1 | /Python/3021.py | 0f36f7063cfd086de49ec99c21fa25ca058daed7 | [] | no_license | yunbinni/CodeUp | be39b3bd9fbeaa64be2a77a92918ebcc79b1799b | 5cb95442edb2b766de74154e0b91e8e1c236dd13 | main | 2023-08-15T19:02:29.819912 | 2021-10-12T00:50:16 | 2021-10-12T00:50:16 | 366,761,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | """Input and initialization"""
a = '0'+input()
b = '0'+input()
stack = []
oneNum, tenNum, sum = 0, 0, 0
res = ''
"""계산부"""
if len(a) >= len(b):
b = '0'*(len(a) - len(b)) + b
else:
a = '0'*(len(b) - len(a)) + a
# Reverse the digit strings
a = a[::-1]
b = b[::-1]
loopNum = len(a) if len(a) > len(b) else len(b)
for i in range(loopNum):
sum = int(a[i]) + int(b[i]) + tenNum
oneNum = sum % 10
tenNum = sum // 10
stack.append(oneNum)
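# Each loop step adds one digit from each number plus the carry (tenNum);
# oneNum is the digit kept for this column, tenNum carries into the next.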
"""출력부"""
while(stack):
res += str(stack.pop())
res = int(res)
print(res) | [
"[email protected]"
] | |
ba30f17076ec8a6204953eed47762879a99d80e6 | 60a3353c5c497cae6092ead3d81c4e908d86d1b4 | /m-solutions/euclidean.py | 437f848fce6891f81828fc8cfa60179375f902c5 | [] | no_license | kt8128/atcorder | a60dee4b727292506281e512a176454a16b27481 | f7f61c8e25db73d725d9f87b655f9e46daf0c36e | refs/heads/master | 2020-08-27T19:03:59.451172 | 2020-01-13T02:16:15 | 2020-01-13T02:16:15 | 217,466,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import sys
import pandas as pd
import math
args = sys.argv
d, d_prime = int(args[1]), int(args[2])
x, x_prime, y, y_prime, q = 1, 0, 0, 1, 0
df = pd.DataFrame([[d, d_prime, x, x_prime, y, y_prime, q]], columns=["d", "d_prime", "x", "x_prime", "y", "y_prime", "q"])
while d_prime != 0:
d_prev, d_prime_prev, x_prev, x_prime_prev, y_prev, y_prime_prev = d, d_prime, x, x_prime, y, y_prime
q = math.floor(d_prev / d_prime_prev)
d, d_prime = d_prime_prev, d_prev-q*d_prime_prev
x, x_prime, y, y_prime = x_prime_prev, x_prev-q*x_prime_prev, y_prime_prev, y_prev-q*y_prime_prev
df1 = pd.DataFrame([[d, d_prime, x, x_prime, y, y_prime, q]], columns=df.columns)
df = df.append(df1)
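# Invariant of the extended Euclidean algorithm: at every step,
# x*D + y*D' == d and x_prime*D + y_prime*D' == d_prime (D, D' being the
# original inputs), so the final x, y are Bezout coefficients with
# x*D + y*D' == gcd(D, D').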
print(df)
| [
"[email protected]"
] | |
1677e59cd251670e285e829e2b42e65aabc776e2 | 43bb2144abf954a42995ea84ea1a71c2483ac3db | /django/api/appclassdash/urls.py | 0ea37291a17122b15ab8ba053212d7be0073b041 | [] | no_license | classdashapp/ALLClassDashServerFiles | d281464f0da715628f9d4f8f9a69ba1016734fa8 | dd05531b7422ca4e254de8841af7fffc71f006db | refs/heads/master | 2020-05-17T11:07:58.030951 | 2019-04-26T18:36:49 | 2019-04-26T18:36:49 | 183,676,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from django.urls import path
from rest_framework import routers
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from .views import InterestViewSet, UserViewSet, CourseViewSet
from rest_framework.authtoken.views import obtain_auth_token
router = routers.DefaultRouter()
router.register(r'interests',InterestViewSet, 'interests')
router.register(r'courses', CourseViewSet, 'courses')
router.register(r'users', UserViewSet, 'users')
urlpatterns = [
    url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^get-token/', obtain_auth_token),
    # r'^' (empty prefix) so the router's routes actually match; the
    # original r' ' pattern (a literal space) would never match a URL.
    url(r'^', include(router.urls)),
]
| [
"[email protected]"
] | |
531e12ee91b3b3cc63baa365562d4af6120eef6c | 09eeb8352feb8a59574dc58d894b986575e78e5d | /packaging/myproject/pkgB/B_feature.py | 15fe78cd476250c0af84dc889c7a5c79e411bfb7 | [
"MIT"
] | permissive | TxuanYu/packaging_practice | 5d03f85ede49020ddeabf48724297db3d834e39a | 4f62953bdac18f542e216b429e4e70e892ee8f6b | refs/heads/main | 2023-04-03T11:56:58.183888 | 2021-03-29T05:14:37 | 2021-03-29T05:14:37 | 352,520,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | def nice_test():
print('B')
if __name__ == '__main__':
nice_test() | [
"[email protected]"
] | |
0c8ed463b89362b26c7aa74053162692a31bf33a | d877e1f98f907fa8556138265f0b7fb224639323 | /discusProject/asgi.py | 97806e780bcaa5c1a5456192c1da9c4195a0e3b8 | [] | no_license | MuflikhunovRR/discusProject | 2034867b8423b8713fc29b876f30f287e338e09f | 8fb9f568c7984f8b8f1184106d1518aeb794334e | refs/heads/master | 2023-04-27T13:38:07.857764 | 2021-05-14T12:01:09 | 2021-05-14T12:01:09 | 367,331,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for discusProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'discusProject.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
abbc97914297519c3d4d2f494cb8a8fe7b094b5f | a1ef11a1861bc94495f5cc1bc0de2355080f5c3f | /19/19_2.py | 6784acc25b51363b757cce9704b7a6d1da4b99b7 | [] | no_license | womogenes/AoC-2020-solutions | 85208d1a2b24daf299150a40056fe9cb3c52355d | 72c4673a24c1d06d8f385536f1881c9202991d10 | refs/heads/main | 2023-02-08T01:03:30.384345 | 2020-12-25T19:20:10 | 2020-12-25T19:20:10 | 318,245,689 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | import re
with open("19_input.txt") as fin:
data = fin.read().split("\n")
# Variable to keep track of rules
rules = {}
# Get all the rules
line = 0
while data[line] != "":
number = int(data[line][:data[line].index(":")])
rule = data[line][data[line].index(":") + 2:]
rules[number] = rule
line += 1
# Helper that builds the regex for a given rule id
def get_regex(number):
# Gets regex based on id
if number == 8:
return r8
if number == 11:
return r11
ans = to_regex(rules[number])
return ans
def to_regex(rule):
# Converts "rule" to regex
if "|" in rule:
part1 = [int(i) for i in rule[:rule.index("|") - 1].split(" ")]
part2 = [int(i) for i in rule[rule.index("|") + 2:].split(" ")]
regex1 = "(" + ")(".join([get_regex(i) for i in part1]) + ")"
regex2 = "(" + ")(".join([get_regex(i) for i in part2]) + ")"
ans = f"({regex1})|({regex2})"
elif '"' in rule:
ans = rule[1]
else:
parts = [int(i) for i in rule.split(" ")]
ans = "(" + ")(".join([get_regex(i) for i in parts]) + ")"
return ans
r42 = get_regex(42)
r31 = get_regex(31)
r8 = f"({r42})+"
r11s = []
for n in range(1, 20):
r11s.append(f"({r42}){{{n}}}({r31}){{{n}}}")
r11 = "(" + ")|(".join([i for i in r11s]) + ")"
# Verify!
count = 0
line += 1
# Build the full-message regex once instead of recompiling it per message.
r0 = get_regex(0)
while line < len(data):
    matches = bool(re.fullmatch(r0, data[line]))
count += matches
line += 1
print(count) | [
"[email protected]"
] | |
47f9a655abcd4d887051fc29d68c0b3f18df2950 | 18ca91dcce6f15e761c1ccae7a7fde5dc6ee93d3 | /spec/interpreter/interpreter_spec.py | 04c5458826ccaf59a801a20fc96874c5af6b7b34 | [] | no_license | IgnusG/H4ck3rs-D3l1ght | cc6e2f02b4d67a877259f43012fe7b9ff91101a7 | cadbe2842910885502c355a6269ee281bbcf334a | refs/heads/master | 2020-04-06T06:57:34.583845 | 2016-08-04T22:30:37 | 2016-08-04T22:30:37 | 64,971,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | import unittest
from interpreter.interpreter import Interpreter
class InterpreterTest(unittest.TestCase):
def setUp(self):
Interpreter.reset_storage()
def test_interpreter_basic_output(self):
user_input = 'HD! HD! HD='
self.assertEqual(Interpreter.run_interpreter(user_input), '2')
def test_interpreter_hello_world(self):
user_input = '72 H4ck3rs D3l1ght@ 101 H4ck3rs D3l1ght@ 108 D3l1ght H4ck3rs= H4ck3rs D3l1ght@ H4ck3rs D3l1ght@ H4ck3rs D3l1ght. 111 D3l1ght H4ck3rs= H4ck3rs D3l1ght@ 32 H4ck3rs D3l1ght@ 87 H4ck3rs D3l1ght@ H4ck3rs D3l1ght@ 114 H4ck3rs D3l1ght@ D3l1ght H4ck3rs. H4ck3rs D3l1ght@ 100 H4ck3rs D3l1ght@ 33 H4ck3rs D3l1ght@'
self.assertEqual(Interpreter.run_interpreter(user_input), 'Hello World!')
def test_interpreter_conditions(self):
user_input = 'HD! HD! HD! HD? HD= DH! DH? HD! HD='
self.assertEqual(Interpreter.run_interpreter(user_input), '3211')
def test_interpreter_nested_conditions(self):
user_input = 'HD! HD! HD! HD? HD= DH! HD. HD! HD! HD? HD= DH! DH? DH. DH?'
self.assertEqual(Interpreter.run_interpreter(user_input), '321221121')
| [
"[email protected]"
] | |
68765f3d71df3158760cd4cdd1511398f5249306 | e1b0baf821459cc58ad7e562ed59a2c18538f5d6 | /02_variables.py | 88cac0703aecb141fc88550754061815876ac408 | [] | no_license | Moses-Mumo98/Python-Basics | 57a8a5df7f8a973093ecb1fd5b333633e9fbb0d4 | 2c65123cc6a9977abd6746a761b2efb986bafdea | refs/heads/main | 2023-05-09T01:47:39.599003 | 2021-06-01T17:39:23 | 2021-06-01T17:39:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # what is a variable - named containers used to temporarily store data in the computers memory
# declare a variable;
# reassign a variable
# float
# string
# boolean
# define appropriate variables from the following lines
# check in a student named John Doe
# He is 21 years old and is a new student
# getting input from the user (input)
# getting a data-type
| [
"[email protected]"
] | |
88a2924f6e0807ecffd10efb725949441bceb4fa | cfd493f1c1908b0c4b339da60e2453eec15bcdbb | /packages_ext_ws/build/universal_robot/ur_driver/catkin_generated/pkg.installspace.context.pc.py | 4b12042306581e950751fe8171f7862fe8861eb3 | [] | no_license | skypap/projets_ros | bc95e3baff468febba8169bd6375ed56d74a86df | 66f00307ffcf96141027752f287f57e16ade5e57 | refs/heads/master | 2021-05-16T14:11:58.086748 | 2018-01-29T22:32:06 | 2018-01-29T22:32:06 | 118,060,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/maxime/projets-ros/packages_ext_ws/install/include".split(';') if "/home/maxime/projets-ros/packages_ext_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_driver"
PROJECT_SPACE_DIR = "/home/maxime/projets-ros/packages_ext_ws/install"
PROJECT_VERSION = "1.1.11"
| [
"[email protected]"
] | |
90200eb35d16e47548cb09c81a083e16a412daf5 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /otp/uberdog/RejectCode.py | 4676d7c5bbdcf662d24bc3867799ee2d361bf35e | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | # 2013.08.22 22:15:51 Pacific Daylight Time
# Embedded file name: otp.uberdog.RejectCode
class RejectCode():
__module__ = __name__
TIMEOUT = 65535
NO_GUILD = 10000
MAY_NOT_JOIN_GUILD = 10001
MAY_NOT_INVITE_GUILD = 10002
NO_MEMBERSHIP = 10003
GUILD_FULL = 10004
LEAVE_CURRENT_GUILD_FIRST = 10005
ALREADY_IN_GUILD = 10006
JOIN_OWN_GUILD = 10007
NOT_IN_A_GUILD = 10008
OTHER_AVATAR_NO_MEMBERSHIP = 10009
OTHER_AVATAR_NO_GUILD = 10010
OTHER_AVATAR_NO_GUILD_FOUND = 10011
ALREADY_HAS_GUILD = 10012
MAY_NOT_CREATE_GUILD = 10013
BUSY = 10014
NO_CREW = 20000
MAY_NOT_JOIN_CREW = 20001
JOIN_OWN_CREW = 20002
NOT_IN_A_CREW = 20003
MAY_NOT_INVITE_CREW = 20004
ALREADY_IN_CREW = 20005
ALREADY_INVITED_TO_CREW = 20006
NOT_CAPTAIN_OF_CREW = 20007
NO_SELF_CREW_INVITE = 20008
CREW_INVITATION_DECLINED = 20009
ALREADY_LOCKED = 20010
NOT_LOCKED = 20011
NOT_YOUR_LOCK = 20012
YOUR_CREW_IS_LOCKED = 20013
OTHER_CREW_IS_LOCKED = 20014
NOT_IN_YOUR_CREW = 20015
NO_TRADE = 30000
NOT_YOUR_TRADE = 30001
TRADE_NOT_ACTIVE = 30002
AVATAR_NOT_HERE = 30003
MAY_NOT_TRADE = 30004
NO_SELF_TRADE = 30005
OTHER_AVATAR_NOT_HERE = 30006
OTHER_AVATAR_MAY_NOT_TRADE = 30007
NO_INVENTORY = 40000
CONTENT_TYPE_NOT_ALLOWED = 40001
HAS_NONE_OF_THAT_TYPE = 40002
NO_MORE_SLOTS = 40003
SLOT_TYPE_NOT_FOUND = 40004
OVERFLOW = 40005
UNDERFLOW = 40006
MULTIPLE_OF_SAME_ITEM = 40007
MAY_NOT_DESTROY = 40008
NO_REMOVE = 40009
NOT_YOUR_DO_ID = 40010
NO_AVATAR = 50000
NOT_YOUR_AVATAR = 50001
BAD_DELETE_PASSWORD = 50002
MAY_NOT_LIST_AVATARS = 50003
MAY_NOT_CREATE_AVATAR = 50004
MAY_NOT_REMOVE_AVATAR = 50005
SLOT_OUT_OF_RANGE = 50006
SLOT_TAKEN = 50007
INVALID_ACCOUNT = 50008
AVATAR_ONLINE = 50009
MAX_AVATAR_LIMIT = 50010
NO_LOCK_ON_SLOT = 50011
NO_FRIENDS_LIST = 60000
FRIENDS_LIST_NOT_HANDY = 60001
INVITEE_NOT_ONLINE = 60002
ALREADY_INVITED = 60003
ALREADY_YOUR_FRIEND = 60004
FRIENDS_LIST_FULL = 60005
OTHER_FRIENDS_LIST_FULL = 60006
ALREADY_NOT_YOUR_FRIEND = 60007
INVITATION_DECLINED = 60008
NOT_YOUR_FRIENDS_LIST = 60009
ALREADY_FRIENDS_WITH_SELF = 60010
MAY_NOT_OPEN_INVITE = 60011
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\otp\uberdog\RejectCode.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:15:51 Pacific Daylight Time
| [
"[email protected]"
] | |
b3fd148f25d0931228a52e238f31c93d73495d1a | 95fa704c0e2809bb7f740a79828eedaec7cffe05 | /Wrapper/MapFunctions.py | a59246b03e6d8eeb2d230c87aad60bf92e704165 | [] | no_license | jgontrum/GeoTweetEstimator | 03a9dccb49a3577fdc21c28cfa00ac2b3ad75850 | a4e5c6cdef1b0c377d6bcf318c14b907829dda93 | refs/heads/master | 2020-12-28T21:51:41.776151 | 2015-10-17T13:49:22 | 2015-10-17T13:49:22 | 34,210,768 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Johannes Gontrum <[email protected]>'
# Use this if there is no X-Server available, e.g. when this script runs on a cluster
import matplotlib as mpl
mpl.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
"""
This wrapper maintains all functions related to the Basemap package, which allows you to draw
graphs directly on a map.
The prepareMap() function returns a Basemap object that is centered on Germany, Austria and Switzerland.
"""
# Returns a map with default settings
def prepareMap():
map = Basemap(projection='merc',
resolution='i',
area_thresh=200,
lat_0=51.16, # center
lon_0=10.44, # center
llcrnrlon=5.3, # longitude of lower left hand corner of the desired map domain (degrees).
llcrnrlat=45, # latitude of lower left hand corner of the desired map domain (degrees).
urcrnrlon=18, # longitude of upper right hand corner of the desired map domain (degrees).
urcrnrlat=56 # latitude of upper right hand corner of the desired map domain (degrees).
)
# draw coastlines, state and country boundaries, edge of map.
map.drawcoastlines(linewidth=0.25)
map.drawcountries(linewidth=0.25)
map.fillcontinents(color='snow', lake_color='lightcyan')
# draw the edge of the map projection region (the projection limb)
map.drawmapboundary(fill_color='lightblue')
return map
| [
"[email protected]"
] | |
c63312dedb59f427503f31e21b0dc9fc56c7ba3d | db4157453ae65b6679809e8298dd63576387de9e | /scikit_learn_morvan/skl_4.py | 848363a3f14e0e6b482f1eb807eeb259679ddb8f | [] | no_license | TanMengyuan/Machine_Learning | c0a24471b4408c1677aa11107ec107645f9cc79c | 9581a6a1beb5060b5a5c8530706a8ce3a6520c2d | refs/heads/master | 2020-05-27T19:27:44.609118 | 2019-11-18T13:36:58 | 2019-11-18T13:36:58 | 170,133,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from sklearn import preprocessing
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm import SVC
import matplotlib.pyplot as plt
# a = np.array([[10, 2.7, 3.6],
# [-100, 5, -2],
# [120, 20, 40]], dtype=np.float64)
# print(a)
# print(preprocessing.scale(a))
X, y = make_classification(n_samples= 300, n_features= 2, n_redundant= 0, n_informative= 2, random_state= 22,
n_clusters_per_class= 1, scale= 100)
# plt.scatter(X[:, 0], X[:, 1], c=y)
# plt.show()
X = preprocessing.minmax_scale(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= .3)
clf = SVC()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test)) | [
"[email protected]"
] | |
fb2f9f6c456de289f09f1b7a377ac669354429f8 | 6b35764647da66746818da584b9a058c838b98eb | /ble/projects/bookreview/app.py | e8de149b2b332854704508b475c0d3c43370eca7 | [] | no_license | seon0522/front_study | fa16ad6806c6b789b6d343a9cdcf5db3665e1a8e | 613f2cedd68abe3e6c3e2159739c08209020e476 | refs/heads/main | 2023-06-24T22:01:12.206324 | 2021-07-22T04:39:31 | 2021-07-22T04:39:31 | 374,309,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.dbsparta
## Part that serves the HTML
@app.route('/')
def home():
return render_template('index.html')
## Part that acts as the API
@app.route('/review', methods=['POST'])
def write_review():
sample_receive = request.form['sample_give']
print(sample_receive)
    return jsonify({'msg': 'This request is POST!'})
@app.route('/review', methods=['GET'])
def read_reviews():
sample_receive = request.args.get('sample_give')
print(sample_receive)
    return jsonify({'msg': 'This request is GET!'})
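# Example requests against the running server (hypothetical values):
#   curl -X POST -d "sample_give=hello" http://localhost:5000/review
#   curl "http://localhost:5000/review?sample_give=hello"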
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True) | [
"[email protected]"
] | |
c2bde3703edd788b56bd303b9772777a3ab417c1 | c0df141c1e4cd2fc7148c88526c24099d994bb8a | /LinearRegression.py | 0531745cf64cc5d0beb55085d9c0534b2ccca200 | [] | no_license | Solony/MachineLearningDemo | 41c75986d18eaf2dc23ac3e04b315a0952ee53e7 | ec37a227384e8fec50e60f70249a20d26cacb1dc | refs/heads/master | 2021-07-17T04:39:34.736943 | 2017-10-20T03:07:52 | 2017-10-20T03:07:52 | 107,617,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import numpy as np
import matplotlib.pyplot as plt
X = np.linspace(0, 10)
# the target function y = x
f = lambda x: x
# vectorize f so it can operate on numpy arrays
F = np.vectorize(f)
Y = F(X)
#init dataset
num = 100
random_sign = np.vectorize(lambda x: x if np.random.sample() > 0.5 else -x)
data_X = np.linspace(1, 9, num)
data_Y = random_sign(np.random.sample(num) * 2) + F(data_X)
#Linear regression
from sympy import *
def linear_regression(X, Y):
a, b = symbols('a b')
    residual = 0  # residual sum of squares
for i in range(num):
residual += (Y[i] - (a * X[i] + b)) ** 2
#diff a and b respectively
f1 = diff(residual, a)
f2 = diff(residual, b)
res = solve([f1, f2], [a, b])
return res[a], res[b]
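# For reference, the same least-squares fit has a closed form equivalent to
# solving the two derivative equations above:
#   a = sum((x - mean_x) * (y - mean_y)) / sum((x - mean_x) ** 2)
#   b = mean_y - a * mean_x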
a, b = linear_regression(data_X, data_Y)
LR_X = X
h = lambda x: a * x + b
H = np.vectorize(h)
LR_Y = H(LR_X)
#plt.plot(X, Y, 'b')
plt.plot(LR_X, LR_Y, 'g')
plt.plot(data_X, data_Y, 'ro')
#Data test
#Define 4 extraordinary data points and 3 normal data points
DataSet = [[1.3, 10], [2.6, 8], [3.8, 7], [4.5, 9], [3.2, 3.0], [2.1, 1.9], [1.4, 1.3]]
#DataSet = [[1.3, 2.6, 3.8, 4.5, 3.2, 2.1, 1.4], [10, 8, 7, 9, 3.0, 1.9, 1.3]]
i = 0
while i < len(DataSet):
plt.plot(DataSet[i][0], DataSet[i][1], 'bo')
i += 1
plt.show()
count = 0
for i in range(len(data_X)):
if(data_X[i] < 5):
count += 1
ordinary = (count + 3) / float(count + len(DataSet))
exception = (count + 4) / float(count + len(DataSet))
print (format(ordinary, '.2f'))
print (format(exception, '.2f')) | [
"[email protected]"
] | |
43560555f43f44b5b8ef4fe21a2f9d054e2ff43d | 2fd40155f0b7efce82ea87711db3b790aa7bb99e | /call_job.py | 3b26eb555841609295156c7e3c5cbb4c23fb68ee | [
"MIT"
] | permissive | breecummins/dsgrn_net_gen | 3c743ecbe173f33e129f8d865cf4af0001e11726 | bcd77b71ad3e311d2906f0af986559d2c54ffe6d | refs/heads/master | 2021-12-27T02:19:38.054600 | 2021-09-23T18:58:16 | 2021-09-23T18:58:16 | 251,466,654 | 1 | 0 | MIT | 2021-04-13T19:17:15 | 2020-03-31T01:09:42 | Python | UTF-8 | Python | false | false | 107 | py | from dsgrn_net_gen.makejobs import Job
import sys
paramfile = sys.argv[1]
job = Job(paramfile)
job.run()
| [
"[email protected]"
] | |
caa80222721a93370d33ce36be40304194ab3e32 | 7c99880c8014f090ec5694e14276211284bf4220 | /backend/wallet/migrations/0001_initial.py | f0061f9d57752af31404c778f37cb7a132166de7 | [] | no_license | crowdbotics-apps/selekni-24543 | bf1e087ae15fd7c99f02b89ba50d79c1e03b189c | f740a5dcf449ac7180e50bf2979a882a8d4353b3 | refs/heads/master | 2023-03-04T23:59:38.938220 | 2021-02-16T20:03:28 | 2021-02-16T20:03:28 | 339,158,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,258 | py | # Generated by Django 2.2.18 on 2021-02-15 19:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("task_profile", "0001_initial"),
("task", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="CustomerWallet",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("balance", models.FloatField()),
("expiration_date", models.DateTimeField()),
("last_transaction", models.DateTimeField()),
(
"customer",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="customerwallet_customer",
to="task_profile.CustomerProfile",
),
),
],
),
migrations.CreateModel(
name="PaymentMethod",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("account_token", models.CharField(max_length=255)),
("payment_account", models.CharField(max_length=10)),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"wallet",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="paymentmethod_wallet",
to="wallet.CustomerWallet",
),
),
],
),
migrations.CreateModel(
name="TaskerWallet",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("balance", models.FloatField(max_length=254)),
("expiration_date", models.DateTimeField()),
("last_transaction", models.DateTimeField()),
(
"tasker",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="taskerwallet_tasker",
to="task_profile.TaskerProfile",
),
),
],
),
migrations.CreateModel(
name="TaskerPaymentAccount",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("account_token", models.CharField(max_length=255)),
("payment_account", models.CharField(max_length=10)),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"wallet",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="taskerpaymentaccount_wallet",
to="wallet.TaskerWallet",
),
),
],
),
migrations.CreateModel(
name="PaymentTransaction",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("price", models.FloatField()),
("tip", models.FloatField()),
("tracking_id", models.CharField(max_length=50)),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"customer",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="paymenttransaction_customer",
to="task_profile.CustomerProfile",
),
),
(
"payment_method",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="paymenttransaction_payment_method",
to="wallet.PaymentMethod",
),
),
(
"tasker",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="paymenttransaction_tasker",
to="task_profile.TaskerProfile",
),
),
(
"transaction",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="paymenttransaction_transaction",
to="task.TaskTransaction",
),
),
],
),
]
| [
"[email protected]"
] | |
b38ff14e2876add3596c10fc062f2902405b898e | dcf9a7aeaddc876530e8f28fd17130f8859feda9 | /pymatflow/cp2k/base/motion_driver.py | f3e40b94ad64f31523d19937480656281410f41c | [
"MIT"
] | permissive | DeqiTang/pymatflow | 3c6f4a6161a729ad17db21db9533187c04d8f5ac | 922722187e2678efbfa280b66be2624b185ecbf5 | refs/heads/master | 2022-05-25T19:41:19.187034 | 2022-03-05T03:07:08 | 2022-03-05T03:07:08 | 245,462,857 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# ========================
# CP2K / MOTION / DRIVER
# ========================
class cp2k_motion_driver:
def __init__(self):
self.params = {
}
self.status = False
    def set_params(self, params):
        # Keys arrive as "SECTION-KEY" strings; only keys exactly one level
        # deep (a single dash) are stored here, under the bare KEY name.
        for item in params:
            if len(item.split("-")) == 2:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
| [
"[email protected]"
] | |
626a52bc6976c92a5a598edb3e2ca4eaf21e0e9e | d4296be903796d6edbbd59b863ff4d45aa03ad36 | /day18/day18.py | 3089dd6aefa1307224b8d8920deaf073494f49ce | [] | no_license | yamnikov-oleg/aoc2020 | e534edcb64109f68f903e1b66d7b73151ae5364e | 0ad20f4db5b79a38e53364117dfda6787c6259f5 | refs/heads/master | 2023-02-05T03:08:54.390805 | 2020-12-27T14:46:05 | 2020-12-27T14:46:05 | 318,168,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,595 | py | from enum import Enum
import re
from dataclasses import dataclass
from typing import List, Tuple, Union
class Expr:
"""
A base class for an evaluatable expression.
"""
def eval(self) -> int:
raise NotImplementedError("eval")
@dataclass
class LiteralExpr(Expr):
"""
A number literal. Evaluates to itself.
"""
value: int
def eval(self) -> int:
return self.value
@dataclass
class ParenExpr(Expr):
"""
A group of parentheses, wrapping around another expression.
Evaluates to the value of the inner expression.
"""
inner: Expr
def eval(self) -> int:
return self.inner.eval()
@dataclass
class AddExpr(Expr):
"""
An addition. Evaluates to the sum of the values of the left hand side expression
and the right hand side expression.
"""
left_operand: Expr
right_operand: Expr
def eval(self) -> int:
return self.left_operand.eval() + self.right_operand.eval()
@dataclass
class MultExpr(Expr):
"""
A multiplication. Evaluates to the product of the values of the left hand side expression
and the right hand side expression.
"""
left_operand: Expr
right_operand: Expr
def eval(self) -> int:
return self.left_operand.eval() * self.right_operand.eval()
class Precedence(Enum):
"""
Enumeration for the precedence option of the parser.
"""
# + and * are parsed evaluated at the same precedence level.
# As a result, in an expression without parentheses former operations will be
# nested the later, be it AddExpr or MultExpr.
FLAT = 'flat'
# Summations are parsed and evaluated before the multiplications.
# As a result, in an expression without parentheses AddExpr will be nested
# in MultExpr, but never the other way around.
ADDITION_FIRST = 'addition_first'
@dataclass
class Parser:
"""
A class to tokenize, parse and evaluate expressions in the text form.
"""
# Controls the precedence of the operators.
# Affects how the operations are nested inside the AddExpr and MultExpr trees.
precedence: Precedence = Precedence.FLAT
def tokenize(self, line: str) -> List[str]:
"""
Split the expression string into more parsable string tokens.
Valid tokens are '(', ')', '+', '*' and 'N', where N is any integer.
Example:
Parser().tokenize("1 + 2 * (3 + 4)")
# => ["1", "+", "2", "*", "(", "3", "+", "4", ")"]
"""
line = line.strip()
if not line:
return []
token_re = re.compile(r'\d+|\(|\)|\+|\*')
match = token_re.match(line)
if not match:
raise ValueError(f"Invalid expression: {line}")
if match.start(0) != 0:
raise ValueError(f"Invalid character: {line}")
rest = line[match.end(0):]
tokens = self.tokenize(rest)
return [match.group(0), *tokens]
def _parse_operand(self, tokens: List[str]) -> Tuple[Expr, List[str]]:
"""
Expects a literal or a parenthesized expression. Parses it and returns
the rest of the tokens.
Examples:
parser._parse_operand(["1", "+", "2"])
# => LiteralExpr(1), ["+", "2"]
parser._parse_operand(["(", "1", "+", "2", ")", "+", "3"])
# => ParenExpr(AddExpr(LiteralExpr(1), LiteralExpr(2))), ["+", "3"]
"""
if not tokens:
raise ValueError("Expected an operand, found end of line")
if tokens[0].isdigit():
return LiteralExpr(int(tokens[0])), tokens[1:]
elif tokens[0] == '(':
expr, tokens = self._parse_expr(tokens[1:])
if not tokens:
raise ValueError(f"Expected ')', found end of line")
return ParenExpr(expr), tokens[1:] # [1:] skips the closing parenthesis
else:
raise ValueError(f"Expected operand, found {tokens!r}")
def _parse_operator(self, tokens: List[str]) -> Tuple[str, List[str]]:
"""
Expects a plus sign or an asterisk. Returns it as string and the rest of the tokens.
Examples:
parser._parse_operator(["+", "2"])
# => "+", ["2"]
"""
if not tokens:
raise ValueError("Expected an operator, found end of line")
if tokens[0] in ['+', '*']:
return tokens[0], tokens[1:]
else:
raise ValueError(f"Invalid operator: {tokens[0]!r}")
def _group_operations_flat(self, operations: List[Union[str, Expr]]) -> Expr:
"""
Given a list of expressions and operators, groups them in a single expression
by the flat precedence law.
Examples:
parser._group_operations_flat([Literal(1), "*", Literal(2), "+", Literal(3)])
# => AddExpr(MultExpr(Literal(1), Literal(2)), Literal(3))
"""
if not operations:
raise ValueError(f"_group_operations_flat called with an empty list")
# If there is only one items - it's a single operand, return it as is.
if len(operations) == 1:
return operations[0]
lhs = operations[0]
operator = operations[1]
rhs = operations[2]
rest_of_operations = operations[3:]
# Group the left-most operation...
if operator == '+':
expr = AddExpr(lhs, rhs)
elif operator == '*':
expr = MultExpr(lhs, rhs)
else:
raise ValueError(f"Invalid operator: {operator}")
# ...and nest it in the operation to the right by using it as an operand.
return self._group_operations_flat([expr, *rest_of_operations])
def _group_operations_addition_first(self, operations: List[Union[str, Expr]]) -> Expr:
"""
Given a list of expressions and operators, groups them in a single expression
by the "additions first" precedence law.
Examples:
parser._group_operations_flat([Literal(1), "*", Literal(2), "+", Literal(3)])
# => MultExpr(Literal(1), AddExpr(Literal(2), Literal(3)))
"""
# Group the operation items into "summation groups" - multiplicands, which
# can be a single operand or a summation.
# Example:
# Given operations = [Literal(1), "*", Literal(2), "+", Literal(3)]
# Produces sum_groups = [[Literal(1)], "*", [Literal(2), "+", Literal(3)]]
current_sum_group = []
sum_groups = [current_sum_group]
while operations:
operand = operations[0]
operations = operations[1:]
current_sum_group.append(operand)
if not operations:
break
operator = operations[0]
operations = operations[1:]
if operator == '*':
sum_groups.append(operator)
current_sum_group = []
sum_groups.append(current_sum_group)
continue
elif operator == '+':
current_sum_group.append(operator)
continue
else:
raise ValueError(f"Invalid operator: {operator!r}")
# Convert each "summation group" into an AddExpr or a single operand
# Example:
# Given sum_groups = [[Literal(1)], "*", [Literal(2), "+", Literal(3)]]
# Produces multiplicands = [Literal(1), "*", AddExpr(Literal(2), Literal(3))]
multiplicands = [self._group_operations_flat(sum_group) for sum_group in sum_groups]
# Convert the resulting list of multiplication operations into a single MultExpr.
return self._group_operations_flat(multiplicands)
def _group_operations(self, operations: List[Union[str, Expr]]) -> Expr:
"""
Given a list of expressions and operators, groups them in a single expression
by the precedence law selected in self.precedence.
See _group_operations_flat and _group_operations_addition_first.
"""
if self.precedence == Precedence.FLAT:
return self._group_operations_flat(operations)
elif self.precedence == Precedence.ADDITION_FIRST:
return self._group_operations_addition_first(operations)
else:
raise ValueError(f"Invalid precedence value: {self.precedence}")
def _parse_expr(self, tokens: List[str]) -> Tuple[Expr, List[str]]:
"""
Parses a compound expression until the end of the line or until an unmatched
closing parenthesis.
Returns the expression and the rest of tokens (including the unmatched closing
parenthesis if there is one).
Example:
parser._parse_expr(["(", "1", "+", "2", ")", "+", "3"])
# => AddExpr(ParenExpr(AddExpr(Literal(1), Literal(2))), Literal(3)), []
parser._parse_expr(["(", "1", "+", "2", ")", "+", "3", ")", "*", "4"])
# => AddExpr(ParenExpr(AddExpr(Literal(1), Literal(2))), Literal(3)), [")", "*", "4"]
"""
left_operand, tokens = self._parse_operand(tokens)
if not tokens or tokens[0] == ')':
return left_operand, tokens
# Collect operand and operators into a single list
operations: List[Union[str, Expr]] = [left_operand]
while tokens and tokens[0] != ')':
operator, tokens = self._parse_operator(tokens)
operations.append(operator)
right_operand, tokens = self._parse_operand(tokens)
operations.append(right_operand)
return self._group_operations(operations), tokens
def parse_expr(self, tokens: List[str]) -> Expr:
"""
Parses the expression given the tokens produced by self.tokenize.
Example:
parser._parse_expr(["(", "1", "+", "2", ")", "+", "3"])
# => AddExpr(ParenExpr(AddExpr(Literal(1), Literal(2))), Literal(3))
"""
expr, tokens = self._parse_expr(tokens)
if tokens:
raise ValueError(f"Expected end of line, found more tokens: {tokens!r}")
return expr
def evaluate(self, line: str) -> int:
return self.parse_expr(self.tokenize(line)).eval()
def main():
with open('./input.txt') as f:
lines = [l.strip() for l in f.readlines()]
values = [Parser().evaluate(line) for line in lines]
print(f"Sum of results: {sum(values)}")
values = [Parser(precedence=Precedence.ADDITION_FIRST).evaluate(line) for line in lines]
print(f"Sum of results (part 2): {sum(values)}")
if __name__ == "__main__":
main()
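# Illustrative usage (assuming the Expr classes defined earlier in this module):
#   Parser().evaluate("1 + 2 * 3 + 4")
#   # => 13: flat precedence groups left-to-right, ((1 + 2) * 3) + 4
#   Parser(precedence=Precedence.ADDITION_FIRST).evaluate("1 + 2 * 3 + 4")
#   # => 21: additions bind tighter, (1 + 2) * (3 + 4)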
| [
"[email protected]"
] | |
eb41129ed3f7057d5cc5083bb4c41750be22ebc3 | 979dbbef0b9818d4b950f42ee7d7c0ff0d515617 | /epitools-env/bin/pip3.8 | ea2258fe562378939df7d3d60b46e371f7db2bc7 | [] | no_license | adjovichancel/EPITECH-DOCUMENTATION | 86f6b767162f194d92f53144d01ccde77455653f | b4a13592ef6679117271fef16f2e16ccdbf3ae36 | refs/heads/master | 2022-12-09T14:37:59.591845 | 2020-09-23T21:42:16 | 2020-09-23T21:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | 8 | #!/home/blacky/github/EPITECH-DOCUMENTATION/epitools-env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
cdbb1dc82dac13f36383f179e2df457c3bf5a1b1 | 5a1e1756025bacae88b619d388ebf61b330001ab | /1.Class/Language_Python-master/Language_Python-master/LC25_1_IPaddressChecking.py | 7e82ec909b2bbe4f4feb7d208d5df661e74ba574 | [] | no_license | reshmaladi/Python | d1953497703aa15e163cd8ac27be23e3e5c3e947 | 8e9092af63476fef35d221e20acf418983957e53 | refs/heads/master | 2021-10-15T00:55:08.136039 | 2021-10-01T14:32:16 | 2021-10-01T14:32:16 | 165,836,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py |
import re
def checkip(ip):
pattern = re.compile("(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})")
    k = re.match(pattern, ip)
    if k is None:
        print("Invalid IP")
        return
    type = int(k.group(1))
    print(type)
    # class ranges by first octet: A 1-126, B 128-191, C 192-223, D 224-239, E 240-255
    if(int(k.group(2))<256 and int(k.group(3))<256 and int(k.group(4))<256):
        if(type<127 and type>0):
            print("Class A")
        elif(type<192 and type>127):
            print("Class B")
        elif(type<224 and type>191):
            print("Class C")
        elif(type<240 and type>223):
            print("Class D")
        elif(type<256 and type>239):
            print("Class E")
        else:
            print("Invalid IP")
    else:
        print("Invalid IP")
def main():
ip=input("Enter IP:\t")
checkip(ip)
if __name__=='__main__':
main()
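# Illustrative runs (with the class ranges above):
#   checkip("10.0.0.1")      # first octet 10  -> Class A
#   checkip("172.16.0.5")    # first octet 172 -> Class B
#   checkip("192.168.1.1")   # first octet 192 -> Class C
#   checkip("192.168.1.300") # fourth octet > 255 -> Invalid IP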
'''
try:
n=10/0
except ZeroDivisionError as e:
print(e,"222")
except Exception as e:
print(e,"111")
''' | [
"[email protected]"
] | |
e4fc443083d31d36d7520fafdf282e4d77b2f4e4 | 3df322b2e1ed2b63ee5cd6903a602010ba99d801 | /sniffer/tests/scent_file.py | af43f2f6fc5c9705a258521c03b4d33f665d87db | [
"MIT"
] | permissive | walnutgeek/sniffer | a76799aa31921a8bd2f15c5376fe2cd2da3d5683 | 5b462f61aa39972c42d14af4ff6b1f3f69658b47 | refs/heads/master | 2020-06-04T10:26:20.347812 | 2019-06-14T17:48:25 | 2019-06-14T17:48:25 | 191,983,870 | 1 | 0 | MIT | 2019-06-14T17:43:55 | 2019-06-14T17:43:54 | null | UTF-8 | Python | false | false | 386 | py | from sniffer.api import *
@select_runnable('execute_type1')
@file_validator
def type1_files(filename):
return filename.endswith('.type1')
@select_runnable('execute_type2')
@file_validator
def type2_files(filename):
return filename.endswith('.type2')
@runnable
def execute_type1(mock, *args):
mock('type1')
@runnable
def execute_type2(mock, *args):
mock('type2')
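# Usage note (a hedged sketch of the sniffer API): running the `sniffer` command
# watches the working directory; when a changed file passes a @file_validator,
# the @runnable named in that validator's @select_runnable is executed,
# e.g. saving foo.type1 should trigger execute_type1.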
| [
"[email protected]"
] | |
7daf6a04365cbddc656882e3c00036eba767ca17 | 2dd224df341afd0d10e08024dc53197944ea2df9 | /datacollectvenv/Scripts/django-admin.py | 1e640b005e370c8d78f6c771e14cae2f3b6f487b | [] | no_license | c0pper/datacollector | fc319735c0a8e5e4df590b70ff5b25819f569ef1 | 798548d7b045771335ca87b3e250cbc169ecf34c | refs/heads/master | 2023-06-09T01:02:09.583650 | 2021-06-27T20:42:01 | 2021-06-27T20:42:01 | 380,715,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #!c:\users\simon\pycharmprojects\django data collector\datacollectvenv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
2341e1ce655b42e820e7cb430efa637bb46d886a | 409b87a4f3b77e929d84f140d9574e28c00ed9ec | /06-Condities/Test.py | b3d51bc8cad9e661f346bef5a111f392aa3d7db5 | [] | no_license | WoutVdb19103/Informatica5 | d16f76f308ecd95e5c1339e0b3f5be50e07d0e08 | 7d127e54b01a6dcac33907c346c1c360a07fa019 | refs/heads/master | 2020-03-28T03:19:03.832752 | 2019-05-23T08:05:59 | 2019-05-23T08:05:59 | 147,637,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | leeftijd = int(input('leeftijd: '))
if leeftijd > 21:                           # leeftijd = age
    print('Drink met mate.')                # "Drink in moderation."
elif leeftijd > 16:
    print('Wel pintje, geen sterke drank')  # "A beer is fine, no spirits"
else:
    print('Alcohol verboden!')              # "Alcohol forbidden!"
| [
"[email protected]"
] | |
85f8c276c3f225a7b4195c6f3beabc4213a234ba | f888669dcf1ae6c6a25235c49ccb67333163b7f2 | /korpokkur/scaffolds/pygitignore/__init__.py | 9527f58b84b5b1659f7f3ff9417039e7cf668b02 | [] | no_license | podhmo/korpokkur | dd50f6df9b7659ab70b8dcde809fb50ebc848d40 | 9bf4cb6ca6fa3ec45fe1820282345e78268b396d | refs/heads/master | 2020-07-04T07:03:29.605665 | 2016-10-01T14:04:09 | 2016-10-01T14:04:09 | 19,282,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # -*- coding:utf-8 -*-
from zope.interface import implementer
from korpokkur.interfaces import IScaffoldTemplate
import os.path
## see: korpokkur.interfaces:IScaffoldTemplate
@implementer(IScaffoldTemplate)
class Package(object):
"""tiny python package scaffold (this is sample)"""
source_directory = os.path.join(os.path.abspath(os.path.dirname(__file__)), "+package+")
expected_words = {
"package": ("package name", "sample")
}
template_engine = "mako"
| [
"[email protected]"
] | |
179a1ab778d5951e027a1585095e9502fc057b84 | 3f6187741f631db0f43865b0029397324e1bd190 | /Script1.py | 2b0edc2b8a93e02b6a65a97cc985006853467c04 | [] | no_license | petervdl112/selenium | 9067cf614263aafa934ea400304ba1b2487ad27c | f8eee9a82a364bba5d3ec008b9df2c77529ddefa | refs/heads/main | 2023-08-11T17:12:56.084169 | 2021-09-15T19:05:20 | 2021-09-15T19:05:20 | 400,483,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # so keystrokes like Enter can be sent
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#1e video on selenium TechWithTim
#PATH = 'C:/Program Files (x86)/chromedriver.exe' # for Windows
#driver = webdriver.Chrome(PATH) # for Windows
driver = webdriver.Chrome()
driver.get('https://techwithtim.net')
#grab search results. Textbox "search" has HTML "value_name='s'"
search = driver.find_element_by_name('s')
search.clear()
search.send_keys('test')
search.send_keys(Keys.RETURN)
# This is the way to wait until the page is available. The alternative is to fetch directly:
# main = driver.find_element_by_id('main').
# More specifically: we first look for the section with id "main", then search for a tag within it.
# It seems that for an <article id="..."> you can simply search for the article tag.
try:
main = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "main"))
)
articles = main.find_elements_by_tag_name("article")
for article in articles:
header = article.find_element_by_class_name("entry-summary")
print(header.text)
time.sleep(5)
finally:
driver.quit()
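# An equivalent lookup with a CSS selector (sketch; assumes the same page structure):
#   summaries = driver.find_elements_by_css_selector("#main article .entry-summary")
#   for summary in summaries:
#       print(summary.text)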
| [
"[email protected]"
] | |
bfdea5090dfe44846995ad0ad8fc2a19c97373c8 | 6d8cc1eeb320988a9e8e9047e6cd403600361257 | /manipulator_grasping/src/grasp_demo_3.py | 9891d2217e57dda760b8347052151f832699859b | [] | no_license | SalahEddine1919/mov | de3ce43a2e0fc2dc7d7e66a81d6730df23c663d4 | 7c5d6ce72df563bb88dfd42ac7b949c8b2ed0682 | refs/heads/main | 2023-04-28T16:00:32.041816 | 2021-05-21T09:29:08 | 2021-05-21T09:29:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy, sys
import moveit_commander
from control_msgs.msg import GripperCommand
class MoveItFkDemo:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
# Initialize ROS node
rospy.init_node('moveit_fk_demo', anonymous=True)
# Initialize the arm group in the robot arm that needs to be controlled by the move group
arm = moveit_commander.MoveGroupCommander('arm')
# Initialize the gripper group in the robot arm controlled by the move group
gripper = moveit_commander.MoveGroupCommander('gripper')
# Set the allowable error value of the robot arm and gripper
arm.set_goal_joint_tolerance(0.001)
gripper.set_goal_joint_tolerance(0.001)
# Control the robot arm to return to the initial position first
'''
arm.set_named_target('bow')
arm.go()
rospy.sleep(2)
'''
# Set the target position of the gripper and control the gripper movement
gripper.set_named_target("opened")
gripper.go()
rospy.sleep(1)
        # Set the target joint positions of the robot arm, one value per joint (unit: radians)
joint_positions = [0.0,1.57,1.0,0.0,-1.0]
result=arm.set_joint_value_target(joint_positions)
rospy.loginfo(str(result))
# Control the robot arm to complete the movement
arm.go()
joint=arm.get_current_joint_values()
print("final joint=",joint)
pose=arm.get_current_pose('arm_link5')
print('pose=',pose)
rospy.sleep(1)
gripper.set_named_target("closed")
gripper.go()
rospy.sleep(1)
arm.set_named_target('bow')
arm.go()
rospy.sleep(2)
# Close and exit moveit
moveit_commander.roscpp_shutdown()
moveit_commander.os._exit(0)
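        # Alternative (sketch): instead of a joint-space goal, a Cartesian goal can be
        # set and solved via IK, e.g.:
        #   arm.set_pose_target(target_pose, 'arm_link5')
        #   arm.go()
        # where target_pose would be a geometry_msgs/PoseStamped (hypothetical values).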
if __name__ == "__main__":
try:
MoveItFkDemo()
except rospy.ROSInterruptException:
pass | [
"[email protected]"
] | |
8147e3887f21a7dd5c37cf59f704ec8811f0a409 | c4ad1f2a6c1bc6cfc22c57a70035444340244854 | /snippet_converter/__init__.py | 074e3b044258b8d49dbb4542c2cf2a5a52f9c06e | [
"MIT"
] | permissive | pr4shan7/snippet-converter | 646598f765deb6edb13b05dc8816d5a5ac251ebd | ac9f29bc7a723b7f2bcbdf6fca4cbdee02526d5e | refs/heads/master | 2022-12-21T23:21:17.728089 | 2020-10-02T11:40:57 | 2020-10-02T11:40:57 | 263,321,787 | 0 | 1 | MIT | 2020-10-02T11:40:59 | 2020-05-12T11:49:57 | Python | UTF-8 | Python | false | false | 107 | py | from snippet_converter import setup
from snippet_converter import sublime
from snippet_converter import cli | [
"[email protected]"
] | |
450ace0a062d76157a65deb0ea308b6eada98826 | 118371a66a1649bf515712f66ae96e15a4443171 | /util.py | 472f745fa7cd04ecc6077843ce2471e146f61757 | [] | no_license | sodiffi/hot_source_server | 82dd8e721d9b043c2a04e8b841e4a011e0f82c8a | 81722e59d3b12571ddeb7c76497f5436915c2fa6 | refs/heads/master | 2023-08-28T12:49:39.984556 | 2021-10-07T03:56:38 | 2021-10-07T03:56:38 | 412,814,126 | 0 | 0 | null | 2021-10-03T15:44:58 | 2021-10-02T14:06:38 | Python | UTF-8 | Python | false | false | 338 | py | from flask import Response,jsonify,make_response
import json
from coder import MyEncoder
def ret(result):
mes="成功" if result["success"] else "失敗"
resultData=result["data"] if "data" in result else {}
return make_response(json.dumps({"D":resultData,"message":mes,"success":result["success"]}, cls=MyEncoder))
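# Illustrative use inside a Flask view (sketch; the route and payload are hypothetical):
#   @app.route("/items")
#   def items():
#       return ret({"success": True, "data": {"items": [1, 2, 3]}})
#   # the client receives {"D": {...}, "message": "成功", "success": true}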
| [
"[email protected]"
] | |
4b7ba8fe703eed263317b0fd81b1007e71aafd01 | 00d7e9321d418a2d9a607fb9376b862119f2bd4e | /patterns/__init__.py | 00b2afc5cbacee223ea57b5ca685e1d8786dd191 | [
"MIT"
] | permissive | baluneboy/pims | 92b9b1f64ed658867186e44b92526867696e1923 | 5a07e02588b1b7c8ebf7458b10e81b8ecf84ad13 | refs/heads/master | 2021-11-16T01:55:39.223910 | 2021-08-13T15:19:48 | 2021-08-13T15:19:48 | 33,029,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | """
A place to keep all our string-type patterns (like regular expressions, for example).
""" | [
"[email protected]"
] | |
c3cdd5ad8556ee86fd866d49b31d93dc6e696596 | ad7391451b41e95c4349bdf62846969f6a92e155 | /VCF data mining new.py | 2f7b386892a330e72f5fe26c08748adeab807684 | [] | no_license | bearnomore/Evolution-genomics | 10ed959024f91ac944ec6a236da4c063c50f2bee | 9a48cde3b8d61d540c5a907c6b42e2c266458776 | refs/heads/master | 2020-03-13T09:33:03.231419 | 2018-07-10T20:39:28 | 2018-07-10T20:39:28 | 131,066,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,584 | py |
# coding: utf-8
# In[150]:
import pandas as pd
import numpy as np
import glob
# In[180]:
def vcf_clean(file):
'''
Input = Exported VCF from Geneious,
Select columns ['Minimum', 'Maximum', 'Amino Acid Change', 'Change', 'Codon Change', 'locus_tag', 'Polymorphism Type', 'Protein Effect',
'Variant Frequency', 'Note'],
Remove rows with 'Protein Effect' == NaN or None,
Remove rows of 'locus_tag' == 'SCO3798' where the barcode is inserted,
Sort data by 'Variant Frequency' in descending order.
Return cleaned VCF dataframe
'''
vcf = pd.read_csv(file, usecols = ['Minimum', 'Maximum', 'Change', 'locus_tag', 'Polymorphism Type',
'Protein Effect', 'Variant Frequency', 'Amino Acid Change','Codon Change', 'note'])
## vcf = vcf[~vcf['Protein Effect'].isnull()].loc[vcf['Protein Effect'] != 'None'].loc[vcf['locus_tag'] != 'SCO3798']
# Remove SNPs that caused no effect on protein encoding genes, but maintain all other intra/inter genetic changes
vcf = vcf.loc[vcf['Protein Effect'] != 'None']
# Clean the variant frequencies and pick up the floor of the range
vcf['Variant Frequency']= list(map(lambda x: float(x[0].replace('%', ''))/100, vcf['Variant Frequency'].str.split( '->')))
vcf.sort_values(by = ['Variant Frequency'], ascending = False, inplace = True)
return vcf
# In[191]:
def clean_WT(wt_vcf, vf_threshold):
'''
Input = the wt_vcf dataframe from vcf_clean function, the threshold of Variant Frequency
Remove all rows with 'Variant Frequency' >= vf_threshold (e.g. 0.95)
so that the existed variances across the population from time0 in our barcoded strain are excluded from following analysis.
Output = wt_vcf_new
'''
wt_vcf_new = wt_vcf.loc[wt_vcf['Variant Frequency'] < vf_threshold]
return wt_vcf_new
# In[294]:
def compare_to_wt(wt_vcf_new, mutant_vcf):
'''
Input = the cleaned wt_vcf_new where variances across the population have been removed, the mutant_vcf cleaned by vcf_clean()
Output = dataframe common where common variances were found between wt_vcf_new and the mutant_vcf,
dataframe mutant_to_wt where new variances are found
'''
common = wt_vcf_new.merge(mutant_vcf, on = ['Minimum', 'Maximum', 'Change'])
mutant_to_wt = mutant_vcf[(~mutant_vcf['Minimum'].isin(common['Minimum']))
& (~mutant_vcf['Maximum'].isin(common['Maximum']))
& (~mutant_vcf['Change'].isin(common['Change']))]
mutant_to_wt['locus_tag'].fillna('intergenetic region', inplace = True)
mutant_to_wt.set_index('locus_tag', inplace = True)
# clean up the common df a bit by removing repeated columns
common = common[['Minimum', 'Maximum', 'Change', 'locus_tag_x',
'Polymorphism Type_x', 'Protein Effect_x', 'Variant Frequency_x',
'Amino Acid Change_x', 'Codon Change_x', 'note_x', 'Variant Frequency_y']]
common.rename(index=str, columns={'locus_tag_x':'locus_tag',"Polymorphism Type_x": "Polymorphism Type", "Protein Effect_x": "Protein Effect",
'Variant Frequency_x': 'Variant Frequency WT', 'Amino Acid Change_x': 'Amino Acid Change',
'Codon Change_x': 'Codon Change', 'note_x': 'note', 'Variant Frequency_y': 'Variant Frequency Mutant'}, inplace = True)
common['locus_tag'].fillna('intergenetic region', inplace = True)
common.set_index('locus_tag', inplace = True)
return common, mutant_to_wt
# In[295]:
files = glob.glob('G:\Dropbox (Vetsigian lab)\Vetsigian lab Team Folder\Ye\Genomics\M145 Evolved mutants\*.csv')
strain_names = list(map(lambda f: f.split('\\')[6].split('_')[0], files))
vcfs = list(map(lambda f: vcf_clean(f), files))
VCFs = dict(zip(strain_names, vcfs))
wt = VCFs['WT']
vf_threshold = 0.95
wt_new = clean_WT(wt, vf_threshold)
# Compare each mutant df in VCFs to the cleaned new WT vcf, wt_new.
# This will generate a two layered dictionary where outside keys are the mutant names,
# and the inside keys are "common" and "variance" that corresponding to variance found
# in the wt_new and variance not found in wt_new respectively.
mutant_names = strain_names[:-1]
dict_mutants_to_wt = {}
for name in mutant_names:
[common, variance]= compare_to_wt(wt_new, VCFs[name])
dict_mutants_to_wt[name] = {}
dict_mutants_to_wt[name]['common'] = common
dict_mutants_to_wt[name]['variance'] = variance
wt_new['locus_tag'].fillna('intergenetic region', inplace = True)
wt_new.set_index('locus_tag', inplace = True)
# In[296]:
dict_mutants_to_wt['R1']
# In[298]:
# Compare variant frequencies of mutations shared by wt and mutants ('the common data set and wt data set')
common_vf_comp = {}
for mutant in mutant_names:
common_vf_comp[mutant] = dict_mutants_to_wt[mutant]['common'][['Variant Frequency WT', 'Variant Frequency Mutant']]
common_vf_comp
# In[144]:
# find locus tag with mutations of variant freq > 0.5
def find_locus(dict_mutants_to_wt, variant_freq, mutant_name):
    # use the 'variance' table of new mutations and the passed-in threshold
    mutant2wt = dict_mutants_to_wt[mutant_name]['variance']
    return mutant2wt[mutant2wt['Variant Frequency'] > variant_freq].index.values
variant_freq = 0.5
mutation_genes = {}
for name in mutant_names:
mutation_genes[name] = find_locus(dict_mutants_to_wt, variant_freq, name)
#mutation_genes
df = pd.DataFrame.from_dict(mutation_genes, orient="index")
df.to_csv("G:\\Ye\\Evolution mutants\\mutation_genes.csv")
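# Resulting CSV layout (from from_dict(..., orient="index")): one row per mutant,
# the mutant name as the index, and columns 0..N-1 holding the locus tags of
# mutations whose variant frequency exceeded the threshold.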
| [
"[email protected]"
] | |
1de878da8da08496ed34586ab07431295564e3c0 | 876c5c693739673ece340fca377746292da41d69 | /Simulation/Minutia/Dopplers Mirror/Doppler Mirror.py | 1528f8768a030248dc0c2e7e02b265bcd21b4a9a | [] | no_license | DanielxDWhite/Y4Project | b72ab9d37f5b5810cafe5cea9f96ac43ce417ee0 | e7bbfb72034fefbe80eea94f9b1956ff13b20fb6 | refs/heads/master | 2021-04-09T09:08:43.302472 | 2020-03-20T23:43:17 | 2020-03-20T23:43:17 | 248,858,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import matplotlib.pyplot as plt
import numpy as np
c = 3*10**8
#d = 30*10**6
d = np.linspace(0.5 * 10**7, 11**7, 15)
f0 = 382.230484468*10**12
f = f0 + d
a = f/f0
xx = 1-a
xy = np.negative(2)
xz = 1+a
disc = xy**2-4*xz*xx
# roots of xx*B**2 + xy*B + xz = 0 via the quadratic formula (-b +/- sqrt(disc)) / (2a)
Bp = (-xy+np.sqrt(disc))/(2*xx)
Bm = (-xy-np.sqrt(disc))/(2*xx)
Vp = c * Bp
Vm = c * Bm
print(Vp,Vm)
dplot = d*10**(-6)
Vpplot = Vp *10**(7)
plt.plot(dplot, Vpplot)
plt.xlabel('Detuning / MHz')
plt.ylabel('Velocity of Mirror $x10^{-7}$ / $ms^{-1}$')
plt.title('Doppler shift via a moving mirror')
plt.grid(color='grey', linestyle='-', linewidth=0.2)
plt.show()
plt.close()
| [
"[email protected]"
] | |
39a03d47b7ac9f20e8cfcc3abf17a00db972c6dc | 5b18718ca8526788dde87be82da3c078ea40c766 | /geometry/discriminant.py | 4d450a6c9dc4a2371dab75736a401267bd11ed73 | [] | no_license | rhyshardwick/maths | 42443dcd888937128176d9fedbda22ad25ab09d8 | bca86c3d5b7db95a9063e0113067b437e00f9e40 | refs/heads/master | 2023-02-23T23:47:17.715035 | 2022-10-06T15:59:54 | 2022-10-06T15:59:54 | 185,846,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Calculates the discriminant value
# discriminant: the expression that appears under the square root (radical) sign in
# the quadratic formula
# b ** 2 - 4ac
# w3resource exercise uses x, y, z; I will use the more standard a, b, c
print("Calculate the discriminant for the quadratic equation ax^2 + bx + c")
a = float(input("a: "))
b = float(input("b: "))
c = float(input("c: "))
print("Discriminant:", b ** 2 - 4 * a * c)
| [
"rhys@RhysSurfacePro7"
] | rhys@RhysSurfacePro7 |
ce1c4491d2da4a9c8e764be676f415cea4e6e04e | 124b35ccbae76ba33b9044071a056b9109752283 | /NeuralNetwork/selective/g_final.py | ea9f9155e11885614eb73dff119af6f31e7741ed | [] | no_license | anilmaddu/Daily-Neural-Network-Practice-2 | 94bc78fe4a5a429f5ba911bae5f231f3d8246f61 | 748de55c1a17eae9f65d7ea08d6b2b3fc156b212 | refs/heads/master | 2023-03-08T22:04:45.535964 | 2019-03-15T23:10:35 | 2019-03-15T23:10:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,831 | py | import tensorflow as tf
import numpy as np
import sys, os,cv2
from sklearn.utils import shuffle
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from skimage.transform import resize
from imgaug import augmenters as iaa
import imgaug as ia
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
np.random.seed(678)
tf.set_random_seed(678)
ia.seed(678)
def tf_elu(x): return tf.nn.elu(x)
def d_tf_elu(x): return tf.cast(tf.greater_equal(x,0),tf.float32) + tf.cast(tf.less(x,0),tf.float32) * (tf_elu(x) + 1.0)  # ELU': 1 for x>=0, elu(x)+1 for x<0
def tf_relu(x): return tf.nn.relu(x)
def d_tf_relu(x): return tf.cast(tf.greater_equal(x,0),tf.float32)
def tf_softmax(x): return tf.nn.softmax(x)
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
# ===== Rules that I have learned =====
# 1. Data augmentation horizontal flip
# 2. kernel size is 3
# 3. LR decay is also good
# 4. 3 block is good
# ===== Rules that I have learned =====
# code from: https://github.com/tensorflow/tensorflow/issues/8246
def tf_repeat(tensor, repeats):
"""
Args:
input: A Tensor. 1-D or higher.
repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input
Returns:
A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats
"""
expanded_tensor = tf.expand_dims(tensor, -1)
multiples = [1] + repeats
tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)
    repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
    return repeated_tensor
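# Example (illustrative): a [1,2,2,1] tensor repeated with [1,2,2,1] becomes
# [1,4,4,1], i.e. each spatial value is tiled into a 2x2 block. This is how the
# backward pass below upsamples gradients across the 2x2 average-pool layers
# (note a strict avg-pool gradient would also scale each value by 1/4).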
# class
class CNN():
def __init__(self,k,inc,out,act,d_act):
self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05))
self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))
self.v_hat_prev = tf.Variable(tf.zeros_like(self.w))
self.act,self.d_act = act,d_act
def getw(self): return self.w
def feedforward(self,input,stride=1,padding='SAME',droprate=1.0,res=False):
self.input = input
self.layer = tf.nn.dropout(tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) ,droprate)
self.layerA = self.act(self.layer)
if res: return self.layerA + input
return self.layerA
def backprop(self,gradient,learning_rate_change,batch_size_dynamic,stride=1,padding='SAME',mom=False,adam=False,awsgrad=False,reg=False):
grad_part_1 = gradient
grad_part_2 = self.d_act(self.layer)
grad_part_3 = self.input
grad_middle = grad_part_1 * grad_part_2
grad = tf.nn.conv2d_backprop_filter(
input = grad_part_3,
filter_sizes = self.w.shape,out_backprop = grad_middle,
strides=[1,stride,stride,1],padding=padding
)
grad_pass = tf.nn.conv2d_backprop_input(
input_sizes = [batch_size_dynamic] + list(grad_part_3.shape[1:]),
filter= self.w,out_backprop = grad_middle,
strides=[1,stride,stride,1],padding=padding
)
update_w = []
if adam:
update_w.append(
tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) )
)
update_w.append(
tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) )
)
m_hat = self.m / (1-beta1)
v_hat = self.v_prev / (1-beta2)
adam_middel = learning_rate_change/(tf.sqrt(v_hat) + adam_e)
adam_middel = tf.multiply(adam_middel,m_hat)
if reg: adam_middel = adam_middel - learning_rate_change * 0.00001 * self.w
update_w.append(tf.assign(self.w,tf.subtract(self.w,adam_middel )) )
if awsgrad:
update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * grad ))
v_t = self.v_prev *beta2 + (1-beta2) * grad ** 2
def f1(): return v_t
def f2(): return self.v_hat_prev
v_max = tf.cond(tf.greater(tf.reduce_sum(v_t), tf.reduce_sum(self.v_hat_prev) ) , true_fn=f1, false_fn=f2)
adam_middel = tf.multiply(learning_rate_change/(tf.sqrt(v_max) + adam_e),self.m)
if reg: adam_middel = adam_middel - learning_rate_change * 0.00001 * self.w
update_w.append(tf.assign(self.w,tf.subtract(self.w,adam_middel ) ))
update_w.append(tf.assign( self.v_prev,v_t ))
update_w.append(tf.assign( self.v_hat_prev,v_max ))
if mom:
update_w.append(tf.assign( self.m,self.m*beta1 + learning_rate* (grad) ))
adam_middel = self.m
if reg: adam_middel = adam_middel - learning_rate_change * 0.00001 * self.w
update_w.append(tf.assign(self.w,tf.subtract(self.w,adam_middel )) )
return grad_pass,update_w
# data
PathDicom = "../../Dataset/cifar-10-batches-py/"
lstFilesDCM = [] # create an empty list
for dirName, subdirList, fileList in os.walk(PathDicom):
for filename in fileList:
if not ".html" in filename.lower() and not ".meta" in filename.lower(): # check whether the file's DICOM
lstFilesDCM.append(os.path.join(dirName,filename))
# Read the data traind and Test
batch0 = unpickle(lstFilesDCM[0])
batch1 = unpickle(lstFilesDCM[1])
batch2 = unpickle(lstFilesDCM[2])
batch3 = unpickle(lstFilesDCM[3])
batch4 = unpickle(lstFilesDCM[4])
onehot_encoder = OneHotEncoder(sparse=True)
train_batch = np.vstack((batch0[b'data'],batch1[b'data'],batch2[b'data'],batch3[b'data'],batch4[b'data']))
train_label = np.expand_dims(np.hstack((batch0[b'labels'],batch1[b'labels'],batch2[b'labels'],batch3[b'labels'],batch4[b'labels'])).T,axis=1).astype(np.float32)
train_label = onehot_encoder.fit_transform(train_label).toarray().astype(np.float32)
test_batch = unpickle(lstFilesDCM[5])[b'data']
test_label = np.expand_dims(np.array(unpickle(lstFilesDCM[5])[b'labels']),axis=0).T.astype(np.float32)
test_label = onehot_encoder.fit_transform(test_label).toarray().astype(np.float32)
train_batch = np.reshape(train_batch,(len(train_batch),3,32,32))
test_batch = np.reshape(test_batch,(len(test_batch),3,32,32))
# reshape data rotate data
train_batch = np.rot90(np.rot90(train_batch,1,axes=(1,3)),3,axes=(1,2))
test_batch = np.rot90(np.rot90(test_batch,1,axes=(1,3)),3,axes=(1,2)).astype(np.float32)
# standardize (zero mean, unit variance) the test data per channel
test_batch[:,:,:,0] = (test_batch[:,:,:,0] - test_batch[:,:,:,0].mean(axis=0)) / ( test_batch[:,:,:,0].std(axis=0))
test_batch[:,:,:,1] = (test_batch[:,:,:,1] - test_batch[:,:,:,1].mean(axis=0)) / ( test_batch[:,:,:,1].std(axis=0))
test_batch[:,:,:,2] = (test_batch[:,:,:,2] - test_batch[:,:,:,2].mean(axis=0)) / ( test_batch[:,:,:,2].std(axis=0))
# print out the data shape
print(train_batch.shape)
print(train_label.shape)
print(test_batch.shape)
print(test_label.shape)
# hyper parameter
num_epoch = 101
batch_size = 50
print_size = 1
beta1,beta2,adam_e = 0.9,0.9,1e-9
learning_rate = 0.0003
learnind_rate_decay = 0
proportion_rate = 0.0000001
decay_rate = 0
# define class
channel_size = 164
l1 = CNN(3,3,channel_size,tf_elu,d_tf_elu)
l2 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l3 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l4 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l5 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l6 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l7 = CNN(3,channel_size,channel_size,tf_elu,d_tf_elu)
l8 = CNN(1,channel_size,channel_size,tf_elu,d_tf_elu)
l9 = CNN(1,channel_size,10,tf_elu,d_tf_elu)
# graph
x = tf.placeholder(shape=[None,32,32,3],dtype=tf.float32)
y = tf.placeholder(shape=[None,10],dtype=tf.float32)
batch_size_dynamic= tf.placeholder(tf.int32, shape=())
iter_variable = tf.placeholder(tf.float32, shape=())
learning_rate_dynamic = tf.placeholder(tf.float32, shape=())
learning_rate_change = learning_rate_dynamic * (1.0/(1.0+learnind_rate_decay*iter_variable))
decay_dilated_rate = proportion_rate * (1.0/(1.0+decay_rate*iter_variable))
droprate1 = tf.placeholder(tf.float32, shape=())
droprate2 = tf.placeholder(tf.float32, shape=())
droprate3 = tf.placeholder(tf.float32, shape=())
layer1 = l1.feedforward(x,droprate=droprate1)
layer2 = l2.feedforward(layer1,droprate=droprate2)
layer3 = l3.feedforward(layer2,droprate=droprate3)
layer4_Input = tf.nn.avg_pool(layer3,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")
layer4 = l4.feedforward(layer4_Input,droprate=droprate2)
layer5 = l5.feedforward(layer4,droprate=droprate3)
layer6 = l6.feedforward(layer5,droprate=droprate1)
layer7_Input = tf.nn.avg_pool(layer6,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")
layer7 = l7.feedforward(layer7_Input,droprate=droprate3)
layer8 = l8.feedforward(layer7,droprate=droprate1)
layer9 = l9.feedforward(layer8,droprate=droprate2)
final_global = tf.reduce_mean(layer9,[1,2])
final_soft = tf_softmax(final_global)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_global,labels=y) )
correct_prediction = tf.equal(tf.argmax(final_soft, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# ===== manual ====
grad_prepare = tf.reshape(final_soft-y, [ batch_size_dynamic ,1,1,10] )
grad9,grad9_up = l9.backprop(grad_prepare,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad8,grad8_up = l8.backprop(grad9,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic)
grad7,grad7_up = l7.backprop(grad8,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad6_Input = tf_repeat(grad7,[1,2,2,1])
grad6,grad6_up = l6.backprop(grad6_Input,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad5,grad5_up = l5.backprop(grad6,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic)
grad4,grad4_up = l4.backprop(grad5,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad3_Input = tf_repeat(grad4,[1,2,2,1])
grad3,grad3_up = l3.backprop(grad3_Input,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad2,grad2_up = l2.backprop(grad3,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic)
grad1,grad1_up = l1.backprop(grad2,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic)
grad_update = grad9_up + grad8_up+ grad7_up + grad6_up + grad5_up + grad4_up + grad3_up + grad2_up + grad1_up
# ===== manual ====
# ====== adversarial input ==========
adversal_input = x + 0.01 * tf.sign(grad1)
concat_input = tf.concat([x,adversal_input],axis=0)
yy = tf.concat([y,y],axis=0)
layer1_ad = l1.feedforward(concat_input,droprate=droprate1)
layer2_ad = l2.feedforward(layer1_ad,droprate=droprate2)
layer3_ad = l3.feedforward(layer2_ad,droprate=droprate3)
layer4_Input_ad = tf.nn.avg_pool(layer3_ad,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")
layer4_ad = l4.feedforward(layer4_Input_ad,droprate=droprate2)
layer5_ad = l5.feedforward(layer4_ad,droprate=droprate3)
layer6_ad = l6.feedforward(layer5_ad,droprate=droprate1)
layer7_Input_ad = tf.nn.avg_pool(layer6_ad,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")
layer7_ad = l7.feedforward(layer7_Input_ad,droprate=droprate3)
layer8_ad = l8.feedforward(layer7_ad,droprate=droprate1)
layer9_ad = l9.feedforward(layer8_ad,droprate=droprate2)
final_global_ad = tf.reduce_mean(layer9_ad,[1,2])
final_soft_ad = tf_softmax(final_global_ad)
cost_ad = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_global_ad,labels=yy) )
correct_prediction_ad = tf.equal(tf.argmax(final_soft_ad, 1), tf.argmax(yy, 1))
accuracy_ad = tf.reduce_mean(tf.cast(correct_prediction_ad, tf.float32))
# ====== adversarial input ==========
# ===== adversarial manual ====
grad_prepare_ad = tf.reshape(final_soft_ad-yy,[batch_size_dynamic*2,1,1,10])
grad9_ad,grad9_up_ad = l9.backprop(grad_prepare_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad8_ad,grad8_up_ad = l8.backprop(grad9_ad,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic*2)
grad7_ad,grad7_up_ad = l7.backprop(grad8_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad6_Input_ad = tf_repeat(grad7_ad,[1,2,2,1])
grad6_ad,grad6_up_ad = l6.backprop(grad6_Input_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad5_ad,grad5_up_ad = l5.backprop(grad6_ad,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic*2)
grad4_ad,grad4_up_ad = l4.backprop(grad5_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad3_Input_ad = tf_repeat(grad4_ad,[1,2,2,1])
grad3_ad,grad3_up_ad = l3.backprop(grad3_Input_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad2_ad,grad2_up_ad = l2.backprop(grad3_ad,learning_rate_change=learning_rate_change,adam=True,batch_size_dynamic=batch_size_dynamic*2)
grad1_ad,grad1_up_ad = l1.backprop(grad2_ad,learning_rate_change=learning_rate_change,awsgrad=True,batch_size_dynamic=batch_size_dynamic*2)
grad_update_ad = grad9_up_ad + grad8_up_ad+ grad7_up_ad + grad6_up_ad + grad5_up_ad + grad4_up_ad + grad3_up_ad + grad2_up_ad + grad1_up_ad
# ===== adversarial manual ====
# sess
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_cota,train_acca = 0,0
train_cot,train_acc = [],[]
test_cota,test_acca = 0,0
test_cot,test_acc = [],[]
data_input_type = 0
for iter in range(num_epoch):
train_batch,train_label = shuffle(train_batch,train_label)
lower_bound = 0.025 * (iter+1)/num_epoch
random_drop1 = np.random.uniform(low=0.975+lower_bound,high=1.000000000000001)
random_drop2 = np.random.uniform(low=0.975+lower_bound,high=1.000000000000001)
random_drop3 = np.random.uniform(low=0.975+lower_bound,high=1.000000000000001)
for batch_size_index in range(0,len(train_batch),batch_size//2):
current_batch = train_batch[batch_size_index:batch_size_index+batch_size//2]
current_batch_label = train_label[batch_size_index:batch_size_index+batch_size//2]
# data aug
seq = iaa.Sequential([
iaa.Sometimes( (0.1 + lower_bound * 6) ,
iaa.Affine(
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
)
),
iaa.Sometimes( (0.2 + lower_bound * 6),
iaa.Affine(
rotate=(-25, 25),
)
),
iaa.Sometimes( (0.1 + lower_bound * 6),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
)
),
iaa.Fliplr(1.0), # Horizontal flips
], random_order=True) # apply augmenters in random order
seq_lite1 = iaa.Sequential([
iaa.Fliplr(0.5), # Horizontal flips,
], random_order=True) # apply augmenters in random order
seq_lite2 = iaa.Sequential([
iaa.Flipud(0.5), # Horizontal flips
], random_order=True) # apply augmenters in random order
if data_input_type == 2 :
images_aug = seq.augment_images(current_batch.astype(np.float32))
current_batch = np.vstack((current_batch,images_aug)).astype(np.float32)
current_batch_label = np.vstack((current_batch_label,current_batch_label)).astype(np.float32)
input_sess_array = [cost_ad,accuracy_ad,correct_prediction_ad,grad_update_ad,concat_input]
input_feed_dict={x:current_batch,y:current_batch_label,
iter_variable:iter,learning_rate_dynamic:learning_rate,droprate1:random_drop1,droprate2:random_drop2,droprate3:random_drop3,batch_size_dynamic:batch_size}
elif data_input_type == 0 :
images_aug = seq.augment_images(current_batch.astype(np.float32))
current_batch = np.vstack((current_batch,images_aug)).astype(np.float32)
current_batch_label = np.vstack((current_batch_label,current_batch_label)).astype(np.float32)
input_sess_array = [cost,accuracy,correct_prediction,grad_update]
input_feed_dict= {x:current_batch,y:current_batch_label,
iter_variable:iter,learning_rate_dynamic:learning_rate,droprate1:random_drop1,droprate2:random_drop2,droprate3:random_drop3,batch_size_dynamic:batch_size}
elif data_input_type == 1 :
current_batch = seq_lite1.augment_images(current_batch.astype(np.float32))
input_sess_array = [cost_ad,accuracy_ad,correct_prediction_ad,grad_update_ad,concat_input]
input_feed_dict ={x:current_batch,y:current_batch_label,
iter_variable:iter,learning_rate_dynamic:learning_rate,droprate1:random_drop1,droprate2:random_drop2,droprate3:random_drop3,batch_size_dynamic:batch_size//2}
elif data_input_type == 3 :
current_batch = seq_lite2.augment_images(current_batch.astype(np.float32))
input_sess_array = [cost_ad,accuracy_ad,correct_prediction_ad,grad_update_ad,concat_input]
input_feed_dict ={x:current_batch,y:current_batch_label,
iter_variable:iter,learning_rate_dynamic:learning_rate,droprate1:random_drop1,droprate2:random_drop2,droprate3:random_drop3,batch_size_dynamic:batch_size//2}
# online data augmentation here and standard normalization
current_batch[:,:,:,0] = (current_batch[:,:,:,0] - current_batch[:,:,:,0].mean(axis=0)) / ( current_batch[:,:,:,0].std(axis=0)+ 1e-20)
current_batch[:,:,:,1] = (current_batch[:,:,:,1] - current_batch[:,:,:,1].mean(axis=0)) / ( current_batch[:,:,:,1].std(axis=0)+ 1e-20)
current_batch[:,:,:,2] = (current_batch[:,:,:,2] - current_batch[:,:,:,2].mean(axis=0)) / ( current_batch[:,:,:,2].std(axis=0)+ 1e-20)
current_batch,current_batch_label = shuffle(current_batch,current_batch_label)
# online data augmentation here and standard normalization
sess_result = sess.run(input_sess_array,feed_dict=input_feed_dict )
print("Current Iter : ",iter, " current batch: ",batch_size_index, ' Current cost: ', sess_result[0],
' Current Acc: ', sess_result[1],end='\r')
train_cota = train_cota + sess_result[0]
train_acca = train_acca + sess_result[1]
for test_batch_index in range(0,len(test_batch),batch_size):
current_batch = test_batch[test_batch_index:test_batch_index+batch_size]
current_batch_label = test_label[test_batch_index:test_batch_index+batch_size]
sess_result = sess.run([cost,accuracy,correct_prediction],
feed_dict={x:current_batch,y:current_batch_label,iter_variable:iter,learning_rate_dynamic:learning_rate,
droprate1:1.0,droprate2:1.0,droprate3:1.0,batch_size_dynamic:batch_size})
print("Current Iter : ",iter, " current batch: ",test_batch_index, ' Current cost: ', sess_result[0],
' Current Acc: ', sess_result[1],end='\r')
test_acca = sess_result[1] + test_acca
test_cota = sess_result[0] + test_cota
if iter % print_size==0:
print("\n---------- Learning Rate : ", learning_rate * (1.0/(1.0+learnind_rate_decay*iter))," Data: ",data_input_type )
print("Lower Bound : ",lower_bound,' Drop Lower: ',0.95+lower_bound ,' Random Image :',lower_bound * 6,
'\n',"Drop 1 : ",random_drop1," Drop 2: ",random_drop2," Drop 3: ",random_drop3)
print('Train Current cost: ', train_cota/(len(train_batch)/(batch_size//2)),' Current Acc: ',
train_acca/(len(train_batch)/(batch_size//2) ),end='\n')
print('Test Current cost: ', test_cota/(len(test_batch)/batch_size),' Current Acc: ',
test_acca/(len(test_batch)/batch_size),end='\n')
print("----------")
            train_acc.append(train_acca/(len(train_batch)/(batch_size//2)))
            train_cot.append(train_cota/(len(train_batch)/(batch_size//2)))
test_acc.append(test_acca/(len(test_batch)/batch_size))
test_cot.append(test_cota/(len(test_batch)/batch_size))
test_cota,test_acca = 0,0
train_cota,train_acca = 0,0
# data_input_type = data_input_type + 1
# if data_input_type == 4: data_input_type = 0
# Normalize the cost of the training
train_cot = (train_cot-min(train_cot) ) / (max(train_cot)-min(train_cot))
test_cot = (test_cot-min(test_cot) ) / (max(test_cot)-min(test_cot))
# training done now plot
plt.figure()
plt.plot(range(len(train_acc)),train_acc,color='red',label='acc ovt')
plt.plot(range(len(train_cot)),train_cot,color='green',label='cost ovt')
plt.legend()
plt.title("Train Average Accuracy / Cost Over Time")
plt.savefig("Case a Train.png")
plt.figure()
plt.plot(range(len(test_acc)),test_acc,color='red',label='acc ovt')
plt.plot(range(len(test_cot)),test_cot,color='green',label='cost ovt')
plt.legend()
plt.title("Test Average Accuracy / Cost Over Time")
plt.savefig("Case a Test.png")
# -- end code -- | [
"[email protected]"
] | |
646e4bdb64395b37d72c7eec6ab0b6d0a82ec228 | 14edb3491718f55ac4faddbeeb804a18496d7b68 | /test/functional/test_framework/test_shell.py | 1d81a19057ef51b4ab48026087eb79e2906bf93b | [
"MIT"
] | permissive | freelancerstudio/Danecoin | 712f361abdf2a635f86ea9be7e8a0f63a419437d | 73d21d335c11a8966c995b7e8c520c2b55695c04 | refs/heads/main | 2023-07-03T04:22:05.076216 | 2021-08-16T17:43:48 | 2021-08-16T17:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DanecoinTestFramework
class TestShell:
"""Wrapper Class for DanecoinTestFramework.
The TestShell class extends the DanecoinTestFramework
rpc & daemon process management functionality to external
python environments.
It is a singleton class, which ensures that users only
start a single TestShell at a time."""
class __TestShell(DanecoinTestFramework):
def set_test_params(self):
pass
def run_test(self):
pass
def setup(self, **kwargs):
if self.running:
print("TestShell is already running!")
return
# Num_nodes parameter must be set
# by DanecoinTestFramework child class.
self.num_nodes = 1
# User parameters override default values.
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
elif hasattr(self.options, key):
setattr(self.options, key, value)
else:
raise KeyError(key + " not a valid parameter key!")
super().setup()
self.running = True
return self
def shutdown(self):
if not self.running:
print("TestShell is not running!")
else:
super().shutdown()
self.running = False
def reset(self):
if self.running:
print("Shutdown TestShell before resetting!")
else:
self.num_nodes = None
super().__init__()
instance = None
def __new__(cls):
# This implementation enforces singleton pattern, and will return the
# previously initialized instance if available
if not TestShell.instance:
TestShell.instance = TestShell.__TestShell()
TestShell.instance.running = False
return TestShell.instance
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
return setattr(self.instance, name, value)
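# Illustrative interactive use (sketch; assumes a built test framework environment):
#   shell = TestShell()
#   shell.setup(num_nodes=2)           # starts the nodes and the test framework
#   shell.nodes[0].getblockchaininfo()
#   shell.shutdown()
#   shell.reset()                      # allows a fresh setup() afterwards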
| [
"[email protected]"
] | |
1fbc8fe692bbd42fa5947ae91067fa7daa1e18d2 | cd208b4a40be8bf166da79fdc126dbcb71e95a7d | /app/notification/advance_notifications.py | 8304f7d69b491586773bac5131f10c0f31a25f4c | [
"MIT"
] | permissive | Moirted/MyPersonalKitchenBot | 63a2b1be6e21e90ed908c9f3162bd085162cd83f | 03de0beeaf2665e8b3ddd1709da3d4edcd422b80 | refs/heads/main | 2023-04-21T12:17:52.486113 | 2021-05-16T13:00:22 | 2021-05-16T13:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import datetime as dt
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from dateutil.parser import parse
from sqlalchemy import and_
from app.misc import bot
from app.models import UserSettings
from app.models.product import Product
async def advance_notification_checker():
users = await UserSettings.query.where(and_(
UserSettings.notifications_general_enabled, UserSettings.notifications_advance_enabled)
).gino.all()
    for u in users:
        p = await Product.query.where(and_(Product.user_id == u.user_id,
                                           Product.expiration_date == dt.datetime.today() + dt.timedelta(u.notifications_advance_days_until_expiration))).gino.all()
        if not p:  # skip users with nothing expiring, so no empty notification is sent
            continue
        answer = "Истекает срок годности:\n"  # "Expiration date is approaching:"
        for pr in p:
            answer += pr.name + " - " + parse(str(pr.expiration_date)).strftime('%d.%m') + "\n"
        await bot.send_message(u.user_id, answer)
scheduler = AsyncIOScheduler()
scheduler.add_job(advance_notification_checker, 'cron', hour='18', minute='00')
scheduler.start()
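# Note: with this cron trigger the check runs once a day at 18:00 in the
# scheduler's (server) timezone; an explicit zone could be passed via the
# `timezone` argument of add_job if needed.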
| [
"[email protected]"
] | |
8cd9e0a4dd1402479c1142722ce6f9bfb837ff1e | 5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04 | /google/ads/googleads/v5/services/services/reach_plan_service/transports/grpc.py | 2a1fae4361e4090055a995f0e2712c344ada0370 | [
"Apache-2.0"
] | permissive | pdsing/google-ads-python | 0ce70227cd6bb13a25cd13de0ca05c2636279ecd | ee2c059498d5679a0d1d9011f3795324439fad7c | refs/heads/master | 2023-05-04T18:39:57.412453 | 2021-05-21T16:38:17 | 2021-05-21T16:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,745 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v5.services.types import reach_plan_service
from .base import ReachPlanServiceTransport, DEFAULT_CLIENT_INFO
class ReachPlanServiceGrpcTransport(ReachPlanServiceTransport):
"""gRPC backend transport for ReachPlanService.
Reach Plan Service gives users information about audience
size that can be reached through advertisement on YouTube. In
particular, GenerateReachForecast provides estimated number of
people of specified demographics that can be reached by an ad in
a given market by a campaign of certain duration with a defined
budget.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            address (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_plannable_locations(
self,
) -> Callable[
[reach_plan_service.ListPlannableLocationsRequest],
reach_plan_service.ListPlannableLocationsResponse,
]:
r"""Return a callable for the list plannable locations method over gRPC.
Returns the list of plannable locations (for example,
countries & DMAs).
Returns:
Callable[[~.ListPlannableLocationsRequest],
~.ListPlannableLocationsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_plannable_locations" not in self._stubs:
self._stubs[
"list_plannable_locations"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.ReachPlanService/ListPlannableLocations",
request_serializer=reach_plan_service.ListPlannableLocationsRequest.serialize,
response_deserializer=reach_plan_service.ListPlannableLocationsResponse.deserialize,
)
return self._stubs["list_plannable_locations"]
@property
def list_plannable_products(
self,
) -> Callable[
[reach_plan_service.ListPlannableProductsRequest],
reach_plan_service.ListPlannableProductsResponse,
]:
r"""Return a callable for the list plannable products method over gRPC.
Returns the list of per-location plannable YouTube ad
formats with allowed targeting.
Returns:
Callable[[~.ListPlannableProductsRequest],
~.ListPlannableProductsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_plannable_products" not in self._stubs:
self._stubs[
"list_plannable_products"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.ReachPlanService/ListPlannableProducts",
request_serializer=reach_plan_service.ListPlannableProductsRequest.serialize,
response_deserializer=reach_plan_service.ListPlannableProductsResponse.deserialize,
)
return self._stubs["list_plannable_products"]
@property
def generate_product_mix_ideas(
self,
) -> Callable[
[reach_plan_service.GenerateProductMixIdeasRequest],
reach_plan_service.GenerateProductMixIdeasResponse,
]:
r"""Return a callable for the generate product mix ideas method over gRPC.
Generates a product mix ideas given a set of
preferences. This method helps the advertiser to obtain
a good mix of ad formats and budget allocations based on
its preferences.
Returns:
Callable[[~.GenerateProductMixIdeasRequest],
~.GenerateProductMixIdeasResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_product_mix_ideas" not in self._stubs:
self._stubs[
"generate_product_mix_ideas"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.ReachPlanService/GenerateProductMixIdeas",
request_serializer=reach_plan_service.GenerateProductMixIdeasRequest.serialize,
response_deserializer=reach_plan_service.GenerateProductMixIdeasResponse.deserialize,
)
return self._stubs["generate_product_mix_ideas"]
@property
def generate_reach_forecast(
self,
) -> Callable[
[reach_plan_service.GenerateReachForecastRequest],
reach_plan_service.GenerateReachForecastResponse,
]:
r"""Return a callable for the generate reach forecast method over gRPC.
Generates a reach forecast for a given targeting /
product mix.
Returns:
Callable[[~.GenerateReachForecastRequest],
~.GenerateReachForecastResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_reach_forecast" not in self._stubs:
self._stubs[
"generate_reach_forecast"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.ReachPlanService/GenerateReachForecast",
request_serializer=reach_plan_service.GenerateReachForecastRequest.serialize,
response_deserializer=reach_plan_service.GenerateReachForecastResponse.deserialize,
)
return self._stubs["generate_reach_forecast"]
__all__ = ("ReachPlanServiceGrpcTransport",)
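# --- Added usage sketch (not part of the generated module) ------------------
# The transport is normally constructed and owned by the generated service
# client, but its stub properties can also be called directly. The host and
# credentials below are placeholders:
#
#     transport = ReachPlanServiceGrpcTransport(
#         host="googleads.googleapis.com",
#         credentials=creds,  # a google.auth credentials object
#     )
#     request = reach_plan_service.ListPlannableLocationsRequest()
#     response = transport.list_plannable_locations(request)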
# =============================================================================
# File: /Longinus/redisScan.py    (repo: orange23qi/Evangelion)
# =============================================================================
# -*- coding: UTF-8 -*-
import redis
from rediscluster import StrictRedisCluster
from .models import config_redis_command
class redisScan(object):
"""
def __init__(self, redisHost, redisPort, redisType, redisDb):
self.host = redisHost
self.port = int(redisPort)
self.type = int(redisType)
self.db = int(redisDb)
"""
def splitCommand(self, content):
"""
        Split the input text into one command (a list of tokens) per line.
"""
commandList = []
for row in content.splitlines():
commandList.append(row.split())
return commandList
def prepareCommand(self, content):
finalResult = {'status': 0, 'msg': 'ok'}
count = 1
commandList = self.splitCommand(content)
for row in commandList:
            count = config_redis_command.objects.filter(command=row[0], status=1).count()
if count == 0:
finalResult['status'] = 1
                finalResult['msg'] = 'Unsupported command %s, please revise.' % (row[0])
return finalResult
return finalResult
def getValues(self, redisHost, redisPort, redisType, redisDb, content):
"""
        Execute the commands and return the results.
        Currently supported:
string : get / mget / getrange / strlen
hash : hget / hmget / hgetall / hlen / hkeys / hexists
list : llen / lindex / lrange
set : smembers / scard / sdiff / sinter / sismember / srandmember / sunion
        zset   : zcard / zcount
other :
"""
finalValue = []
if redisType == 1:
r = redis.Redis(host=redisHost, port=redisPort, db=redisDb)
elif redisType == 2:
startup_nodes = [{"host": redisHost, "port": redisPort}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)
commandList = self.splitCommand(content)
for row in commandList:
key = row[0]
command = ""
command2 = ""
tempList = []
if key in ("get", "strlen", "hlen", "llen", "scard", "zcard"):
command = "tempList.append(r.%s('%s'))" % (key, row[1])
print(command)
exec(command)
for tempRow in tempList:
if isinstance(tempRow, bytes):
tempRow = str(tempRow, encoding='utf-8')
command2 = "finalValue.append(['%s %s','%s'])" % (key, row[1], tempRow)
exec(command2)
elif key in ("mget", "hmget"):
if key == "mget":
command = "tempList.append(r.%s(%s))" % (key, row[1:])
key = "get"
i = 0
elif key == "hmget":
command = "tempList.append(r.%s('%s',%s))" % (key, row[1], row[2:])
key = "hget"
i = 1
exec(command)
for tempRow in tempList:
for listValue in tempRow:
i += 1
if isinstance(listValue, bytes):
listValue = str(listValue, encoding='utf-8')
if key == "get":
command2 = "finalValue.append(['%s %s','%s'])" % (key, row[i], listValue)
elif key == "hget":
command2 = "finalValue.append(['%s %s %s','%s'])" % (key, row[1], row[i], listValue)
exec(command2)
elif key in ("getrange", "zcount"):
command = "tempList.append(r.%s('%s', %d, %d))" % (key, row[1], int(row[2]), int(row[3]))
exec(command)
if isinstance(tempList[0], bytes):
tempList[0] = str(tempList[0], encoding='utf-8')
command2 = "finalValue.append(['%s %s %d %d','%s'])" % (key, row[1], int(row[2]), int(row[3]), tempList[0])
exec(command2)
elif key in ("hget", "hexists", "sismember"):
tmpKey = row[2].replace("\"","").replace("'","")
command = "tempList.append(r.%s('%s','%s'))" % (key, row[1], tmpKey)
exec(command)
if isinstance(tempList[0], bytes):
tempList[0] = str(tempList[0], encoding='utf-8')
command2 = "finalValue.append(['%s %s %s','%s'])" % (key, row[1], row[2], tempList[0])
exec(command2)
elif key in ("hgetall"):
command = "tempList.append(r.%s('%s'))" % (key, row[1])
exec(command)
key = "hget"
for tempRow in tempList:
if isinstance(tempRow, dict):
for dictKey in tempRow.keys():
dictValue = tempRow[dictKey]
if isinstance(dictKey, bytes):
dictKey = str(dictKey, encoding='utf-8')
if isinstance(dictValue, bytes):
dictValue = str(dictValue, encoding='utf-8')
command2 = "finalValue.append(['%s %s %s','%s'])" % (key, row[1], dictKey, dictValue)
exec(command2)
elif key in ("hkeys", "smembers", "sdiff", "sinter", "sunion"):
if key in ("hkeys", "smembers"):
command = "tempList.append(r.%s('%s'))" % (key, row[1])
if key in ("sdiff", "sinter", "sunion"):
command = "tempList.append(r.%s(%s))" % (key, row[1:])
exec(command)
i = 1
for tempRow in tempList:
for listValue in tempRow:
if isinstance(listValue, bytes):
listValue = str(listValue, encoding='utf-8')
if key in ("hkeys", "smembers"):
command2 = "finalValue.append(['%s %s (%d)','%s'])" \
% (key, row[1], i, listValue)
if key in ("sdiff", "sinter", "sunion"):
command2 = "finalValue.append(['%s %s%s (%d)','%s'])" \
% (key, row[1], "...", i, listValue)
exec(command2)
i += 1
elif key in ("lindex"):
command = "tempList.append(r.%s('%s',%d))" % (key, row[1], int(row[2]))
exec(command)
if isinstance(tempList[0], bytes):
tempList[0] = str(tempList[0], encoding='utf-8')
command2 = "finalValue.append(['%s %s %d','%s'])" % (key, row[1], int(row[2]), tempList[0])
exec(command2)
elif key in ("lrange", "srandmember"):
row.append('1')
if int(row[2]) > 1000:
row[2] = 1000
if key in ("lrange"):
command = "tempList.append(r.%s('%s',%d,%d))" % (key, row[1], int(row[2]), int(row[3]))
exec(command)
key = "lindex"
i = 0
elif key in ("srandmember"):
command = "tempList.append(r.%s('%s',%d))" % (key, row[1], int(row[2]))
exec(command)
i = 1
for tempRow in tempList:
for listValue in tempRow:
if isinstance(listValue, bytes):
listValue = str(listValue, encoding='utf-8')
if key in ("lindex"):
command2 = "finalValue.append(['%s %s %d','%s'])" % (key, row[1], i, listValue)
elif key in ("srandmember"):
command2 = "finalValue.append(['%s %s (%d)','%s'])" % (key, row[1], i, listValue)
exec(command2)
i += 1
return finalValue
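# --- Added usage sketch (not in the original file) ---------------------------
# Assumes a standalone Redis reachable on localhost and a configured Django
# environment (prepareCommand validates commands through the Django ORM).
# The keys queried below are placeholders.
if __name__ == "__main__":
    scanner = redisScan()
    content = "get user:1\nhgetall session:42"
    check = scanner.prepareCommand(content)
    if check['status'] == 0:
        for cmd, value in scanner.getValues("127.0.0.1", 6379, 1, 0, content):
            print(cmd, "=>", value)
    else:
        print(check['msg'])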
# =============================================================================
# File: /utils.py    (repo: diwakar-vsingh/Scene-Recognition)
# =============================================================================
import sys
import os
import os.path as osp
import shutil
import time
import random
import numpy as np
import cv2
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch
import torch.utils.data
from glob import glob
import matplotlib.pyplot as plt
from torch.autograd import Variable
from IPython.core.debugger import set_trace
from visdom import Visdom
from torch.utils.data import DataLoader
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from random import shuffle
def im2single(im):
im = im.astype(np.float32) / 255
return im
def single2im(im):
im *= 255
im = im.astype(np.uint8)
return im
def load_image(path):
return im2single(cv2.imread(path))[:, :, ::-1]
def load_image_gray(path):
img = load_image(path)
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def get_image_paths(data_path, categories, num_train_per_cat=100, fmt='jpg'):
"""
This function returns lists containing the file path for each train
    and test image, as well as lists with the label of each train and
test image. By default all four of these arrays will have 1500
elements where each element is a string.
:param data_path: path to the 'test' and 'train' directories
:param categories: list of category names
:param num_train_per_cat: max number of training images to use (per category)
:param fmt: file extension of the images
:return: lists: train_image_paths, test_image_paths, train_labels, test_labels
"""
train_image_paths = []
test_image_paths = []
train_labels = []
test_labels = []
for cat in categories:
# train
pth = osp.join(data_path, 'train', cat, '*.{:s}'.format(fmt))
pth = glob(pth)
shuffle(pth)
pth = pth[:num_train_per_cat]
train_image_paths.extend(pth)
train_labels.extend([cat]*len(pth))
# test
pth = osp.join(data_path, 'test', cat, '*.{:s}'.format(fmt))
pth = glob(pth)
shuffle(pth)
pth = pth[:num_train_per_cat]
test_image_paths.extend(pth)
test_labels.extend([cat]*len(pth))
return train_image_paths, test_image_paths, train_labels, test_labels
def show_results(train_image_paths, test_image_paths, train_labels, test_labels,
categories, abbr_categories, predicted_categories):
"""
shows the results
:param train_image_paths:
:param test_image_paths:
:param train_labels:
:param test_labels:
:param categories:
:param abbr_categories:
:param predicted_categories:
:return:
"""
cat2idx = {cat: idx for idx, cat in enumerate(categories)}
# confusion matrix
y_true = [cat2idx[cat] for cat in test_labels]
y_pred = [cat2idx[cat] for cat in predicted_categories]
cm = confusion_matrix(y_true, y_pred)
cm = cm.astype(np.float) / cm.sum(axis=1)[:, np.newaxis]
acc = np.mean(np.diag(cm))
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap('jet'))
plt.title('Confusion matrix. Mean of diagonal = {:4.2f}%'.format(acc*100))
tick_marks = np.arange(len(categories))
plt.tight_layout()
plt.xticks(tick_marks, abbr_categories, rotation=45)
plt.yticks(tick_marks, categories)
def set_seed(seed, use_GPU=False):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if use_GPU:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic=True
def print_input_size_hook(self, input, output):
print('Input size to classifier is', input[0].size())
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
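# --- Added illustration (not in the original file) ---------------------------
# precision@k on a toy batch of 2 samples and 3 classes: sample 0 is correct at
# top-1, sample 1 only at top-2, so accuracy() reports 50% and 100%.
if __name__ == "__main__":
    _logits = torch.tensor([[0.10, 0.70, 0.20],
                            [0.80, 0.05, 0.15]])
    _labels = torch.tensor([1, 2])
    print(accuracy(_logits, _labels, topk=(1, 2)))  # [tensor([50.]), tensor([100.])]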
class Trainer(object):
def __init__(self, train_dataset, val_dataset, model, loss_fn, optimizer,
lr_scheduler, params):
"""
General purpose training script
:param train_dataset: PyTorch dataset that loads training images
:param val_dataset: PyTorch dataset that loads testing / validation images
:param model: Network model
:param optimizer: PyTorch optimizer object
:param lr_scheduler: PyTorch learning rate scheduler object
:param loss_fn: loss function
:param params: dictionary containing parameters for the training process
It can contain the following fields (fields with no default value mentioned
are mandatory):
n_epochs: number of epochs of training
batch_size: batch size for one iteration
do_val: perform validation? (default: True)
shuffle: shuffle training data? (default: True)
num_workers: number of CPU threads for loading data (default: 4)
val_freq: frequency of validation (in number of epochs) (default: 1)
print_freq: progress printing frequency (in number of iterations
(default: 20)
experiment: name of the experiment, used to create logs and checkpoints
checkpoint_file: Name of file with saved weights. Loaded at before
start of training if provided (default: None)
resume_optim: whether to resume optimization from loaded weights
(default: True)
"""
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.best_prec1 = -float('inf')
# parse params with default values
self.config = {
'n_epochs': params['n_epochs'],
'batch_size': params['batch_size'],
'do_val': params.get('do_val', True),
'shuffle': params.get('shuffle', True),
'num_workers': params.get('num_workers', 4),
'val_freq': params.get('val_freq', 1),
'print_freq': params.get('print_freq', 100),
'experiment': params['experiment'],
'checkpoint_file': params.get('checkpoint_file'),
'resume_optim': params.get('resume_optim', True)
}
self.logdir = osp.join(os.getcwd(), 'logs', self.config['experiment'])
if not osp.isdir(self.logdir):
os.makedirs(self.logdir)
# visdom plots
self.vis_env = self.config['experiment']
self.loss_win = 'loss_win'
self.vis = Visdom()
self.vis.line(X=np.zeros((1,2)), Y=np.zeros((1,2)), win=self.loss_win,
opts={'legend': ['train_loss', 'val_loss'], 'xlabel': 'epochs',
'ylabel': 'loss'}, env=self.vis_env)
self.lr_win = 'lr_win'
self.vis.line(X=np.zeros(1), Y=np.zeros(1), win=self.lr_win,
opts={'legend': ['learning_rate'], 'xlabel': 'epochs',
'ylabel': 'log(lr)'}, env=self.vis_env)
self.top1_win = 'top1_win'
self.vis.line(X=np.zeros((1,2)), Y=np.zeros((1,2)), win=self.top1_win,
opts={'legend': ['train_top1_prec', 'val_top1_prec'], 'xlabel': 'epochs',
'ylabel': 'top1_prec (%)'}, env=self.vis_env)
self.top5_win = 'top5_win'
self.vis.line(X=np.zeros((1,2)), Y=np.zeros((1,2)), win=self.top5_win,
opts={'legend': ['train_top5_prec', 'val_top5_prec'], 'xlabel': 'epochs',
'ylabel': 'top5_prec (%)'}, env=self.vis_env)
# log all the command line options
print('---------------------------------------')
print('Experiment: {:s}'.format(self.config['experiment']))
for k, v in self.config.items():
print('{:s}: {:s}'.format(k, str(v)))
print('---------------------------------------')
self.start_epoch = int(0)
checkpoint_file = self.config['checkpoint_file']
if checkpoint_file:
if osp.isfile(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.best_prec1 = checkpoint['best_prec1']
if self.config['resume_optim']:
self.optimizer.load_state_dict(checkpoint['optim_state_dict'])
self.start_epoch = checkpoint['epoch']
print('Loaded checkpoint {:s} epoch {:d}'.format(checkpoint_file,
checkpoint['epoch']))
self.train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=self.config['batch_size'], shuffle=self.config['shuffle'],
num_workers=self.config['num_workers'])
if self.config['do_val']:
self.val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=self.config['batch_size'], shuffle=False,
num_workers=self.config['num_workers'])
else:
self.val_loader = None
def save_checkpoint(self, epoch, is_best):
filename = osp.join(self.logdir, 'checkpoint.pth.tar')
checkpoint_dict =\
{'epoch': epoch, 'model_state_dict': self.model.state_dict(),
'optim_state_dict': self.optimizer.state_dict(),
'best_prec1': self.best_prec1}
torch.save(checkpoint_dict, filename)
if is_best:
shutil.copyfile(filename, osp.join(self.logdir, 'best_model.pth.tar'))
def step_func(self, train):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
if train:
self.model.train()
status = 'train'
loader = self.train_loader
else:
self.model.eval()
status = 'val'
loader = self.val_loader
end = time.time()
for batch_idx, (data, target) in enumerate(loader):
data_time.update(time.time() - end)
kwargs = dict(target=target, loss_fn=self.loss_fn,
optim=self.optimizer, train=train)
loss, output = step_feedfwd(data, self.model, **kwargs)
# measure accuracy and calculate loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss, data.size(0))
top1.update(prec1[0], data.size(0))
top5.update(prec5[0], data.size(0))
# measure batch time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % self.config['print_freq'] == 0:
print('{:s} {:s}: batch {:d}/{:d}, loss {:4.3f}, top-1 accuracy {:4.3f},'
' top-5 accuracy {:4.3f}'.format(status, self.config['experiment'],
batch_idx, len(loader)-1, loss, prec1[0], prec5[0]))
print('{:s} {:s}: loss {:f}'.format(status, self.config['experiment'],
losses.avg))
return losses.avg, top1.avg, top5.avg
def train_val(self):
for epoch in range(self.start_epoch, self.config['n_epochs']):
print('{:s} Epoch {:d} / {:d}'.format(self.config['experiment'], epoch,
self.config['n_epochs']))
# ADJUST LR
self.lr_scheduler.step()
lr = self.lr_scheduler.get_lr()[0]
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([np.log10(lr)]),
win=self.lr_win, name='learning_rate', update='append', env=self.vis_env)
# TRAIN
loss, top1_prec, top5_prec = self.step_func(train=True)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([loss]),
win=self.loss_win, name='train_loss', update='append', env=self.vis_env)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([top1_prec]),
win=self.top1_win, name='train_top1_prec', update='append', env=self.vis_env)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([top5_prec]),
win=self.top5_win, name='train_top5_prec', update='append', env=self.vis_env)
self.vis.save(envs=[self.vis_env])
# VALIDATION
if self.config['do_val'] and ((epoch % self.config['val_freq'] == 0) or
(epoch == self.config['n_epochs']-1)):
loss, top1_prec, top5_prec = self.step_func(train=False)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([loss]),
win=self.loss_win, name='val_loss', update='append', env=self.vis_env)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([top1_prec]),
win=self.top1_win, name='val_top1_prec', update='append', env=self.vis_env)
self.vis.line(X=np.asarray([epoch]), Y=np.asarray([top5_prec]),
win=self.top5_win, name='val_top5_prec', update='append', env=self.vis_env)
self.vis.save(envs=[self.vis_env])
# SAVE CHECKPOINT
is_best = top1_prec > self.best_prec1
self.best_prec1 = max(self.best_prec1, top1_prec)
self.save_checkpoint(epoch, is_best)
print('Checkpoint saved')
if is_best:
print('BEST TOP1 ACCURACY SO FAR')
return self.best_prec1
def step_feedfwd(data, model, target=None, loss_fn=None, optim=None,
train=True):
"""
training/validation step for a feedforward NN
:param data:
:param target:
:param model:
:param loss_fn:
:param optim:
:param train: training / val stage
:return:
"""
if train:
assert loss_fn is not None
with torch.no_grad():
data_var = Variable(data, requires_grad=train)
output = model(data_var)
if loss_fn is not None:
with torch.no_grad():
target_var = Variable(target, requires_grad=False)
loss = loss_fn(output, target_var)
if train:
# SGD step
optim.zero_grad()
loss.backward()
optim.step()
return loss.item(), output
else:
return 0, output
def get_mean_std(data_path, input_size, rgb):
tform = []
tform.append(transforms.Resize(size=input_size))
if not rgb:
tform.append(transforms.Grayscale())
tform.append(transforms.ToTensor())
tform = transforms.Compose(tform)
dset = datasets.ImageFolder(root=data_path, transform=tform)
train_loader = DataLoader(dataset=dset, batch_size=50)
scaler = StandardScaler(with_mean=True, with_std=True)
print('Computing pixel mean and stdev...')
for idx, (data, labels) in enumerate(train_loader):
if idx % 20 == 0:
print("Batch {:d} / {:d}".format(idx, len(train_loader)))
data = data.numpy()
n_channels = data.shape[1]
# reshape into [n_pixels x 3]
data = data.transpose((0, 2, 3, 1)).reshape((-1, n_channels))
# pass batch to incremental mean and stdev calculator
scaler.partial_fit(data)
print('Done, mean = ')
pixel_mean = scaler.mean_
pixel_std = scaler.scale_
print(pixel_mean)
print('std = ')
print(pixel_std)
return pixel_mean, pixel_std
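# --- Added illustration (not in the original file) ---------------------------
# Shape of the params dict expected by Trainer; the experiment name and
# checkpoint path are placeholders.
EXAMPLE_TRAINER_PARAMS = {
    'n_epochs': 30,            # mandatory
    'batch_size': 64,          # mandatory
    'experiment': 'scene_recognition_baseline',  # mandatory
    'do_val': True,
    'shuffle': True,
    'num_workers': 4,
    'val_freq': 1,
    'print_freq': 100,
    'checkpoint_file': None,   # e.g. 'logs/scene_recognition_baseline/checkpoint.pth.tar'
    'resume_optim': True,
}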
# =============================================================================
# File: /Web-Scraper/scraper.py    (repo: sl-spase/Python_NLP)
# =============================================================================
import requests
import string
import os
from bs4 import BeautifulSoup
page = input()
article_type = input()
for i in range(1, int(page) + 1):
url_list_articles = "https://www.nature.com/nature/articles?sort=PubDate&year=2020&page=" + str(i)
r1 = requests.get(url_list_articles, headers={'Accept-Language': 'en-US,en;q=0.5'})
if r1.status_code == 200:
soup1 = BeautifulSoup(r1.content, 'html.parser')
types = soup1.find_all('span', {'class': "c-meta__type"})
position_news_articles = [] # [9,11,18]
k = 0
for t in types:
if t.text == article_type: # Find the 'News' ones
position_news_articles.append(k)
k = k + 1
hrefs = soup1.find_all('a', {'data-track-action': "view article"})
articles_url = [] # stores all news articles urls
for position in position_news_articles:
href = hrefs[position]['href']
url_article = "https://www.nature.com" + str(href)
articles_url.append(url_article)
articles_title = []
os.mkdir('Page_' + str(i))
for url in articles_url:
r2 = requests.get(url, headers={'Accept-Language': 'en-US,en;q=0.5'})
soup2 = BeautifulSoup(r2.content, 'html.parser')
article_title = []
if article_type == "Research Highlight":
article_title = soup2.find('h1', {'class': "article-item__title"}).text.strip()
else:
article_title = soup2.find('h1', {'class': "c-article-magazine-title"}).text.strip()
for p in article_title:
if p in string.punctuation:
article_title = article_title.replace(p, "")
article_title = article_title.replace(' ', '_')
articles_title.append(article_title)
article_body = []
if article_type == "Research Highlight":
article_body = soup2.find('div', {'class': "article-item__body"}).text.strip()
else:
article_body = soup2.find('div', {'class': "c-article-body u-clearfix"}).text.strip()
os.chdir(os.getcwd() + '\\Page_' + str(i))
file_name = article_title + '.txt'
file = open(file_name, 'wb')
file.write(article_body.encode())
file.close()
os.chdir("..")
print("Saved articles:")
print(articles_title)
| [
"[email protected]"
] | |
443a8d75e938c169af2de29928c8608a50907fc9 | d61482ba081ab69edbe53f1d641eba9b4295684f | /utilities.py | 1c038126e3f99e9b7eeabd6081ec962fd1b7998b | [] | no_license | lucapegolotti/FingerMovement | 6099bad810429444d1b94c6d4350e16ed2302c4c | d515da1bd7394fc62657dc9e065132f3b2d169bb | refs/heads/master | 2020-03-06T22:34:43.389476 | 2018-05-18T12:46:20 | 2018-05-18T12:46:20 | 127,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | import torch
from torch import nn
import numpy as np
import random
"""
Compute number of errors on given datset wrt its target
"""
def compute_nb_errors(model, input, target):
# To deactivate dropout:
model.train(False)
y = model.forward(input)
indicesy = np.argmax(y.data,1).float()
nberrors = np.linalg.norm(indicesy - target.data.float(),0)
model.train(True)
return nberrors
"""
Train model
"""
def train_model(model, criterion, train_input, \
train_target, validation_input, validation_target, test_input, test_target,\
n_epochs=1000, eta=0.1, batch_perc=0.3, l2_parameter=1e-3, gamma=0.95):
train_size = train_input.size(0)
test_size = test_input.size(0)
validation_size = validation_input.size(0)
# Initialize an output_array of 4 or 5 columns if there isn't or there is
# a validation set, respectively
if validation_size is not 0:
output_array = np.zeros(shape=(n_epochs,5))
else:
output_array = np.zeros(shape=(n_epochs,4))
# Effective mini-batch size for this training set
mini_batch_size = int(train_size*batch_perc)
# Adam otpimizer to improve weights and biases after each epoch
optimizer = torch.optim.Adam(model.parameters(), lr = eta, weight_decay = l2_parameter)
# We adjust the learning rate following a geometric progression with coefficient
# gamma every 30 epochs
# See http://pytorch.org/docs/master/optim.html#how-to-adjust-learning-rate
# Section "How to adjust Learning Rate"
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=gamma, last_epoch=-1)
for e in range(0, n_epochs):
# Initialization of loss for SGD with mini-batches after random shuffling
# of training dataset
sum_loss = 0
shuffle_indexes_minibatch = torch.randperm(train_size)
train_input = train_input[shuffle_indexes_minibatch,:,:]
train_target = train_target[shuffle_indexes_minibatch]
for b in range(0, train_size, mini_batch_size):
# To avoid errors when train_size/mini_batch_size is not an integer
# we introduce the variable current_mini_bach_size
current_mini_batch_size = min(mini_batch_size, train_input.size(0) - b)
# Forward run
output = model.forward(train_input.narrow(0, b, current_mini_batch_size))
# Take the mini-batch of train_target
train_target_narrowed = train_target.narrow(0, b, current_mini_batch_size).long()
# Compute the loss
loss = criterion(output, train_target_narrowed)
scheduler.step()
sum_loss = sum_loss + loss.data.item()
# Backward step
model.zero_grad()
loss.backward()
optimizer.step()
# Compute number of erorrs on training and test set
train_error = compute_nb_errors(model,train_input, train_target)
test_error = compute_nb_errors(model,test_input, test_target)
# print("Epoch = {0:d}".format(e))
# print("Loss function = {0:.8f}".format(sum_loss))
string_to_print = "Epoch: {0:d}".format(e) + \
" loss function: {0:.8f}".format(sum_loss) + \
" train error: {0:.2f}%".format((train_error/train_size)*100)
# Save results on output_array to be exported
output_array[e,0] = e
output_array[e,1] = sum_loss
output_array[e,2] = (train_error/train_size)*100
output_array[e,3] = (test_error/test_size)*100
# If the cross-validation set is non empty we can also compute the
# number of errors on this set
if validation_size is not 0:
validation_error = compute_nb_errors(model,validation_input, validation_target)
string_to_print += " validation error: {0:.2f}%".format((validation_error/validation_size)*100)
output_array[e,4] = (validation_error/validation_size)*100
print(string_to_print)
print("===============================================================================")
return output_array
"""
Random initialization of weights using Xavier uniform
"""
def init_weights(layer):
if type(layer) == nn.Linear:
torch.nn.init.xavier_uniform_(layer.weight)
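# --- Added usage sketch (not in the original file) ---------------------------
# Xavier init is typically applied model-wide with nn.Module.apply; the layer
# sizes here are illustrative only.
if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(28, 64), nn.ReLU(), nn.Linear(64, 2))
    net.apply(init_weights)  # initializes every nn.Linear weight in-place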
# =============================================================================
# File: /canvas_stream/helpers.py    (repo: benjavicente/canvas-stream)
# =============================================================================
"Utilities"
import unicodedata
import re
from pathlib import Path
import requests
def slugify(value: str) -> str:
"Makes a string a valid file path"
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
.lower()
.replace(r"/", "-")
.replace("\\", "-")
.replace("*", "")
)
return re.sub(r"[-]+", "-", value).strip("_-.")
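# Added illustration: slugify strips accents, lowercases, and replaces
# path-hostile characters, e.g.
#   slugify("Álgebra / Tarea *2*")  ->  "algebra - tarea 2"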
def download(url: str, path: Path):
"Downloads a file from a url"
path.parent.mkdir(parents=True, exist_ok=True)
stream = requests.get(url, stream=True)
content_length = stream.headers.get("content-length", None)
with path.open("wb") as file:
if not content_length:
print(f"???% -- {path}")
file.write(stream.content)
return
progress = 0
total_bytes = int(content_length)
for data in stream.iter_content(chunk_size=4096):
file.write(data)
progress += len(data)
print(f"{progress / total_bytes:4.0%} -- {path}", end="\r")
print(end="\n")
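# Added usage sketch (URL and destination path are placeholders):
#   download("https://example.com/syllabus.pdf", Path("courses/syllabus.pdf"))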
# =============================================================================
# File: /app.py    (repo: CarlosPlatoni/BeeDetection)
# =============================================================================
#!/usr/bin/env python
from flask import Flask, render_template, Response, flash, jsonify
from flask_socketio import SocketIO, send
from camera import Camera
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
import time
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'well-secret-password'
bootstrap = Bootstrap(app)
socketio = SocketIO(app)
camera = Camera()
class MyForm(FlaskForm):
nomask = SubmitField(label='NoMask')
green = SubmitField(label='Green')
yellow = SubmitField(label='Yellow')
@app.route('/', methods=['GET', 'POST'])
def index():
form = MyForm()
if form.validate_on_submit():
if form.nomask.data:
flash("You pressed nomask button")
camera.removemask()
elif form.green.data:
flash("You pressed green button")
camera.setmaskgreen()
elif form.yellow.data:
flash("You pressed yellow button")
camera.setmaskyellow()
return render_template('index.html', form=form)
def gen(camera):
"""Video streaming generator function."""
count = 0
currentvalue = False
socketio.emit('my_response', {'mse' : 'No Bee detected'})
while True:
frame = camera.get_frame()
if camera.mse > 0 and currentvalue == False:
socketio.emit('my_response', {'mse' : 'Bee detected', 'elapsed' : 0})
start_time = time.time()
currentvalue = True
elif camera.mse == 0 and currentvalue == True:
elapsed_time = time.time() - start_time
socketio.emit('my_response', {'mse' : 'No Bee detected', 'elapsed' : elapsed_time})
currentvalue = False
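        # One multipart/x-mixed-replace part: boundary marker, JPEG headers, then the frame bytes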
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/detection')
def detection():
return jsonify(result=camera.mse);
@socketio.on('my event')
def handle_my_custom_event(json):
print('received json: ' + str(json))
return 'one', 2
@socketio.on('message')
def handle_message(message):
print('socketio', message)
send(message)
if __name__ == '__main__':
#app.run(host='0.0.0.0', debug=True, threaded=True)
socketio.run(app, host='0.0.0.0', debug=True)
# =============================================================================
# File: /backend/env/Scripts/django-admin.py    (repo: jlavery828/django-react)
# =============================================================================
#!c:\users\miner\documents\python-projects\djreact\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
# =============================================================================
# File: /PYTHON/string/3ld.py    (repo: vipin26/python)
# =============================================================================
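# Read a string from the user, round-trip it through ele.txt, then count the spaces in it.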
s1=""
s1=raw_input("enter a string : ")
f=open("ele.txt",'w')
f.write(s1)
f.close()
f=open("ele.txt",'r')
s=f.read()
print s
c=0
for i in s:
if(i==" "):
c=c+1
print c
| [
"[email protected]"
] | |
b45bc654f093c8fd49cd30f653fc0c4abdc0e330 | 811036ddc4996d5dfd71a2f28f877a0e8c518593 | /python_course/bin/pip2 | c86fdb7c6bbb88f0df7019c7fe43812fb3c314c4 | [] | no_license | canyoubaby/python_course | 728409ab1adaeb53614f9192d2bbfc156995b5d5 | c418c811aa0d70c91d0fadcbca6765a8c09eabb6 | refs/heads/master | 2020-05-21T00:27:41.099422 | 2019-05-25T02:19:08 | 2019-05-25T02:19:08 | 185,826,697 | 0 | 0 | null | 2019-11-02T12:07:53 | 2019-05-09T15:35:47 | Python | UTF-8 | Python | false | false | 256 | #!/home/hieu/Desktop/python_course/python_course/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cfe5fe1188d19fce17f316165c315b36d38060dc | 48909fe6205f92d707b5d825426e1da9cb4b7738 | /amazon_6/first_non_repeating _char.py | cc6b8893428e5c00532b4fa1b3a65cd5922c15fc | [] | no_license | BJV-git/amazon_6 | 608fd9ad76611d6050d8ff14d19ff29de3240b96 | b8959581ca501ecebcc7f67982a18a4a985a6852 | refs/heads/master | 2020-05-04T05:22:21.008271 | 2019-04-02T02:36:08 | 2019-04-02T02:36:08 | 178,983,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # isntead of maintaingin a dict lets mainatain a row of all alphabets
# Time: O(N)
# Space: O(1)
def firtsUnique(s):
count={}
for i in s:
count[i] = count.get(i,0)+1
for i in s:
if count[i]==1:
return i
return -1 | [
"[email protected]"
] | |
31465cfde8c86ee957b1f48a96915c6c548da66e | a3f991609b9f25e4ef4f65dc16867d602c1e867e | /scs_account_report/report/financial_report.py | a7cc4ddfa8199eb0d9a3384cd067412a5c829c5a | [] | no_license | RLJO/custom_gtl1 | ecdb86ebb5dbead92a2ec152748eb125381f3ce2 | ee9b9148aa8b2746322f67cd1afdb43078af7eaf | refs/heads/main | 2023-07-29T04:25:18.016192 | 2021-09-13T13:48:48 | 2021-09-13T13:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,317 | py | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
import time
from odoo import api, models, _
from odoo.exceptions import UserError
class ReportFinancial(models.AbstractModel):
_name = 'report.scs_account_report.report_financial'
_description = 'Financial Account Report'
def _compute_account_balance(self, accounts):
""" compute the balance, debit and credit for the provided accounts
"""
mapping = {
'balance': "COALESCE(SUM(debit),0) - COALESCE(SUM(credit), 0) as balance",
'debit': "COALESCE(SUM(debit), 0) as debit",
'credit': "COALESCE(SUM(credit), 0) as credit",
}
res = {}
for account in accounts:
res[account.id] = dict.fromkeys(mapping, 0.0)
if accounts:
tables, where_clause, where_params = self.env['account.move.line']._query_get()
tables = tables.replace('"', '') if tables else "account_move_line"
wheres = [""]
if where_clause.strip():
wheres.append(where_clause.strip())
filters = " AND ".join(wheres)
request = "SELECT account_id as id, " + ', '.join(mapping.values()) + \
" FROM " + tables + \
" WHERE account_id IN %s " \
+ filters + \
" GROUP BY account_id"
params = (tuple(accounts._ids),) + tuple(where_params)
self.env.cr.execute(request, params)
for row in self.env.cr.dictfetchall():
res[row['id']] = row
return res
def _compute_report_balance(self, reports):
'''returns a dictionary with key=the ID of a record and value=the credit, debit and balance amount
computed for this record. If the record is of type :
'accounts' : it's the sum of the linked accounts
        'account_type' : it's the sum of leaf accounts with such an account_type
'account_report' : it's the amount of the related report
'sum' : it's the sum of the children of this record (aka a 'view' record)'''
res = {}
fields = ['credit', 'debit', 'balance']
for report in reports:
if report.id in res:
continue
res[report.id] = dict((fn, 0.0) for fn in fields)
if report.type == 'accounts':
# it's the sum of the linked accounts
res[report.id]['account'] = self._compute_account_balance(report.account_ids)
for value in res[report.id]['account'].values():
for field in fields:
res[report.id][field] += value.get(field)
elif report.type == 'account_type':
                # it's the sum of the leaf accounts with such an account type
accounts = self.env['account.account'].search([('user_type_id', 'in', report.account_type_ids.ids)])
res[report.id]['account'] = self._compute_account_balance(accounts)
for value in res[report.id]['account'].values():
for field in fields:
res[report.id][field] += value.get(field)
elif report.type == 'account_report' and report.account_report_id:
# it's the amount of the linked report
res2 = self._compute_report_balance(report.account_report_id)
for key, value in res2.items():
for field in fields:
res[report.id][field] += value[field]
elif report.type == 'sum':
# it's the sum of the children of this account.report
res2 = self._compute_report_balance(report.children_ids)
for key, value in res2.items():
for field in fields:
res[report.id][field] += value[field]
return res
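    # Illustrative shape of the mapping returned above for one report id (added note):
    #   {7: {'credit': 1200.0, 'debit': 800.0, 'balance': -400.0,
    #        'account': {42: {'id': 42, 'debit': 800.0, 'credit': 1200.0, 'balance': -400.0}}}}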
def get_account_lines(self, data):
lines = []
account_report = self.env['account.financial.report'].search([('id', '=', data['account_report_id'][0])])
child_reports = account_report._get_children_by_order()
res = self.with_context(data.get('used_context'))._compute_report_balance(child_reports)
if data['enable_filter']:
comparison_res = self.with_context(data.get('comparison_context'))._compute_report_balance(child_reports)
for report_id, value in comparison_res.items():
res[report_id]['comp_bal'] = value['balance']
report_acc = res[report_id].get('account')
if report_acc:
for account_id, val in comparison_res[report_id].get('account').items():
report_acc[account_id]['comp_bal'] = val['balance']
for report in child_reports:
vals = {
'name': report.name,
'balance': res[report.id]['balance'] * report.sign,
'type': 'report',
'level': bool(report.style_overwrite) and report.style_overwrite or report.level,
'account_type': report.type or False, #used to underline the financial report balances
}
if data['debit_credit']:
vals['debit'] = res[report.id]['debit']
vals['credit'] = res[report.id]['credit']
if data['enable_filter']:
vals['balance_cmp'] = res[report.id]['comp_bal'] * report.sign
lines.append(vals)
if report.display_detail == 'no_detail':
#the rest of the loop is used to display the details of the financial report, so it's not needed here.
continue
if res[report.id].get('account'):
sub_lines = []
for account_id, value in res[report.id]['account'].items():
                    # if there are accounts to display, we add them to the lines with a level equal to their
                    # level in the COA + 1 (to avoid giving them a level so low that it conflicts with the
                    # levels of the financial reports for Assets, Liabilities, ...)
flag = False
account = self.env['account.account'].browse(account_id)
vals = {
'name': account.code + ' ' + account.name,
'balance': value['balance'] * report.sign or 0.0,
'type': 'account',
'level': report.display_detail == 'detail_with_hierarchy' and 4,
'account_type': account.internal_type,
}
if data['debit_credit']:
vals['debit'] = value['debit']
vals['credit'] = value['credit']
if not account.company_id.currency_id.is_zero(vals['debit']) or not account.company_id.currency_id.is_zero(vals['credit']):
flag = True
if not account.company_id.currency_id.is_zero(vals['balance']):
flag = True
if data['enable_filter']:
vals['balance_cmp'] = value['comp_bal'] * report.sign
if not account.company_id.currency_id.is_zero(vals['balance_cmp']):
flag = True
if flag:
sub_lines.append(vals)
lines += sorted(sub_lines, key=lambda sub_line: sub_line['name'])
return lines
@api.model
def _get_report_values(self, docids, data=None):
if not data.get('form') or not self.env.context.get('active_model') or not self.env.context.get('active_id'):
raise UserError(_("Form content is missing, this report cannot be printed."))
self.model = self.env.context.get('active_model')
docs = self.env[self.model].browse(self.env.context.get('active_id'))
report_lines = self.get_account_lines(data.get('form'))
return {
'doc_ids': self.ids,
'doc_model': self.model,
'data': data['form'],
'docs': docs,
'time': time,
'get_account_lines': report_lines,
}
# =============================================================================
# File: /WeightedAvg_categorybased.py
# (repo: harithagmu/SciCast-PredictionMarketData-Analytics-Engineering)
# =============================================================================
#Importing required libraries
import psycopg2
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import os
import math
from scipy import interpolate
# %matplotlib qt  # IPython magic: uncomment only when running inside IPython/Jupyter
#Changing the working directory
os.getcwd()
os.chdir("")
#connecting to postgres SQL
conn = psycopg2.connect("")
cursor = conn.cursor()
#Extracting all the binary resolved questions
sql_command = "SELECT * FROM {};".format(str('questions'))
data = pd.read_sql(sql_command, conn)
new_data=data[data.settled_value.isnull() == False]
df=new_data.reset_index(drop=True)
df_binary=df[df.type == 'binary']
df_b=df_binary.reset_index(drop=True)
#Calculating weights for timeline resolution
def find_a_k(d):
    '''
    Return the constants a and k for a question, given the duration d
    (number of days) for which it was open. Assumes the weight curve
    f(t) = a*exp(k*t) = a**(t+1), with a and k chosen so that f(d) = 100
    at resolution and f(-1) = 1.
    '''
a=pow(100,(1/(d+1)))
k=np.log(a)
return a,k
e = math.e  # Euler's number (previously hard-coded as 2.718)
def exp(t,a,k):
'''
The function returns the value of f(t) where f(t)=a*(exp(kt))
'''
y=a*(e**(k*t))
return y
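# Quick sanity check of the weighting curve (added illustration): since
# f(t) = a*e**(k*t) = a**(t+1), a question open for d = 30 days gets weight
# f(30) = a**31 = 100 at resolution and only f(0) = a at creation:
#   a, k = find_a_k(30)
#   round(exp(30, a, k), 6)   # -> 100.0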
df_time=pd.DataFrame()
df_time['q_id']=df_b['id']
df_time['creation']=df_b['created_at']
df_time['settled']=df_b['settled_at']
df_time['settled_value']=df_b['settled_value']
#calculating constants a and k for each question
df_time['d']=0
df_time['a']=0.0
df_time['k']=0.0
for i in range(len(df_time)):
df_time['d'][i]=(df_time['creation'][i]-df_time['settled'][i]).days
df_time['a'][i],df_time['k'][i]=find_a_k(df_time['d'][i])
#Simple Average and Weighted Average for Category 13- Business of Science
#getting questions of category 13
sql_command1 = "SELECT * FROM {};".format(str('question_category_links'))
category = pd.read_sql(sql_command1, conn)
q_lst=[]
for i in range(len(category)):
if category['category_id'][i]==13:
q_lst.append(category['question_id'][i])
q_lst1=tuple(q_lst)
#getting trades related to category 13 questions
sql_command = "select h.new_value,h.created_at as h_time,h.question_id,h.user_id,q.created_at as q_time,q.settled_at as q_settledtime,q.settled_value as settled_value from questions q, historical_trades h where q.id=h.question_id and q.settled_value is not null and q.type='binary'and h.user_id!=1 and h.question_id in {};".format(q_lst1)
cat13=pd.read_sql(sql_command, conn)
#Calculating difference between creation time of question and creation time of trade
# As well as difference between creation time of trade and resolution time of question
l=[]
m=[]
l1=[]
for i in range(len(cat13)):
l.append((cat13['q_time'][i]-cat13['h_time'][i]).days)
m.append(abs((cat13['q_time'][i]-cat13['h_time'][i]).days))
l1.append(abs((cat13['h_time'][i]-cat13['q_settledtime'][i]).days))
cat13['t']=l
cat13['t1']=m
cat13['res_time_difference']=l1
#Scaling the resolution value for all questions so that all questions are resolved to a single value
n=[]
for i in range(len(cat13)):
if cat13['settled_value'][i]==0:
n.append(1-cat13['new_value'][i])
else:
n.append(cat13['new_value'][i])
cat13['new_value_scaled']=n
cat13['Abs Error']=1-cat13['new_value_scaled']
#Timeline weights calulation and scaling the weights
f=[]
for i in range(len(cat13)):
for j in range(len(df_time)):
if cat13['question_id'][i]==df_time['q_id'][j]:
x=exp(cat13['t'][i],df_time['a'][j],df_time['k'][j])
break
f.append(x)
cat13['w_unscaled']=f
cat13['w']=cat13['w_unscaled']/100
cat13['w']
#Getting user weights for each trade
user_w=pd.read_csv("lookup.csv")
user_wlst=[]
x=0.0
for i in range(len(cat13)):
for j in range(len(user_w)):
if cat13['user_id'][i]==user_w['UserId'][j]:
x=user_w['Business of Science'][j]
break
if x==0.0:
user_wlst.append(0.000196541)
else:
user_wlst.append(x)
x=0.0
cat13['user_w']=user_wlst
#Calculating weighted Average and simple Average
cat13['prod_of_weights']=cat13['user_w']*cat13['w']
cat13['prod']=cat13['prod_of_weights']*cat13['new_value_scaled']
cat13['prod_time']=cat13['w']*cat13['new_value_scaled']
#plot for checking trend of trades with respect to timeline of question
df_mod=cat13
a=[]
for i in range(len(df_mod)):
a.append(abs((df_mod['q_settledtime'][i]-df_mod['q_time'][i]).days))
df_mod['length_of_question']=a
df_mod1=df_mod.groupby(['question_id']).mean()
df_mod1=df_mod1.reset_index()
mean_value=math.ceil(df_mod1['length_of_question'].mean())
df_mod['x']=round((df_mod['t1']/df_mod['length_of_question'])*mean_value)
df_mod['x1']=round((df_mod['res_time_difference']/df_mod['length_of_question'])*mean_value)
df_mod['Abs Error']=abs(df_mod['new_value']-df_mod['settled_value'])
df_mod2=df_mod.groupby('x1').mean()
df_mod2=df_mod2.reset_index()
plt.scatter(df_mod2['x1'],df_mod2['Abs Error'])
z = np.polyfit(df_mod2['x1'], df_mod2['Abs Error'], 1)
p = np.poly1d(z)
plt.plot(df_mod2['x1'],p(df_mod2['x1']),"r--")
plt.xlim(max(df_mod['x1']),0)
plt.xlabel("Time until resolution(scaled)")
plt.ylabel("Average Absolute Error")
plt.title("Trend of predictions over time")
plt.grid(True)
plt.show()
#df_mod2.to_csv('df_mod2.csv')
#Calculating weighted Average and simple Average
l=cat13.groupby(['question_id']).mean()
l=l.reset_index()
new_frame_cat13=cat13.groupby(['question_id']).sum()
new_frame_cat13=new_frame_cat13.reset_index()
settled=[]
for i in range(len(new_frame_cat13)):
if new_frame_cat13['settled_value'][i]==0:
settled.append(0)
else:
settled.append(1)
new_frame_cat13['settled_value']=settled
new_frame_cat13['Weighted_avg']=new_frame_cat13['prod']/new_frame_cat13['prod_of_weights']
new_frame_cat13['Weighted_avg_time']=new_frame_cat13['prod_time']/new_frame_cat13['w']
new_frame_cat13['avg']=l['new_value_scaled']
new_frame_cat13['Avg_AE']=1-new_frame_cat13['avg']
new_frame_cat13['WeightedAvg_AE']=1-new_frame_cat13['Weighted_avg']
new_frame_cat13['WeightedAvg_AE_time']=1-new_frame_cat13['Weighted_avg_time']
#Baseline 1- getting prediction made by the user prior to resolution of question
base=pd.read_csv('basline.csv')
lll=[]
sv=[]
for i in range(len(new_frame_cat13)):
for j in range(len(base)):
if base['id'][j]==new_frame_cat13['question_id'][i]:
lll.append(base['absError'][j])
sv.append(base['settled_value'][j])
break
new_frame_cat13['baseline_AE']=lll
new_frame_cat13['baseline_settled']=sv
new_frame_cat13['baseline']=abs(new_frame_cat13['baseline_settled']-new_frame_cat13['baseline_AE'])
#Baseline 2- getting last 10 predictions prior to resolution
newavg10 = []
weights=[]
prod=[]
for qid in new_frame_cat13['question_id']:
records = cat13[cat13.question_id == qid]
records = records.reset_index()
#print(records.shape)
if(len(records)>0):
if records.shape[0] > 10:
records = records.sort_values('h_time', ascending= False)
records = records.reset_index()
records = records[:10]
newavg10.append(abs(records['settled_value']-records['new_value']).mean())
weights.append(sum(records['w']))
prod.append(sum(records['prod_time']))
else:
newavg10.append(abs(records['settled_value']-records['new_value']).mean())
prod.append(sum(records['prod_time']))
weights.append(sum(records['w']))
else:
newavg10.append(0)
weights.append(sum(records['w']))
prod.append(sum(records['prod_time']))
wadf=pd.DataFrame()
wadf['prod']=prod
wadf['weights']=weights
wadf['wa']=wadf['prod']/wadf['weights']
new_frame_cat13['baseline_last10'] = newavg10
new_frame_cat13['baseline_last10_wa'] = wadf['wa']
new_frame_cat13['baseline_last10_wa_ae'] = 1-wadf['wa']
#Difference between Simple Average and weighted Average considering both weights as well as only time weights
new_frame_cat13['dff_SA&WA_AE']=new_frame_cat13['Avg_AE']-new_frame_cat13['WeightedAvg_AE']
new_frame_cat13['dff_SA&WA_time_AE']=new_frame_cat13['Avg_AE']-new_frame_cat13['WeightedAvg_AE_time']
plt.plot(new_frame_cat13.index,new_frame_cat13['dff_SA&WA_AE'],'o')
plt.axhline(0)
#Mean absolute errors for category 13 questions
round(new_frame_cat13['Avg_AE'].mean(),2) #Simple Average
round(new_frame_cat13['WeightedAvg_AE'].mean(),2) #Weighted Average
round(new_frame_cat13['WeightedAvg_AE_time'].mean(),2) #Weighted Average using time weights only
round(new_frame_cat13['baseline_AE'].mean(),2) #Baseline 1
round(new_frame_cat13['baseline_last10'].mean(),2) #Baseline 2
# =============================================================================
# File: /list1.py    (repo: bwigoder/pythontut)
# =============================================================================
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
count = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
count += 1
return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
wordsnox = []
wordswithx = []
for word in words:
if word[0] == 'x':
wordswithx.append(word)
else:
wordsnox.append(word)
wordswithx_sorted = sorted(wordswithx)
wordsnox_sorted = sorted(wordsnox)
sortedwords = wordswithx_sorted + wordsnox_sorted
return sortedwords
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
def grablast(s):
return s[-1]
tuples_sorted = sorted(tuples, key=grablast)
return tuples_sorted
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
# =============================================================================
# File: /lib/python2.7/site-packages/bokeh/models/widgets/tables.py
# (repo: sunnyweilai/Finding-Theme-Color-Palettes, license: Python-2.0)
# =============================================================================
''' Various kinds of data table (data grid) widgets.
'''
from __future__ import absolute_import
from ...core.enums import DateFormat, FontStyle, NumeralLanguage, TextAlign, RoundingFunction
from ...core.has_props import abstract
from ...core.properties import Bool, Color, Either, Enum, Float, Instance, Int, List, Override, String
from ...model import Model
from ..sources import DataSource, CDSView
from .widget import Widget
@abstract
class CellFormatter(Model):
''' Abstract base class for data table's cell formatters.
'''
@abstract
class CellEditor(Model):
''' Abstract base class for data table's cell editors.
'''
class StringFormatter(CellFormatter):
''' Basic string cell formatter.
'''
font_style = Enum(FontStyle, default="normal", help="""
An optional text font style, e.g. bold, italic.
""")
text_align = Enum(TextAlign, default="left", help="""
An optional text align, i.e. left, center or right.
""")
text_color = Color(help="""
An optional text color. See :class:`bokeh.core.properties.Color` for
details.
""")
class NumberFormatter(StringFormatter):
''' Number cell formatter.
'''
format = String("0,0", help="""
The number format, as defined in the following tables:
**NUMBERS**:
============ ============== ===============
Number Format String
============ ============== ===============
10000 '0,0.0000' 10,000.0000
10000.23 '0,0' 10,000
10000.23 '+0,0' +10,000
-10000 '0,0.0' -10,000.0
10000.1234 '0.000' 10000.123
10000.1234 '0[.]00000' 10000.12340
-10000 '(0,0.0000)' (10,000.0000)
-0.23 '.00' -.23
-0.23 '(.00)' (.23)
0.23 '0.00000' 0.23000
0.23 '0.0[0000]' 0.23
1230974 '0.0a' 1.2m
1460 '0 a' 1 k
-104000 '0a' -104k
1 '0o' 1st
52 '0o' 52nd
23 '0o' 23rd
100 '0o' 100th
============ ============== ===============
**CURRENCY**:
=========== =============== =============
Number Format String
=========== =============== =============
1000.234 '$0,0.00' $1,000.23
1000.2 '0,0[.]00 $' 1,000.20 $
1001 '$ 0,0[.]00' $ 1,001
-1000.234 '($0,0)' ($1,000)
-1000.234 '$0.00' -$1000.23
1230974 '($ 0.00 a)' $ 1.23 m
=========== =============== =============
**BYTES**:
=============== =========== ============
Number Format String
=============== =========== ============
100 '0b' 100B
2048 '0 b' 2 KB
7884486213 '0.0b' 7.3GB
3467479682787 '0.000 b' 3.154 TB
=============== =========== ============
**PERCENTAGES**:
============= ============= ===========
Number Format String
============= ============= ===========
1 '0%' 100%
0.974878234 '0.000%' 97.488%
-0.43 '0 %' -43 %
0.43 '(0.000 %)' 43.000 %
============= ============= ===========
**TIME**:
============ ============== ============
Number Format String
============ ============== ============
25 '00:00:00' 0:00:25
238 '00:00:00' 0:03:58
63846 '00:00:00' 17:44:06
============ ============== ============
For the complete specification, see http://numbrojs.com/format.html
""")
language = Enum(NumeralLanguage, default="en", help="""
The language to use for formatting language-specific features (e.g. thousands separator).
""")
rounding = Enum(RoundingFunction, help="""
Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
""")
class BooleanFormatter(CellFormatter):
''' Boolean (check mark) cell formatter.
'''
icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help="""
The icon visualizing the check mark.
""")
class DateFormatter(CellFormatter):
''' Date cell formatter.
'''
format = Either(Enum(DateFormat), String, default='ISO-8601', help="""
The date format can be any standard `strftime`_ format string, as well
as any of the following predefined format names:
================================================ ================== ===================
Format name(s) Format string Example Output
================================================ ================== ===================
``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``"%Y-%m-%d"`` 2014-03-01
``COOKIE`` ``"%a, %d %b %Y"`` Sat, 01 Mar 2014
``RFC-850`` ``"%A, %d-%b-%y"`` Saturday, 01-Mar-14
``RFC-1123`` / ``RFC-2822`` ``"%a, %e %b %Y"`` Sat, 1 Mar 2014
``RSS`` / ``RFC-822`` / ``RFC-1036`` ``"%a, %e %b %y"`` Sat, 1 Mar 14
``TIMESTAMP`` (ms since epoch) 1393632000000
================================================ ================== ===================
Note that in the table some of the format names are synonymous, with
identical format names separated by slashes.
This list of supported `strftime`_ format codes is reproduced below.
%a
The abbreviated name of the day of the week according to the
current locale.
%A
The full name of the day of the week according to the current
locale.
%b
The abbreviated month name according to the current locale.
%B
The full month name according to the current locale.
%c
The preferred date and time representation for the current
locale.
%C
The century number (year/100) as a 2-digit integer.
%d
The day of the month as a decimal number (range 01 to 31).
%D
Equivalent to %m/%d/%y. (Americans should note that in many
other countries %d/%m/%y is rather common. This means that in
international context this format is ambiguous and should not
be used.)
%e
Like %d, the day of the month as a decimal number, but a
leading zero is replaced by a space.
%f
Microsecond as a decimal number, zero-padded on the left (range
000000-999999). This is an extension to the set of directives
available to `timezone`_.
%F
Equivalent to %Y-%m-%d (the ISO 8601 date format).
%G
The ISO 8601 week-based year with century as a decimal number.
The 4-digit year corresponding to the ISO week number (see %V).
This has the same format and value as %Y, except that if the
ISO week number belongs to the previous or next year, that year
is used instead.
%g
Like %G, but without century, that is, with a 2-digit year (00-99).
%h
Equivalent to %b.
%H
The hour as a decimal number using a 24-hour clock (range 00
to 23).
%I
The hour as a decimal number using a 12-hour clock (range 01
to 12).
%j
The day of the year as a decimal number (range 001 to 366).
%k
The hour (24-hour clock) as a decimal number (range 0 to 23).
Single digits are preceded by a blank. (See also %H.)
%l
The hour (12-hour clock) as a decimal number (range 1 to 12).
Single digits are preceded by a blank. (See also %I.) (TZ)
%m
The month as a decimal number (range 01 to 12).
%M
The minute as a decimal number (range 00 to 59).
%n
A newline character. Bokeh text does not currently support
newline characters.
%N
Nanosecond as a decimal number, zero-padded on the left (range
000000000-999999999). Supports a padding width specifier, i.e.
%3N displays 3 leftmost digits. However, this is only accurate
to the millisecond level of precision due to limitations of
`timezone`_.
%p
Either "AM" or "PM" according to the given time value, or the
corresponding strings for the current locale. Noon is treated
as "PM" and midnight as "AM".
%P
Like %p but in lowercase: "am" or "pm" or a corresponding
string for the current locale.
%r
The time in a.m. or p.m. notation. In the POSIX locale this
is equivalent to %I:%M:%S %p.
%R
The time in 24-hour notation (%H:%M). For a version including
the seconds, see %T below.
%s
The number of seconds since the Epoch, 1970-01-01 00:00:00
+0000 (UTC).
%S
The second as a decimal number (range 00 to 60). (The range
is up to 60 to allow for occasional leap seconds.)
%t
A tab character. Bokeh text does not currently support tab
characters.
%T
The time in 24-hour notation (%H:%M:%S).
%u
The day of the week as a decimal, range 1 to 7, Monday being 1.
See also %w.
%U
The week number of the current year as a decimal number, range
00 to 53, starting with the first Sunday as the first day of
week 01. See also %V and %W.
%V
The ISO 8601 week number (see NOTES) of the current year as a
decimal number, range 01 to 53, where week 1 is the first week
that has at least 4 days in the new year. See also %U and %W.
%w
The day of the week as a decimal, range 0 to 6, Sunday being 0.
See also %u.
%W
The week number of the current year as a decimal number, range
00 to 53, starting with the first Monday as the first day of
week 01.
%x
The preferred date representation for the current locale
without the time.
%X
The preferred time representation for the current locale
without the date.
%y
The year as a decimal number without a century (range 00 to 99).
%Y
The year as a decimal number including the century.
%z
The +hhmm or -hhmm numeric timezone (that is, the hour and
minute offset from UTC).
%Z
The timezone name or abbreviation.
%%
A literal '%' character.
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
        claim that `timezone`_ makes to support "the full complement
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `github issue`_,
so that the documentation can be updated appropriately.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
""")
class HTMLTemplateFormatter(CellFormatter):
''' HTML formatter using a template.
This uses Underscore's `template` method and syntax. http://underscorejs.org/#template
    The formatter has access to other items in the row via the `dataContext` object passed to the formatter.
So, for example, if another column in the datasource was named `url`, the template could access it as:
<a href="<%= url %>"><%= value %></a>
    To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate`,
    or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template
Example: Simple HTML template to format the column value as code.
HTMLTemplateFormatter(template='<code><%= value %></code>')
Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.
HTMLTemplateFormatter(template='<a href="https:/www.google.com/search?q=<%= manufacturer %>+<%= model %>" target="_blank"><%= value %></a>')
'''
template = String('<%= value %>', help="""
Template string to be used by Underscore's template method.
""")
class StringEditor(CellEditor):
''' Basic string cell editor with auto-completion.
'''
completions = List(String, help="""
An optional list of completion strings.
""")
class TextEditor(CellEditor):
''' Multi-line string cell editor.
'''
class SelectEditor(CellEditor):
''' Select cell editor.
'''
options = List(String, help="""
The list of options to select from.
""")
class PercentEditor(CellEditor):
''' ``IntEditor`` optimized for editing percentages.
'''
class CheckboxEditor(CellEditor):
''' Boolean value cell editor.
'''
class IntEditor(CellEditor):
''' Spinner-based integer cell editor.
'''
step = Int(1, help="""
The major step value.
""")
class NumberEditor(CellEditor):
''' Spinner-based number cell editor.
'''
step = Float(0.01, help="""
The major step value.
""")
class TimeEditor(CellEditor):
''' Spinner-based time cell editor.
'''
class DateEditor(CellEditor):
''' Calendar-based date cell editor.
'''
class TableColumn(Model):
''' Table column widget.
'''
field = String(help="""
The name of the field mapping to a column in the data source.
""")
title = String(help="""
The title of this column. If not set, column's data field is
used instead.
""")
width = Int(300, help="""
The width or maximum width (depending on data table's configuration)
in pixels of this column.
""")
formatter = Instance(CellFormatter, lambda: StringFormatter(), help="""
The cell formatter for this column. By default, a simple string
formatter is used.
""")
editor = Instance(CellEditor, lambda: StringEditor(), help="""
The cell editor for this column. By default, a simple string editor
is used.
""")
sortable = Bool(True, help="""
Whether this column is sortable or not. Note that data table has
to have sorting enabled to allow sorting in general.
""")
default_sort = Enum("ascending", "descending", help="""
The default sorting order. By default ``ascending`` order is used.
""")
@abstract
class TableWidget(Widget):
''' Abstract base class for data table (data grid) widgets.
'''
source = Instance(DataSource, help="""
The source of data for the widget.
""")
view = Instance(CDSView, help="""
A view into the data source to use when rendering table rows. A default view
of the entire data source is created if a view is not passed in during
initialization.
""")
def __init__(self, **kw):
super(TableWidget, self).__init__(**kw)
if "view" not in kw:
self.view = CDSView(source=self.source)
class DataTable(TableWidget):
''' Two dimensional grid for visualisation and editing large amounts
of data.
'''
columns = List(Instance(TableColumn), help="""
The list of child column widgets.
""")
fit_columns = Bool(True, help="""
    Whether columns should be fit to the available width. This results in no
    horizontal scrollbar showing up, but data can get unreadable if there is
    not enough space available. If set to ``True``, columns' width is
    understood as maximum width.
""")
sortable = Bool(True, help="""
    Allows sorting of the table's contents. By default natural order is
    preserved. To sort a column, click on its header. Clicking one more time
    changes the sort direction. Use Ctrl + click to return to natural order.
    Use Shift + click to sort multiple columns simultaneously.
""")
reorderable = Bool(True, help="""
    Allows the reordering of a table's columns. To reorder a column,
click and drag a table's header to the desired location in the table.
The columns on either side will remain in their previous order.
""")
editable = Bool(False, help="""
    Allows editing of the table's contents. Cell editors need to be
    configured on columns that are required to be editable.
""")
selectable = Either(Bool(True), Enum("checkbox"), help="""
Whether a table's rows can be selected or not. Using ``checkbox`` is
equivalent to ``True``, but makes selection visible through a checkbox
for each row, instead of highlighting rows. Multiple selection is
allowed and can be achieved by either clicking multiple checkboxes (if
enabled) or using Shift + click on rows.
""")
row_headers = Bool(True, help="""
Enable or disable row headers, i.e. the index column.
""")
scroll_to_selection = Bool(True, help="""
Whenever a selection is made on the data source, scroll the selected
rows into the table's viewport if none of the selected rows are already
in the viewport.
""")
height = Override(default=400)
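# A minimal usage sketch (not part of this module): wiring TableColumn,
# NumberFormatter and DataTable together. The import path and the column
# data below are illustrative assumptions.
#
#     from bokeh.models import ColumnDataSource, DataTable, NumberFormatter, TableColumn
#
#     source = ColumnDataSource(data=dict(price=[1000.234, -1000.234]))
#     columns = [TableColumn(field="price", title="Price",
#                            formatter=NumberFormatter(format="$0,0.00"))]
#     table = DataTable(source=source, columns=columns, width=400)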
| [
"[email protected]"
] | |
e2c572e43c469fc2dd9396060cc0563e3547fb90 | c5cbb2a8a32ad0f6d86290921ed5fd623e02e624 | /MICCAI_Challenge2018_YuHsiang/DICELOSS/DLV3+3AS.py | f34d0e00979214a3c80d0ba2232c218d09f23eab | [] | no_license | leocvml/MICCAI2018_Robotic-Scene-Segmentation- | 779835ca506b935c9ae0282c2d49924735d473a6 | 8b23c352188449bf73f5976723ff22326911cfd7 | refs/heads/master | 2020-07-23T12:13:04.395012 | 2019-09-10T12:28:08 | 2019-09-10T12:28:08 | 207,552,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,418 | py | from mxnet import gluon, image, ndarray
from matplotlib import pyplot as plt
from mxnet.gluon import data as gdata
from os import listdir
from mxnet.gluon import nn
import mxnet as mx
from mxnet import nd
import os
import numpy as np
#####################################################################################
##
## DeepLab
##
####################################################################################
class stemblock(nn.HybridBlock):
def __init__(self, filters):
super(stemblock, self).__init__()
self.filters = filters
self.conv1 = nn.Conv2D(self.filters, kernel_size=3, padding=1, strides=2)
self.bn1 = nn.BatchNorm()
self.act1 = nn.Activation('relu')
self.conv2 = nn.Conv2D(self.filters, kernel_size=3, padding=1, strides=1)
self.bn2 = nn.BatchNorm()
self.act2 = nn.Activation('relu')
self.conv3 = nn.Conv2D(self.filters, kernel_size=3, padding=1, strides=1)
self.pool = nn.MaxPool2D(pool_size=(2, 2), strides=2)
def hybrid_forward(self, F, x):
stem1 = self.act1(self.bn1(self.conv1(x)))
stem2 = self.act2(self.bn2(self.conv2(stem1)))
stem3 = self.pool(stem2)
out = self.conv3(stem3)
return out
class conv_block(nn.HybridBlock):
def __init__(self, filters):
super(conv_block, self).__init__()
self.net = nn.HybridSequential()
with self.net.name_scope():
self.net.add(
nn.BatchNorm(),
nn.Activation('relu'),
nn.Conv2D(filters, kernel_size=3, padding=1),
nn.BatchNorm(),
nn.Activation('relu'),
nn.Conv2D(filters, kernel_size=1),
)
def hybrid_forward(self, F, x):
return self.net(x)
class DenseBlock(nn.HybridBlock):
def __init__(self, num_convs, num_channels): # layers, growth rate
        super(DenseBlock, self).__init__()
self.net = nn.HybridSequential()
with self.net.name_scope():
for _ in range(num_convs):
self.net.add(
conv_block(num_channels)
)
def hybrid_forward(self, F, x):
for blk in self.net:
Y = blk(x)
x = F.concat(x, Y, dim=1)
return x
class Deeplabv3(nn.HybridBlock):
def __init__(self, growth_rate, numofcls):
super(Deeplabv3, self).__init__()
self.feature_extract = nn.HybridSequential()
with self.feature_extract.name_scope():
self.feature_extract.add(
stemblock(256),
                DenseBlock(8, growth_rate),
nn.BatchNorm(),
nn.Activation('relu')
)
self.conv1 = nn.HybridSequential()
with self.conv1.name_scope():
self.conv1.add(
nn.Conv2D(128, kernel_size=1, strides=2),
nn.BatchNorm(),
nn.Activation('relu')
)
self.conv3r6 = nn.HybridSequential()
with self.conv3r6.name_scope():
self.conv3r6.add(
nn.Conv2D(128, kernel_size=3, strides=2, padding=6, dilation=6),
nn.BatchNorm(),
nn.Activation('relu')
)
self.conv3r12 = nn.HybridSequential()
with self.conv3r12.name_scope():
self.conv3r12.add(
nn.Conv2D(128, kernel_size=3, strides=2, padding=12, dilation=12),
nn.BatchNorm(),
nn.Activation('relu')
)
self.conv3r18 = nn.HybridSequential()
with self.conv3r18.name_scope():
self.conv3r18.add(
nn.Conv2D(128, kernel_size=3, strides=2, padding=18, dilation=18),
nn.BatchNorm(),
nn.Activation('relu')
)
self.maxpool = nn.MaxPool2D(pool_size=2, strides=2)
self.concatconv1 = nn.HybridSequential()
with self.concatconv1.name_scope():
self.concatconv1.add(
nn.Conv2D(512, kernel_size=1),
nn.BatchNorm(),
nn.Activation('relu')
)
self.feconv1 = nn.HybridSequential()
with self.feconv1.name_scope():
self.feconv1.add(
nn.Conv2D(512, kernel_size=1),
nn.BatchNorm(),
nn.Activation('relu')
)
self.transUp = nn.HybridSequential()
with self.transUp.name_scope():
self.transUp.add(
nn.Conv2DTranspose(256, kernel_size=4, padding=1, strides=2),
nn.BatchNorm(),
nn.Activation('relu')
)
self.decodeConv3 = nn.HybridSequential()
with self.decodeConv3.name_scope():
self.decodeConv3.add(
nn.Conv2D(512, kernel_size=3, padding=1, strides=1),
nn.BatchNorm(),
nn.Activation('relu')
)
self.Up4 = nn.HybridSequential()
with self.Up4.name_scope():
self.Up4.add(
nn.Conv2DTranspose(256, kernel_size=4, padding=1, strides=2),
nn.BatchNorm(),
nn.Activation('relu'),
nn.Conv2DTranspose(numofcls, kernel_size=4, padding=1, strides=2),
nn.Activation('sigmoid')
)
self.supervised = nn.HybridSequential()
with self.supervised.name_scope():
self.supervised.add(
nn.Conv2D(numofcls, kernel_size=1, strides=1),
nn.Activation('sigmoid')
)
def hybrid_forward(self, F, x):
out = self.feature_extract(x)
conv1out = self.conv1(out)
conv3r6out = self.conv3r6(out)
conv3r12out = self.conv3r12(out)
conv3r18out = self.conv3r18(out)
maxpoolout = self.maxpool(out)
        second_out = F.concat(conv1out, conv3r6out, conv3r12out, conv3r18out, maxpoolout, dim=1)
encoder_out = self.concatconv1(second_out)
sup_out = self.supervised(encoder_out)
encoderUp = self.transUp(encoder_out)
feconv1out = self.feconv1(out)
        combine_out = F.concat(encoderUp, feconv1out, dim=1)
output = self.decodeConv3(combine_out)
output = self.Up4(output)
return output, sup_out
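# Shape sanity-check sketch (for illustration only, not part of the original
# script): the stem downsamples by 4 and the ASPP branches by a further 2,
# so sup_out comes out at 1/8 resolution, while the decoder upsamples back
# to the input size, matching the sup_label resized by //8 in the dataset
# class below.
#
#     net = Deeplabv3(growth_rate=10, numofcls=11)
#     net.initialize()
#     x = nd.random.uniform(shape=(1, 3, 256, 320))
#     output, sup_out = net(x)  # output: (1, 11, 256, 320), sup_out: (1, 11, 32, 40)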
####################################################################################
###
###
### DataLoader
###
#####################################################################################
class SegDataset(gluon.data.Dataset):
def __init__(self, root, resize, DataNameList, colormap=None, classes=None):
self.root = root
self.resize = resize
self.colormap = colormap
self.classes = classes
self.DataNameList = DataNameList
self.colormap2label = None
self.load_images()
def clsmap2channel(self,x):
y = ndarray.one_hot(x, 11)
y = ndarray.transpose(y,(2, 0, 1))
return y
def label_indices(self, img):
if self.colormap2label is None:
self.colormap2label = nd.zeros(256 ** 3)
for i, cm in enumerate(self.colormap):
self.colormap2label[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i
data = img.astype('int32')
idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]
return self.colormap2label[idx]
def read_images(self, root):
dataroot = root + 'left_frames/' # left_frames #data
labelroot = root + 'labels/' # labels #label
DataNamelist = sorted(self.DataNameList)
data, label = [None] * len(self.DataNameList), [None] * len(self.DataNameList)
for i, name in enumerate(DataNamelist):
data[i] = image.imread(dataroot + name)
label[i] = image.imread(labelroot + name)
return data, label
def load_images(self):
data, label = self.read_images(root=self.root)
self.data = [self.normalize_image(im) for im in data]
if self.colormap is None:
self.label = [self.normalize_image(im) for im in label]
if self.colormap != None:
self.label = label
print('read ' + str(len(self.data)) + ' examples')
def normalize_image(self, data):
return (data.astype('float32') / 127.5) - 1
def __getitem__(self, item):
if self.colormap is None:
data = image.imresize(self.data[item], self.resize[0], self.resize[1])
label = image.imresize(self.label[item], self.resize[0], self.resize[1])
return data.transpose((2, 0, 1)), label.transpose((2, 0, 1))
if self.colormap != None:
data = image.imresize(self.data[item], self.resize[0], self.resize[1])
label = image.imresize(self.label[item], self.resize[0], self.resize[1])
sup_label = image.imresize(self.label[item], self.resize[0]//8, self.resize[1]//8)
return data.transpose((2, 0, 1)), self.label_indices(label),self.label_indices(sup_label)
def __len__(self):
return len(self.data)
MICCAI_colormap = [[0, 0, 0], [0, 255, 0], [0, 255, 255], [125, 255, 12],
[255, 55, 0], [24, 55, 125], [187, 155, 25], [0, 255, 125],
[255, 255, 125], [123, 15, 175], [124, 155, 5]]
MICCAI_classes = ['background', 'shaft', 'clasper', 'wrist', 'kidney-parenchyma', 'covered-kidney',
'thread', 'clamps', ' suturing-needle', 'suction', 'small_intestine']
numcls = len(MICCAI_colormap)
def LoadDataset(dir, batchsize, output_shape, datalist, colormap=None, classes=None, train=True):
dataset = SegDataset(dir, output_shape, datalist, colormap, classes)
if not train:
data_iter = gdata.DataLoader(dataset, 1, shuffle=False)
if train:
data_iter = gdata.DataLoader(dataset, batchsize, shuffle=True, last_batch='discard')
return data_iter
########################################################################
###
### predict to map
###
###
########################################################################
def predict2img(predict):
colormap = ndarray.array(MICCAI_colormap, ctx=mx.gpu(), dtype='uint8') # voc_colormap
target = predict.asnumpy()
label = colormap[target[:, :, :]]
return label.asnumpy()
def PredictTrans(predict):
colormap = ndarray.array(MICCAI_colormap, ctx=mx.gpu(), dtype='uint8') # voc_colormap
label = colormap[predict[:, :, :]]
label = ndarray.transpose(label, (0, 3, 1, 2))
label = label.astype(('float32'))
return label
######################################################################
###
### learning rate decay
###
###
###
######################################################################
def Cosine_decay_schedule(current_step, total_step, warm_step, hold_base_step, learning_rate_base, warmup_rate_base):
# learning_rate_base = 0.1
# warmup_rate_base = 0.05
# total_step = 1000
# warm_step = 0
# hold_base_step = 10
learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
np.pi * ((current_step - warm_step - hold_base_step) / (total_step - warm_step - hold_base_step))))
if hold_base_step > 0:
if current_step > warm_step + hold_base_step:
learning_rate = learning_rate
else:
learning_rate = learning_rate_base
if warm_step > 0:
slope = (learning_rate_base - warmup_rate_base) / warm_step
warmup_rate = slope * current_step + warmup_rate_base
if current_step < warm_step:
learning_rate = warmup_rate
else:
learning_rate = learning_rate
return learning_rate
######################################################################################################################
import random
name = 'DLV3+3AS'
dataset_dir ='../../total640x512/'
result_folder = 'result_' + name + '/'
if not os.path.exists(result_folder):
os.makedirs(result_folder)
random.seed(0)
test_result_folder = 'test' + result_folder
if not os.path.exists(test_result_folder):
os.makedirs(test_result_folder)
GPU_COUNT = 1
batch_size = GPU_COUNT * 10
resize = (320, 256)
ctx = [mx.gpu(i) for i in range(GPU_COUNT)]
with open('train_iter1.txt', 'r') as f:
lines = f.readlines()
img_list = []
for line in lines:
img_list.append(line[:-1])
training_list = img_list
train_iter = LoadDataset(dataset_dir, batch_size, resize, training_list, MICCAI_colormap,
MICCAI_classes) # default is for 2 class if you want to multiclass
for d, l,sup_label in train_iter:
break
print(d.shape)
print(l.shape)
print(sup_label.shape)
GenNet = nn.HybridSequential()
with GenNet.name_scope():
GenNet.add(
Deeplabv3(growth_rate=10, numofcls=numcls) # 12
)
GenNet.initialize()
softmax_CE = gluon.loss.SoftmaxCrossEntropyLoss(axis=1)
GenNet.collect_params().reset_ctx(ctx=ctx)
epochs = 200
warm_step = 3
hold_base = int(epochs * 0.6)
base_lr = 0.0002
warm_lr = 0.000001
Gtrainer = gluon.Trainer(GenNet.collect_params(), 'adam', {'learning_rate': base_lr}) # 0.0001
G_filename = name+'.params'
#GenNet.load_params(G_filename, ctx=ctx)
import time
from mxnet import autograd
for epoch in range(epochs):
lr = Cosine_decay_schedule(epoch, epochs, warm_step, hold_base,base_lr, warm_lr)
Gtrainer.set_learning_rate(lr)
tic = time.time()
for i, (d, l, sup_l) in enumerate(train_iter):
x = gluon.utils.split_and_load(d, ctx)
y = gluon.utils.split_and_load(l, ctx)
sup_y = gluon.utils.split_and_load(sup_l, ctx)
with autograd.record():
fake_y = [GenNet(X) for X in x]
errG = [softmax_CE(f_y[0], _y) + softmax_CE(f_y[1], _sy) for f_y, _y, _sy in zip(fake_y, y, sup_y)]
# print(len(errG))
for l in errG:
l.backward()
Gtrainer.step(d.shape[0])
print(
'Epoch %2d,batch %2d,G_loss %.5f ,lr = %.5f, time %.1f sec' % (
epoch,
i,
mx.ndarray.mean(errG[0]).asscalar(),
Gtrainer.learning_rate,
time.time() - tic))
if epoch % 5 == 0:
result, sup = GenNet(x[0])
result = ndarray.argmax(result, axis=1)
result = predict2img(result)
x = mx.ndarray.transpose(x[0], (0, 2, 3, 1))
# print(x.shape)
GT = predict2img(y[0])
figsize = (10, 4)
_, axes = plt.subplots(3, x.shape[0], figsize=figsize)
for n in range(x.shape[0]):
#print(n)
axes[0][n].imshow(x[n].asnumpy())
axes[1][n].imshow(result[n])
axes[2][n].imshow(GT[n])
axes[0][n].axis('off')
axes[1][n].axis('off')
axes[2][n].axis('off')
plt.savefig(result_folder + str(epoch) + '.png')
plt.close('all')
GenNet.save_params(G_filename)
GenNet.save_params(G_filename)
| [
"[email protected]"
] | |
387fbf7012807a35c5ca715e00d16344878a7166 | 3c191eddb3f83a18d2f20836ccbc7d64470d441e | /IDEA/pythod_work/spark_to_pgMysql/pgDDLToSpark/pg_ddl_spark.py | 9ae11f6d2e5de3d9120dd766cc4f297eb473032e | [] | no_license | maizi12580/test | 69c7aa60264d4757e58daba0f268408a22bb96a2 | df354eb1de71b459b92a5a505e27b9d17880332c | refs/heads/master | 2023-06-29T13:50:30.401052 | 2021-08-03T16:21:14 | 2021-08-03T16:21:14 | 289,845,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,108 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Atom
import ConfigParser
import base64
import csv
import logging.config
import random
import sched
import socket
import traceback
import psycopg2
import json
from datetime import datetime
import subprocess
import time
import os
import sys
import re
class CryptoUtil:
def __init__(self):
pass
@classmethod
def encrypt(cls, source_str):
random_choice = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()"
to_encrypt_arr = []
shift_str = ""
for char in source_str:
shift_str = shift_str + chr(ord(char) + 3)
shift_index = 0
for index in range(0, len(shift_str) * 3):
if index % 3 != 0:
rand_char = random.choice(random_choice)
to_encrypt_arr.append(rand_char)
else:
to_encrypt_arr.append(shift_str[shift_index])
shift_index = shift_index + 1
to_encrypt_str = ''.join(to_encrypt_arr)
encrypt_str = base64.b64encode(to_encrypt_str)
return encrypt_str
@classmethod
def decrypt(cls, encrypt_str):
decrypt_str = base64.b64decode(encrypt_str)
shift_str = []
for index in range(len(decrypt_str)):
if index % 3 == 0:
shift_str.append(decrypt_str[index])
source_arr = []
for char in shift_str:
source_arr.append(chr(ord(char) - 3))
source_str = "".join(source_arr)
return source_str
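# Round-trip sketch: encrypt() shifts every character up by 3 code points,
# pads two random characters after each real one, then base64-encodes the
# result; decrypt() reverses this by keeping every third character.
#
#     token = CryptoUtil.encrypt('s3cret')
#     assert CryptoUtil.decrypt(token) == 's3cret'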
class DateUtils:
def __init__(self):
pass
@classmethod
def get_current_date(cls):
"""get current time of year-month-day format
:return: time of year-month-day format
"""
return datetime.now().strftime('%Y-%m-%d')
@classmethod
def get_current_time(cls):
"""get current time of year-month-day hour:minute:second.microsecond format
:return: time of year-month-day hour:minute:second.microsecond format
"""
return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
@classmethod
def timestamp_to_datetime(cls, timestamp):
local_dt_time = datetime.fromtimestamp(timestamp / 1000000.0)
return local_dt_time
@classmethod
def datetime_to_strtime(cls, datetime_obj, date_format):
local_str_time = datetime_obj.strftime(date_format)
return local_str_time
@classmethod
def datetime_to_timestamp(cls, datetime_obj):
local_timestamp = long(time.mktime(datetime_obj.timetuple()) * 1000000.0 + datetime_obj.microsecond)
return local_timestamp
@classmethod
def strtime_to_datetime(cls, timestr, date_format):
local_datetime = datetime.strptime(timestr, date_format)
return local_datetime
@classmethod
def timestamp_to_strtime(cls, timestamp, date_format):
return cls.datetime_to_strtime(cls.timestamp_to_datetime(timestamp), date_format)
@classmethod
def strtime_to_timestamp(cls, timestr, date_format):
try:
local_str_time = cls.datetime_to_timestamp(cls.strtime_to_datetime(timestr, date_format))
return local_str_time
except Exception as e:
return 0
@classmethod
def get_file_ctime_timestamp(cls, f):
return cls.datetime_to_timestamp(datetime.fromtimestamp(os.path.getctime(f)))
@classmethod
def get_file_mtime_timestamp(cls, f):
return cls.datetime_to_timestamp(datetime.fromtimestamp(os.path.getmtime(f)))
@staticmethod
def compare_mtime(x, y):
x_mtime = x["mtime"]
y_mtime = y["mtime"]
if x_mtime < y_mtime:
return -1
elif x_mtime > y_mtime:
return 1
else:
return 0
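# Conversion sketch: timestamps in this class are microseconds since the
# epoch, so string -> timestamp -> string round-trips (the format string
# below is an assumption):
#
#     ts = DateUtils.strtime_to_timestamp('2020-01-01 08:30:00', '%Y-%m-%d %H:%M:%S')
#     DateUtils.timestamp_to_strtime(ts, '%Y-%m-%d %H:%M:%S')  # -> '2020-01-01 08:30:00'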
class SparkDDLSync:
def __init__(self, log):
        # resolve the directory this file lives in
current_file_path = os.path.split(os.path.realpath(__file__))[0]
self.config_file = os.path.join(current_file_path, "syn.config")
self.log = log
self.config = ConfigParser.ConfigParser()
self.config.read(self.config_file)
self.host = self.config.get('psql', 'host')
self.spark_host = self.config.get('psql', 'spark_host')
self.port = self.config.get('psql', 'port')
self.psql_password_type = self.config.get('psql', 'psql_password_type')
self.psql_user = self.config.get('psql', 'psql_user')
self.psql_password = self.config.get('psql', 'psql_password')
self.fromDB = self.config.get('psql','from_db_name')
self.aimDB = self.config.get('psql','aim_db_name')
self.interval_time = int(self.config.get('execute', 'interval_time'))
ignore_option = self.config.get('execute', 'ignore_error')
if "true" == ignore_option.lower():
self.ignore_error = True
else:
self.ignore_error = False
self.max_retry_times = int(self.config.get('execute', 'max_retry_times'))
self.sleep_time = 1
self.SUCCESS_STATE = 0
self.level = {"debug": "DEBUG", "info": "INFO", "warning": "WARNING", "error": "ERROR"}
self.level_priority = {"DEBUG": 4, "INFO": 3, "WARNING": 2, "ERROR": 1}
self.ignore_file = "syn.ignore.info"
self.in_metastore_file = "syn.insert_metastore.info"
self.check_avg()
self.logger(self.level["info"], "Start pg ddl sync to spark...")
        self.pg_to_spark_fields = {"text": "string",
                                   "integer": "integer",
                                   "numeric": "int",
                                   "bigint": "bigint",
                                   "float": "double",
                                   "double": "double",
                                   "decimal": "decimal",
                                   "date": "date",
                                   "timestamp": "timestamp",
                                   "LOB": "LOB"}
self.current_file_path = os.path.split(os.path.realpath(__file__))[0]
path=os.path.join(self.current_file_path, self.aimDB)
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
self.logger(self.level["info"], "Create folder successful.")
else:
self.logger(self.level["info"], "Folder is exists.")
def __log_ignore_stmt(self, stmt):
ignore_file = open(self.ignore_file, "a")
ignore_file.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") + " " + stmt + "\n")
ignore_file.close()
def check_avg(self):
# if not os.path.exists(self.log_directory):
# os.makedirs(self.log_directory)
if int(self.psql_password_type) == 0:
encrypt_password = CryptoUtil.encrypt(self.psql_password)
self.psql_password = encrypt_password
self.psql_password_type = 1
self.update_password()
def update_password(self):
self.config.set('psql', 'psql_password', self.psql_password)
self.config.set('psql', 'psql_password_type', self.psql_password_type)
self.config.write(open(self.config_file, "w"))
def run_check_task(self):
self.logger(self.level["info"], "begin to check psql")
self.fromDB_conn = psycopg2.connect(database=self.fromDB, user=self.psql_user,\
password=CryptoUtil.decrypt(self.psql_password), host=self.host, port=self.port)
self.fromDB_cursor = self.fromDB_conn.cursor()
self.exe()
        self.fromDB_cursor.close()
        self.fromDB_conn.close()
def exe(self):
self.fromDB_cursor.execute("select a.relname ,b.ftoptions from \"pg_class\" a ,\"pg_foreign_table\" b where a.relkind='f' and a.relfilenode=b.ftrelid")
tables = self.fromDB_cursor.fetchall()
folder=os.path.join(self.current_file_path, self.aimDB)
for table in tables:
self.fromDB_cursor.execute("select column_name,data_type from information_schema.\"columns\" where \"table_name\"='{tb}'".format(tb=table[0]))
fields=self.fromDB_cursor.fetchall()
sub=''
for field in fields:
field_type=field[1].split(" ")[0]
if(self.pg_to_spark_fields.has_key(field_type)):
sub+=field[0]+" "+self.pg_to_spark_fields[field_type]+","
else:
self.logger(self.level["info"], "error field type {fie}".format(fie=field[1]))
cs=table[1][0].split("=")[1]
cl=table[1][1].split("=")[1]
            pre = 'drop table if exists {cs}.{cl};\n'.format(cs=self.aimDB, cl=table[0])
head='create table {cs}.{cl} ({sub} )'.format(cs=self.aimDB,cl=table[0],sub=sub[:-1])
tail='USING com.sequoiadb.spark OPTIONS(host \'{host}\',collectionspace \'{cs}\',collection \'{cl}\',ignoreduplicatekey \'true\');'.format(host=self.spark_host,cs=cs,cl=cl)
path=os.path.join(folder,table[0])
with open (path,'w+') as f:
f.write(pre)
f.write(head)
f.write(tail)
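    # Sketch of the DDL file exe() writes for a hypothetical foreign table
    # "orders" whose ftoptions name collection space "db" and collection
    # "orders", with aim_db_name = sparkdb (the create/USING parts end up
    # on a single line):
    #
    #     drop table if exists sparkdb.orders;
    #     create table sparkdb.orders (id integer,name string )USING com.sequoiadb.spark OPTIONS(host '...',collectionspace 'db',collection 'orders',ignoreduplicatekey 'true');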
def logger(self, log_level, message):
if log_level == self.level["error"]:
self.log.error(message)
elif log_level == self.level["warning"]:
self.log.warn(message)
elif log_level == self.level["info"]:
self.log.info(message)
elif log_level == self.level["debug"]:
self.log.debug(message)
def init_log(log_config_file):
try:
# Get the log file path from the log configuration file, and create the directory if it dose not exist.
config_parser = ConfigParser.ConfigParser()
files = config_parser.read(log_config_file)
if len(files) != 1:
print("Error: Read log configuration file failed")
return None
log_file = config_parser.get("handler_rotatingFileHandler", "args").split('\'')[1]
curr_path = os.path.abspath(os.path.dirname(log_config_file))
log_file_full_path = os.path.join(curr_path, log_file)
log_file_parent_dir = os.path.abspath(os.path.join(log_file_full_path, ".."))
if not os.path.exists(log_file_parent_dir):
os.makedirs(log_file_parent_dir)
logging.config.fileConfig(log_config_file)
log = logging.getLogger("ddlLogger")
return log
except BaseException as e:
print("Error: Initialize logging failed. Error number: " + ". Message: " + e.message)
return None
def run_task(log):
sparkDDLSync = SparkDDLSync(log)
    scheduler = sched.scheduler(time.time, time.sleep)  # timer used to pace the sync runs
while True:
scheduler.enter(sparkDDLSync.interval_time, 1, sparkDDLSync.run_check_task, ())
scheduler.run()
break
def main():
current_file_path = os.path.split(os.path.realpath(__file__))[0]
pid_file = os.path.join(current_file_path, "APP_ID")
if os.path.exists(pid_file):
with open(pid_file, "r") as f:
pid = str(f.readline())
if os.path.exists("/proc/{pid}".format(pid=pid)):
with open("/proc/{pid}/cmdline".format(pid=pid), "r") as process:
process_info = process.readline()
if process_info.find(sys.argv[0]) != -1:
return
with open(pid_file, "w") as f:
pid = str(os.getpid())
f.write(pid)
log_config_file= os.path.join(current_file_path, "syn.log.config")
log = init_log(log_config_file)
if log is None:
print("Initialize logging failed. Exit...")
return 1
run_task(log)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9893462377c8a546220e77bccda1fa2062b2e625 | 37b906301a5a63575ddca4f76c6218039cb1ee64 | /core/receivers.py | 83b75a215d442fc31775c161bd9ca81ea701f609 | [] | no_license | oseniasjunior/order-system | 83c19a97f36de14bbb822c6de5db96e1d065ca11 | 4d398d5ae51bb146c4a8d13eaf13710516b2d8a5 | refs/heads/master | 2022-12-28T05:14:36.755278 | 2020-10-16T00:46:16 | 2020-10-16T00:46:16 | 302,489,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from core import models, actions, tasks
@receiver(post_save, sender=models.Product, dispatch_uid="update_detail_decription_by_product", weak=False)
def update_detail_decription_by_product(**kwargs):
tasks.populate_test.apply_async([], countdown=1)
@receiver(pre_save, sender=models.Product, dispatch_uid="pre_save_product", weak=False)
def pre_save_product(**kwargs):
instance = kwargs.get('instance')
actions.ProductActions.update_detail_decription(product=instance)
actions.ProductActions.calculate_sale_price(product=instance)
| [
"[email protected]"
] | |
ed1c2824292e2442eaf2e0e8dbf0274376b7629e | 8bc13cf88d47d5b3c65f215f3f138f82dc7b86b9 | /auth_path/models.py | ee76cdc16a4171052339448e6f9c0ee673e8ef97 | [] | no_license | xal9wiii4ik/store | f941673dbff1b8a333d0df36dc04b51775df54c9 | d8b7fbae3f6232e956859abb078c236986a0cc3a | refs/heads/master | 2023-04-10T09:14:36.777575 | 2021-04-08T21:52:40 | 2021-04-08T21:52:40 | 323,389,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
class Uid(models.Model):
"""Model uid for future verification of user"""
uid = models.CharField(max_length=25, verbose_name='Юид')
user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='Владелец юида')
def save(self, *args, **kwargs):
self.uid = urlsafe_base64_encode(force_bytes(self.user.id))
super().save(*args, **kwargs)
def __str__(self):
return f'{self.uid}, {self.user.username}'
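# Reverse-lookup sketch (not part of the model): the stored uid is just the
# user's primary key, base64-encoded, so it can be decoded with Django's
# helper; uid_obj below is a hypothetical Uid instance.
#
#     from django.utils.http import urlsafe_base64_decode
#     user_pk = urlsafe_base64_decode(uid_obj.uid).decode()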
| [
"[email protected]"
] | |
0499b11e79ad0ae0ace80eb279ded4f3c66e0293 | 32fd2ea54af5f802f0568de168254b070a9f9bef | /tests/test_xmp.py | 3f6a364914a02057509833e3c3bc6e2f6b7de103 | [] | no_license | gaqzi/py-xmp-tool | 923b6ade266880f0d64734bf83d80db37f4409fb | df69d5a3527fddb4e1c2d271974227ba81f1a7b3 | refs/heads/master | 2021-01-01T16:00:26.248178 | 2015-02-19T05:47:36 | 2015-02-19T05:50:52 | 19,036,805 | 4 | 1 | null | 2018-03-05T00:55:54 | 2014-04-22T16:23:26 | Python | UTF-8 | Python | false | false | 2,429 | py | import os
import tempfile
from shutil import copyfile
import pytest
import xmp
FILES_FOLDER = os.path.join(os.path.dirname(__file__), 'files')
BUBBA_JPG = os.path.join(FILES_FOLDER, 'bubba.jpg')
HELLO_TXT = os.path.join(FILES_FOLDER, 'hello.txt')
@pytest.fixture
def clean_file(request):
filename = tempfile.mkstemp()[1]
copyfile(BUBBA_JPG, filename)
request.addfinalizer(lambda: os.unlink(filename))
return filename
@pytest.fixture
def clean_file_text(request):
filename = tempfile.mkstemp()[1]
copyfile(HELLO_TXT, filename)
def remove_files():
os.unlink(filename)
try:
os.unlink('{0}.xmp'.format(filename))
except OSError:
pass
request.addfinalizer(remove_files)
return filename
def test_write_physical_medium(clean_file):
assert xmp.read_xmp(clean_file, 'PhysicalMedium') is False
assert xmp.write_xmp(clean_file, 'PhysicalMedium', 'lto01')
assert xmp.read_xmp(clean_file, 'PhysicalMedium') == 'lto01'
def test_write_physical_medium_to_text_file_should_fail(clean_file_text):
assert xmp.read_xmp(clean_file_text, 'PhysicalMedium') is False
assert xmp.write_xmp(clean_file_text, 'PhysicalMedium', 'lto01') is False
def test_write_physical_medium_to_text_with_sidecar(clean_file_text):
assert xmp.read_xmp(clean_file_text, 'PhysicalMedium') is False
sidecar_file = xmp.write_xmp(clean_file_text, 'PhysicalMedium', 'lto01',
sidecar=True)
assert sidecar_file == '{0}.xmp'.format(clean_file_text)
assert os.path.exists(sidecar_file)
assert xmp.read_xmp(clean_file_text, 'PhysicalMedium') == 'lto01'
assert xmp.read_xmp(clean_file_text, 'Test') is False
def test_read_back_entire_xmp_data_from_sidecar(clean_file_text):
xmp.write_xmp(clean_file_text, 'PhysicalMedium', 'lto01', sidecar=True)
assert 'lto01' in xmp.read_xmp(clean_file_text)
def test_read_back_entire_xmp_data(clean_file):
xmp.write_xmp(clean_file, 'PhysicalMedium', 'lto01')
assert 'lto01' in xmp.read_xmp(clean_file)
def test_write_to_sidecar_file_again(clean_file_text):
xmp.write_xmp(clean_file_text, 'PhysicalMedium', 'lto01', sidecar=True)
xmp.write_xmp(clean_file_text, 'format', 'text/plain', sidecar=True)
assert xmp.read_xmp(clean_file_text, 'PhysicalMedium') == 'lto01'
assert xmp.read_xmp(clean_file_text, 'format') == 'text/plain'
| [
"[email protected]"
] | |
55c7b7e568f950238f82b23b7c2310569f498664 | 5609efe94f138a820e7478a2ee9e49164241621a | /article/models.py | 6611404a5db2bf6afe54f517ab8a25e4b17f8ba4 | [] | no_license | hynn911/tony_blog | 546a7f4943a927860f43dc0dc6011429b846a63d | 8b26e151f767cc83a91ac7ea9279ae29d93cc1d4 | refs/heads/master | 2021-09-10T10:07:37.880664 | 2018-03-24T11:46:55 | 2018-03-24T11:46:55 | 120,170,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | from django.db import models
from django.forms import ModelForm
# Create your models here.
class HssUser(models.Model):
msisdn_no = models.BigIntegerField(db_column='MSISDN_NO', primary_key=True) # Field name made lowercase.
imsi_no = models.BigIntegerField(db_column='IMSI_NO', blank=True, null=True) # Field name made lowercase.
hss = models.CharField(db_column='HSS', max_length=64, blank=True, null=True) # Field name made lowercase.
city = models.CharField(db_column='City', max_length=128, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'HSS_USER'
def __unicode__(self):
        return u'%s' % self.msisdn_no
class HSS_NUM(models.Model):
    MSISDN_NO = models.DecimalField(max_digits=20, decimal_places=0, primary_key=True)  # MSISDN number
    IMSI_NO = models.DecimalField(max_digits=20, decimal_places=0)  # IMSI number
    HSS = models.CharField(max_length=50, blank=False)  # home HSS (strictly speaking, the home HLR)
    City = models.CharField(max_length=50, blank=True, null=True)  # home city
def __unicode__(self):
        return u'%s:%s' % (self.MSISDN_NO, self.HSS)
    class Meta:  # order by MSISDN number, ascending
ordering = ['MSISDN_NO']
class Article(models.Model):
    title = models.CharField(max_length = 100)  # title
    category = models.CharField(max_length = 100)  # category
    date_time = models.DateTimeField(auto_now_add = True)  # publish date
    content = models.TextField(blank = True,null = True)  # article content
    url = models.CharField(max_length = 100)  # article url
def __unicode__(self):
return self.title
    class Meta:  # order by time, descending
ordering = ['-date_time']
class CmdinfoHwhss(models.Model):
cmd = models.CharField(db_column='CMD', primary_key=True, max_length=64) # Field name made lowercase.
cmd_name = models.CharField(db_column='CMD_NAME', max_length=256) # Field name made lowercase.
param = models.CharField(db_column='PARAM', max_length=256) # Field name made lowercase.
param_name = models.CharField(db_column='PARAM_NAME', max_length=2048) # Field name made lowercase.
class Meta:
managed = False
db_table = 'CMDINFO_HWHSS'
def __unicode__(self):
return self.cmd_name
class NumForm(ModelForm):
class Meta:
model = HSS_NUM
# fields = ['MSISDN_NO', 'IMSI_NO', 'HSS']
fields = '__all__' | [
"[email protected]"
] | |
9ce278978b150bbbdbe42f9f29d5a065f081ab42 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/prime-big-734.py | ae9cb7e5ea058d3ccbe2d140c51960c31f68dea9 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | # Get the n-th prime starting from 2
def get_prime(n:int) -> int:
candidate:int = 2
found:int = 0
while True:
if is_prime(candidate):
found = found + 1
if found == n:
return candidate
candidate = candidate + 1
return 0 # Never happens
def is_prime(x:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime2(x:int, x2:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
# Input parameter
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch
while i <= n:
print(get_prime(i))
    i = i + 1
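# With n = 15, the loop above prints the first 15 primes:
# 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47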
| [
"[email protected]"
] | |
70064ac4483b4656d78c4854940aa8a1d6e54c2f | 068bf97bc4c2a7c95ffe281e480d4e168435837b | /base/3.python循环while/08_print函数的结尾.py | 63b5682d74c8327bfeba144ae4adc170a6fd0f70 | [] | no_license | EricLeeN1/pythonStudy | 6ab28833f4f25340ea5ddfc9e2dd4fbe34630567 | e8bc47214711e43e6d5273843937b73e27ad1acf | refs/heads/master | 2020-09-30T17:50:18.448680 | 2020-01-18T03:47:13 | 2020-01-18T03:47:13 | 227,340,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | '''
@Author: your name
@Date: 2020-01-09 10:34:45
@LastEditTime : 2020-01-09 10:35:56
@LastEditors : Please set LastEditors
@Description: In User Settings Edit
@FilePath: \pythonStudy\base\3.python循环while\08_print函数的结尾.py
'''
# - By default, the print function appends a newline after the content it outputs
# - To suppress the trailing newline, add , end="" after the content passed to print
# - The string between the quotes of end specifies what to display after the printed content
i = 0
while i < 5:
i += 1
print('*' * i, end="/")
| [
"[email protected]"
] | |
03fcd43a145d21132262958bba4ac98d27fdfe04 | ff9be20d8ec3eb0e4a35d13c1b95b25d81221b75 | /orthos/graphicsItems/linked_view_box/infinite_grid_lines.py | c7c6fb047e73252dd617fdece8bc82b22559309e | [] | no_license | DerThorsten/orthos | 340ec785c053f8f3423117e6c1f7aee340fb3d77 | 529454df365ccca93162e2cc2ee2cbf1c3688e42 | refs/heads/master | 2021-01-17T06:16:12.947418 | 2016-07-21T11:45:47 | 2016-07-21T11:45:47 | 44,521,841 | 0 | 2 | null | 2015-11-27T15:07:36 | 2015-10-19T08:43:37 | Python | UTF-8 | Python | false | false | 2,077 | py | import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy
#from ..tiling_image_item import *
#from tiling import VisibleBlocks
def make3dSlicing(begin,end):
slicing = []
for b,e in zip(begin, end):
slicing.append(slice(b,e))
return slicing
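# Usage sketch: make3dSlicing((0, 0, 0), (2, 3, 4)) returns
# [slice(0, 2), slice(0, 3), slice(0, 4)] for numpy-style indexing.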
class InfiniteGridLines(pg.GraphicsObject):
def __init__(self, viewBox):
pg.GraphicsObject.__init__(self)
self.viewBox = viewBox
self.mlBlocking = self.viewBox.navigator.mlBlocking
        self.initialize()
    def initialize(self):
self.shape2d = self.mlBlocking.shape2d(self.viewBox.scrollAxis)
self.blockColors = numpy.zeros(shape=(self.mlBlocking.nBlockings, 3))
fLen = float(self.mlBlocking.nBlockings)
for bi in range(self.mlBlocking.nBlockings):
r = 50.0
b = 20 + (fLen-bi)/fLen * 180.0
g = 50.0
self.blockColors[bi, :] = r,g,b
def boundingRect(self):
return QtCore.QRectF(0,0,self.shape2d[0],self.shape2d[1])
def paint(self, p, *args):
vrx = self.viewBox.state['viewRange'][0]
vry = self.viewBox.state['viewRange'][1]
minX = max(int(vrx[0]),0)
minY = max(int(vry[0]),0)
maxX = min(int(vrx[1]),self.shape2d[0])
maxY = min(int(vry[1]),self.shape2d[1])
mlB = self.mlBlocking
for blockingIndex in range(mlB.nBlockings):
w = float(blockingIndex+1)*2.0
c = self.blockColors[blockingIndex,:]
p.setPen(pg.mkPen(color=c,width=w ))
bs2d = mlB.blockShape2d(blockingIndex, self.viewBox.scrollAxis)
# draw horizonal lines
minBlockC = minX/bs2d[0],minY/bs2d[1]
maxBlockC = maxX/bs2d[0] + 2,maxY/bs2d[1] +1
for bcx in range(minBlockC[0],maxBlockC[0]):
x = min(bcx*bs2d[0],self.shape2d[0])
p.drawLine(x,minY,x,maxY )
for bcy in range(minBlockC[1],maxBlockC[1]):
y = min(bcy*bs2d[1],self.shape2d[1])
p.drawLine(minX,y,maxX,y )
| [
"[email protected]"
] | |
f8015ddfe4a45df4802411afbc079c4499655f43 | 7b54c0e315fa38f604d767d9559af54889b38bd6 | /open_wechat/settings.py | 032439f75357884dfebe6db0284cbf968e304e11 | [] | no_license | dongtianqi1125/open-wechat-grant | 790e7076e5f186e8ee340a5a9d8aa0eb5d6380dc | 07126f1ea6a176959dab962ebfc0127da85a9bf8 | refs/heads/master | 2022-04-26T18:38:40.219360 | 2020-04-27T08:45:07 | 2020-04-27T08:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,337 | py | """
Django settings for open_wechat project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from config import DEBUG, ALLOWED_HOSTS
from config import LOG_NAME, LOG_PATH, LOG_MAX_BYTES, LOG_BACKUP_COUNT, LOG_LEV
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i*-rhmnnsc&6z#s)ola08l)(f=xsx*&e!%9!yts1=q@%ip54ui'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'portal',
'wechat',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'open_wechat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'open_wechat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s|%(levelname)s|%(message)s',
},
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(server_time)s] %(message)s',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'formatter': 'default',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_PATH,
'maxBytes': LOG_MAX_BYTES,
'backupCount': LOG_BACKUP_COUNT,
},
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
},
'loggers': {
LOG_NAME: {
'handlers': ['default'],
'level': LOG_LEV,
'propagate': False,
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
}
},
}
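# Retrieval sketch: the rotating file handler above is bound to the logger
# named LOG_NAME (imported from config at the top of this file), so
# application code would log through it with, e.g.:
#
#     import logging
#     logger = logging.getLogger(LOG_NAME)
#     logger.info('goes to the rotating file handler')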
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_IMG_PATH = os.path.join(BASE_DIR, 'portal/static/portal/img')
| [
"[email protected]"
] | |
1981fa46e690336fe98322561d70fff3b3b90049 | e4a0a50471c38aea60f4f4dc99b38ad9020250e7 | /Transformer_TextClassification/utils.py | 28f427e3410bee8a2199e2ed4ce3df1a5fbada0a | [] | no_license | Macielyoung/TextClassification | 3041d7bb6161abb78ed1aab91af1a930bbbae9b0 | ee53558f252cfbf399816ed5d5012e35f5f2f9f3 | refs/heads/master | 2020-06-29T22:12:06.406940 | 2020-04-18T07:21:39 | 2020-04-18T07:21:39 | 200,638,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | # coding:utf-8
import torch
from torch import nn
from torch.autograd import Variable
import copy
import math
def clones(module, N):
    # produce N identical copies of an encoder sub-layer
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Embedding(nn.Module):
    # token embedding layer, scaled by sqrt(d_model)
def __init__(self, d_model, vocab):
super(Embedding, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
        return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
    # compute positional encodings with the sine/cosine formulas:
# PE(pos, 2i) = sin(pos/10000^(2i/d_model))
# PE(pos, 2i+1) = cos(pos/10000^(2i/d_model))
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
        # div_term = e^(2i * -log(10000) / d_model), where i is the dimension index
div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(torch.as_tensor(position.numpy() * div_term.unsqueeze(0).numpy()))
pe[:, 1::2] = torch.cos(torch.as_tensor(position.numpy() * div_term.unsqueeze(0).numpy()))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
return self.dropout(x) | [
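# Usage sketch (dimensions are illustrative assumptions): token ids go
# through the scaled embedding, then the fixed sinusoidal position signal
# is added.
#
#     emb = Embedding(d_model=512, vocab=10000)
#     pos = PositionalEncoding(d_model=512, dropout=0.1)
#     tokens = torch.randint(0, 10000, (2, 20))  # (batch, seq_len)
#     out = pos(emb(tokens))                     # (2, 20, 512)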
"[email protected]"
] | |
1ae5603381bc89456335cb745e6e62f3ba9fb909 | d78f98494135212f626b5c13dae35cf1a7195436 | /AutomatedMails/AutomatedMails/urls.py | 3f5f550580d596be28d1203c361684d9e62098ff | [] | no_license | JasbirCodeSpace/Django-Mini-Projects | 0d4cd1981d356a7b737be25f5bc69ab70863292a | 87634eeb1e15540b88bcd3e5d6f788cdfbe0c797 | refs/heads/master | 2023-07-30T17:44:21.500000 | 2020-06-07T07:54:51 | 2020-06-07T07:54:51 | 257,206,866 | 0 | 0 | null | 2021-09-22T18:55:32 | 2020-04-20T07:40:00 | CSS | UTF-8 | Python | false | false | 1,041 | py | """AutomatedMails URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('',include('SimpleMail.urls')),
path('SendMail',include('SimpleMail.urls')),
path('admin/', admin.site.urls),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
b82bb341708cdea6fa41b4c567c2b87179a7e0e7 | 382f4acfd565be4aedb07c694e8daa489ff3e70a | /eveutil1/bin/OLD/fwcorps | 4d476f84186e8d5ceb9b7bff45e8e2e8863a33ab | [] | no_license | electusmatari/legacy | 913c5a9f68074d1fa793b0e3ff76fd3f9c3f481e | 9266e955398a0c8279b82b8347a85d9186a455da | refs/heads/master | 2021-01-22T05:24:13.935562 | 2013-09-29T11:44:19 | 2013-09-29T11:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | #!/usr/bin/env python
import urllib
import os
import re
from lib import characters
URLS = []
#"http://www.amarr-empire.net/killboard/?a=alliance_detail&all_id=517&view=corp_kills",
# "http://www.minmatar-militia.org/kb/?a=faction_detail&fac_id=2&view=corp_kills"]
line_rx = re.compile(r'^([^(]*)(?: \(.*\))?$')
def main():
known = []
for line in file(os.path.expanduser("~/.fwcorps_known")):
line = line.rstrip("\n")
corp = line_rx.match(line).group(1)
known.append(corp)
api_corps = get_api()
kb_corps = []
for url in URLS:
kb_corps.extend(get_kb(url))
new = []
for corp in kb_corps:
if corp not in known:
new.append((corp, "Amarr Militia"))
known.append(corp)
for corp in api_corps:
if corp not in known:
new.append((corp, None))
known.append(corp)
new.sort(lambda a, b: cmp(a[0].lower(), b[0].lower()))
for (corp, affiliation) in new:
if affiliation is not None:
print "%s (%s)" % (corp, affiliation)
else:
print corp
rx = re.compile(r'<a href="\?a=corp_detail&crp_id=[0-9]+">(.*?)</a>')
def get_kb(url):
data = urllib.urlopen(url).read()
return unique(rx.findall(data))
def get_api():
api = characters.api()
fwts = api.eve.FacWarTopStats()
return unique([x.corporationName for x in fwts.corporations.KillsYesterday] +
[x.corporationName for x in fwts.corporations.KillsLastWeek] +
[x.corporationName for x in fwts.corporations.KillsTotal] +
[x.corporationName for x in fwts.corporations.VictoryPointsYesterday] +
[x.corporationName for x in fwts.corporations.VictoryPointsLastWeek] +
[x.corporationName for x in fwts.corporations.VictoryPointsTotal])
def unique(list):
result = []
for element in list:
if element not in result:
result.append(element)
return result
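# Note: unlike set(), unique() preserves first-seen order of the input.
# Sketch: unique(['a', 'b', 'a']) -> ['a', 'b']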
main()
| [
"[email protected]"
] | ||
4db6bd1dbd44a581974b10488b2d28062ae27e45 | 60634610b4a96f28afc5ce20c7389704e60957a9 | /Python/django_intro/integration_project/apps/words/views.py | 0f0fe4a555513d8b378d6ad8953e1bc0ea230512 | [] | no_license | r-knight/DojoAssignments | 31f313d8f566080195fb8024fb2aed87599ff9e7 | 81c5231a7f1d58a975cd3bda0a1250f4155d24ac | refs/heads/master | 2020-03-10T17:00:34.372821 | 2018-06-30T22:12:33 | 2018-06-30T22:12:33 | 129,488,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | from django.shortcuts import render, redirect
import time
def index(request):
return render(request, 'words/index.html')
def process(request):
if request.method == "POST":
        size = request.POST.get('checkbox_big', False)
        if size is False:
size = 'normal'
curr_time = time.gmtime()
if request.POST['new_word'] != "":
request.session['word'] = request.POST['new_word']
print("Got word! Word: ", request.session['word'])
else:
print("No word detected")
return redirect('/')
request.session['color'] = request.POST['color']
if 'words' in request.session:
request.session['words'].append({'word': request.session['word'], 'color': request.session['color'], 'size': size, 'timestamp': time.strftime("%c", curr_time)})
else:
request.session['words'] = [{'word': request.session['word'], 'color': request.session['color'], 'size': size, 'timestamp': time.strftime("%c", curr_time)}]
return redirect('/new')
else:
return redirect('/')
def new(request):
return redirect('/')
def reset(request):
request.session.clear()
return redirect('/') | [
"[email protected]"
] | |
8feff5fd04db2a6ebab56a8c444af3bf03ee13ff | 07f8d2b32aaf96f7a0fbb23a6146bea49ecf2de2 | /old_version/nombre_rut_firma.py | d7926635cd868f93973c5e7b1573026f2ef9a94b | [] | no_license | ROBLIENAN/rutificador_nombrerutfirma | 6a7cc2904c001111e6f8692f10b559ede356d37c | f861276707ad19dd75438bf67a9190bc3fd049ed | refs/heads/master | 2023-04-22T02:09:26.797150 | 2021-05-14T16:34:24 | 2021-05-14T16:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | #!/usr/bin/env python3
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import csv
import time, sys
import warnings
import os
if not sys.warnoptions:
warnings.simplefilter("ignore")
pwd = os.getcwd()
print('')
print('Rutificador automático para nombrerutyfirma.cl. Creado por deivid.')
print('******************************************************************')
print('')
print('Para finalizar el proceso de manera anticipada, oprima Ctrl + C ...')
print('')
driver = webdriver.PhantomJS()
driver.get('https://nombrerutyfirma.cl')
driver.implicitly_wait(10)
filas = []
try:
ruts_out = open('ruts_out.csv', 'w')
with open('ruts.csv', newline='', encoding='utf-8-sig') as f:
reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
for row in reader:
driver.find_elements_by_xpath("//*[contains(text(),'RUT')]")[0].click()
busca_rut = driver.find_elements_by_xpath("//input[@placeholder='Buscar por RUT']")[0]
busca_rut.send_keys(row[0])
print('Procesando rut: ' + row[0])
boton = driver.find_elements_by_xpath("//div[@class='tab-pane active']//input[@type='submit']")[0]
boton.click()
            try:
                nombre = driver.find_element_by_xpath("//tr[@tabindex='1']/td[1]").text
                rut = driver.find_element_by_xpath("//tr[@tabindex='1']/td[2]").text
                sexo = driver.find_element_by_xpath("//tr[@tabindex='1']/td[3]").text
                direccion = driver.find_element_by_xpath("//tr[@tabindex='1']/td[4]").text
                comuna = driver.find_element_by_xpath("//tr[@tabindex='1']/td[5]").text
                linea = nombre + ";" + rut + ";" + sexo + ";" + direccion + ";" + comuna
                print(linea)
                print('')
                ruts_out.write(linea + '\n')
                filas.append(linea)
            except NoSuchElementException:
                # Skip this rut entirely so a stale (or undefined) `linea` from a
                # previous iteration is never written to the output file.
                print('El rut ' + row[0] + ' no existe en la BD, saltando ...')
            sgte = driver.find_element_by_xpath("//*[contains(text(),'Buscar otro')]")
            sgte.click()
except IOError as e:
print ("No se ha encontrado ruts.csv (archivo de entrada).")
print ("Carpeta: " + pwd)
print ("")
except KeyboardInterrupt:
print ("Ctrl + C detectado, interrumpiendo ejecución...")
print ("")
ruts_out.close()
| [
"[email protected]"
] | |
a8b946fea7fc3a21cb19090832a8fb387d4ddb6a | 5247dbbe6555c73358775e9b82c2bd4154a418fa | /user/api/views.py | a9d6d97ddba9049a5b185b7f4cdf25428086ed7e | [] | no_license | pimka/django-microservices-example | f6fc260768afd1197b89b0aff0b956632459ad71 | 18a6dcd38480bca917f5c2898dea10da257026c4 | refs/heads/master | 2021-06-25T11:54:23.759781 | 2019-12-22T10:36:50 | 2019-12-22T10:36:50 | 216,499,308 | 0 | 0 | null | 2021-03-19T22:47:33 | 2019-10-21T07:02:00 | Python | UTF-8 | Python | false | false | 2,202 | py | from django.http import Http404
from rest_framework import status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from api.models import User, CustomToken
from api.serializers import UserSerializer, AppSerializer
from api.auth import TokenAuth
class UserOper(APIView):
authentication_classes = [TokenAuth, ]
paginator = LimitOffsetPagination()
paginator.default_limit = 100
def get(self, request):
users = User.objects.all()
result = self.paginator.paginate_queryset(users, request)
serializer = UserSerializer(result, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserAdvOper(APIView):
authentication_classes = [TokenAuth, ]
def get(self, request, user_id):
try:
user = User.objects.get(owner_uuid=user_id)
serializer = UserSerializer(user)
return Response(serializer.data)
except User.DoesNotExist:
raise Http404
class UserExist(APIView):
permissions = [IsAuthenticated, ]
authentication_classes = [TokenAuth, ]
def get(self, request, *args, **kwargs):
if request.user is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = UserSerializer(request.user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class ServiceAuth(ObtainAuthToken):
serializer_class = AppSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data, context = {'request': request})
serializer.is_valid(raise_exception=True)
token = CustomToken.objects.create()
return Response({ 'token': token.token }, status.HTTP_200_OK) | [
"[email protected]"
] | |
158b25e7b92fec4e90254c369ae5634e94b55675 | 561378ab9a6eb737c21144f085878eb825f64729 | /guest_list.py | e59b8253018d762c2310c664ce376ec5a1b41fec | [] | no_license | NotAKernel/python-tutorials | 75a88554ffdddfff636582f9e66b734f33404601 | ccdb37af7680cb465d73c0468d8667406f1c9f14 | refs/heads/master | 2022-05-01T19:51:35.465524 | 2022-03-22T19:51:01 | 2022-03-22T19:51:01 | 217,547,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | while True:
f_name = input("Enter your first name: ")
l_name = input("Enter your last name: ")
username = f"{f_name.title()} {l_name.title()}"
file_name = 'guest_list.txt'
    with open(file_name, 'a') as file_object:
file_object.write(f"{username}\n")
print("Next name") | [
"[email protected]"
] | |
b8f337665d6ff249d50cfbf245afee15ee82ae62 | 0544ff7fc0736d2f6dded68e5ae27f4e2ddf7a06 | /sdk/communication/azure-communication-chat/azure/communication/chat/_generated/_configuration.py | ffeba8865187656ea9fe006fd81dd0e4f9bf577c | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | dhanizael/azure-sdk-for-python | 160469181f80593b0dff9104a8eb0d5b781722a9 | 17a1d048a8be4c869c7b943b2aff5a0c8213652e | refs/heads/master | 2023-03-04T14:47:54.334635 | 2021-02-10T20:16:40 | 2021-02-10T20:16:40 | 337,845,515 | 0 | 0 | MIT | 2021-02-10T20:37:41 | 2021-02-10T20:30:09 | null | UTF-8 | Python | false | false | 2,533 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
VERSION = "unknown"
class AzureCommunicationChatServiceConfiguration(Configuration):
"""Configuration for AzureCommunicationChatService.
Note that all parameters used to create this instance are saved as instance
attributes.
:param endpoint: The endpoint of the Azure Communication resource.
:type endpoint: str
"""
def __init__(
self,
endpoint, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
super(AzureCommunicationChatServiceConfiguration, self).__init__(**kwargs)
self.endpoint = endpoint
self.api_version = "2020-11-01-preview3"
kwargs.setdefault('sdk_moniker', 'azurecommunicationchatservice/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
| [
"[email protected]"
] | |
d8714ee24f591c485cd9ec54687dc4e2c60e795e | 6ac2c27121d965babbb4bcbc7c479c26bf60bdf5 | /pymatex/node/BinaryOperator.py | a1ed0964642e5cd803a9f7cb8a373142fde706b7 | [
"MIT"
] | permissive | Gawaboumga/PyMatex | 5a2e18c3e17d3b76e814492f7e2ca63a57d720e9 | 3ccc0aa23211a064aa31a9b509b108cd606a4992 | refs/heads/master | 2020-03-28T01:40:32.341723 | 2018-12-20T13:49:12 | 2018-12-20T13:49:12 | 147,521,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | from pymatex.node.Node import Node
class BinaryOperator(Node):
def __init__(self, lhs, rhs, symbol, operator):
super().__init__()
self.lhs = lhs
self.rhs = rhs
self.symbol = symbol
self.operator = operator
def __eq__(self, other):
assert (self.lhs is not None and self.rhs is not None)
assert (other.lhs is not None and other.rhs is not None)
assert (isinstance(self.lhs, type(other.lhs)))
assert (isinstance(self.rhs, type(other.rhs)))
return self.lhs == other.lhs and self.rhs == other.rhs
def __str__(self):
assert (self.lhs is not None and self.rhs is not None)
return '({} {} {})'.format(self.lhs, self.symbol, self.rhs)
| [
"[email protected]"
] | |
5c50fc9485d29bdbbdc80e762cf0bf71aab040fc | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /isSameTree.py | 92b84e0e9e9ebf88b5ff80b0395446eb323b984e | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from typing import *
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
def same(a,b):
if a==None and b==None:
return True
if a==None or b==None:
return False
if a.val!=b.val:
return False
return same(a.left,b.left) and same(a.right,b.right)
return same(p,q)
| [
"[email protected]"
] | |
b59ef2972aee022007baf201ac8cd373a9d77aa0 | c5d1e08312ddfaac8bb3f558d09b5378e28f7add | /design_patterns/proxy_pattern/urer_proxy.py | 27b1d5ab375b0cd13f44b4f194b376e9c5de96d5 | [] | no_license | dimitriepirghie/UReR | 7d368752e6c21dccfe7734d3d58921c0992b4393 | eefe22cb60d5e7d8e259dcc7bf5d3bc1ce14ad18 | refs/heads/master | 2021-06-12T15:27:07.369326 | 2017-01-23T23:35:00 | 2017-01-23T23:35:00 | 71,646,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | from flask import Flask, request, abort
app = Flask(__name__)
class Dispatcher(object):
__service_identifier_proxy_mapping__ = {}
def __init__(self):
"""
Get all proxies
"""
import Proxies
self.__service_identifier_proxy_mapping__['facebook_identifier'] = Proxies.FacebookProxy()
self.__service_identifier_proxy_mapping__['linkedin_identifier'] = Proxies.LinkedInProxy()
self.__service_identifier_proxy_mapping__['foaf_identifier'] = Proxies.FOAFProxy()
def __get_proxy__(self, service_identifier):
"""
For a service_identifier return specific proxy
:param service_identifier: service_identifier
:return: AbstractProxy specific object
"""
return self.__service_identifier_proxy_mapping__[service_identifier]\
if service_identifier in self.__service_identifier_proxy_mapping__ else None
def dispatch(self, service_identifier, request=None):
specific_proxy = self.__get_proxy__(service_identifier)
if specific_proxy:
return specific_proxy.serve_request(request)
else:
abort(400)
def authenticate_and_authorize():
"""
Check if request if a request is valid
:return: boolean
"""
# TODO: Implement request validation method !
return True if request else False
@app.route('/service/<service_identifier>')
def service(service_identifier):
if not authenticate_and_authorize():
abort(401, "Unauthorized")
dispatcher = Dispatcher()
return dispatcher.dispatch(service_identifier, request)
@app.route('/')
def hello_client():
return 'Hello Client, here is how UReR API Works ! ' # Redirect
if __name__ == '__main__':
app.run()
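
# Usage sketch (hypothetical host/port; the identifiers come from
# Dispatcher.__service_identifier_proxy_mapping__ above):
#
#     GET http://127.0.0.1:5000/service/facebook_identifier
#
# dispatches the request to Proxies.FacebookProxy().serve_request(request).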
| [
"[email protected]"
] | |
ec2c3157e9435ae144c7542e34e4d008a82f24ee | 3a6f9f492a625c1b688baf5607bd9b0efae049ee | /GFS/FIS/DecisionSystem.py | d72c542075908c1a88100df578ab28eb3bcebbc8 | [] | no_license | HarderThenHarder/GeneticFuzzySystem | ad77866fa34bf584040460bde466d4867ec91ee0 | d625523e62fc6a41b2c257df255c067a5ae9c3eb | refs/heads/master | 2023-08-17T08:31:08.174303 | 2021-09-27T04:37:35 | 2021-09-27T04:37:35 | 320,836,303 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | class DecisionSystem(object):
def __init__(self, fuzzy_variable_list, rule_lib):
"""
FIS决策系统:由模糊变量集与模糊规则库组成
:param fuzzy_variable_list: 模糊变量集
:param rule_lib: 模糊规则库
"""
self.fuzzy_variable_list = fuzzy_variable_list
self.rule_lib = rule_lib
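
# Minimal usage sketch (hypothetical objects; the fuzzy variables and the rule
# library are assumed to come from the surrounding GFS package):
#
#     ds = DecisionSystem(fuzzy_variable_list=[speed_var, distance_var],
#                         rule_lib=rule_lib)
#
# The inference engine then reads ds.fuzzy_variable_list and ds.rule_lib.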
| [
"[email protected]"
] | |
4aeaa6069f610d5d0cfb69906fdfedaf7e462021 | 0421985d1af2c039e3ad1867b674adf3393dfbae | /basicFunctions.py | 01886de7ee6da99319df36caa30f8271ed49e21c | [] | no_license | mayankloyalka/Maths4330 | 854f7b6ac1822f0ec6651133402a18d3a3775f40 | fcb705ccb031c30a07595db2d958ebdfec283563 | refs/heads/master | 2020-04-22T12:25:42.321533 | 2019-05-09T04:15:21 | 2019-05-09T04:15:21 | 170,370,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,729 | py |
def vandermode(arg1):
    # What does the vandermode function do?
    # It builds a Vandermonde-style matrix: each entry of the input list is
    # raised to the powers [0, 1, 2, 3, 4]
    # Arguments:
    # arg1: a list of numbers entered by the user
result=[]
for i in range(5):
new_list=[]
for element in range(len(arg1)):
new_list.append(arg1[element]**i)
result.append(new_list)
    # Final result of the function:
    # a matrix represented as a list of column vectors
return result
def transpose(arg1):
    # What does the transpose function do?
    # It takes a matrix and turns its rows into columns
    # Arguments:
    # arg1: a matrix represented as a list of column vectors
    result=[]
    for i in range(len(arg1[0])):
        new_list=[]
        for element in range(len(arg1)):
            new_list.append(arg1[element][i])
        result.append(new_list)
    # Final result of the function:
    # the transposed matrix
    return result
def matrix_scalor(arg1,scalor):
    # What does the matrix_scalor function do?
    # It multiplies every entry of a vector by a scalar
    # Arguments:
    # arg1: a vector (list of numbers)
    # scalor: any complex number
    result=arg1[::]
    for index in range(len(arg1)):
        result[index]=scalor*arg1[index]
    # Final result of the function:
    # the scaled vector
    return result
def conjugate(arg1):
    # What does the conjugate function do?
    # It takes a complex number and flips the sign of its imaginary part
    # Arguments:
    # arg1: a complex number
    result=complex(arg1).conjugate()
    # Final result of the function:
    # the complex conjugate of the input
    return result
def conjugate_transpose(arg1):
    # What does the conjugate_transpose function do?
    # It transposes a matrix and takes the conjugate of every entry
    # Arguments:
    # arg1: a matrix represented as a list of column vectors
    result=transpose(arg1)
    for i in range(len(result)):
        for element in range(len(result[i])):
            result[i][element]=conjugate(result[i][element])
    # Final result of the function:
    # the conjugate transpose of the matrix
    return result
def normalisation(arg1):
    # What does the normalisation function do?
    # It computes the Euclidean norm of a vector: the square root of the sum
    # of the squared absolute values of its entries
    # Arguments:
    # arg1: a vector (list of numbers), e.g. one column of a matrix
    result=0
    for i in range(len(arg1)):
        result=result+(abs(arg1[i]))**2
    result=result**(1/2)
    # Final result of the function:
    # the vector norm
    return result
def gram_Schmitt(arg1):
    # What does the gram_Schmitt function do?
    # It performs (modified) Gram-Schmidt orthogonalisation on the columns of
    # a matrix to find Q and R such that matrix = Q*R
    # Arguments:
    # arg1: a matrix represented as a list of column vectors, for example the
    # output of the vandermode function
    n=len(arg1)
    V=[arg1[i][::] for i in range(n)]
    Q=[None]*n
    R=[[0]*n for _ in range(n)]
    for i in range(n):
        R[i][i]=normalisation(V[i])
        Q[i]=[element/R[i][i] for element in V[i]]
        for j in range(i+1,n):
            R[i][j]=sum(Q[i][k]*V[j][k] for k in range(len(V[j])))
            V[j]=[V[j][k]-R[i][j]*Q[i][k] for k in range(len(V[j]))]
    # Final result of the function:
    # a matrix Q with orthonormal columns and an upper triangular matrix R
    return Q,R
def backsub(R,y):
    # What does the backsub function do?
    # It solves R*a=y for a by back-substitution, where R is upper triangular
    # Arguments:
    # R: an upper triangular matrix
    # y: a vector that will be entered by the user
    n=len(y)
    result=y[::]
    for i in range(n-1,-1,-1):
        total=0
        for j in range(i+1,n):
            total=total+R[i][j]*result[j]
        result[i]=(result[i]-total)/(R[i][i])
    # Final result of the function:
    # the solution vector a
    return result

# Once Q, R and a data vector y are available, the coefficients follow from
# Beta = backsub(R, Q^T y); see the sketch at the end of this file.
def degree4(Beta,x):
    # What does the degree4 function do?
    # It evaluates the fitted degree-4 interpolating polynomial
    # Arguments:
    # Beta: the coefficients obtained from the backsub function
    # x: the point at which to evaluate the polynomial
return (Beta[0]+Beta[1]*x+Beta[2]*(x**2)+Beta[3]*(x**3)+Beta[4]*(x**4))
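
# Minimal end-to-end sketch (hypothetical sample data): fit a degree-4
# polynomial through five points via Gram-Schmidt QR and back-substitution.
if __name__ == "__main__":
    xs = [0, 1, 2, 3, 4]            # sample locations (assumed values)
    y = [1, 2, 5, 10, 17]           # sample values, here y = 1 + x**2
    A = vandermode(xs)              # columns are xs**0 .. xs**4
    Q, R = gram_Schmitt(A)
    # Q^T y, using the column vectors stored in Q
    Qt_y = [sum(Q[i][k] * y[k] for k in range(len(y))) for i in range(len(Q))]
    Beta = backsub(R, Qt_y)
    print(degree4(Beta, 2.5))       # evaluates close to 1 + 2.5**2 = 7.25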
| [
"[email protected]"
] | |
c98bd1e48e841a4636e99391915ab7d93eae1383 | b7639e383c9250b0de2e4713c55f1d366840d758 | /todo/views.py | 0015de031217bfa43d734ecf77d13c27b1fbdcac | [] | no_license | mohammad2745/awsome-todo | 6fe0263147917cc92a4e335cff3d7adb1580af64 | 571633ae39146df00ef4c40b272d00abfe4c415f | refs/heads/master | 2023-08-04T13:02:41.375094 | 2021-06-22T05:06:25 | 2021-06-22T05:06:25 | 277,222,183 | 0 | 0 | null | 2021-09-22T19:23:13 | 2020-07-05T03:10:35 | Python | UTF-8 | Python | false | false | 935 | py | from django.shortcuts import render, redirect
from django.views.decorators.http import require_POST
from .models import Todo
from .forms import TodoForm
# Create your views here.
def index(request):
todo_list = Todo.objects.order_by('id')
form = TodoForm()
context = {'todo_list': todo_list, 'form': form}
return render(request, 'todo/index.html', context)
@require_POST
def addTodo(request):
form = TodoForm(request.POST)
if form.is_valid():
new_todo = Todo(text=request.POST['text'])
new_todo.save()
return redirect('index')
def completeTodo(request, todo_id):
todo = Todo.objects.get(pk=todo_id)
todo.complete = True
todo.save()
return redirect('index')
def deleteCompleted(request):
Todo.objects.filter(complete__exact=True).delete()
return redirect('index')
def deleteAll(request):
Todo.objects.all().delete()
return redirect('index')
| [
"[email protected]"
] | |
8203b06fc0a707631c4c0f8c113869357cde1785 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/check_verify_codes_request.py | 4a523c962e6e5a6b6fd6bb917e72c51422a6eb24 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,852 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CheckVerifyCodesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_site': 'int',
'x_language': 'str',
'x_time_zone': 'str',
'body': 'VerifyVerifyCodeV2Req'
}
attribute_map = {
'x_site': 'X-Site',
'x_language': 'X-Language',
'x_time_zone': 'X-Time-Zone',
'body': 'body'
}
def __init__(self, x_site=None, x_language=None, x_time_zone=None, body=None):
"""CheckVerifyCodesRequest - a model defined in huaweicloud sdk"""
self._x_site = None
self._x_language = None
self._x_time_zone = None
self._body = None
self.discriminator = None
if x_site is not None:
self.x_site = x_site
if x_language is not None:
self.x_language = x_language
if x_time_zone is not None:
self.x_time_zone = x_time_zone
if body is not None:
self.body = body
@property
def x_site(self):
"""Gets the x_site of this CheckVerifyCodesRequest.
        Site to connect to: 0 (China site) or 1 (international site); defaults to 0 when omitted.
:return: The x_site of this CheckVerifyCodesRequest.
:rtype: int
"""
return self._x_site
@x_site.setter
def x_site(self, x_site):
"""Sets the x_site of this CheckVerifyCodesRequest.
        Site to connect to: 0 (China site) or 1 (international site); defaults to 0 when omitted.
:param x_site: The x_site of this CheckVerifyCodesRequest.
:type: int
"""
self._x_site = x_site
@property
def x_language(self):
"""Gets the x_language of this CheckVerifyCodesRequest.
        Locale, given as a common language tag such as zh-cn; defaults to zh-cn. Internationalized information, such as ticket type names, is displayed according to this locale.
:return: The x_language of this CheckVerifyCodesRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this CheckVerifyCodesRequest.
        Locale, given as a common language tag such as zh-cn; defaults to zh-cn. Internationalized information, such as ticket type names, is displayed according to this locale.
:param x_language: The x_language of this CheckVerifyCodesRequest.
:type: str
"""
self._x_language = x_language
@property
def x_time_zone(self):
"""Gets the x_time_zone of this CheckVerifyCodesRequest.
        Time zone, given as a common time zone string such as GMT+8; defaults to GMT+8. Time-related data is processed according to this time zone.
:return: The x_time_zone of this CheckVerifyCodesRequest.
:rtype: str
"""
return self._x_time_zone
@x_time_zone.setter
def x_time_zone(self, x_time_zone):
"""Sets the x_time_zone of this CheckVerifyCodesRequest.
        Time zone, given as a common time zone string such as GMT+8; defaults to GMT+8. Time-related data is processed according to this time zone.
:param x_time_zone: The x_time_zone of this CheckVerifyCodesRequest.
:type: str
"""
self._x_time_zone = x_time_zone
@property
def body(self):
"""Gets the body of this CheckVerifyCodesRequest.
:return: The body of this CheckVerifyCodesRequest.
:rtype: VerifyVerifyCodeV2Req
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CheckVerifyCodesRequest.
:param body: The body of this CheckVerifyCodesRequest.
:type: VerifyVerifyCodeV2Req
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CheckVerifyCodesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
f93b9c60190b4f7e57154c753e126e12e0b25105 | 70961aa4e7aab24e2c8d32a3b5e5f5c804c40144 | /id11.py | c7b57b7c24c9a8abbd5bfd1e1786518d70091f7e | [] | no_license | kxtan/euler | 5c98726a43d38bc7840c9835a746e34f8d923fe6 | 97b20cdbf699b808b94bfb3986de3f7f81f5f0cd | refs/heads/master | 2021-01-11T00:04:16.534649 | 2017-02-07T12:50:18 | 2017-02-07T12:50:18 | 70,759,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py | """
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally)
in the 20×20 grid?
"""
# pre-process number grid
my_file = open('id11numgrid', 'r')
num_grid = list(my_file)
for x in range(len(num_grid)):
num_grid[x] = num_grid[x].rstrip()
num_grid[x] = [int(i) for i in num_grid[x].split()]
# check up and down
max_product_up_down = 0
for x in range(len(num_grid) - 3):
    for y in range(len(num_grid)):
        product = num_grid[x][y] * num_grid[x+1][y] * num_grid[x+2][y] * num_grid[x+3][y]
        if product > max_product_up_down:
            max_product_up_down = product

# check left and right
max_product_left_right = 0
for x in range(len(num_grid)):
    for y in range(len(num_grid) - 3):
        product = num_grid[x][y] * num_grid[x][y+1] * num_grid[x][y+2] * num_grid[x][y+3]
        if product > max_product_left_right:
            max_product_left_right = product
max_product_diagonal_up_right = 0
for x in range(len(num_grid)):
for y in range(len(num_grid)):
if x < 17 and y > 2:
product = num_grid[x][y] * num_grid[x+1][y-1] * num_grid[x+2][y-2] * num_grid[x+3][y-3]
if product > max_product_diagonal_up_right:
max_product_diagonal_up_right = product
max_product_diagonal_down_right = 0
for x in range(len(num_grid)):
for y in range(len(num_grid)):
if x < 17 and y < 17:
product = num_grid[x][y] * num_grid[x+1][y+1] * num_grid[x+2][y+2] * num_grid[x+3][y+3]
if product > max_product_diagonal_down_right:
max_product_diagonal_down_right = product
print(max(max_product_up_down, max_product_left_right, max_product_diagonal_down_right, max_product_diagonal_up_right))
| [
"[email protected]"
] | |
e29437a2fb5d9ded927b7bb4e7a848eb1239d407 | da03465c8a2dbd7b1415f1703f824bb4f582408a | /nova/policy.py | 2ebe6a69c2e40fd4a68168dd74dc7ef32b8f6c39 | [
"Apache-2.0"
] | permissive | rbenali/nova | 4d4622189aa0b54aec90e07bc71919ca9f99bbe6 | fb4a138bc5791d23e6f302b78da13ea99a511f26 | refs/heads/master | 2021-01-16T20:50:13.224776 | 2012-01-11T16:50:50 | 2012-01-11T16:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,427 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Nova"""
from nova import exception
from nova import flags
from nova import utils
from nova.common import policy
FLAGS = flags.FLAGS
flags.DEFINE_string('policy_file', 'policy.json',
_('JSON file representing policy'))
_POLICY_PATH = None
_POLICY_CACHE = {}
def reset():
global _POLICY_PATH
global _POLICY_CACHE
_POLICY_PATH = None
_POLICY_CACHE = {}
policy.reset()
def init():
global _POLICY_PATH
global _POLICY_CACHE
if not _POLICY_PATH:
_POLICY_PATH = utils.find_config(FLAGS.policy_file)
data = utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
reload_func=_set_brain)
def _set_brain(data):
policy.set_brain(policy.HttpBrain.load_json(data))
def enforce(context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: nova context
:param action: string representing the action to be checked
this should be colon separated for clarity.
i.e. compute:create_instance
compute:attach_volume
volume:attach_volume
:param object: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. {'project_id': context.project_id}
:raises: `nova.exception.PolicyNotAllowed` if verification fails.
"""
init()
match_list = ('rule:%s' % action,)
target_dict = target
credentials_dict = context.to_dict()
try:
policy.enforce(match_list, target_dict, credentials_dict)
except policy.NotAllowed:
raise exception.PolicyNotAllowed(action=action)
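
# Usage sketch (hypothetical action/target values, following the docstring above):
#
#     enforce(context, 'compute:attach_volume',
#             {'project_id': context.project_id})
#
# raises nova.exception.PolicyNotAllowed when the matched rule denies the caller.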
| [
"[email protected]"
] | |
617197ff5520b1ea151f45448772ca8e3edcd9d5 | 3c0687e6ee69b079d2cdced80494681fef0512e5 | /file handling_Corey Schafer/reading and wrting.py | e80ca9a152436e6d458a4387629797ccf333313f | [] | no_license | wingedrasengan927/Python-tutorials | b51f21f13b2e4f7ff924eb702e236f320de70d85 | 7af09884b4c4bec3a234bcdb8ed4f80aa8c2210a | refs/heads/master | 2021-07-09T01:02:46.431532 | 2020-08-09T16:42:48 | 2020-08-09T16:42:48 | 180,403,864 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py |
# first let's clear the file
f = open('dummy3.txt', 'r+')
f.truncate(0)
f.close()
# let us read the contents of dummy.txt and copy it into dummy3.txt
with open('dummy.txt', 'r') as rf:
with open('dummy3.txt', 'w') as wf:
for line in rf:
wf.write(line)
| [
"[email protected]"
] | |
baa962e00a091243b62064081042f24aec064f21 | ade74e6b497703ef01fe267b665261e90e335a18 | /studyvisapp/utils.py | c34695abd775ad8f16791ec085d2291043680e61 | [] | no_license | TMdiesel/study-vis-app | 0492ff8bdda5f1c2596cca0ac3642b6c81691f9a | a0440a8ffd21d5848faf8eeabfed4c7be38be90f | refs/heads/main | 2023-07-08T04:32:48.564623 | 2021-08-15T12:08:53 | 2021-08-15T12:08:53 | 395,558,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | import os
import pathlib
from dotenv import load_dotenv
from omegaconf import OmegaConf, dictconfig
from django.core.management.utils import get_random_secret_key
def load_config() -> dictconfig.DictConfig:
load_dotenv()
config_path = os.getenv(
"CONFIG_PATH", pathlib.Path(__file__).parent.parent / "config.yaml"
)
config = OmegaConf.load(config_path)
return config
def create_item_choice():
config = load_config()
    items = config.item
    choice = tuple([tuple([item, item]) for item in items.keys()])
return choice
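
# Example (hypothetical config.yaml with an `item` mapping):
#
#     item:
#       math: {...}
#       english: {...}
#
# create_item_choice() would then return (('math', 'math'), ('english', 'english')),
# the (value, label) pairs Django expects for a `choices` option.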
def create_key():
secret_key = get_random_secret_key()
secret_key = "SECRET_KEY = '{0}'".format(secret_key)
print(secret_key)
return secret_key
if __name__ == "__main__":
create_key()
| [
"[email protected]"
] | |
6f23f4bc5f9f62fe62decd8935d9f299c4c0d995 | 032d4ed8cfdbc10b6b22186a28d527e2ad0161c4 | /CourseraCourse ProgrammingforEverybody/Code/Wk9q4.py | 06cc8dc8d1d3fc27f1f128f279761b76f6bd62d2 | [] | no_license | jish6303/Python | fa83a8d95707982426a484330c82c167577404a5 | 841976b71b0028c8cfaa57aaea0bc472ce3ea55b | refs/heads/master | 2021-01-19T18:08:33.263735 | 2014-12-09T00:00:18 | 2014-12-09T00:00:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #Write a program to read through the mbox-short.txt and figure out who has the
#sent the greatest number of mail messages. The program looks for 'From ' lines
#and takes the second word of those lines as the person who sent the mail. The
#program creates a Python dictionary that maps the sender's mail address to a
#count of the number of times they appear in the file. After the dictionary is
#produced, the program reads through the dictionary using a maximum loop to find
#the most prolific committer.
name = raw_input("Enter file:")
name = "mbox-short.txt"
handle = open(name,'r')
ct_email=dict()
for line in handle:
line=line.rstrip()
if not line.startswith('From '): continue
words=line.split()
email=words[1]
ct_email[email]=ct_email.get(email,0)+1
bigcount=None
bigword=None
for word,count in ct_email.items():
if bigcount is None or count>bigcount:
bigword=word
bigcount=count
print bigword,bigcount
| [
"[email protected]"
] | |
96a2a20b910aa08899a63d5c2c2b228bfeecca9f | 8fb1cd16158873c97976c6861e4a16817f11c096 | /WhatsAppManifest/automator/whatsapp/conversation/conversation.py | e540fb729eb786e85f3425596f61f2c0dfa1675b | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | alexbnkz/WhatsAppManifest | bdce7b281d17b68e72090fd0f269fc77618a4879 | 963546dbcc254b332c704c823f1f291906f8dee6 | refs/heads/master | 2021-03-10T15:51:31.201749 | 2020-03-11T03:06:34 | 2020-03-11T03:06:34 | 246,465,139 | 0 | 1 | null | 2020-03-11T03:26:48 | 2020-03-11T03:26:47 | null | UTF-8 | Python | false | false | 1,942 | py | from time import sleep
from WhatsAppManifest.adb.base import WhatsAppManifest
from WhatsAppManifest.consts import _PACKAGE_NAME_
from WhatsAppManifest.adb.device import Device
from WhatsAppManifest.manifest.android import AndroidKeyEvents
from WhatsAppManifest.automator.whatsapp.database import WhatsAppDatabaseMSGStore
class Conversation(WhatsAppManifest):
_device: Device = None
_msgstore: WhatsAppDatabaseMSGStore = None
def __init__(self, device: Device):
self.build_logger(type(self).__name__)
self._device = device
self._msgstore = WhatsAppDatabaseMSGStore(device=device)
def send_message(self, jid: str, message: str, re_open: bool = True, wait_send_complete: bool = False):
from WhatsAppManifest.manifest.whatsapp.contact_picker import ContactPicker
if re_open:
self._device.adb_utils.app_stop(_PACKAGE_NAME_)
picker = ContactPicker()
command = picker.build_send_message(jid, message)
self.logger.info(f"Opening conversation with contact {jid}")
command_output = self._device.adb_device.shell(command)
self.logger.debug(f"{command_output}")
while self._device.adb_utils.current_app().get("activity") != "com.whatsapp.Conversation":
sleep(1)
self.logger.info(f"Pressing the \"ENTER\" key")
self._device.adb_utils.keyevent(AndroidKeyEvents.ENTER)
if wait_send_complete:
while True:
sleep(2)
message = self._msgstore.last_contact_message(jid)
if message is not None and message.status in ["seen", "received", "waiting_in_server"]:
break
def create_chat(self, phone_number):
from WhatsAppManifest.manifest.whatsapp.api_send import APISend
api_send = APISend()
command = api_send.build_apo_send(phone_number)
return self._device.adb_device.shell(command)
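
# Usage sketch (hypothetical device and JID values):
#
#     device = Device(...)                 # an already-connected ADB device
#     conversation = Conversation(device)
#     conversation.send_message("[email protected]", "hello",
#                               wait_send_complete=True)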
| [
"[email protected]"
] | |
47404961f7e18c58781bec4b2c30100d3659544b | f85e7b3e347622c7541a15974d90159c4fd9b973 | /kitchensink-prj/kitchensink/statics/views.py | b7c7eacd735135b4fab68a876456c7f490477497 | [] | no_license | giubeppe/django-study | 45baca5e7b03d478319214b28bb62ae73629c34e | 34c31609bf02ea56cd6c8d24fac8dde0a051207e | refs/heads/master | 2021-01-04T14:06:20.942460 | 2011-09-27T05:21:49 | 2011-09-27T05:21:49 | 2,456,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # Create your views here.
from django.http import HttpResponse
from django.shortcuts import *
def index(request):
return render_to_response('index.html')
| [
"[email protected]"
] | |
861791a99cff6199e12450b643eb878adfecc0e4 | 6d25434ca8ce03f8fef3247fd4fc3a1707f380fc | /[0072][Hard][Edit_Distance]/Edit_Distance.py | a5ef60b431b491c1ee9c91a3dfbba9d16a70f005 | [] | no_license | sky-dream/LeetCodeProblemsStudy | 145f620e217f54b5b124de09624c87821a5bea1b | e0fde671cdc9e53b83a66632935f98931d729de9 | refs/heads/master | 2020-09-13T08:58:30.712604 | 2020-09-09T15:54:06 | 2020-09-09T15:54:06 | 222,716,337 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | # leetcode time cost : 180 ms
# leetcode memory cost : 16.9 MB
# Time Complexity: O(M*N)
# Space Complexity: O(M*N)
# solution 1. dp with iteration
class Solution:
#def minDistance(self, word1: str, word2: str) -> int:
def minDistance(self, word1, word2):
m = len(word1)
n = len(word2)
dp = [[0] * (n + 1) for _ in range(m + 1)]
        # first row
for j in range(1, n + 1):
dp[0][j] = dp[0][j-1] + 1
        # first column
for i in range(1, m + 1):
dp[i][0] = dp[i-1][0] + 1
for i in range(1, m + 1):
for j in range(1, n + 1):
if word1[i-1] == word2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1] ) + 1
return dp[-1][-1]
def main():
word1 = "horse" # expect is 3,
word2 = "ros"
obj = Solution()
result = obj.minDistance(word1, word2)
print("return result is ",result);
if __name__ =='__main__':
main() | [
"[email protected]"
] |