blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-261) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-45) | license_type (stringclasses, 2 values) | repo_name (stringlengths 8-111) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 72 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 530k-616M, with nulls ⌀) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-24.6k) | gha_license_id (stringclasses, 9 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 40 values) | src_encoding (stringclasses, 10 values) | language (stringclasses, 1 value) | is_vendor (bool, 1 class) | is_generated (bool, 2 classes) | length_bytes (int64, 11-4.05M) | extension (stringclasses, 25 values) | content (stringlengths 10-4.04M) | authors (sequencelengths 1-1) | author_id (stringclasses, 578 values)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9fe63f7d3ec967b0984566e83707772eedadfb5 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/765188917950a2d371982a81fa142747ea65f14a-<binned_statistic_2d>-bug.py | 37399820742f3945f3f54302e29aeac36416da57 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,928 | py | def binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None, expand_binnumbers=False):
"\n Compute a bidimensional binned statistic for one or more sets of data.\n\n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (N,) array_like\n A sequence of values to be binned along the second dimension.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'std' : compute the standard deviation within each bin. This \n is implicitly calculated with ddof=0.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * 'min' : compute the minimum of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'max' : compute the maximum of values for point within each bin.\n Empty bins will be represented by NaN.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n\n * the number of bins for the two dimensions (nx = ny = bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edge = y_edge = bins),\n * the bin edges in each dimension (x_edge, y_edge = bins).\n\n If the bin edges are specified, the number of bins will be,\n (nx = len(x_edge)-1, ny = len(y_edge)-1).\n\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be\n considered outliers and not tallied in the histogram.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section.\n\n .. 
versionadded:: 0.17.0\n\n Returns\n -------\n statistic : (nx, ny) ndarray\n The values of the selected statistic in each two-dimensional bin.\n x_edge : (nx + 1) ndarray\n The bin edges along the first dimension.\n y_edge : (ny + 1) ndarray\n The bin edges along the second dimension.\n binnumber : (N,) array of ints or (2,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (2,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n\n Calculate the counts with explicit bin-edges:\n\n >>> x = [0.1, 0.1, 0.1, 0.6]\n >>> y = [2.1, 2.6, 2.1, 2.1]\n >>> binx = [0.0, 0.5, 1.0]\n >>> biny = [2.0, 2.5, 3.0]\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])\n >>> ret.statistic\n array([[ 2., 1.],\n [ 1., 0.]])\n\n The bin in which each sample is placed is given by the `binnumber`\n returned parameter. By default, these are the linearized bin indices:\n\n >>> ret.binnumber\n array([5, 6, 5, 9])\n\n The bin indices can also be expanded into separate entries for each\n dimension using the `expand_binnumbers` parameter:\n\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],\n ... expand_binnumbers=True)\n >>> ret.binnumber\n array([[1, 1, 1, 2],\n [1, 2, 1, 1]])\n\n Which shows that the first three elements belong in the xbin 1, and the\n fourth into xbin 2; and so on for y.\n\n "
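    # Normalize `bins`: anything that is not a single count (N == 1) or a
    # per-dimension spec (N == 2) is treated as one shared array of bin edges
    # for both dimensions before delegating to binned_statistic_dd.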
try:
N = len(bins)
except TypeError:
N = 1
if ((N != 1) and (N != 2)):
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
(medians, edges, binnumbers) = binned_statistic_dd([x, y], values, statistic, bins, range, expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) | [
"[email protected]"
] | |
a12343947c99a0584b18996596487918113884d1 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1360455/snippet.py | ff62eb5f770ed285b9b8fdc6e6f331c6b6e4e651 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 3,727 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Done under Visual Studio 2010 using the excelent Python Tools for Visual Studio
# http://pytools.codeplex.com/
#
# Article on ideas vs execution at: http://blog.databigbang.com/ideas-and-execution-magic-chart/
import urllib2
import json
from datetime import datetime
from time import mktime
import csv
import codecs
import cStringIO
class CSVUnicodeWriter: # http://docs.python.org/library/csv.html
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
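# Note: this helper is only needed on Python 2; on Python 3 the stdlib csv
# module writes Unicode natively (open the file with newline='' instead).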
def get_hackernews_articles_with_idea_in_the_title():
endpoint = 'http://api.thriftdb.com/api.hnsearch.com/items/_search?filter[fields][title]=idea&start={0}&limit={1}&sortby=map(ms(create_ts),{2},{3},4294967295000)%20asc'
incomplete_iso_8601_format = '%Y-%m-%dT%H:%M:%SZ'
items = {}
start = 0
limit = 100
begin_range = 0
end_range = 0
url = endpoint.format(start, limit, begin_range, str(int(end_range)))
response = urllib2.urlopen(url).read()
data = json.loads(response)
prev_timestamp = datetime.fromtimestamp(0)
results = data['results']
while results:
for e in data['results']:
_id = e['item']['id']
title = e['item']['title']
points = e['item']['points']
num_comments = e['item']['num_comments']
timestamp = datetime.strptime(e['item']['create_ts'], incomplete_iso_8601_format)
            #if timestamp < prev_timestamp: # The results are not correctly sorted. We can't rely on this one.
            if _id in items: # If the circle is complete.
                return items
            prev_timestamp = timestamp
            items[_id] = {'id':_id, 'title':title, 'points':points, 'num_comments':num_comments, 'timestamp':timestamp}
            title_utf8 = title.encode('utf-8')
            print title_utf8, timestamp, _id, points, num_comments
        start += len(results)
        if start + limit > 1000:
start = 0
end_range = mktime(timestamp.timetuple())*1000
url = endpoint.format(start, limit, begin_range, str(int(end_range))) # if not str(int(x)) then a float gives in the sci math form: '1.24267528e+12'
response = urllib2.urlopen(url).read()
data = json.loads(response)
results = data['results']
return items
if __name__ == '__main__':
items = get_hackernews_articles_with_idea_in_the_title()
with open('hn-articles.csv', 'wb') as f:
hn_articles = CSVUnicodeWriter(f)
hn_articles.writerow(['ID', 'Timestamp', 'Title', 'Points', '# Comments'])
for k,e in items.items():
hn_articles.writerow([str(e['id']), str(e['timestamp']), e['title'], str(e['points']), str(e['num_comments'])])
# It returns 3706 articles where the query says that they are 3711... find the bug... | [
"[email protected]"
] | |
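The loop above works around the search API's 1,000-result window by resetting `start` to 0 and narrowing the time range instead. A generic sketch of that pattern (illustrative only; `fetch` and its parameters are made-up stand-ins, not a real API):

```python
def fetch_all(fetch, limit=100, window_cap=1000):
    """Collect every item from a windowed search API via a hypothetical fetch()."""
    items, start, end_ts = {}, 0, None
    while True:
        batch = fetch(start=start, limit=limit, end_ts=end_ts)
        if not batch:
            return items
        for item in batch:
            if item['id'] in items:       # wrapped around to seen data: done
                return items
            items[item['id']] = item
        start += len(batch)
        if start + limit > window_cap:    # the API only exposes the first 1000 hits
            start = 0                     # restart the offset...
            end_ts = item['ts']           # ...and shrink the time window instead
```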
41a05bf8566a0aaa78cd2a68971c5772b4e0c361 | 71fdffc6f4ed975d042073691960e554a2b76be0 | /Air Brush.py | 70ba0981586c27589e54a2deafc7f5fdb5bf4eca | [] | no_license | BhavyaShah1234/MyWholeImageProcessingFolder | 1abe4f1f35625daf5b0e532c4e285267cf90719e | fa8af03537c576c1c3661eb57a7346ab0db24f56 | refs/heads/main | 2023-04-05T00:25:24.932163 | 2021-04-08T07:04:04 | 2021-04-08T07:04:04 | 355,788,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | import cv2
import numpy as np
frame_width = 800
frame_height = 600
brightness = 150
web_cam = cv2.VideoCapture(0)
web_cam.set(3, frame_width)
web_cam.set(4, frame_height)
web_cam.set(10, brightness)
my_colors = [[5, 107, 0, 19, 255, 255],
[133, 56, 0, 159, 156, 255],
[57, 76, 0, 100, 255, 255],
[90, 48, 0, 118, 255, 255]]
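# Each row of my_colors is an HSV detection range: [h_min, s_min, v_min, h_max, s_max, v_max].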
my_color_values = [[51, 153, 255],
[255, 0, 255],
[0, 255, 0],
[255, 0, 0]]
my_points = []
def find_color(img, colors, color_value):
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
count = 0
new_point = []
for k in colors:
lower = np.array(k[0:3])
upper = np.array(k[3:6])
mask = cv2.inRange(img_hsv, lower, upper)
x, y = get_contours(mask)
cv2.circle(image_result, (x, y), 10, color_value[count], cv2.FILLED)
if x != 0 and y != 0:
new_point.append([x, y, count])
count = count + 1
cv2.imshow(f'{k[0]}', mask)
return new_point
def get_contours(img):
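    # Find a sufficiently large contour (area > 500) and return the top-center
    # of its bounding box, used as the brush tip; (0, 0) if nothing is found.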
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
x = 0
y = 0
w = 0
for j in contours:
area = cv2.contourArea(j)
if area > 500:
cv2.drawContours(image_result, j, -1, (0, 255, 0), 4)
perimeter = cv2.arcLength(j, True)
approx = cv2.approxPolyDP(j, 0.02 * perimeter, True)
x, y, w, h = cv2.boundingRect(approx)
return x+w//2, y
def draw(points, color_values):
for point in points:
cv2.circle(image_result, (point[0], point[1]), 10, color_values[point[2]], cv2.FILLED)
while True:
success, image = web_cam.read()
image_result = image.copy()
new_points = find_color(image, my_colors, my_color_values)
for i in new_points:
if len(new_points) != 0:
my_points.append(i)
if len(my_points) != 0:
draw(my_points, my_color_values)
cv2.imshow('Air Brush', image_result)
cv2.waitKey(1)
| [
"[email protected]"
] | |
25ddfc25649e4f32fa7aca616751c0df603fbaa6 | d0705ffafbb53129bce83520e4a07362e1be8ff5 | /Auswertungskript.py | 0124a46447ab11271a7b82487a3337b99e0c2e8e | [] | no_license | rasefix/Semi-Stuff | 9d97bdb02bf052dc3fe210042e0cca1bad175fcf | f65556860971a90b8f8d4627fbea18fb057a14c5 | refs/heads/main | 2023-03-23T07:13:57.807010 | 2021-03-16T14:33:33 | 2021-03-16T14:33:33 | 348,387,474 | 0 | 0 | null | 2021-03-16T14:54:37 | 2021-03-16T14:54:35 | null | UTF-8 | Python | false | false | 489 | py | import openpyxl
a=0
b=2
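# a is the stop flag (0 = keep measuring) and b is the next worksheet row to
# write to (data rows start at 2); entering a concentration of 5 ends the run.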
while a==0:
    temp=int(input("Temperature:"))
    konz=int(input("Concentration:"))
if konz==5:
a=1
print("Messvorgang abgeschlossen")
else:
fileXLSX = openpyxl.load_workbook("Auswertung.xlsx")
sheet = fileXLSX["Tabelle1"]
sheet.cell(row=b, column=1).value = temp/1000
sheet.cell(row=b, column=2).value = konz/1000
fileXLSX.save('Auswertung.xlsx')
b=b+1
| [
"[email protected]"
] | |
663eb729df9fd31227930e3361b2418b2f4d6d5c | 89cb1736c052c6ecd4028a57d23a92252427bce4 | /game_of_thrones_EDA.py | bb0faf9ce6d8c23cb057a8f8ad5be870eb543f44 | [] | no_license | lucascmbarros/game_of_thrones_dataset | 889c1b4c8e0edba280dac459896390ef2ad94891 | a8febe918998e490502fa5903904c2583c37f829 | refs/heads/master | 2020-05-04T06:25:48.268120 | 2019-04-08T01:25:50 | 2019-04-08T01:25:50 | 179,005,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,128 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 16:26:49 2019
@author: lucas.barros
Assignment 2: Game of Thrones predictions
"""
#################################
# Basic libraries
#################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#################################
# Importing file
#################################
file = 'GOT_character_predictions.xlsx'
df = pd.read_excel(file)
##############################################################################
# EDA
##############################################################################
# showing all columns when called
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', None)
print(df.columns)
'''
Some column names are unclear or too complicated to type every time they are
needed; I'm renaming some for convenience.
'''
df = df.rename(index = str, columns ={'S.No': 'charnumber',
'dateOfBirth': 'DOB',
'book1_A_Game_Of_Thrones': 'book1',
'book2_A_Clash_Of_Kings': 'book2',
'book3_A_Storm_Of_Swords': 'book3',
'book4_A_Feast_For_Crows': 'book4',
'book5_A_Dance_with_Dragons': 'book5'
})
print(df.info())
print(df.describe().round(2))
'''
The only variables that seem to be continuous are age, DOB, numDeadRelations,
popularity, the others are categorical/binary.
'''
df['isAlive'].describe()
df['isAlive'].value_counts()
#################################
# Flagging missing values
#################################
mv_bycolumn = df.isnull().sum()
print(mv_bycolumn)
#########################################################
# Creating new columns for the flagged missing values
#########################################################
'''
Creating indicator columns (0/1) that flag the missing values
'''
for col in df:
if df[col].isnull().any():
df['m_'+col] = df[col].isnull().astype(int)
df_dropped = df.dropna()
####################################
# Analysing the culture variable
####################################
df.culture.head()
# getting dummy variables for the cultures
dum_cult = pd.get_dummies(df[['culture']], dummy_na = True)
# analyzing the count of cultures
for col in dum_cult.iloc[:, :65]:
count = dum_cult[col].value_counts()
print(count)
'''
Westermen are descendants of the Andals (similar ethnicity),
but since the lineage goes back several generations,
it is better to keep them separate.
'''
# filling NAs with unknown
fill = 'unknown'
df['culture'] = df['culture'].fillna(fill)
# Some cultures have duplicate spellings; aggregating them together
df['culture'][df['culture'].str.contains('Andal')] = 'Andal'
df['culture'][df['culture'].str.contains('Asshai')] = 'Asshai'
df['culture'][df['culture'].str.contains('Astapor')] = 'Astapor'
df['culture'][df['culture'].str.contains('Braavos')] = 'Braavos'
df['culture'][df['culture'].str.contains('Dorn')] = 'Dorne'
df['culture'][df['culture'].str.contains('Ghiscari')] = 'Ghiscari'
df['culture'][df['culture'].str.contains('Iron')] = 'Ironborn'
df['culture'][df['culture'].str.contains('iron')] = 'Ironborn'
df['culture'][df['culture'].str.contains('Lhazare')] = 'Lhazareen'
df['culture'][df['culture'].str.contains('Lyse')] = 'Lysene'
df['culture'][df['culture'].str.contains('Meereen')] = 'Meereen'
df['culture'][df['culture'].str.contains('orthmen')] = 'Northmen'
df['culture'][df['culture'].str.contains('Norvos')] = 'Norvos'
df['culture'][df['culture'].str.contains('Qarth')] = 'Qarth'
df['culture'][df['culture'].str.contains('Reach')] = 'Reach'
df['culture'][df['culture'].str.contains('River')] = 'Rivermen'
df['culture'][df['culture'].str.contains('Stormland')] = 'Stormland'
df['culture'][df['culture'].str.contains('Summer')] = 'Summer'
df['culture'][df['culture'].str.contains('Vale')] = 'Vale'
df['culture'][df['culture'].str.contains('Lyse')] = 'Lysene'
df['culture'][df['culture'].str.contains('ester')] = 'Westernmen'
'''
Free folk and wildlings are actually the same people, just different
nomenclature.
'''
df['culture'][df['culture'].str.contains('Wilding')]= 'Windling'
df['culture'][df['culture'].str.contains('Free')]= 'Windling'
df['culture'][df['culture'].str.contains('free')]= 'Windling'
print(df['culture'][df['isAlive'] == 0].value_counts())
print(df['culture'][df['isAlive'] == 1].value_counts())
'''
Generally speaking, the inhabitants of the north of Westeros are the ones
that die the most. This is probably due to the number of wars in that region,
plus what happens at the Wall.
'''
####################################
# Analysing the house variable
####################################
df.house.head()
# getting dummy variables for the houses
dum_hou = pd.get_dummies(df[['house']], dummy_na = True)
#### analyzing the count of houses
for col in dum_hou.iloc[:, :348]:
count = dum_hou[col].value_counts()
print(count)
# Filling NAs with unknown
fill = 'unknown'
df['house'] = df['house'].fillna(fill)
#### Some houses have duplicates, aggregating them together
df['house'][df['house'].str.contains('Lannister')] = 'Lannister'
df['house'][df['house'].str.contains('Baratheon')] = 'Baratheon'
df['house'][df['house'].str.contains('Brotherhood')] = 'Brotherhood without banners'
df['house'][df['house'].str.contains('Bolton')] = 'Bolton'
df['house'][df['house'].str.contains('Flint')] = 'Flint'
df['house'][df['house'].str.contains('Brune')] = 'Brune of Browhollow'
df['house'][df['house'].str.contains('Fossoway')] = 'Fossoway'
df['house'][df['house'].str.contains('Frey')] = 'Frey'
df['house'][df['house'].str.contains('Goodbrother')] = 'Goodbrother'
df['house'][df['house'].str.contains('House Harlaw')] = 'House Harlaw'
df['house'][df['house'].str.contains('Kenning')] = 'Kenning'
df['house'][df['house'].str.contains('Royce')] = 'Royce'
df['house'][df['house'].str.contains('Tyrell')] = 'Tyrell'
df['house'].value_counts()
print(df['house'][df['isAlive'] == 0].value_counts())
print(df['house'][df['isAlive'] == 1].value_counts())
'''
Night's Watch die the most, followed by obviously the Targaryen, and then
Starks, Lannisters, Greyjoys, and Freys probably due to the war between the
families.
'''
'''
According to my research, the most important families are Baratheon, Stark,
Lannister, Arryn, Tyrell, Tully, Greyjoy, Martell, and Targaryen.
After the Red Wedding, House Frey became one of the most important.
'''
##################################
# Analysing Title
##################################
print(df.title.value_counts().head(10))
print(df.title[df['isAlive'] == 1].value_counts().head(10))
df.title.isna().sum()
# filling NAs with unknown
fill = 'unknown'
df['title'] = df['title'].fillna(fill)
dum_title = pd.get_dummies(df[['title']], dummy_na = True)
df = pd.concat([df, dum_title], axis = 1)
'''
Characters with higher titles of nobility seem to have a higher chance of surviving.
'''
##################################################
# Analysing Father, Mother, Heir, and Spouse
##################################################
# flagging missing values
print(df.father.isna().sum())
print(df.mother.isna().sum())
print(df.heir.isna().sum())
print(df.spouse.isna().sum())
# checking the distribution
print(df.father.value_counts())
print(df.mother.value_counts())
print(df.heir.value_counts())
# filling NAs with unknown
fill = 'unknown'
df['father'] = df['father'].fillna(fill)
df['mother'] = df['mother'].fillna(fill)
df['heir'] = df['heir'].fillna(fill)
df['spouse'] = df['spouse'].fillna(fill)
###################################################
# Analysing books
##################################################
# Flagging Missing Values
print(df.book1.isna().sum())
print(df.book2.isna().sum())
print(df.book3.isna().sum())
print(df.book4.isna().sum())
print(df.book5.isna().sum())
'''
no NAs
'''
print(df.book1.value_counts())
print(df.book2.value_counts())
print(df.book3.value_counts())
print(df.book4.value_counts())
print(df.book5.value_counts())
# Studying the relation between being in a book and being alive
'''There are not a lot of people alive in book1 since it tells a lot of
stories about what happened in the past.
'''
# Checking who appeared in all books; they are probably very significant.
df['all_books'] = (df['book1'] + df['book2'] + df['book3'] + df['book4'] +
df['book5'])
df['all_books'].value_counts()
# Doing a outlier for people who appeared in all books.
df['out_allbooks'] = 0
df['out_allbooks'] = df['all_books'][df['all_books'] == 5]
fill = 0
df['out_allbooks'] = df['out_allbooks'].fillna(fill)
# Flagging characters that didn't appear in any book.
df['no_books'] = 0
df.loc[ : , 'no_books'][df.loc[ : , 'all_books'] == 0] = 1
'''
Combining who appeared in different books might be significant to the
final analysis
'''
df['book_4_5'] = 0
df['book_4_5'] = df['book4'] + df['book5']
df['book_4_5'][df['isAlive']== 1].value_counts()
df['book_1_5'] = 0
df['book_1_5'] = df['book1'] + df['book5']
df['book_1_5'][df['isAlive']== 1].value_counts()
df['book_3_n_5'] = 0
df['book_3_n_5'] = df['book3'] + df['book5']
df['book_3_n_5'][df['isAlive']== 1].value_counts()
df['book_2_3'] = 0
df['book_2_3'] = df['book2'] + df['book3']
df['book_2_3'][df['isAlive']== 1].value_counts()
df['book_2_3'] = 0
df['book_2_3'] = df['book2'] + df['book3']
df['book_2_3'][df['isAlive']== 1].value_counts()
df['book_3_4_5'] = 0
df['book_3_4_5'] = df['book4'] + df['book5'] + df['book3']
df['book_3_4_5'][df['isAlive']== 1].value_counts()
'''
The combinations above show who appeared in the selected books; the sum
counts in how many of the selected books each character appears.
'''
print(np.corrcoef(x=df['isAlive'], y = df['book1']))
print(np.corrcoef(x=df['isAlive'], y = df['book2']))
print(np.corrcoef(x=df['isAlive'], y = df['book3']))
print(np.corrcoef(x=df['isAlive'], y = df['book4']))
print(np.corrcoef(x=df['isAlive'], y = df['book5']))
print(np.corrcoef(x=df['isAlive'], y = df['all_books']))
'''
There is a small correlation between being alive and how old the book is,
with book4 having the highest correlation with being alive. Also, the more
books a character appears in, the higher the probability of being alive.
'''
#################################################################
# Analysing If Mother, Father, Heir, and/or Spouse are alive
#################################################################
# Flagging missing Values
print(df.isAliveMother.isna().sum())
print(df.isAliveFather.isna().sum())
print(df.isAliveHeir.isna().sum())
print(df.isAliveSpouse.isna().sum())
'''
There are a lot of missing values. I'm assuming that if it is unknown whether
a character's relative is alive, the character is probably not important,
hence I'm imputing the missing values with 0.
'''
# Filling NAs with unknown
fill = 0
df.isAliveMother = df.isAliveMother.fillna(fill)
df.isAliveFather = df.isAliveFather.fillna(fill)
df.isAliveHeir = df.isAliveHeir.fillna(fill)
df.isAliveSpouse = df.isAliveSpouse.fillna(fill)
###################################################
# Analysing if is Married and/or is Noble
###################################################
# Flagging missing Values
print(df.isMarried.isna().sum())
print(df.isNoble.isna().sum())
'''
No missing values
'''
# Checking the distribution of Married and Noble
print(df.isMarried.value_counts())
print(df.isNoble.value_counts())
print(df['isMarried'][df['isAlive'] == 1].sum())
print(df['isNoble'][df['isAlive'] == 1].sum())
'''
69.2% of Married are alive
72.5% of Nobles are alive
'''
df['isMarried'][df['isMarried'] == 1 ][df['isNoble'] == 1][df['isAlive'] == 1].sum()
'''
183 are Married and are Noble
109 are Married, Noble, and are Alive
'''
'''
Creating a column for characters that are noble and married
'''
df['lucky'] = 0
df['lucky'] = df.loc[ : ,'isNoble'] + df.loc[: , 'isMarried']
df['lucky'] = df['lucky'].replace(1, 0)
df['lucky'] = df['lucky'].replace(2, 1)
#############################################################
# Analysing Age
#############################################################
# Flagging missing values for AGE
print(df.age.isna().sum())
'''
Dropping the 2 extreme outliers
'''
df = df.drop(df.index[110])
df = df.drop(df.index[1349])
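# Note: these drops are by position; after the first drop, index 1349 refers
# to the already-shifted frame.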
df.age.describe()
'''
Adding the age of a living character to their DOB gives the current year of
the dataset (which is 305). Also, if we take the oldest person alive and
their DOB, we can assume that anyone born before that is dead. The oldest
person alive was born in 208, so that will be the threshold.
'''
'''
Creating a dummy (1/0) column indicating whether the character could still be
living in the interval between 208 and 305.
'''
df['300year_vs_dob'] = 305 - df['DOB']
df['alive_by_age'] = 0
def conditions(df):
if (df['age'] == df['300year_vs_dob']):
return 0
elif (df['age'] < df['300year_vs_dob']):
return 1
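# Rows matching neither branch return None, which pandas stores as NaN; it is
# filled with -1 below.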
df['alive_by_age'] = df.apply(conditions, axis=1)
print(df['alive_by_age'].sum())
# Filling the missing values with -1
df['300year_vs_dob'] = df['300year_vs_dob'].fillna(-1)
# Filling the missing values with -1 to flag them
fill = -1
df.alive_by_age = df.alive_by_age.fillna(fill)
# Filling the NA's with -1 to analyze the distribution afterwards
fill = -1
df['age'] = df['age'].fillna(fill)
# Creating a new column without the NA values of the age
df['out_age'] = df['age'][df['age'] != -1]
df['out_age'] = df['out_age'].fillna(0)
# Analysing the distribution of the ages
df_age = df.age.dropna()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df_age)
plt.show()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df.age)
plt.show()
# Filling NAs with the median
df['age'][df['age'] == -1] = 27
# Filling the DOB NAs with -1 to analyse the distribution afterwards
fill = -1
df['DOB'] = df['DOB'].fillna(fill)
# Creating a new column without the NA values of the DOB
df['out_DOB'] = df['DOB'][df['DOB'] != -1]
df['out_DOB'] = df['out_DOB'].fillna(0)
df.DOB.describe()
df_DOB = df.DOB.dropna()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df_DOB)
plt.show()
# filling NAs with the median
df['DOB'][df['DOB'] == -1] = 268
'''
Creating a new column with the sum of age and DOB; if the result != 305, then
the character is not alive.
'''
df['out_year'] = df.DOB + df.age
##########################################################
# Analysing Number of dead relatives and popularity
##########################################################
# Flagging Missing Values
print(df.numDeadRelations.isna().sum())
print(df.popularity.isna().sum())
# distribution of dead relatives
print(df.numDeadRelations.value_counts())
# checking the correlation between dead relatives and being alive
np.corrcoef(x = df['numDeadRelations'] , y = df['isAlive'])
'''
There is a very weak negative correlation between the number of dead relatives
and being alive.
I'm creating a dummy variable for the number of dead relatives, where a
character with at least one dead relative is flagged as 1.
'''
dead_relations_zero = 0
df['out_deadrelations'] = 0
df.loc[ : , 'out_deadrelations'][df.loc[ : , 'numDeadRelations'] !=
dead_relations_zero] = 1
# Exploring the popularity
print(df.popularity.describe())
# Analysing the distribution
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df['popularity'])
plt.show()
sns.lmplot(x = 'popularity',
y = 'isAlive',
data = df
)
plt.show()
# Checking the correlation with being alive
np.corrcoef(x = df['popularity'], y = df['isAlive'])
'''
I'm going to create a new column only with the most popular characters.
Checking the distribution according to the quantiles.
'''
df['popularity'].quantile([0.25,
0.50,
0.75,
0.80,
0.90,
0.95
])
df_popularity = (df.loc[ : , ['name',
'house',
'popularity',
'isAlive']]
[df['popularity'] >= 0.3]
)
print(df_popularity.describe())
print(np.corrcoef(x=df_popularity['popularity'],
y = df_popularity['isAlive']
))
# Creating a new column only with characters >= 0.3 of popularity.
df['out_popular'] = 0
df['out_popular'][df['popularity'] >= 0.3] = 1
df_corr = df.loc[:, ['out_age', 'out_DOB', 'out_year', 'alive_by_age']
].corr().round(2)
###############################################################################
# Dataset is ready for the models
###############################################################################
df.to_excel('got.xlsx')
| [
"[email protected]"
] | |
1f29a592c39022e79242a176b8638f31728d0fba | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/190.py | 4ea85e66ef60f663dfa02f1f700dbd13bd15454c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from heapq import *
def read_ints():
return list(map(int, input().split()))
def solve(t):
N, r, o, y, g, b, v = read_ints()
if r == g != 0:
if o or y or b or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'RG'*r))
return
if y == v != 0:
if r or o or g or b:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'VY'*y))
return
if b == o != 0:
if r or y or g or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'OB'*b))
return
r -= g
y -= v
b -= o
if r < 0 or y < 0 or b < 0:
print('Case #{}: IMPOSSIBLE'.format(t))
return
M = max(r, y, b)
h = [(-r, r != M, 'R'), (-y, y != M, 'Y'), (-b, b != M, 'B')]
heapify(h)
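    # Max-heap via negated counts; on equal counts the boolean tie-breaker
    # favors whichever color started with the maximum count.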
res = ''
count, _prio, ch = heappop(h)
while count < 0:
res += ch
count, _prio, ch = heapreplace(h, (count + 1, _prio, ch))
if res[-1] != res[0] and all(count == 0 for count, *_ in h):
res = res.replace('R', 'RG'*g + 'R', 1)
res = res.replace('Y', 'YV'*v + 'Y', 1)
res = res.replace('B', 'BO'*o + 'B', 1)
print('Case #{}: {}'.format(t, res))
else:
print('Case #{}: IMPOSSIBLE'.format(t))
if __name__ == "__main__":
for t in range(1, int(input())+1):
solve(t)
| [
"[email protected]"
] | |
6f93021be2e728eb052b23276ba667565f0f0bb7 | 872ea32f551c803ac497a38667dc272965246561 | /tensorflow_transform/gaussianization.py | 320acb6f67fcda13b616b72cb43fb36c878774ab | [
"Apache-2.0"
] | permissive | tensorflow/transform | 5c4d74c15e7a13ef0901816dfe35b0901d6cb1da | d2bfc2640137324dcad7f7be365e6c851c01f4e9 | refs/heads/master | 2023-08-31T21:54:54.222760 | 2023-08-15T22:45:45 | 2023-08-15T22:46:20 | 81,509,390 | 1,030 | 267 | Apache-2.0 | 2023-08-11T22:57:56 | 2017-02-10T00:36:53 | Python | UTF-8 | Python | false | false | 13,792 | py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities used to compute parameters for gaussianization."""
import numpy as np
import tensorflow as tf
# The expressions to compute the first L-moments from the parameters of the
# Tukey HH distribution are taken from:
# Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and
# hh-Distributions through L-Moments and the L-Correlation," ISRN Applied
# Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153
def tukey_hh_l_mean_and_scale(h_params):
"""Computes L-mean and L-scale for a Tukey HH distribution.
Args:
h_params: An np.array with dimension 2 on the first axis. The slice
h_params[0, ...] contains the left parameter of the distribution and
h_params[1, ...] the right parameter. Each entry h must in 0 <= h < 1.
Returns:
The tuple (L_mean, L_scale) containing the first two L-moments for the
given parameters. Each entry has the same shape as h_params, except for
the first axis, which is removed.
"""
one_div_sqrt2pi = 1.0 / np.sqrt(2.0 * np.pi)
hl = h_params[0, ...]
hr = h_params[1, ...]
dtype = h_params.dtype
l_1 = one_div_sqrt2pi * (1.0 / (hl - 1.0) + 1.0 / (1.0 - hr))
l_2 = one_div_sqrt2pi * (
(np.sqrt(2.0 - hl) + np.sqrt(2.0 - hr) - hl * np.sqrt(2.0 - hl) -
hr * np.sqrt(2 - hr)) /
((hl - 1.0) * (hr - 1.0) * np.sqrt((hl - 2.0) * (hr - 2.0))))
return (l_1.astype(dtype), l_2.astype(dtype))
def _tukey_hh_l_skewness_and_kurtosis(h_params):
"""Computes L-skewness and L-kurtosis for a Tukey HH distribution.
Args:
h_params: An np.array with dimension 2 on the first axis. The slice
h_params[0, ...] contains the left parameter of the distribution and
h_params[1, ...] the right parameter.
Returns:
The tuple (L_skewness, L_kurtosis) for the given parameters. Each entry
has the same shape as h_params, except for the first axis, which is
removed.
"""
def skewness_num(h1, h2):
return (12 * np.sqrt(2.0 - h1) * (h2 - 2.0) * (h2 - 1.0) *
np.arctan(1.0 / np.sqrt(2.0 - h1)))
def skewness_den(h):
return h * np.sqrt(2 - h) - np.sqrt(2 - h)
def kurtosis_den_part(h):
return h * np.sqrt(2.0 - h) - np.sqrt(2.0 - h)
hl = h_params[0, ...]
hr = h_params[1, ...]
dtype = h_params.dtype
skewness = (skewness_num(hl, hr) -
np.pi * (hl - hr) * (hl - 2.0) * (hr - 2.0) -
skewness_num(hr, hl)) / (
2 * np.pi * np.sqrt((hl - 2.0) * (hr - 2.0)) *
(skewness_den(hl) + skewness_den(hr)))
kurtosis_num_1 = (
hr * np.sqrt((hl - 4.0) * (hl - 2.0) * (hl - 1.0) * (hr - 2.0)) -
2.0 * np.sqrt((hl - 4.0) * (hl - 1.0)))
kurtosis_num_2 = (hl * (hl - 3.0) * np.sqrt((hl - 4.0) * (hl - 1.0)) +
np.sqrt((hl - 4.0) * (hl - 2.0) * (hl - 1.0) * (hr - 2.0)))
kurtosis_num_3 = (30.0 * (hl - 1.0) *
np.sqrt((hl - 4.0) * (hl - 2.0) * (hr - 2.0) / (hl - 1.0)) *
(hr - 1.0) * np.arctan(np.sqrt(1.0 + 2.0 / (hl - 4.0))))
kurtosis_num_4 = (30.0 * (hl - 2) *
np.sqrt((hl - 4.0) * (hl - 1.0)) * (hl - 1.0) *
np.arctan(np.sqrt(1.0 + 2.0 / (hr - 4.0))))
kurtosis_den = (np.pi * np.sqrt((4.0 - hl) * (2.0 - hl) * (1.0 - hl)) *
(kurtosis_den_part(hl) + kurtosis_den_part(hr)))
kurtosis = (6.0 * np.pi * (kurtosis_num_1 - kurtosis_num_2) +
kurtosis_num_3 + kurtosis_num_4) / kurtosis_den
return (skewness.astype(dtype), kurtosis.astype(dtype))
def _binary_search(error_fn, low_value, high_value):
"""Binary search for a function given start and end interval.
This is a simple binary search over the values of the function error_fn given
the interval [low_value, high_value]. We expect that the starting condition is
error_fn(low_value) < 0 and error_fn(high_value) > 0 and we bisect the
interval until the exit conditions are met. The result is the final interval
[low_value, high_value] that is normally much smaller than the initial one,
but still satisfying the starting condition.
Args:
error_fn: Function mapping values to errors.
low_value: Lower interval endpoint. We expect f(low_value) < 0.
high_value: Higher interval endpoint. We expect f(high_value) > 0.
Returns:
The final interval endpoints (low_value, high_value) after the sequence of
bisections.
"""
# Exit conditions.
stop_iter_step = 10 # Max number of iterations.
stop_error_step = 1e-6 # Minimum function variation.
stop_value_step = 1e-6 # Minimum variable variation.
current_iter = 0
while True:
current_value = (low_value + high_value) / 2.0
current_error = error_fn(current_value)
if current_error < 0.0:
low_value = current_value
else:
high_value = current_value
current_iter += 1
if (current_iter > stop_iter_step or
np.abs(current_error) < stop_error_step or
high_value - low_value < stop_value_step):
break
return low_value, high_value
def _params_to_errors(h, delta_h, l_skewness_and_kurtosis):
"""Maps parameters to errors on L-skewness and L-kurtosis.
Args:
h: Value of right parameter of the Tukey HH distribution.
delta_h: Different between right and left parameter of the Tukey HH
distribution.
l_skewness_and_kurtosis: np.array containing the target values of
L-skewness and L-kurtosis.
Returns:
An np.array containing the difference between the values of L-skewness and
L-kurtosis corresponding to the parameters hl = h - delta_h, hr =h and the
target values.
"""
dtype = l_skewness_and_kurtosis.dtype
h_params = np.array([h - delta_h, h], dtype=dtype)
current_l_skewness_and_kurtosis = np.array(
_tukey_hh_l_skewness_and_kurtosis(h_params), dtype=dtype)
return current_l_skewness_and_kurtosis - l_skewness_and_kurtosis
def compute_tukey_hh_params(l_skewness_and_kurtosis):
"""Computes the H paramesters of a Tukey HH distribution.
Given the L-skewness and L-kurtosis of a Tukey HH distribution we compute
the H parameters of the distribution.
Args:
l_skewness_and_kurtosis: A np.array with shape (2,) containing L-skewness
and L-kurtosis.
Returns:
An np.array with the same type and shape of the argument containing the
left and right H parameters of the distribution.
"""
# Exit conditions for the search loop.
stop_iter_step = 20 # Max number of iteration for the search loop.
stop_error_step = 1e-6 # Minimum function variation.
stop_value_step = 1e-6 # Minimum variable variation.
dtype = l_skewness_and_kurtosis.dtype
# Returns zero parameters (i.e. treat as gaussian) if L-kurtosis is smaller
# than for a gaussian.
result = np.zeros_like(l_skewness_and_kurtosis)
if l_skewness_and_kurtosis[1] < 0.1226017:
return result
# If L-skewness is negative, swap the parameters.
swap_params = False
if l_skewness_and_kurtosis[0] < 0.0:
l_skewness_and_kurtosis[0] = -l_skewness_and_kurtosis[0]
swap_params = True
l_skewness_and_kurtosis[1] = np.minimum(
l_skewness_and_kurtosis[1], 1.0 - 1.0e-5)
# If L-skewness is zero, left and right parameters are equal and there is a
# a closed form to compute them from L-kurtosis. We start from this value
# and then change them to match simultaneously L-skeweness and L-kurtosis.
# For that, we parametrize the search space with the array
# [h_rigth, h_right - h_left], i.e. the value of the right parameter and the
# difference right minus left paramerters. In the search iteration, we
# alternate between updates on the first and the second entry of the search
# parameters.
initial_h = 3.0 - 1.0 / np.cos(
np.pi / 15.0 * (l_skewness_and_kurtosis[1] - 6.0))
search_params = np.array([initial_h, 0.0], dtype=dtype)
# Current lower and upper bounds for the search parameters.
min_search_params = np.array([initial_h, 0.0], dtype=dtype)
max_search_params = np.array([1.0 - 1.0e-7, initial_h], dtype=dtype)
current_iter = 0
previous_search_params = np.zeros_like(search_params)
while current_iter < stop_iter_step:
# Search for L-skewness at constant h. Increase delta_h.
error_skewness = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
search_params[0], x, l_skewness_and_kurtosis)[0]
if error_skewness(max_search_params[1]) > 0.0:
low_delta_h, high_delta_h = _binary_search(
error_skewness, min_search_params[1], max_search_params[1])
search_params[1] = high_delta_h
max_search_params[1] = high_delta_h # The new delta is an upperbound.
upperbound_delta_found = True
else:
search_params[1] = max_search_params[1]
min_search_params[1] = max_search_params[1] # No solution: lowerbound.
upperbound_delta_found = False
# Search for L-kurtosis at constant possibly overestimated delta.
error_kurtosis = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
x, search_params[1], l_skewness_and_kurtosis)[1]
low_h, high_h = _binary_search(
error_kurtosis, min_search_params[0], max_search_params[0])
if upperbound_delta_found:
search_params[0] = high_h
max_search_params[0] = high_h # Delta overestimated: upperbound for h.
else:
search_params[0] = low_h
min_search_params[0] = low_h # Delta underestimated: lowerbound for h.
max_search_params[1] = low_h # Delta not found, search on full range.
if upperbound_delta_found: # If not found, we repeat the first 2 steps.
# Otherwise, Search for delta at constant overestimated h.
error_skewness = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
search_params[0], x, l_skewness_and_kurtosis)[0]
low_delta_h, high_delta_h = _binary_search(
error_skewness, min_search_params[1], max_search_params[1])
search_params[1] = low_delta_h
min_search_params[1] = low_delta_h
# Search for h at constant delta.
error_kurtosis = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
x, search_params[1], l_skewness_and_kurtosis)[1]
low_h, high_h = _binary_search(
error_kurtosis, min_search_params[0], max_search_params[0])
search_params[0] = low_h
min_search_params[0] = low_h
current_error = _params_to_errors(
search_params[0], search_params[1], l_skewness_and_kurtosis)
delta_search_params = search_params - previous_search_params
current_iter += 1
previous_search_params = search_params.copy()
if (np.all(np.abs(current_error) < stop_error_step) or
np.all(np.abs(delta_search_params) < stop_value_step)):
break
result[0] = search_params[0] - search_params[1]
result[1] = search_params[0]
if swap_params:
result = result[::-1]
return result
def lambert_w(x):
"""Computes the Lambert W function of a `Tensor`.
Computes the principal branch of the Lambert W function, i.e. the value w such
that w * exp(w) = x for a a given x. For the principal branch, x must be real
x >= -1 / e, and w >= -1.
Args:
x: A `Tensor` containing the values for which the principal branch of
the Lambert W function is computed.
Returns:
A `Tensor` with the same shape and dtype as x containing the value of the
Lambert W function.
"""
dtype = x.dtype
e = tf.constant(np.exp(1.0), dtype)
inv_e = tf.constant(np.exp(-1.0), dtype)
s = (np.exp(1) - 1.0) / (np.exp(2) - 1.0)
slope = tf.constant(s, dtype)
c = tf.constant(1 / np.exp(1) * (1 - s), dtype)
log_s = tf.math.log(x)
w_init = tf.where(
x < inv_e,
x,
tf.where(x < e,
slope * x + c,
(log_s + (1.0 / log_s - 1.0) * tf.math.log(log_s))))
def newton_update(count, w):
expw = tf.math.exp(w)
wexpw = w * expw
return count + 1, w - (wexpw - x) / (expw + wexpw)
count = tf.constant(0, tf.int32)
num_iter = tf.constant(8)
(unused_final_count, w) = tf.while_loop(
lambda count, w: tf.less(count, num_iter),
newton_update,
[count, w_init])
return w
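# A quick sanity check (illustrative, not part of the library): for x >= -1/e
# the identity w * exp(w) == x should hold, e.g.
#   w = lambert_w(tf.constant([0.5, 1.0, 2.0], tf.float64))
#   tf.debugging.assert_near(w * tf.math.exp(w), [0.5, 1.0, 2.0])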
def inverse_tukey_hh(x, hl, hr):
"""Compute the inverse of the Tukey HH function.
The Tukey HH function transforms a standard Gaussian distribution into the
Tukey HH distribution and it's defined as:
x = u * exp(hl * u ^ 2) for u < 0 and x = u * exp(hr * u ^ 2) for u >= 0.
Given the values of x, this function computes the corresponding values of u.
Args:
x: The input `Tensor`.
hl: The "left" parameter of the distribution. It must have the same dtype
and shape of x (or a broadcastable shape) or be a scalar.
hr: The "right" parameter of the distribution. It must have the same dtype
and shape of x (or a broadcastable shape) or be a scalar.
Returns:
The inverse of the Tukey HH function.
"""
def one_side(x, h):
h_x_square = tf.multiply(h, tf.square(x))
return tf.where(
# Prevents the 0 / 0 form for small values of x..
tf.less(h_x_square, 1.0e-7),
x, # The error is < 1e-14 for this case.
tf.sqrt(tf.divide(lambert_w(h_x_square), h)))
return tf.where(tf.less(x, 0.0), -one_side(-x, hl), one_side(x, hr))
| [
"[email protected]"
] | |
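A minimal usage sketch for the utilities above (my own illustration, not part of the module; the L-moment values are made up and would normally be estimated from data):

```python
import numpy as np
import tensorflow as tf
# assumes: from tensorflow_transform.gaussianization import (
#     compute_tukey_hh_params, inverse_tukey_hh)

l_moments = np.array([0.1, 0.25], dtype=np.float64)  # target L-skewness, L-kurtosis
hl, hr = compute_tukey_hh_params(l_moments)          # left / right h parameters

# Map observed heavy-tailed values back toward a standard normal.
x = tf.constant([-2.0, -0.5, 0.0, 1.5], dtype=tf.float64)
u = inverse_tukey_hh(x, hl, hr)
```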
bf15a0134c6b3e379d9901b3901eb79bfb8cefa4 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/xm5.py | 38c66cb97e0801b2ac4684ce92a27b4b9fd0b4e8 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
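    # Appears to expect the quote characters as standalone tokens, e.g.: xm5 " hello world "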
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'xm5':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
a6cb9926ae305eeb61a1736944a2a2d904e853d9 | dd6385ef2b1ab63c7142d0a6c317c57aa1865b24 | /Estimating_the_root.py | 6820d4a098c5d9f08327d3f34e859cf7bdd3cfe2 | [] | no_license | sam505/Roots-of-all-Positive-Numbers | 1d43cbaf8cf2fc0bfd7276f7b0abf449b8f4c0e6 | ff99fa1f63912883ed7d90958f5baf0187c42105 | refs/heads/master | 2022-07-15T21:12:41.658937 | 2020-05-13T10:50:35 | 2020-05-13T10:50:35 | 263,601,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | number = input("Enter the number to get the square root: ")
root = input("Enter the root you want to get: ")
number = int(number)
root = int(root)
estimate = 0
solution = 1
solution_one = 1
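# Scan integers upward until estimate**root and (estimate + 1)**root bracket
# the target (their signs differ).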
while (solution <= 0 and solution_one <= 0) or (solution >= 0 and solution_one >= 0):
estimate += 1
solution = ((estimate**root) - number)
solution_one = (((estimate + 1)**root) - number)
if solution == 0:
break
print("Estimate used is: " + str(estimate))
soln = (estimate**root) - number
soln_one = root*(estimate**(root-1))
square_root_one = estimate
square_root = estimate - (soln/soln_one)
square_root = round(square_root, 10)
square_root_one = round(square_root_one, 10)
while square_root != square_root_one:
square_root_one = square_root
soln = (square_root ** root) - number
soln_one = root*(square_root_one**(root-1))
square_root = square_root - (soln / soln_one)
square_root = round(square_root, 10)
print (square_root)
print("The square root is: " + str(square_root))
| [
"[email protected]"
] | |
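The two loops above are an integer bracketing pass followed by Newton's method on f(x) = x^r - n; written out (my notation, not from the file):

```latex
f(x) = x^{r} - n, \qquad f'(x) = r x^{r-1}, \qquad
x_{k+1} = x_k - \frac{f(x_k)}{f'(x_k)} = x_k - \frac{x_k^{r} - n}{r\,x_k^{r-1}}
```

iterated until two successive iterates agree after rounding to 10 decimal places.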
4ebd8d9c83528e4b8f6961352320a0c777136e22 | dc4713228b15ca4b6f262203df7908c617cd3915 | /tests/test_pitch.py | e5eaa786dbbd3476e2c3a7010e5c417186e8bce5 | [] | no_license | Nelvinom/60sec-pitch | 983826d312dcb28f9646f554d4bfb0117d402670 | 29d43258c92d62da32eef13ea9a8d6dc0e7bbb96 | refs/heads/master | 2022-06-11T23:53:14.275694 | 2020-05-08T10:19:06 | 2020-05-08T10:19:06 | 262,290,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | import unittest
from app.models import User, Role, Pitch
class PitchModelTest(unittest.TestCase):
def setUp(self):
self.user_test = User(username = 'Daudi',password = 'potato', email = '[email protected]')
        self.new_pitch = Pitch(id=1,pitch_title='Test',pitch_content='This is a test pitch',category="interview",user = self.user_test,likes=0,dislikes=0)
def tearDown(self):
Pitch.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_pitch.pitch_title,'Test')
self.assertEquals(self.new_pitch.pitch_content,'This is a test pitch')
self.assertEquals(self.new_pitch.category,"interview")
self.assertEquals(self.new_pitch.user,self.user_test)
def test_save_pitch(self):
self.new_pitch.save_pitch()
self.assertTrue(len(Pitch.query.all())>0)
def test_get_pitch_by_id(self):
self.new_pitch.save_pitch()
got_pitch = Pitch.get_pitch(1)
self.assertTrue(got_pitch is not None) | [
"[email protected]"
] | |
2bd40a80b828137202059058e88f7504df2e6470 | 8613ec7f381a6683ae24b54fb2fb2ac24556ad0b | /boot/hard/2017.py | 36601afabce20178c45edae2db36c8014b9864eb | [] | no_license | Forest-Y/AtCoder | 787aa3c7dc4d999a71661465349428ba60eb2f16 | f97209da3743026920fb4a89fc0e4d42b3d5e277 | refs/heads/master | 2023-08-25T13:31:46.062197 | 2021-10-29T12:54:24 | 2021-10-29T12:54:24 | 301,642,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | q = int(input())
l, r = [0] * q, [0] * q
for i in range(q):
l[i], r[i] = map(int, input().split())
mini = min(min(l), min(r))
maxi = max(max(l), max(r))
ans = [0] * (maxi + 1)
prime = [0] * (maxi + 1)
def judge_prime(n):
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True if n != 1 else False
for i in range((mini + 1) // 2, maxi + 1):
prime[i] = judge_prime(i)
for i in range(mini, maxi + 1, 2):
ans[i] = ans[i - 2] + 1 if prime[i] and prime[(i + 1) // 2] else ans[i - 2]
#print(i, ans[i], ans[i - 2])
#print(ans[1:])
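# ans[i] is a running count over odd i of numbers x <= i such that both x and
# (x + 1) // 2 are prime.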
for i in range(q):
#print(ans[r[i]], ans[l[i] - 2], ans[l[i] - 1])
print(ans[r[i]] - ans[max(0, l[i] - 2)])
| [
"[email protected]"
] | |
e0de689cfe67d9012183a5933cadf8c4b9bc0315 | cf8cd28b091fa4378d90ead4a7c620646e50a051 | /2016/14_solution.py | ec277e3e649dea467bc75da1ca614337cb409b49 | [
"MIT"
] | permissive | kng/AoC2019 | 55a1f4fe24a243c9f1e6aa78e5563104faf35fb3 | 0d40053a876580a78e277be9c047c631762d6ea7 | refs/heads/master | 2021-12-19T10:05:22.922939 | 2021-11-26T15:36:39 | 2021-11-26T15:36:39 | 226,349,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | # --- Day 14: One-Time Pad ---
# https://adventofcode.com/2016/day/14
import time
import hashlib
simple = False
verbose = 0
if simple:
data = 'abc'
iterations = 25000
else:
file = open('14_input.txt', 'r')
data = file.read().strip()
iterations = 50000 # somewhere between 25k and 50k should suffice, ymmv :P
# better solution would be to use a generator, but this completed in less than 2min
def main():
start_time = time.time()
print('generating {} hashes with salt: {}'.format(iterations, data))
hashlist = []
part = 1 # set this to 1 or 2 change the puzzle part
for j in range(iterations):
s = data + str(j)
m = hashlib.md5(s.encode()).hexdigest()
if part == 2 and not simple:
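            # Part 2 "key stretching": re-hash the digest 2016 additional times.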
for i in range(2016):
m = hashlib.md5(m.encode()).hexdigest()
hashlist.append(m)
print("time elapsed: {:.2f}".format((time.time() - start_time)))
start_time = time.time()
# print('first {}'.format(hashlist[0])) # part 2: a107ff... part 1: 577571...
print('search for threes')
trip = []
for i in range(len(hashlist)):
h = hashlist[i] # seems to be faster to assign to a variable first
for j in range(len(h) - 2):
if h[j] == h[j + 1] and h[j] == h[j + 2]:
trip.append([i, h[j]])
break
if verbose > 1:
print(trip)
print("time elapsed: {:.2f}".format((time.time() - start_time)))
start_time = time.time()
print('search for fives')
key = []
while trip:
idx, h = trip.pop(0) # yeah, collections.deque is better, but not critical here
hs = str(h * 5)
        if any(hs in w for w in hashlist[idx + 1:idx + 1001]):  # the next 1000 hashes, inclusive
key.append(idx)
if verbose > 1:
print('index found {} in hash {}'.format(idx, hs))
print('keys found: {}'.format(len(key)))
if len(key) > 63:
print('part {}, key at pos 64: {}'.format(part, key[63]))
print("time elapsed: {:.2f}".format((time.time() - start_time)))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c1bcd65d34b7a3e59e2d47a48b25316f3ee6c058 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv1/mmdet/models/mask_heads/fcn_mask_head.py | 26cb3c0ff0c362870863dc2fddb5f9a2379cb87e | [
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,012 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from mmdet.core import auto_fp16, force_fp32, mask_target
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule
@HEADS.register_module
class FCNMaskHead(nn.Module):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
upsample_method='deconv',
upsample_ratio=2,
num_classes=81,
class_agnostic=False,
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
super(FCNMaskHead, self).__init__()
if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
raise ValueError(
'Invalid upsample method {}, accepted methods '
'are "deconv", "nearest", "bilinear"'.format(upsample_method))
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = upsample_method
self.upsample_ratio = upsample_ratio
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
self.upsample = nn.ConvTranspose2d(
upsample_in_channels,
self.conv_out_channels,
self.upsample_ratio,
stride=self.upsample_ratio)
else:
self.upsample = nn.Upsample(
scale_factor=self.upsample_ratio, mode=self.upsample_method)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_target(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
loss = dict()
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (tuple): original image size, (h, w, c)
            scale_factor (float or ndarray): scale factor used at test time
            rescale (bool): if True, paste masks on the original-size image;
                otherwise paste them on the rescaled test image
Returns:
list[list]: encoded masks
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid().cpu().numpy()
assert isinstance(mask_pred, np.ndarray)
# when enabling mixed precision training, mask_pred may be float16
# numpy array
mask_pred = mask_pred.astype(np.float32)
cls_segms = [[] for _ in range(self.num_classes - 1)]
bboxes = det_bboxes.cpu().numpy()[:, :4]
labels = det_labels.cpu().numpy() + 1
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
for i in range(bboxes.shape[0]):
if not isinstance(scale_factor, (float, np.ndarray)):
scale_factor = scale_factor.cpu().numpy()
bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
label = labels[i]
w = max(bbox[2] - bbox[0] + 1, 1)
h = max(bbox[3] - bbox[1] + 1, 1)
if not self.class_agnostic:
mask_pred_ = mask_pred[i, label, :, :]
else:
mask_pred_ = mask_pred[i, 0, :, :]
bbox_mask = mmcv.imresize(mask_pred_, (w, h))
bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
np.uint8)
if rcnn_test_cfg.get('crop_mask', False):
im_mask = bbox_mask
else:
im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
if rcnn_test_cfg.get('rle_mask_encode', True):
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F'))[0]
cls_segms[label - 1].append(rle)
else:
cls_segms[label - 1].append(im_mask)
return cls_segms
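# --- Hedged usage sketch (not part of the original file; assumes the module is
# imported from inside its mmdet package so the relative imports resolve) ---
# The stride-2 deconv turns 14x14 RoI features into 28x28 mask logits, one
# channel per class:
#   head = FCNMaskHead()          # defaults: 4 convs, 256 channels, 81 classes
#   head.init_weights()
#   mask_pred = head(torch.randn(8, 256, 14, 14))
#   assert mask_pred.shape == (8, 81, 28, 28)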
| [
"[email protected]"
] | |
e810fdb9f3f5a5b3c10a3d2249126c55c78d8c89 | e4633dacc48d682e80674fec3c7830a2ba148d94 | /Finance data capstone project 2.py | 684325846c66541e7ac0516994fe6731aafaafd1 | [] | no_license | lakshaygola/Popular-ML-Algorithms | c1b32b9d7db7a2818aa6f7eb95a54c993c1fe2c5 | 847140bb720146880ea9061f141c656b71fb8cc5 | refs/heads/master | 2022-12-21T08:59:24.681474 | 2020-09-29T17:59:50 | 2020-09-29T17:59:50 | 299,692,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | #Finance Data Capstone Project 2
#In this project we download online stock data for the major banks, covering the financial-crisis period
from pandas_datareader import data , wb
import pandas as pd
import numpy as np
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
import plotly
import cufflinks as cf
cf.go_offline()
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from plotly.graph_objs import *
init_notebook_mode()
start = datetime.datetime(2006,1,1)
end = datetime.datetime(2016,1,1)
#Bank of america
BAC = data.DataReader('BAC', 'yahoo', start = start, end = end)
#CitiGroup
C = data.DataReader('C', 'yahoo', start = start, end = end)
#Goldman Sachs
GS = data.DataReader('GS', 'yahoo', start = start, end = end)
#JPMorgan Chase
JPM = data.DataReader('JPM', 'yahoo', start = start, end = end)
#Morgan Stanley
MS = data.DataReader('MS', 'yahoo', start = start, end = end)
#Wells Fargo
WFC = data.DataReader('WFC' , 'yahoo', start , end)
#list of ticker symbols
tickers = ['BAC' , 'C' , 'GS' , 'JPM' , 'MS' , 'WFC']
#concatenating all the dataframes
bank_stocks = pd.concat([BAC , C , GS , JPM , MS , WFC] , axis = 1 , keys = tickers)
#setting the columns names
bank_stocks.columns.names = ['Banks Tickers' , 'Stock Info']
bank_stocks.head()
#max close price for each bank, grouped by ticker
for tick in tickers:
    print(tick, bank_stocks[tick]['Close'].max())
#or
bank_stocks.xs(key = 'Close' , axis = 1 , level = 'Stock Info').max()
#Making new dataframe called return
returns = pd.DataFrame()
#calculating the daily percentage change of each bank's closing price
for tick in tickers:
returns[tick + ' Return'] = bank_stocks[tick]['Close'].pct_change()
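# Illustrative note (not part of the original notebook): pct_change computes
# (p_t - p_{t-1}) / p_{t-1}, so the first row of `returns` is NaN -- which is
# why the pairplot below is drawn on returns[1:]. For example:
#   pd.Series([100.0, 102.0, 99.0]).pct_change()  # -> [NaN, 0.02, -0.0294...]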
#pair plot
sns.pairplot(data = returns[1:])
plt.tight_layout()
#best and worst dates for the particular banks in the return dataframe
returns.idxmin()
returns.idxmax()
#standard deviation of the return data frame
returns.std()
#standard deviation of the return data in 2015
returns.loc['2015-01-01':'2015-12-31'].std()
#Distplot of the 2015 returns for Morgan Stanley
sns.distplot(returns.loc['2015-01-01':'2015-12-31']['MS Return'], bins=30, color='green')
sns.set_style('whitegrid')
#2008 citigroup distplot
sns.distplot(returns.loc['2008-01-01':'2008-12-31']['C Return'] , bins = 50 , color = 'Red')
#line plot for each bank
#Using for loop
for tick in tickers:
bank_stocks[tick]['Close'].plot(label = tick)
plt.legend()
#line plot for each bank
#Using .xs method
bank_stocks.xs(key ="Close" , level = 'Stock Info' , axis = 1).plot()
#Using plotly
bank_stocks.xs(key = 'Close' , level = 'Stock Info' , axis = 1).iplot()
#Ploting the rolling average of BAC for the year 2008
bank_stocks['BAC']['Close'].loc['2008-01-01':'2009-01-01'].rolling(window = 30).mean().plot()
bank_stocks['BAC']['Close'].loc['2008-01-01':'2009-01-01'].plot()
#Heat map of the close columns
close_corr = bank_stocks.xs(key = 'Close' , axis = 1 , level = 'Stock Info').corr()
sns.heatmap(close_corr,annot = True)
#Cluster map
sns.clustermap(close_corr , annot = True)
#Heat map using iplot
close_corr.iplot(kind = 'heatmap')
#Candle plot of bank of america from 2015 to 2016
bank_stocks['BAC'][['Open','High','Low','Close']].loc['2015-01-01':'2016-01-01'].iplot(kind = 'candle')
#Simple moving averages plot of the morgan stanley for the year 2015
bank_stocks['MS'].loc['2015-01-01':'2015-12-31'].ta_plot(study = 'sma')
#Bollinger band plot for the Bank of america for the year 2015
bank_stocks['BAC'].loc['2015-01-01':'2016-01-01'].ta_plot(study='boll')
| [
"[email protected]"
] | |
92aa7a25070d981b4443680ae1a1621f0f40d582 | ce4d1c3a1522f382d9b3f73b7f126e7a3616bfb5 | /projects/DensePose/densepose/data/datasets/coco.py | ddd03c25b6956e8afa7d78ac0a259d255fb51541 | [
"Apache-2.0"
] | permissive | davidnvq/detectron2 | 6c01512326687e86ab50c0f89af4e926c0007ae6 | eaca19840e5db014c3dd37dee9920d780b3b6165 | refs/heads/master | 2022-04-26T03:29:08.080258 | 2020-04-24T09:05:07 | 2020-04-24T09:05:07 | 258,421,912 | 1 | 0 | Apache-2.0 | 2020-04-24T06:08:26 | 2020-04-24T06:08:25 | null | UTF-8 | Python | false | false | 4,143 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_coco_json
DENSEPOSE_KEYS = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V", "dp_masks"]
DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
@dataclass
class CocoDatasetInfo:
name: str
images_root: str
annotations_fpath: str
DATASETS = [
CocoDatasetInfo(
name="densepose_coco_2014_train",
images_root="coco/train2014",
annotations_fpath="coco/annotations/densepose_train2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014_100.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
),
CocoDatasetInfo(
name="densepose_chimps",
images_root="densepose_evolution/densepose_chimps",
annotations_fpath="densepose_evolution/annotations/densepose_chimps_densepose.json",
),
]
def _is_relative_local_path(path: os.PathLike):
path_str = os.fsdecode(path)
return ("://" not in path_str) and not os.path.isabs(path)
def _maybe_prepend_base_path(base_path: Optional[os.PathLike], path: os.PathLike):
"""
Prepends the provided path with a base path prefix if:
1) base path is not None;
2) path is a local path
"""
if base_path is None:
return path
if _is_relative_local_path(path):
return os.path.join(base_path, path)
return path
def get_metadata(base_path: Optional[os.PathLike]) -> Dict[str, Any]:
"""
Returns metadata associated with COCO DensePose datasets
Args:
base_path: Optional[os.PathLike]
Base path used to load metadata from
Returns:
Dict[str, Any]
Metadata in the form of a dictionary
"""
meta = {
"densepose_transform_src": _maybe_prepend_base_path(
base_path, "UV_symmetry_transforms.mat"
),
"densepose_smpl_subdiv": _maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"),
"densepose_smpl_subdiv_transform": _maybe_prepend_base_path(
base_path, "SMPL_SUBDIV_TRANSFORM.mat"
),
}
return meta
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[os.PathLike] = None):
"""
Registers provided COCO DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[os.PathLike]
Datasets root folder (default: None)
"""
annotations_fpath = _maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = _maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_coco_json(
json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
extra_annotation_keys=DENSEPOSE_KEYS,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[os.PathLike] = None
):
"""
Registers provided COCO DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
            An iterable of dataset descriptors
datasets_root: Optional[os.PathLike]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
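# --- Hedged usage sketch (not part of the original module; the datasets root
# is an assumption) ---
#   register_datasets(DATASETS, datasets_root="datasets")
#   dicts = DatasetCatalog.get("densepose_coco_2014_minival_100")
#   meta = MetadataCatalog.get("densepose_coco_2014_minival_100")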
| [
"[email protected]"
] | |
df05652165c48f1e162013aea44f587cd1a93715 | 98a813b1c76d5da0509b97ebbbd49a6c0d920f67 | /Day5/best_time_to_buy_and_sell_stock_ii.py | 779611855d8b8a32988cdeb65b2cce7b482bfa61 | [] | no_license | routdh2/30DayLeetCodingChallenge | 06d94f03b2b241722cebf39ef7ec74477f64d5c3 | 5b2f8cc34a6c7bdd999c4cb22acbc3e21cb53caa | refs/heads/master | 2021-05-21T06:37:17.714656 | 2020-04-21T08:53:54 | 2020-04-21T08:53:54 | 252,587,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | #Problem Statement: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        # Greedy: with unlimited transactions, the maximum profit is the sum
        # of every positive day-to-day price difference (buy before each
        # rise, sell right after it).
        i = 0
        total_profit = 0
        while i < len(prices) - 1:
            profit = prices[i + 1] - prices[i]
            if profit > 0:
                total_profit += profit
            i += 1
        return total_profit
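# Hedged quick check (not part of the original solution file):
#   Solution().maxProfit([7, 1, 5, 3, 6, 4])  # -> 7 (buy 1/sell 5, buy 3/sell 6)
#   Solution().maxProfit([7, 6, 4, 3, 1])     # -> 0 (prices only fall)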
| [
"[email protected]"
] | |
a3dbfd07afbe0391b734bd981eafc9d8ac28b53b | 64fe4fcaeb71b5e4d448abed92e03bebd838b1a2 | /Models/Form_Factors.py | b5559332237915e69187a8b4ae18f8282d5fa4c1 | [] | no_license | Caster89/Scattering_Analysis | ea2ddbd9311f0beebae6f083a37d843f44d3d00b | 2bd14efb2d1bb6c1af5173a8ed98b668dbfc4673 | refs/heads/master | 2021-01-19T16:04:41.322316 | 2017-12-17T16:22:46 | 2017-12-17T16:22:46 | 88,247,591 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,797 | py | from config import _AvlbUnits, _UnitsSymbols,_UnitsConv, _AvlbSASFit, _AvlbSASFitDic, _AvlbSASFitDicInv, _lmfitModels, _lmfitModelFunctions, _lmfitDistFunctions
import numpy as np
import scipy as sp
from scipy import signal
from scipy import interpolate
from scipy.integrate import dblquad, tplquad
import logging
import lmfit
from Distributions import *
from lmfit import minimize, Parameter, report_fit
try:
import gmpy2
from gmpy2 import mpz,mpq,mpfr,mpc
except ImportError:
gmpy2 = None
from decimal import Decimal
print 'The Schultz fitting function is optimized by using the GMPY2 module to\
deal with the large numbers required. The module was not found, so Decimal will\
be used instead, but the calculations will be slower.'
"""
The distributions presented here can be found in 'Polydispersity analysis of scattering
data from self-assembled systems', Phys. Rev. A, 45, 2428-2438.
DOI: 10.1103/PhysRevA.45.2428
"""
def single_gauss_spheres(q,R_av = 1,sigma = 1,I0 = 1,bckg=0):
"""sing_gauss_spheres: calculates the scattering pattern of an assembly of
spheres which have a Gaussian number density size distribution.
Args
q (numpy.array): the array containg the list of q-values for which to
calculate the scattering
R_av (int): the mean of the size distribution. Defaults to 1
sigma (int): the dispersion of the distribution. Defaults to 1
I0 (int): the prefactor which includes information on the scattering
length density (SLD) and the concentration of particles. Defaults
to 1
bckg (int): the background value to use in case the background is
not perfectly subtracted. Defaults to 0.
Returns
the scattering curve which has the same size as q
"""
P_q=(4.*np.pi/q**3.)**2.*((1.+q**2.*(R_av**2.+sigma**2.))/2.+\
(((-1.+q**2*R_av**2)/2.-3./2.*q**2.*sigma**2.-2.*q**4.*sigma**4.)*\
np.cos(2.*q*R_av)-q*R_av*(1.+2.*q**2.*sigma**2.)*\
np.sin(2.*q*R_av))*np.exp(-2.*q**2.*sigma**2))
return np.array(10**(I0)*P_q+bckg)
def double_gauss_spheres(q,R1_av = 1,sigma1 = 1, R2_av = 1, sigma2 = 1, I0 = 1,ratio=0.5, bckg = 0):
"""double_gauss_spheres: calculates the scattering pattern of an assembly of
spheres which have a bimodal Gaussian size distribution.
Args
q (numpy.array): the array containg the list of q-values for which to
calculate the scattering
R_av1 (int): the mean of the size distribution of the first
peak. Defaults to 1
sigma1 (int): the dispersion of the first peak. Defaults to 1
R_av2 (int): the mean of the size distribution of the second
peak. Defaults to 1
sigma2 (int): the dispersion of the second peak. Defaults to 1
I0 (int): the prefactor which includes information on the scattering
length density (SLD) and the concentration of particles. Defaults
to 1
ratio (int): the ratio between the first and the second peak. Defaults
to 0.5
bckg (int): the background value to use in case the background is
not perfectly subtracted. Defaults to 0.
Returns
the scattering curve which has the same size as q
"""
return np.array(ratio*single_gauss_spheres(q,R1_av, sigma1,I0,0)+(1-ratio)*single_gauss_spheres(q,R2_av, sigma2,I0,0)+bckg)
def single_schultz_spheres(q, R_av = 1, Z = 50, I0 = 1, bckg = 0 ):
"""sing_schultz_spheres: calculates the scattering pattern of an assembly of
spheres which have a Schultz-Zimm size distribution. Devimal is used to
ensure that the for vey monodisperse distributions (Z>171) the values are not
rounded off to inf. The integrated function is taken from 'Analysis of small
angle neutron scattering spectra from pplydisperse interacting colloids',
DOI: 10.1063/1.446055
sigma = R_av/(Z+1)^0.5
?The Z parameter is defined as z = 1 / sigma^2.?
The definition was taken from:
ttp://sasfit.ingobressler.net/manual/Schultz-Zimm
"""
if gmpy2 is None:
aD = np.array([Decimal((Z+1.)/(qq*R_av)) for qq in q])
"""
numpy trigonometric functions do not support Decimal, therefore the
numpy array is created on the spot using float numbers and transforming
them to Decimal after the calculation
"""
a = (Z+1.)/(q*R_av)
p1 = Decimal(8. * np.pi**2 * R_av**6 * (Z+1)**(-6)) * aD**Decimal(Z+7.)
G11 = aD**Decimal(-(Z+1.)) - (Decimal(4.)+aD**2)**(Decimal(-(Z+1.)/2.)) *\
np.array([Decimal(np.cos((Z+1) * np.arctan(2./aa))) for aa in a])
G12 = Decimal((Z+2.)*(Z+1.)) * (aD**Decimal(-(Z+3.)) + (Decimal(4.) + aD**Decimal(2.))**Decimal(-(Z+3.)/2.) *\
np.array([Decimal(np.cos((Z+3)*np.arctan(2./aa))) for aa in a]))
G13 = Decimal(2.*(Z+1.)) * (Decimal(4.) + aD**2.)**Decimal(-(Z+2.)/2) *\
np.array([Decimal(np.sin((Z+2.)*np.arctan(2./aa))) for aa in a])
G1 = G11+G12-G13
returnVal = Decimal(10**I0)*p1*G1+Decimal(bckg)
else:
a = np.array([mpfr((Z+1.)/(qq*R_av)) for qq in q])
a2 = a**2
a2_1 = (mpfr(4.)+a2)
R_av = mpfr(R_av)
Z = mpfr(Z)
I0 = mpfr(I0)
bckg = mpfr(bckg)
"""
        numpy trigonometric functions do not support mpfr, therefore the
        arrays are built on the spot, applying gmpy2's own cos/sin/atan
        element-wise
"""
p1 = 8. * np.pi**2 * R_av**6 * (Z+1)**(-6) * a**(Z+7.)
#G11 = a**-(Z+1.) - (4.+a**2)**(-(Z+1.)/2.) *\
#np.array([gmpy2.cos((Z+1) * gmpy2.atan(2./aa)) for aa in a])
G11 = a**-(Z+1.) - a2_1**(-(Z+1.)/2.) *\
np.array([gmpy2.cos((Z+1) * gmpy2.atan(2./aa)) for aa in a])
G12 = (Z+2.)*(Z+1.) * (a**-(Z+3.) + a2_1**(-(Z+3.)/2.) *\
np.array([gmpy2.cos((Z+3)*gmpy2.atan(2./aa)) for aa in a]))
G13 = 2.*(Z+1.) * a2_1**(-(Z+2.)/2) *\
np.array([gmpy2.sin((Z+2.)*gmpy2.atan(2./aa)) for aa in a])
G1 = G11+G12-G13
returnVal = 10**I0*p1*G1+bckg
returnVal = np.array(returnVal.astype(np.float64))
#print 'Single_schultz calculated with:\nR_av:{} Z:{} I0:{}'.format(R_av, Z, I0)
#print 'length is:{}, of which nan: {}'.format(len(returnVal), np.sum(np.isnan(returnVal)))
return returnVal
def single_schultz_spheres_old(q,R_av = 1,Z = 1, I0 = 1, bckg = 0):
"""sing_schultz_spheres: calculates the scattering pattern of an assembly of
spheres which have a Flory schultz size distribution. the Z parameter is
defined as z = 1 / sigma^2. THe definistion was taken forom:
ttp://sasfit.ingobressler.net/manual/Schultz-Zimm
Args
q (numpy.array): the array containg the list of q-values for which to
calculate the scattering
R_av (int): the mean of the size distribution. Defaults to 1
Z (int): the dispersion of the distribution. For a Flory-Schultz
distribution the Z parameter is defined as Z = 1/sigma^2.
Defaults to 1
I0 (int): the prefactor which includes information on the scattering
length density (SLD) and the concentration of particles. Defaults
to 1
bckg (int): the background value to use in case the background is
not perfectly subtracted. Defaults to 0.
Returns
the scattering curve which has the same size as q
"""
a = (Z+1.)/(q*R_av)
P_q = 8.*np.pi**2*R_av**6*(Z-1.)**(-6.)*a**(Z+7.)*(a**(-(Z+1.))- \
(4.+a**2)**(-(Z+1.)/2)*np.cos((Z+1.)*np.arctan(2/a)) + \
(Z+2.)*(Z+1.)*(a**(-Z-3.)+(4+a**2)**((-Z-3.)/2.)*np.cos((Z+3.)*np.arctan(2./a))) - \
2.*(Z+1.)*(4.+a**2.)**(-(Z+2.)/2.)*np.sin((Z+2.)*np.arctan(2./a)))
return np.nan_to_num(10**I0*P_q+bckg)
def double_schultz_spheres(q, R1_av = 1, Z1 = 1, R2_av = 1,Z2 = 1, I0 = 1, ratio = 0.5, bckg = 0):
"""double_schultz_spheres: calculates the scattering pattern of an assembly of
spheres which have a bimodal Flory Schultz distribution.
Args
q (numpy.array): the array containg the list of q-values for which to
calculate the scattering
R_av1 (int): the mean of the size distribution of the first
peak. Defaults to 1
Z1 (int): the dispersion of the first distribution. For a Flory-Schultz
distribution the Z parameter is defined as Z = 1/sigma^2.
Defaults to 1
R_av2 (int): the mean of the size distribution of the second
peak. Defaults to 1
Z2 (int): the dispersion of the second distribution. For a Flory-Schultz
distribution the Z parameter is defined as Z = 1/sigma^2.
Defaults to 1
I0 (int): the pre-factor which includes information on the scattering
length density (SLD) and the concentration of particles. Defaults
to 1
ratio (int): the ratio between the first and the second peak. Defaults
to 0.5
bckg (int): the background value to use in case the background is
not perfectly subtracted. Defaults to 0.
Returns
the scattering curve which has the same size as q
"""
return np.nan_to_num(ratio*single_schultz_spheres(q,R1_av,Z1,I0,0)+(1-ratio)*single_schultz_spheres(q,R2_av,Z2,I0,0)+bckg)
def monodisperse_cube(q, L=1, I0=1, bckg = 0):
"""
http://www.sasview.org/sasview/user/models/model_functions.html#rectangularprismmodel
    :param q: the wavevector, can be either a number or a numpy array
:param L: The side of the cube
:param I0: The prefactor in front of the form factor
:param bckg: The constant background to sum
:return: The complete, integrated form factor for a cube
"""
    def FF(theta, phi):
        # form-factor amplitude of a cube: the product of three sinc terms
        # (note that np.sinc(x) = sin(pi*x)/(pi*x))
        A = q*L/2.*np.cos(theta)
        B = q*L/2.*np.sin(theta)*np.sin(phi)
        C = q*L/2.*np.sin(theta)*np.cos(phi)
        return np.sinc(A)*np.sinc(B)*np.sinc(C)
return 10**I0*dblquad(FF, 0, np.pi/2., lambda x: 0, lambda x: np.pi/2.0)[0]+bckg
def single_gaussian_cube(q, L_av=1, sigma=1, I0=1, bckg = 0):
    # scipy's tplquad calls the integrand as func(z, y, x) with the innermost
    # variable first, so the edge length L (weighted by the Gaussian size
    # distribution) comes first here.
    def FF(L, phi, theta):
        A = q*L/2.*np.cos(theta)
        B = q*L/2.*np.sin(theta)*np.sin(phi)
        C = q*L/2.*np.sin(theta)*np.cos(phi)
        return single_gauss_distribution(L, L_av, sigma, 1)*np.sinc(A)*np.sinc(B)*np.sinc(C)
    # integrate L over +/- 4 relative sigma around L_av
    l_min = max(0, L_av - 4*(L_av*sigma))
    l_max = L_av + 4*(L_av*sigma)
    return 10**I0*tplquad(FF, 0, np.pi/2., lambda x: 0, lambda x: np.pi/2.,
                          lambda x, y: l_min, lambda x, y: l_max)[0] + bckg
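# --- Hedged usage sketch (not part of the original module; parameter values
# below are illustrative assumptions) ---
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    q = np.logspace(-2, 0, 300)  # a typical SAS q-range
    I_q = single_schultz_spheres(q, R_av=50., Z=25, I0=0., bckg=1e-4)
    plt.loglog(q, I_q)
    plt.xlabel('q')
    plt.ylabel('I(q)')
    plt.show()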
| [
"="
] | = |
013ad4f8eb3ba02e9770aed25cb228d75475289b | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/030d0c5ebc377ba768e6bdbbc82d64a6cfcbb7d4__main__.py | 030d0c5ebc377ba768e6bdbbc82d64a6cfcbb7d4 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 580 | py | import gi
from ghue.controller import Controller
from ghue.device.hue import HueDeviceManager
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib
import phue
from .application import GHueApplication
if __name__ == '__main__':
GLib.set_application_name("Philips Hue")
controller = Controller()
hue_device_manager = HueDeviceManager(bridge=phue.Bridge('philips-hue.local'),
controller=controller)
controller.add_device_manager(hue_device_manager)
app = GHueApplication(controller)
app.run(None)
| [
"[email protected]"
] | |
2765c72d48178ec65db0c1e6936c7650767cba82 | d8a23825c56920d5125c5bc2ca89fecb7ac61c4a | /integral_vector_w.py | ba9c92cdf63bf8c1eed14fdc333187d90b61d701 | [] | no_license | eranbTAU/DronDetection_ML_ALGO | 45e8f24bcf3cdc531ddf273d52b4829ab4596f00 | 22e27cb92c4b8767bde144936e5d86fa6ba89ae3 | refs/heads/master | 2022-03-29T09:59:05.458262 | 2019-12-04T11:24:09 | 2019-12-04T11:24:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | #ERAN BAMANI
#17.12.16
#integral_vector_w function
#===============================================
import numpy as np
import pandas as pd
import random
import cv2
import matplotlib.pyplot as plt
from GDecent import *
# --------------------------
def integralImage_sum(ii,x,y,new_width,new_length):
A = ii[x, y]
B = ii[x + new_width, y]
C = ii[x, y + new_length]
D = ii[x + new_width, y + new_length]
sum = D + A - B - C
return sum
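# Hedged illustration (not part of the original file): the four-corner rule
# above recovers any rectangle sum in O(1) from a cumulative-sum table, e.g.
#   img = np.arange(16).reshape(4, 4)
#   ii = np.pad(img, ((1, 0), (1, 0)), 'constant').cumsum(0).cumsum(1)
#   img[1:3, 1:3].sum() == ii[3, 3] + ii[1, 1] - ii[1, 3] - ii[3, 1]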
def w_vector(x_train, x_test, y_train, y_test):
    # candidate values for the regularization constant
    c_vec = 2**(np.linspace(-5, 2, 15))
    EP = 5  # number of SGD repetitions averaged per candidate
    err_vec = np.zeros(len(c_vec))
    # min-max normalize each feature column to [0, 1]
    min_train = x_train.min(axis=0)
    max_train = x_train.max(axis=0)
    train_norm = (x_train - min_train) / (max_train - min_train)
    min_test = x_test.min(axis=0)
    max_test = x_test.max(axis=0)
    test_norm = (x_test - min_test) / (max_test - min_test)
    # cross-validate the constant: average the SGD error over EP runs
    for q in range(len(c_vec)):
        err_avg = 0
        for ii in range(EP):
            w, b, e = SGD(train_norm, y_train, c_vec[q])
            err_avg += e
        err_vec[q] = err_avg / EP
    # retrain with the constant that gave the smallest averaged error
    min_c = c_vec[np.argmin(err_vec)]
    w, b, e = SGD(train_norm, y_train, min_c)
    return w, min_c
| [
"[email protected]"
] | |
0d155686d2b7d638897fc2d02dc556dd3da8babb | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /other_time/last_part_and_thing/problem_or_world.py | e42537b5fd907d85a61cab4911bd521a6bc81f4a | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#! /usr/bin/env python
def feel_eye(str_arg):
early_life(str_arg)
print('find_next_part_about_small_person')
def early_life(str_arg):
print(str_arg)
if __name__ == '__main__':
feel_eye('take_company_at_little_case')
| [
"[email protected]"
] | |
5e8e9e4cc04b87577c04e4b09ce745dd68a85d04 | 706fcc0630a2a1befa32e8d0e9e0a61978dcc947 | /config.py | 7fcc7def7a3c78d71daf7c805bc812e5aabcc542 | [] | no_license | paulgowdy/hal_split | a8f731a5a6e77f605d45de345d1c48bbc774738d | f618a6b1a132e192f4778c237a92c86f24540ca0 | refs/heads/master | 2022-11-17T00:51:37.343265 | 2020-07-07T22:25:49 | 2020-07-07T22:25:49 | 277,934,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | BOARD_SIZE = 9
MAX_NB_SHIPS = 2
NB_SHIP_ACTIONS = 5
#TRAIN_EPISODES = 10
STEPS_PER_EP = 200
GAMMA = 0.99
PPO_BATCHES = 10000000
PPO_STEPS = 32
LOSS_CLIPPING = 0.2
ENTROPY_LOSS = 5e-2
| [
"[email protected]"
] | |
36c8acfb797eb95d0cff11050a40dc75a0bd3fa0 | 8211c2cd810fe785333ace3f2823d92560c61be4 | /src/data_reader_onecommon.py | 0c9f0c8be023cd733d8c0bb89ca465509a1bf228 | [] | no_license | Alab-NII/lcfp | 4432cf51f824ad5926399e41ad2223cc0ddc3e8d | c0455805dc37dabd9f7c2a4b8cdd7e61f301e8b6 | refs/heads/master | 2022-04-13T19:12:51.668465 | 2020-04-11T15:23:14 | 2020-04-11T15:23:14 | 226,287,328 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | # coding: utf-8
import json
import numpy as np
import os
try:
import data_reader_base
except ModuleNotFoundError as e:
import sys
sys.path += ['src']
from data_reader_base import DatasetReaderBase, VisualSelectionTaskInstance
class OneCommonDataReader(DatasetReaderBase):
task_name = 'onecommon.selection'
token_eos = '<eos>'
token_selection = '<selection>'
token_you = '<you>'
token_them = '<them>'
control_tokens = {
'main':[task_name, token_eos, token_selection, token_you, token_them],
'sub':[],
}
full_canvas_size = 224
min_size=0.025
max_size=0.05
min_col=0.2
max_col=0.8
min_pos = full_canvas_size*max_size*0.5
max_pos = full_canvas_size*(1 - max_size*0.5)
@classmethod
def instance_to_text(cls, inst):
"""Returns a space-splitted text given an instance."""
text = inst['dialogue']
text = text.replace('YOU:', cls.token_you)
text = text.replace('THEM:', cls.token_them)
text = text.lower()
return text
@classmethod
def count_tokens(cls, dataset_spec, textifier, n_tokens):
"""Return a dict whose key and value are token and its frequency."""
with open(dataset_spec['path'], 'r') as f:
dataset = json.load(f)
target_n_tokens = n_tokens['main']
for inst in dataset:
tokens = textifier(cls.task_name, inst, to_ids=False)
for token in tokens:
target_n_tokens[token] = target_n_tokens.get(token, 0) + 1
return n_tokens
@classmethod
def compile_dataset(cls, dataset_spec, textifier):
"""Returns a list of VisualSelectionTaskInstance."""
provide_image = dataset_spec.get('provide_image', True)
asarray = lambda x, t: np.asarray(x, dtype=t)
get_min_max = lambda obj: (obj['x_min'], obj['y_min'], obj['x_max'], obj['y_max'])
def get_attributes(obj):
x_center = 2*(0.5*(obj['x_min'] + obj['x_max']) - cls.min_pos)/(cls.max_pos - cls.min_pos) - 1
y_center = 2*(0.5*(obj['y_min'] + obj['y_max']) - cls.min_pos)/(cls.max_pos - cls.min_pos) - 1
size = 2*(obj['size'][0] / (cls.full_canvas_size*(1 - cls.max_size)) - cls.min_size)/(cls.max_size - cls.min_size) - 1
color = 2*(obj['color']/255 - cls.min_col) / (cls.max_col - cls.min_col) - 1
return [x_center, y_center, size, color]
with open(dataset_spec['path'], 'r') as f:
dataset = json.load(f)
instances = []
for inst in dataset:
_id = os.path.splitext(os.path.basename(inst['image_path']))[0]
tokens = asarray(textifier(cls.task_name, inst, to_ids=True), np.int)
object_bboxes = asarray([get_min_max(o) for o in inst['objects']], np.float32)
if provide_image:
object_optional_info = None
else:
object_optional_info = asarray([get_attributes(o) for o in inst['objects']], np.float32)
instances.append(VisualSelectionTaskInstance(
task_name=cls.task_name,
instance_id=_id,
image_path=inst['image_path'],
tokens=tokens,
n_tokens=asarray(tokens.shape[0], np.int),
object_bboxes=object_bboxes,
object_optional_info=object_optional_info,
n_objects=asarray(object_bboxes.shape[0], np.int),
ground_truth_id=asarray(inst['selected_id'], np.int),
))
return instances
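# --- Hedged usage sketch (not part of the original module; the path and the
# textifier object are assumptions) ---
#   spec = {'path': 'data/onecommon_selection.json', 'provide_image': False}
#   instances = OneCommonDataReader.compile_dataset(spec, textifier)
#   # each instance carries the dialogue tokens, the object bboxes/attributes
#   # and the ground-truth selection id for one game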
| [
"[email protected]"
] | |
d19900843a543c71b3c6b42cd0625657db46def1 | 1714707d842bc3e465cf95449a3440a64b4ceb98 | /MSG-MIR/models/stn/local_stn.py | 67a5b2613d0d2b5a82692bc1e9094a5951cb609a | [] | no_license | BugIITheGreat/MSG-MIR | 178ff1b52ea86ebb978d4e222a0ee6d27089aea2 | c425d129ae8eb96cf81f5bd4473e72434461e613 | refs/heads/main | 2023-07-27T20:06:39.493150 | 2021-09-07T02:33:11 | 2021-09-07T02:33:11 | 403,804,562 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,011 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import DownBlock, Conv, ResnetTransformer, get_activation, TransConv
from .stn_losses import smoothness_loss, deformation_equality_loss
sampling_align_corners = False
sampling_mode = 'bilinear'
# The number of filters in each block of the encoding part (down-sampling).
ndf = {'A': [32, 32, 64, 64, 128, 128, 256], }
# The number of filters in each block of the decoding part (up-sampling).
# If len(ndf[cfg]) > len(nuf[cfg]) - then the deformation field is up-sampled to match the input size.
nuf = {'A': [256, 128, 128, 64, 64, 32, 32], }
# Indicate if res-blocks are used in the down-sampling path.
use_down_resblocks = {'A': True, }
# indicate the number of res-blocks applied on the encoded features.
resnet_nblocks = {'A': 5, }
# indicate the time for output the intact affine parameters.
convs_for_intact = {'A': 7, }
# control the contribution of intact feature and local feature.
para_for_local = {'A': 0.9, }
# Indicate if the a final refinement layer is applied on the before deriving the deformation field
refine_output = {'A': True, }
# The activation used in the down-sampling path.
down_activation = {'A': 'leaky_relu', }
# The activation used in the up-sampling path.
up_activation = {'A': 'leaky_relu', }
affine_dimentions = {'A': 6, }
class LocalNet(nn.Module):
def __init__(self, nc_a, nc_b, cfg, height, width, init_func, init_to_identity):
super(LocalNet, self).__init__()
act = down_activation[cfg]
# ------------ Down-sampling path
self.ndown_blocks = len(ndf[cfg])
self.nup_blocks = len(nuf[cfg])
self.h, self.w = height, width
self.convs_for_intact = convs_for_intact[cfg]
assert self.ndown_blocks >= self.nup_blocks
in_nf = nc_a + nc_b
conv_num = 1
skip_nf = {}
for out_nf in ndf[cfg]:
setattr(self, 'down_{}'.format(conv_num),
DownBlock(in_nf, out_nf, 3, 1, 1, activation=act, init_func=init_func, bias=True,
use_resnet=use_down_resblocks[cfg], use_norm=True))
skip_nf['down_{}'.format(conv_num)] = out_nf
in_nf = out_nf
conv_num += 1
conv_num -= 1
actIntact = get_activation(activation='relu')
self.outputIntact = nn.Sequential(
nn.Linear(ndf[cfg][self.convs_for_intact - 1] *
(self.h // 2 ** (self.convs_for_intact - 1)) *
(self.w // 2 ** (self.convs_for_intact - 1)),
ndf[cfg][self.convs_for_intact - 1], bias=True),
actIntact,
nn.Linear(ndf[cfg][self.convs_for_intact - 1], affine_dimentions[cfg], bias=True))
self.outputIntact[-1].weight.data.normal_(mean=0.0, std=5e-4)
self.outputIntact[-1].bias.data.zero_()
if use_down_resblocks[cfg]:
self.c1 = Conv(in_nf, 2 * in_nf, 1, 1, 0, activation=act, init_func=init_func, bias=True,
use_resnet=False, use_norm=False)
self.t = ((lambda x: x) if resnet_nblocks[cfg] == 0
else ResnetTransformer(2 * in_nf, resnet_nblocks[cfg], init_func))
self.c2 = Conv(2 * in_nf, in_nf, 1, 1, 0, activation=act, init_func=init_func, bias=True,
use_resnet=False, use_norm=False)
# ------------- Up-sampling path
act = up_activation[cfg]
for out_nf in nuf[cfg]:
setattr(self, 'up_{}'.format(conv_num),
Conv(in_nf + skip_nf['down_{}'.format(conv_num)], out_nf, 3, 1, 1, bias=True, activation=act,
init_fun=init_func, use_norm=True, use_resnet=True))
setattr(self, 'output_{}'.format(conv_num),
Conv(out_nf, 2, 3, 1, 1, use_resnet=False, bias=True,
init_func=('zeros' if init_to_identity else init_func), activation=act,
use_norm=False)
)
# ------------- Deformation Field TransposeConv Block
setattr(self, 'field_transconv_{}'.format(conv_num),
TransConv(2, 2, 3, 2, 0, use_resnet=True, bias=True,
init_func=('zeros' if init_to_identity else init_func), activation=act,
use_norm=False)
)
if refine_output[cfg]:
setattr(self, 'refine_{}'.format(conv_num),
nn.Sequential(ResnetTransformer(out_nf, 1, init_func),
Conv(out_nf, out_nf, 1, 1, 0, use_resnet=False, init_func=init_func,
activation=act,
use_norm=False)
)
)
else:
setattr(self, 'refine_{}'.format(conv_num), lambda x: x)
in_nf = out_nf
conv_num -= 1
def forward(self, img_a, img_b):
use_transpose_conv_in_fields = False
para_for_multiscale = 0.9
x = torch.cat([img_a, img_b], 1)
skip_vals = {}
conv_num = 1
# Down
while conv_num <= self.ndown_blocks:
x, skip = getattr(self, 'down_{}'.format(conv_num))(x)
skip_vals['down_{}'.format(conv_num)] = skip
conv_num += 1
tus = skip_vals['down_{}'.format(self.convs_for_intact)]
# print(str(tus.shape) + "tus_shape")
intact_x = tus.view(tus.size(0), -1)
# print(str(intact_x.shape) + "intact_x_shape")
# print(self.outputIntact)
dtheta_for_intact = self.outputIntact(intact_x)
if hasattr(self, 't'):
x = self.c1(x)
x = self.t(x)
x = self.c2(x)
# Up
conv_num -= 1
deform_scale_output = {}
while conv_num > (self.ndown_blocks - self.nup_blocks):
s = skip_vals['down_{}'.format(conv_num)]
x = F.interpolate(x, (s.size(2), s.size(3)), mode='bilinear')
x = torch.cat([x, s], 1)
x = getattr(self, 'up_{}'.format(conv_num))(x)
x = getattr(self, 'refine_{}'.format(conv_num))(x)
deform_scale_output[conv_num] = getattr(self, 'output_{}'.format(conv_num))(x)
            if not use_transpose_conv_in_fields:
                if conv_num == self.nup_blocks:
def_for_local = deform_scale_output[conv_num]
else:
def_for_local = para_for_multiscale * F.interpolate(def_for_local,
(deform_scale_output[conv_num].shape[2],
deform_scale_output[conv_num].shape[3]),
mode='bilinear') \
+ deform_scale_output[conv_num]
else:
                if conv_num == self.nup_blocks:
def_for_local = deform_scale_output[conv_num]
else:
ppr = getattr(self, 'field_transconv_{}'.format(conv_num))(def_for_local)
ppr = F.interpolate(ppr,
(deform_scale_output[conv_num].shape[2],
deform_scale_output[conv_num].shape[3]),
mode='bilinear')
def_for_local = para_for_multiscale * ppr + deform_scale_output[conv_num]
conv_num -= 1
# x = self.outputLocal(x)
return dtheta_for_intact, def_for_local, deform_scale_output
class LocalSTN(nn.Module):
"""This class is generates and applies the deformable transformation on the input images."""
def __init__(self, in_channels_a, in_channels_b, height, width, cfg, init_func, stn_bilateral_alpha,
init_to_identity, multi_resolution_regularization):
super(LocalSTN, self).__init__()
self.oh, self.ow = height, width
self.in_channels_a = in_channels_a
self.in_channels_b = in_channels_b
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.all_offsets = LocalNet(self.in_channels_a, self.in_channels_b, cfg, height, width,
init_func, init_to_identity).to(self.device)
self.identity_grid = self.get_identity_grid()
self.identity_theta = torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float).to(self.device)
        if affine_dimentions[cfg] == 8:
            # homography: flattened 3x3 identity
            self.identity_theta = torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=torch.float).to(self.device)
        if affine_dimentions[cfg] == 6:
            # affine: flattened 2x3 identity
            self.identity_theta = torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float).to(self.device)
        elif affine_dimentions[cfg] == 4:
self.justian_matrix = torch.tensor([1, 0, 1, 0, 1, 1], dtype=torch.float).unsqueeze(0)
# self.transfer_matrix = torch.tensor(self.justian_matrix, dtype=torch.float).to(self.device)
self.alpha = stn_bilateral_alpha
self.multi_resolution_regularization = multi_resolution_regularization
self.para_for_local = para_for_local[cfg]
def get_identity_grid(self):
"""Returns a sampling-grid that represents the identity transformation."""
x = torch.linspace(-1.0, 1.0, self.ow)
y = torch.linspace(-1.0, 1.0, self.oh)
xx, yy = torch.meshgrid([y, x])
xx = xx.unsqueeze(dim=0)
yy = yy.unsqueeze(dim=0)
identity = torch.cat((yy, xx), dim=0).unsqueeze(0)
return identity
def get_homography_grid(self, matrix):
# matrix = torch.cat((matrix, torch.ones([1, 1]).to(matrix.device)), dim=1)
matrix = matrix.view(3, 3)
identity_grid = self.get_identity_grid()
height = identity_grid.shape[2]
width = identity_grid.shape[3]
identity_grid = identity_grid.view(1, 2, -1).to(matrix.device)
com = torch.ones([1, 1, identity_grid.shape[-1]]).to(identity_grid.device)
identity_grid = torch.cat((identity_grid, com), dim=1).squeeze(0)
homo_grid = torch.matmul(matrix, identity_grid)
with torch.no_grad():
homo_grid[0, :] = torch.div(homo_grid[0, :], homo_grid[2, :])
homo_grid[1, :] = torch.div(homo_grid[1, :], homo_grid[2, :])
torch.set_grad_enabled(True)
return homo_grid[0:2, :].view(1, 2, height, width)
def get_affine_grid(self, matrix):
matrix = matrix.view(2, 3)
identity_grid = self.get_identity_grid()
height = identity_grid.shape[2]
width = identity_grid.shape[3]
identity_grid = identity_grid.view(1, 2, -1).to(matrix.device)
com = torch.ones([1, 1, identity_grid.shape[-1]]).to(identity_grid.device)
identity_grid = torch.cat((identity_grid, com), dim=1).squeeze(0)
aff_suf = torch.tensor([0, 0, 1], dtype=torch.float).unsqueeze(0).to(self.device)
matrix = torch.cat((matrix, aff_suf), dim=0)
affine_grid = torch.matmul(matrix, identity_grid)
return affine_grid[0:2, :].view(1, 2, height, width)
def get_grid(self, img_a, img_b, return_offsets_only=False):
"""Return the predicted sampling grid that aligns img_a with img_b."""
if img_a.is_cuda and not self.identity_grid.is_cuda:
self.identity_grid = self.identity_grid.to(img_a.device)
# Get Deformation Field
b_size = img_a.size(0)
all_offsets = self.all_offsets(img_a, img_b)
dtheta_for_intact = all_offsets[0]
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
if dtheta_for_intact.shape[-1] == 6:
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
trans_grid = self.get_affine_grid(theta_for_intact)
elif dtheta_for_intact.shape[-1] == 8:
dtheta_for_intact = torch.cat((dtheta_for_intact, torch.ones([1, 1]).to(img_a.device)), dim=1)
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
trans_grid = self.get_homography_grid(theta_for_intact)
deformation = all_offsets[1]
deformation_upsampled = deformation
if deformation.size(2) != self.oh and deformation.size(3) != self.ow:
deformation_upsampled = F.interpolate(deformation, (self.oh, self.ow), mode=sampling_mode,
align_corners=sampling_align_corners)
if return_offsets_only:
resampling_grid = deformation_upsampled.permute([0, 2, 3, 1])
else:
resampling_grid = (self.identity_grid.repeat(b_size, 1, 1, 1) + deformation_upsampled).permute([0, 2, 3, 1])
if dtheta_for_intact.shape[-1] < 6:
resampling_grid_intact = F.affine_grid(theta_for_intact.view(-1, 2, 3), img_a.size())
else:
resampling_grid_intact = trans_grid.permute([0, 2, 3, 1])
        # only the affine (intact) grid is returned here; the combined grid is
        # computed but not consumed by callers of get_grid
        resampling_grid = resampling_grid_intact + self.para_for_local * resampling_grid
        return resampling_grid_intact
def forward(self, img_a, img_b, apply_on=None):
"""
Predicts the spatial alignment needed to align img_a with img_b. The spatial transformation will be applied
on the tensors passed by apply_on (if apply_on is None then the transformation will be applied on img_a).
:param img_a: the source image.
:param img_b: the target image.
:param apply_on: the geometric transformation can be applied on different tensors provided by this list.
If not set, then the transformation will be applied on img_a.
:return: a list of the warped images (matching the order they appeared in apply on), and the regularization term
calculated for the predicted transformation."""
if img_a.is_cuda and not self.identity_grid.is_cuda:
self.identity_grid = self.identity_grid.to(img_a.device)
# Get Deformation Field
b_size = img_a.size(0)
all_offsets = self.all_offsets(img_a, img_b)
dtheta_for_intact = all_offsets[0]
if dtheta_for_intact.shape[-1] < 6:
# dtheta_for_intact = dtheta_for_intact * self.transfer_matrix
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
else:
if dtheta_for_intact.shape[-1] == 6:
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
trans_grid = self.get_affine_grid(theta_for_intact)
elif dtheta_for_intact.shape[-1] == 8:
dtheta_for_intact = torch.cat((dtheta_for_intact, torch.ones([1, 1]).to(img_a.device)), dim=1)
theta_for_intact = dtheta_for_intact + self.identity_theta.unsqueeze(0).repeat(img_a.size(0), 1)
trans_grid = self.get_homography_grid(dtheta_for_intact)
deformation = all_offsets[1]
deformation_upsampled = deformation
if deformation.size(2) != self.oh and deformation.size(3) != self.ow:
deformation_upsampled = F.interpolate(deformation, (self.oh, self.ow), mode=sampling_mode)
resampling_grid = (self.identity_grid.repeat(b_size, 1, 1, 1) + deformation_upsampled).permute([0, 2, 3, 1])
# Wrap image wrt to the deformation field
if apply_on is None:
apply_on = [img_a]
warped_images = []
for img in apply_on:
if dtheta_for_intact.shape[-1] < 6:
resampling_grid_intact = F.affine_grid(theta_for_intact.view(-1, 2, 3), img_a.size())
else:
resampling_grid_intact = trans_grid.permute([0, 2, 3, 1])
resampling_grid = (1 - self.para_for_local) * resampling_grid_intact + self.para_for_local * resampling_grid
warped_images.append(F.grid_sample(img, resampling_grid, mode=sampling_mode, padding_mode='zeros',
align_corners=sampling_align_corners))
# Calculate STN regularization term
reg_term = self._calculate_regularization_term(deformation, warped_images[0])
# return warped_images, reg_term, resampling_grid
return warped_images, reg_term
def _calculate_regularization_term(self, deformation, img):
"""Calculate the regularization term of the predicted deformation.
The regularization may-be applied to different resolution for larger images."""
dh, dw = deformation.size(2), deformation.size(3)
img = None if img is None else img.detach()
reg = 0.0
factor = 1.0
for i in range(self.multi_resolution_regularization):
if i != 0:
deformation_resized = F.interpolate(deformation, (dh // (2 ** i), dw // (2 ** i)), mode=sampling_mode,
align_corners=sampling_align_corners)
img_resized = F.interpolate(img, (dh // (2 ** i), dw // (2 ** i)), mode=sampling_mode,
align_corners=sampling_align_corners)
elif deformation.size()[2::] != img.size()[2::]:
deformation_resized = deformation
img_resized = F.interpolate(img, deformation.size()[2::], mode=sampling_mode,
align_corners=sampling_align_corners)
else:
deformation_resized = deformation
img_resized = img
reg += factor * smoothness_loss(deformation_resized, img_resized, alpha=self.alpha)
factor /= 2.0
return reg
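# --- Hedged usage sketch (not part of the original module; argument values are
# illustrative) ---
#   stn = LocalSTN(in_channels_a=1, in_channels_b=1, height=128, width=128,
#                  cfg='A', init_func='kaiming', stn_bilateral_alpha=1.0,
#                  init_to_identity=True, multi_resolution_regularization=1)
#   warped, reg = stn(img_a, img_b)  # warped[0] is img_a warped toward img_b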
| [
"[email protected]"
] | |
2be3f71105891c23fcae4397b78ff9eeb9fd1e82 | f1d32956219c4d7f63f47031fbf8ae4b8bf15eef | /gcpbt.py | 687f5218dd6bff5de9d469bc3ddb4971bed11b19 | [] | no_license | VaishnaviPunagin/GraphColoring-ComparativeAnalysisOfAlgorithms | 9946756c8b2b9f3d27fb0b29d14b9df316afef7d | d769f37bde209d976139d0105c0ac219d98fbbce | refs/heads/master | 2022-11-06T10:07:25.951199 | 2020-06-28T18:43:53 | 2020-06-28T18:43:53 | 266,312,564 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | import time
# starting time
start = time.time()
def is_safe(n, graph, colors, c):
    # Vertices are colored in order 0..n-1, so it is enough to check the
    # already-colored neighbours of n for a clash with color c
    for i in range(n):
        if graph[n][i] and c == colors[i]: return False
    return True
# n = index of the vertex currently being colored
def graphColoringUtil(graph, color_nb, colors, n):
    # Check if all vertices are assigned a color
    if n == len(colors):
        return True
    # Try each available color for the vertex n
    for c in range(1, color_nb+1):
        # Check if assignment of color c to n is possible
        if is_safe(n, graph, colors, c):
            # Assign color c to n
            colors[n] = c
            # Recursively assign colors to the rest of the vertices
            if graphColoringUtil(graph, color_nb, colors, n+1): return True
            # If there is no solution, remove color (BACKTRACK)
            colors[n] = 0
    return False
#We test the algorithm on the 5-vertex graph defined by the adjacency matrix
#below (a 5-cycle: 0-2-4-1-3-0) and check whether it is 4-colorable:
vertex_nb = 5
# nb of colors
color_nb = 4
# Initiate vertex colors
colors = [0] * vertex_nb
graph = [
[0,0,1,1,0],
[0,0,0,1,1],
[1,0,0,0,1],
[1,1,0,0,0],
[0,1,1,0,0],
]
#beginning with vertex 0
if graphColoringUtil(graph, color_nb, colors, 0):
    print("Solution:", colors)
else:
print ("No solutions")
#sleep for 1 second to pad the measured runtime
time.sleep(1)
# program body ends
# end time
end = time.time()
tt=end-start
# total time taken
print("Backtracking Algorithm : %f" %tt)
print()
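# Hedged extension (not part of the original script): the same routine can
# search for the chromatic number by retrying with fewer colors, e.g.
#   for k in range(1, vertex_nb + 1):
#       if graphColoringUtil(graph, k, [0] * vertex_nb, 0):
#           print("chromatic number <=", k)
#           break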
| [
"[email protected]"
] | |
bc198b1bbdd90d4dee4ee10abd4ca15226d57dd9 | e03ae6c0a87187d1ad24a172d1d573c2efa43117 | /homework2-5.py | 262af9fc0d870bf2a604e00ff4047150257f4911 | [] | no_license | AlSavva/Python-basics-homework | 3dc4841974d0fc5d5b1ff9ac7a50934540ab9be1 | 1d21e365c4a4524eb62b0fa1f23da848f9b066d5 | refs/heads/master | 2023-08-01T13:54:17.487022 | 2020-09-30T21:58:30 | 2020-09-30T21:58:30 | 290,989,515 | 0 | 0 | null | 2020-09-30T21:58:31 | 2020-08-28T08:12:05 | Python | UTF-8 | Python | false | false | 2,425 | py | # Implement a "Rating" structure, representing a non-increasing set of
# natural numbers. The user must be asked for a new element of the
# rating. If elements with the same value already exist in the rating, the
# new element with that value must be placed after them.
# Hint. For example, the set of natural numbers: 7, 5, 3, 3, 2.
# The user enters the number 3. Result: 7, 5, 3, 3, 3, 2.
# The user enters the number 8. Result: 8, 7, 5, 3, 3, 2.
# The user enters the number 1. Result: 7, 5, 3, 3, 2, 1.
# The set of natural numbers can be defined directly in the code, e.g.
# my_list = [7, 5, 3, 3, 2].
rating_list = []
pre_rating_list = input('To build the list, enter a group of integers '
                        'separated by spaces: ').split()
for n in pre_rating_list:  # the map() function has not been covered yet, so convert the strings to ints in a loop
    rating_list.append(int(n))
for num in range(1, len(rating_list)):  # sort the entered list in descending order (insertion sort)
    i = num
    while i > 0 and rating_list[i - 1] < rating_list[i]:
        rating_list[i], rating_list[i - 1] = rating_list[i - 1], rating_list[i]
        i -= 1
print(f'The entered list has been sorted:\n{rating_list}')
new_num = input('Add one more integer to the list: ')
rating_list.append(int(new_num))
for ind in range(len(rating_list) - 1):  # re-sort the list after appending (selection sort)
    for i in range(ind + 1, len(rating_list)):
        if rating_list[i] > rating_list[ind]:
            rating_list[i], rating_list[ind] = rating_list[ind], rating_list[i]
print(f'The new element has been placed according to its rating:\n{rating_list}')
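# Hedged alternative (not part of the homework solution): the insertion can be
# done without re-sorting via the bisect module on a negated key; bisect_right
# places the new element after any equal values, as the task requires.
#   import bisect
#   pos = bisect.bisect_right([-x for x in rating_list], -int(new_num))
#   rating_list.insert(pos, int(new_num))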
| [
"[email protected]"
] | |
914b43a3f40792a728183c80d318067f6bfb208e | 1278be8cb9f536b6a9c576440e2673279beded2e | /app.py | 0181a61877e31070b4ecafe09aea6524a06d4894 | [] | no_license | patrickhpatin/web-scraping-challenge | ba8dade253aa7b4d1aeec53d0ccdabc9e263bd35 | 5f591d642b5970e65494f624bbc98ac403e2edd9 | refs/heads/master | 2021-02-16T14:57:14.011696 | 2020-03-13T11:11:56 | 2020-03-13T11:11:56 | 245,018,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | # Dependencies
import pandas as pd
import time as time
import numpy as np
import requests
from flask import Flask, render_template, redirect, jsonify
import flask
import scrape_mars
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
# Display routes
@app.route('/routes')
def routes():
"""List of all available api routes."""
return (
f"Available Routes:<br>"
f"/routes<br>"
f"/scrape"
)
# end def routes()
@app.route("/scrape")
def scrape():
try:
if scrape_mars.populate_mars_db() == 200:
mars_data = scrape_mars.get_mars_data_from_db()
# Redirect to the home
return redirect("/", code=302)
        else:
            # return an error response instead of falling through with None
            return ("There was a problem scraping data from NASA. "
                    "Please try again later.", 502)
        # end if
except Exception as e:
print(e)
# end def scrape()
@app.route('/')
def index():
try:
mars_data = scrape_mars.get_mars_data_from_db()
# Return the index.html with mars data populated
return render_template("index.html",
news_title=mars_data[0]["news_title"],
news_p=mars_data[0]["news_p"],
featured_image_url=mars_data[0]["featured_image_url"],
mars_weather=mars_data[0]["mars_weather"],
mars_table=mars_data[0]["mars_table"],
hem1_name=mars_data[0]["hemisphere_image_urls"][0]["title"],
hem1_image=mars_data[0]["hemisphere_image_urls"][0]["img_url"],
hem2_name=mars_data[0]["hemisphere_image_urls"][1]["title"],
hem2_image=mars_data[0]["hemisphere_image_urls"][1]["img_url"],
hem3_name=mars_data[0]["hemisphere_image_urls"][2]["title"],
hem3_image=mars_data[0]["hemisphere_image_urls"][2]["img_url"],
hem4_name=mars_data[0]["hemisphere_image_urls"][3]["title"],
hem4_image=mars_data[0]["hemisphere_image_urls"][3]["img_url"],
background="../Images/Mars.jpg")
except Exception as e:
print(e)
# end def index()
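# Hedged local-run note (assumption: a scrape_mars.py module next to this file
# provides populate_mars_db() and get_mars_data_from_db() as used above):
#   $ python app.py            # serves http://127.0.0.1:5000/
#   hit /scrape once to populate the database, then / to view the data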
if __name__ == '__main__':
app.run(debug=False) | [
"[email protected]"
] | |
f17ed08bf47fc77482e427e5e7c87e52a0ab5d46 | 756d50be34245115ad28e79f4dfceb5516d17225 | /relsearch.py | af268beec2663fa43b51c0f5de63ab395fea2d2b | [] | no_license | abyssonym/gg3 | f1ce189a2a70786da8b2ab78281b39615fc59af2 | 1e6adadc6765d339ebbd7ca650d9b435d56fb366 | refs/heads/master | 2021-01-18T13:51:25.702975 | 2017-11-16T22:26:30 | 2017-11-16T22:26:30 | 34,976,112 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from sys import argv
from string import ascii_lowercase
from shutil import copyfile
filename = argv[1]
outfile = "test.smc"
searchstr = argv[2].lower()
if '.' in searchstr:
searchstr = map(int, searchstr.split('.'))
else:
numdict = dict([(b, a) for (a, b) in enumerate(ascii_lowercase)])
searchstr = [numdict[c] if c in numdict else c for c in searchstr]
print searchstr
f = open(filename, 'r+b')
addr = 0
checkstr = None
while True:
f.seek(addr)
bytestr = f.read(len(searchstr))
if len(bytestr) != len(searchstr):
break
bytestr = map(ord, bytestr)
    # relative match: shift the window so its first byte lines up with the
    # pattern's first value, then compare the remaining deltas
    offset = bytestr[0] - searchstr[0]
    newbytestr = [i - offset for i in bytestr]
if all([a == b for (a, b) in zip(newbytestr, searchstr)]):
print "%x" % addr
print bytestr
check = None
if not checkstr:
check = raw_input("> ")
if check and check.lower()[0] == 'y':
checkstr = bytestr
if checkstr and all([a == b for (a, b) in zip(checkstr, bytestr)]):
copyfile(filename, outfile)
f2 = open(outfile, 'r+b')
f2.seek(addr)
f2.write("".join([chr(bytestr[0]) for _ in bytestr]))
f2.close()
check = raw_input("> ")
addr += 1
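# Hedged usage note (not part of the original script): "relative search" finds
# text stored with an unknown alphabet offset, e.g.
#   python relsearch.py game.smc hero        # letters a-z map to 0..25
#   python relsearch.py game.smc 7.4.17.17   # explicit byte values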
| [
"none"
] | none |
e1bc080590be397ae15d86246e7de108caaf0d0f | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/benanne_kaggle-ndsb/kaggle-ndsb-master/dihedral_ops.py | e5d8d87655fb7072e1ad79d489e425aaca16ac92 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 12,079 | py | import numpy as np
import theano
import theano.sandbox.cuda as cuda
from pycuda.compiler import SourceModule
import theano.misc.pycuda_init
class PyCudaOp(cuda.GpuOp):
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def __str__(self):
return self.__class__.__name__
def output_type(self, inp):
raise NotImplementedError
def make_node(self, inp):
inp = cuda.basic_ops.gpu_contiguous(
cuda.basic_ops.as_cuda_ndarray_variable(inp))
assert inp.dtype == "float32"
return theano.Apply(self, [inp], [self.output_type(inp)()])
class CyclicRollOp(PyCudaOp):
def output_type(self, inp):
return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim))
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
mod = SourceModule("""
__global__ void cyclic_roll(float * input, float * output, int batch_size, int num_features) {
int x = blockIdx.x*blockDim.x + threadIdx.x; // feature dim, fastest varying index!
int y = blockIdx.y*blockDim.y + threadIdx.y; // batch dim
int height = 4 * batch_size;
int width = 4 * num_features;
if (x < num_features && y < height) {
for (int i = 0; i < 4; i++) {
int y_out = (y + batch_size * (4 - i)) % height;
int x_out = x + num_features * i;
output[y_out * width + x_out] = input[y * num_features + x];
}
}
}""")
kernel = mod.get_function("cyclic_roll")
def thunk():
in_shape = inputs[0][0].shape
rows, cols = in_shape
assert rows % 4 == 0
out_shape = (rows, 4 * cols)
batch_size = rows // 4
num_features = cols
out = outputs[0]
# only allocate if there is no previous allocation of the right size.
if out[0] is None or out[0].shape != out_shape:
out[0] = cuda.CudaNdarray.zeros(out_shape)
x_block = 16
y_block = 16
block = (x_block, y_block, 1)
x_grid = int(np.ceil(float(in_shape[1]) / x_block))
y_grid = int(np.ceil(float(in_shape[0]) / y_block))
grid = (x_grid, y_grid, 1)
kernel(inputs[0][0], out[0], np.intc(batch_size), np.intc(num_features), block=block, grid=grid)
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inp, grads):
top, = grads
top = cuda.basic_ops.gpu_contiguous(top)
return [CyclicRollGradOp()(top)]
cyclic_roll = CyclicRollOp()
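# Hedged NumPy reference (not part of the original module): the input is a
# (4*b, f) batch holding four rotated copies of each example; cyclic_roll
# concatenates, per example, the features of all four rotations into (4*b, 4*f):
#   def cyclic_roll_ref(x, b):
#       return np.concatenate([np.roll(x, -i * b, axis=0) for i in range(4)], axis=1)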
class CyclicRollGradOp(PyCudaOp):
def output_type(self, inp):
return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim))
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
mod = SourceModule("""
__global__ void cyclic_roll_grad(float * input, float * output, int batch_size, int num_features) {
int x = blockIdx.x*blockDim.x + threadIdx.x; // feature dim, fastest varying index!
int y = blockIdx.y*blockDim.y + threadIdx.y; // batch dim
int height = 4 * batch_size;
int width = 4 * num_features;
float val = 0;
if (x < num_features && y < height) {
for (int i = 0; i < 4; i++) {
int y_in = (y + batch_size * (4 - i)) % height;
int x_in = x + num_features * i;
val += input[y_in * width + x_in];
}
output[y * num_features + x] = val;
}
}""")
kernel = mod.get_function("cyclic_roll_grad")
def thunk():
in_shape = inputs[0][0].shape
rows, cols = in_shape
assert rows % 4 == 0
assert cols % 4 == 0
out_shape = (rows, cols // 4)
batch_size = rows // 4
num_features = cols // 4
out = outputs[0]
# only allocate if there is no previous allocation of the right size.
if out[0] is None or out[0].shape != out_shape:
out[0] = cuda.CudaNdarray.zeros(out_shape)
x_block = 16
y_block = 16
block = (x_block, y_block, 1)
x_grid = int(np.ceil(float(out_shape[1]) / x_block))
y_grid = int(np.ceil(float(out_shape[0]) / y_block))
grid = (x_grid, y_grid, 1)
kernel(inputs[0][0], out[0], np.intc(batch_size), np.intc(num_features), block=block, grid=grid)
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
class CyclicConvRollOp(PyCudaOp):
def output_type(self, inp):
return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim))
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
mod = SourceModule("""
__global__ void cyclic_convroll(float * input, float * output, int batch_size, int num_channels, int map_size) {
int x = blockIdx.x*blockDim.x + threadIdx.x; // feature dim, fastest varying index!
int y = blockIdx.y*blockDim.y + threadIdx.y; // batch dim
int map_size_sq = map_size * map_size;
int example_size = num_channels * map_size_sq;
int num_rows = 4 * batch_size; // number of rows in the input/output, seen as a 2D array
int num_cols = 4 * example_size; // number of columns in the output, seen as a 2D array
// feature indices (channels, height, width)
int x_channel = x / map_size_sq;
int x_f0 = (x % map_size_sq) / map_size;
int x_f1 = x % map_size;
int x_out_f0 = x_f0;
int x_out_f1 = x_f1;
int tmp;
if (x < example_size && y < num_rows) {
for (int i = 0; i < 4; i++) {
int y_out = (y + batch_size * (4 - i)) % num_rows;
int x_out = example_size * i + x_channel * map_size_sq + x_out_f0 * map_size + x_out_f1;
output[y_out * num_cols + x_out] = input[y * example_size + x];
// note that the writes to output go in reverse order for all the rotated feature maps.
// this may slow things down a little, perhaps there is room for further optimization.
// rotate
tmp = x_out_f0;
x_out_f0 = x_out_f1;
x_out_f1 = map_size - 1 - tmp;
}
}
}""")
kernel = mod.get_function("cyclic_convroll")
def thunk():
in_shape = inputs[0][0].shape
full_batch_size, num_channels, height, width = in_shape
assert height == width # else convroll doesn't make sense
assert full_batch_size % 4 == 0
out_shape = (full_batch_size, 4 * num_channels, height, width)
batch_size = full_batch_size // 4
example_size = num_channels * height * width
map_size = height
out = outputs[0]
# only allocate if there is no previous allocation of the right size.
if out[0] is None or out[0].shape != out_shape:
out[0] = cuda.CudaNdarray.zeros(out_shape)
x_block = 16
y_block = 16
block = (x_block, y_block, 1)
x_grid = int(np.ceil(float(example_size) / x_block))
y_grid = int(np.ceil(float(full_batch_size) / y_block))
grid = (x_grid, y_grid, 1)
kernel(inputs[0][0], out[0], np.intc(batch_size), np.intc(num_channels), np.intc(map_size), block=block, grid=grid)
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inp, grads):
top, = grads
top = cuda.basic_ops.gpu_contiguous(top)
return [CyclicConvRollGradOp()(top)]
cyclic_convroll = CyclicConvRollOp()
class CyclicConvRollGradOp(PyCudaOp):
def output_type(self, inp):
return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim))
def make_thunk(self, node, storage_map, _, _2):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
mod = SourceModule("""
__global__ void cyclic_convroll_grad(float * input, float * output, int batch_size, int num_channels, int map_size) {
int x = blockIdx.x*blockDim.x + threadIdx.x; // feature dim, fastest varying index!
int y = blockIdx.y*blockDim.y + threadIdx.y; // batch dim
int map_size_sq = map_size * map_size;
int example_size = num_channels * map_size_sq;
int num_rows = 4 * batch_size; // number of rows in the input/output, seen as a 2D array
int num_cols = 4 * example_size; // number of columns in the input, seen as a 2D array
// feature indices (channels, height, width)
int x_channel = x / map_size_sq;
int x_f0 = (x % map_size_sq) / map_size;
int x_f1 = x % map_size;
int x_in_f0 = x_f0;
int x_in_f1 = x_f1;
int tmp;
        float val = 0; // accumulator must start at zero
if (x < example_size && y < num_rows) {
for (int i = 0; i < 4; i++) {
int y_in = (y + batch_size * (4 - i)) % num_rows;
int x_in = example_size * i + x_channel * map_size_sq + x_in_f0 * map_size + x_in_f1;
val += input[y_in * num_cols + x_in];
// rotate
tmp = x_in_f0;
x_in_f0 = x_in_f1;
x_in_f1 = map_size - 1 - tmp;
}
output[y * example_size + x] = val;
}
}""")
kernel = mod.get_function("cyclic_convroll_grad")
def thunk():
in_shape = inputs[0][0].shape
full_batch_size, num_channels_rolled, height, width = in_shape
assert height == width # else convroll doesn't make sense
assert full_batch_size % 4 == 0
assert num_channels_rolled % 4 == 0
num_channels = num_channels_rolled // 4
batch_size = full_batch_size // 4
out_shape = (full_batch_size, num_channels, height, width)
example_size = num_channels * height * width
map_size = height
out = outputs[0]
# only allocate if there is no previous allocation of the right size.
if out[0] is None or out[0].shape != out_shape:
out[0] = cuda.CudaNdarray.zeros(out_shape)
x_block = 16
y_block = 16
block = (x_block, y_block, 1)
x_grid = int(np.ceil(float(example_size) / x_block))
y_grid = int(np.ceil(float(full_batch_size) / y_block))
grid = (x_grid, y_grid, 1)
kernel(inputs[0][0], out[0], np.intc(batch_size), np.intc(num_channels), np.intc(map_size), block=block, grid=grid)
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
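# --- Added reference sketch (illustrative, not part of the original ops) ---
# A minimal NumPy model of what the cyclic_roll kernel defined earlier computes,
# assuming the input is a (4*batch_size, num_features) array holding the four
# cyclic views stacked along the batch axis; the helper name is made up.
import numpy as np

def cyclic_roll_reference(x):
    batch_size = x.shape[0] // 4
    # output block i holds the input rolled back by i*batch_size rows
    views = [np.roll(x, -i * batch_size, axis=0) for i in range(4)]
    return np.concatenate(views, axis=1)  # shape (4*batch_size, 4*num_features)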
| [
"[email protected]"
] | |
cedc86c60287d2aa953379b03f884778d3db8f98 | baf080cb2ff76697daade8ca105b71d786c00fde | /util/plot_cpu.py | e957232679533d7bbca0807ec4d3c622e18023c3 | [] | no_license | bentenballer/SimulatingNetworks | d7bbf272a62f0a50043fc3ddb104b1cccb927e8d | 32a6c5914d967dc45bd91ac7faa2f67dcdb22489 | refs/heads/main | 2023-08-17T14:37:29.611721 | 2021-10-10T06:41:17 | 2021-10-10T06:41:17 | 415,506,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | '''
Plot CPU utilization of each virtual host.
'''
from helper import *
parser = argparse.ArgumentParser("Plot stacked bar chart of CPU usage")
parser.add_argument('--files', '-f',
help="File to read CPU usage from.",
required=True,
nargs="+",
dest="files")
parser.add_argument('--out', '-o',
help="Output png for plot",
default=None,
dest="out")
parser.add_argument('-s', '--summarise',
help="Summarise the time series plot (boxplot). First 10 and last 10 values are ignored.",
default=False,
dest="summarise",
action="store_true")
parser.add_argument('--labels', '-l',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=None,
nargs="+",
dest="labels")
args = parser.parse_args()
if args.labels is None:
args.labels = args.files
def aggregate(data):
"""Aggregates to give a total cpu usage"""
data = list(map(list, data))
return list(map(sum, list(zip(*data))))
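# Added example (illustrative, assuming rows are per-field series):
# aggregate([[1, 2], [3, 4]]) -> [4, 6], i.e. per-timestep totals across fields.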
def plot_series():
data = parse_cpu_usage(args.files[0])
N = len(data)
data = transpose(data)
ind = list(range(N))
    width = 1
colours = ['y','g','r','b','purple','brown','cyan']
legend = "user,system,nice,iowait,hirq,sirq,steal".split(',')
nfields = 7
legend = legend[0:nfields]
p = [0]*nfields
bottom = [0]*N
plt.ylabel("CPU %")
plt.xlabel("Seconds")
for i in range(nfields):
p[i] = plt.bar(ind[0:N], data[i], width, bottom=bottom, color=colours[i])
for j in range(N):
bottom[j] += data[i][j]
plt.legend([e[0] for e in p], legend)
def plot_summary():
plt.ylabel("CPU %")
to_plot=[]
for f in args.files:
data = parse_cpu_usage(f)
N = len(data)
data = transpose(data)
ind = list(range(N))
data = aggregate(data)
to_plot.append(data[10:-10])
plots = plt.boxplot(to_plot)
plt.yticks(list(range(0,110,10)))
plt.title("CPU utilisation")
plt.grid()
plt.xticks(list(range(1, 1+len(args.files))), args.labels)
if args.summarise:
plot_summary()
else:
plot_series()
if args.out is None:
plt.show()
else:
plt.savefig(args.out)
| [
"[email protected]"
] | |
63309f5b16e32ac3d1a5c83f1cabc9d2e02f0132 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/workbook/test_write_workbook_view.py | 683d301b318446951f7cca09b7fc061d5ee04506 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 4,953 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...workbook import Workbook
class TestWriteWorkbookView(unittest.TestCase):
"""
Test the Workbook _write_workbook_view() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_workbook_view1(self):
"""Test the _write_workbook_view() method"""
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view2(self):
"""Test the _write_workbook_view() method"""
self.workbook.worksheet_meta.activesheet = 1
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" activeTab="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view3(self):
"""Test the _write_workbook_view() method"""
self.workbook.worksheet_meta.firstsheet = 1
self.workbook.worksheet_meta.activesheet = 1
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" firstSheet="2" activeTab="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view4(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(0, 0)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view5(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(None, None)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view6(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(1073, 644)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view7(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(123, 70)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="1845" windowHeight="1050"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view8(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(719, 490)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="10785" windowHeight="7350"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view9(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio()
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view10(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(34.6)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="346"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view11(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(0)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_workbook_view12(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(100)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="1000"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def tearDown(self):
self.workbook.fileclosed = 1
| [
"[email protected]"
] | |
4041ef0ddfca556771d9ad29a6c5b1bb3bfff056 | b2cb3fba925ff4c74ec6f4e9e47f6d81cf8ab314 | /10809.py | 030b749129b66912b12ed78cae0b3ef73955c312 | [] | no_license | haka913/boj | 599c693ed6c2e06b30a68d7b7e53c5a04b09a67f | 1f634b6e6036b080a876656dbf36c2dbd4f6383e | refs/heads/master | 2022-12-24T06:53:31.621957 | 2020-10-04T15:34:43 | 2020-10-04T15:34:43 | 212,800,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | s = input()
dic = [-1] * 26
for i in s:
    if dic[ord(i) - ord('a')] == -1:
dic[ord(i) - ord('a')] = s.index(i)
print(*dic)
| [
"[email protected]"
] | |
550f5009ce1e9ff9ab80bfc92868db0fb181ce46 | 0549b7bfba799c95dc9232c49569d690b2c3b536 | /src/user_info/consumables/enums.py | 1f8b6bb4da10e0c2d466498811a41009f4159541 | [] | no_license | ilof2/ELiquid | 4f74e37469366a772dfcc3dbabb77aac2b3782f8 | 675c278030c826c91bc258ef22b1fad971ec6595 | refs/heads/master | 2023-04-22T15:37:37.099888 | 2021-05-09T17:29:42 | 2021-05-09T17:29:42 | 362,571,045 | 1 | 0 | null | 2021-05-09T17:24:40 | 2021-04-28T18:34:42 | Python | UTF-8 | Python | false | false | 76 | py | from enum import Enum
class FlavorType(Enum):
VG = "VG"
PG = "PG"
| [
"[email protected]"
] | |
b52e256f0a8a9c2d165f8a149cddf88616e8b42f | a4b9db38e6d9a6b18adda06c7dded17ae8a3386a | /Trie.py | 46c20e428351dcc20ee32a66d61a9c477f72f008 | [] | no_license | TeoWeber/trabalhoCPD | 4d2c0f62917c84ee7331bfa376f02a89ac6d541b | 9504d86e15f2753e330dde0202e615641445dff2 | refs/heads/master | 2020-04-09T01:42:02.424916 | 2018-12-03T02:50:30 | 2018-12-03T02:50:30 | 159,913,868 | 0 | 0 | null | 2018-12-01T05:54:54 | 2018-12-01T05:54:54 | null | UTF-8 | Python | false | false | 1,815 | py | import pickle
class Trie():
    # Constructor
    def __init__(self, char='RAIZ', value=-1, level=0):
self.char = char
self.value = value
self.children = {}
self.level = level
def __str__(self):
s = "_"*self.level + self.char + " >>> " + str(self.value)
for char in sorted(self.children):
s += "\n" + str(self.children[char])
return s
def insereTrie(raiz, pokemon, n_pok):
node = raiz
lastId = None
    # walk down the longest prefix that already exists
for id, char in enumerate(pokemon):
if char in node.children:
node = node.children[char]
else:
lastId = id
break
    # the needed node was not found, so fill in the rest of the word
if lastId != None:
for id, char in enumerate(pokemon[lastId:-1]):
node.children[char] = Trie(char, -1, lastId+id)
node = node.children[char]
node.children[pokemon[-1]] = Trie(pokemon[-1], n_pok, len(pokemon)-1)
else:
node.value = n_pok
def buscaTrie(raiz, pokemon):
node = raiz
achou = True
for id, char in enumerate(pokemon):
if char in node.children:
node = node.children[char]
else:
achou = False
break
if achou:
return node.value
else:
print("Elemento inexistente")
return -1
def runTrie(list_objs_pokemon):
try:
with open('trie.data', 'rb') as file:
raiz = pickle.load(file)
file.close()
except:
raiz = Trie()
    for i in range(len(list_objs_pokemon)): # builds the trie
insereTrie(raiz, list_objs_pokemon[i].name.strip(), list_objs_pokemon[i].id)
with open('trie.data', 'wb') as file:
pickle.dump(raiz, file)
file.close()
    name = input("Enter a pokemon name: ")
id = buscaTrie(raiz, name.lower())
if id == -1:
print("Erro!")
else:
print(list_objs_pokemon[id - 1])
return raiz
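# --- Added usage sketch (illustrative; the names and ids below are made up) ---
if __name__ == '__main__':
    demo_root = Trie()
    insereTrie(demo_root, 'pikachu', 25)
    insereTrie(demo_root, 'pidgey', 16)
    assert buscaTrie(demo_root, 'pikachu') == 25
    assert buscaTrie(demo_root, 'pidgey') == 16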
| [
"[email protected]"
] | |
894f3558e7c71d3dddc26e67f412ce6089f639f2 | 4da640cbf45783282c3e382331013eecb5553492 | /lits.py | 3a4e748a40878bc3c7453b93d1b0ef73eec3fd0b | [] | no_license | kuzeydev/mbv-python-projects | 70f455c5e33e9bfa3feddbcffb2ee776fa477706 | a166f3f6f24c33da66f474c886fa7840ec4fdc97 | refs/heads/main | 2023-07-05T03:17:55.021821 | 2021-08-31T19:25:49 | 2021-08-31T19:25:49 | 401,819,818 | 0 | 0 | null | 2021-08-31T19:25:06 | 2021-08-31T19:25:05 | null | UTF-8 | Python | false | false | 132 | py | names = ['mbv' , 'kbv' , 'bob', 'rick']
print(names [1])
print(names[-1])
names[1] = 'bugra'
print(names)
print(names[0:3])
| [
"[email protected]"
] | |
f36bac8cb3c65b13ba04323591cf99f819b50868 | 431c8beacf2b1a54982bf2d06b3dc5cebba87c69 | /buttontest.py | 1b228e5bfeb4437a78e6f55ab31ba9c5574807e5 | [
"MIT"
] | permissive | watrt/micropython-tft-gui | 290c27ba810943033d26214b7f9ec38129fa774e | 1ae9eafccb7084093eb80354e9e30d1f02367221 | refs/heads/master | 2020-12-10T06:49:51.299653 | 2019-05-25T07:30:57 | 2019-05-25T07:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,508 | py | # buttontest.py Test/demo of pushbutton classes for Pybboard TFT GUI
# The MIT License (MIT)
#
# Copyright (c) 2016 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from constants import *
from ugui import Button, ButtonList, RadioButtons, Checkbox, Label, Screen
import font14
import font10
from tft_local import setup
class ButtonScreen(Screen):
def __init__(self):
super().__init__()
# These tables contain args that differ between members of a set of related buttons
table = [
{'fgcolor' : GREEN, 'text' : 'Yes', 'args' : ('Oui', 2), 'fontcolor' : (0, 0, 0)},
{'fgcolor' : RED, 'text' : 'No', 'args' : ('Non', 2)},
{'fgcolor' : BLUE, 'text' : '???', 'args' : ('Que?', 2), 'fill': False},
{'fgcolor' : GREY, 'text' : 'Rats', 'args' : ('Rats', 2), 'shape' : CLIPPED_RECT,},
]
# Highlight buttons: only tabulate data that varies
table_highlight = [
{'text' : 'P', 'args' : ('p', 2)},
{'text' : 'Q', 'args' : ('q', 2)},
{'text' : 'R', 'args' : ('r', 2)},
{'text' : 'S', 'args' : ('s', 2)},
]
# A Buttonset with two entries
table_buttonset = [
{'fgcolor' : GREEN, 'shape' : CLIPPED_RECT, 'text' : 'Start', 'args' : ('Live', 2)},
{'fgcolor' : RED, 'shape' : CLIPPED_RECT, 'text' : 'Stop', 'args' : ('Die', 2)},
]
table_radiobuttons = [
{'text' : '1', 'args' : ('1', 3)},
{'text' : '2', 'args' : ('2', 3)},
{'text' : '3', 'args' : ('3', 3)},
{'text' : '4', 'args' : ('4', 3)},
]
labels = { 'width' : 70,
'fontcolor' : WHITE,
'border' : 2,
'fgcolor' : RED,
'bgcolor' : (0, 40, 0),
'font' : font14,
}
# Uncomment this line to see 'skeleton' style greying-out:
# Screen.tft.grey_color()
# Labels
self.lstlbl = []
for n in range(5):
self.lstlbl.append(Label((390, 40 * n), **labels))
# Button assortment
x = 0
for t in table:
Button((x, 0), font = font14, callback = self.callback, **t)
x += 70
# Highlighting buttons
x = 0
for t in table_highlight:
Button((x, 60), fgcolor = GREY, fontcolor = BLACK, litcolor = WHITE,
font = font14, callback = self.callback, **t)
x += 70
# Start/Stop toggle
self.bs = ButtonList(self.callback)
self.bs0 = None
for t in table_buttonset: # Buttons overlay each other at same location
button = self.bs.add_button((0, 240), font = font14, fontcolor = BLACK, height = 30, **t)
if self.bs0 is None: # Save for reset button callback
self.bs0 = button
# Radio buttons
x = 0
self.rb = RadioButtons(BLUE, self.callback) # color of selected button
self.rb0 = None
for t in table_radiobuttons:
button = self.rb.add_button((x, 140), font = font14, fontcolor = WHITE,
fgcolor = (0, 0, 90), height = 40, width = 40, **t)
if self.rb0 is None: # Save for reset button callback
self.rb0 = button
x += 60
# Checkbox
self.cb1 = Checkbox((340, 0), callback = self.cbcb, args = (0,))
self.cb2 = Checkbox((340, 40), fillcolor = RED, callback = self.cbcb, args = (1,))
# Reset button
self.lbl_reset = Label((200, 220), font = font10, value = 'Reset also responds to long press')
self.btn_reset = Button((300, 240), font = font14, height = 30, width = 80,
fgcolor = BLUE, shape = RECTANGLE, text = 'Reset', fill = True,
callback = self.cbreset, args = (4,), onrelease = False,
lp_callback = self.callback, lp_args = ('long', 4))
# Quit
self.btn_quit = Button((390, 240), font = font14, height = 30, width = 80,
fgcolor = RED, shape = RECTANGLE, text = 'Quit',
callback = self.quit)
# Enable/Disable toggle
self.bs_en = ButtonList(self.cb_en_dis)
self.tup_en_dis = (self.cb1, self.cb2, self.rb, self.bs) # Items affected by enable/disable button
self.bs_en.add_button((200, 240), font = font14, fontcolor = BLACK, height = 30, width = 90,
fgcolor = GREEN, shape = RECTANGLE, text = 'Disable', args = (True,))
self.bs_en.add_button((200, 240), font = font14, fontcolor = BLACK, height = 30, width = 90,
fgcolor = RED, shape = RECTANGLE, text = 'Enable', args = (False,))
def callback(self, button, arg, idx_label):
self.lstlbl[idx_label].value(arg)
def quit(self, button):
Screen.shutdown()
def cbcb(self, checkbox, idx_label):
if checkbox.value():
self.lstlbl[idx_label].value('True')
else:
self.lstlbl[idx_label].value('False')
def cbreset(self, button, idx_label):
self.cb1.value(False)
self.cb2.value(False)
self.bs.value(self.bs0)
self.rb.value(self.rb0)
self.lstlbl[idx_label].value('Short')
def cb_en_dis(self, button, disable):
for item in self.tup_en_dis:
item.greyed_out(disable)
def test():
print('Testing TFT...')
setup()
Screen.change(ButtonScreen)
test()
| [
"[email protected]"
] | |
c3ccf4e0c76e93b359190807ae0de1c11b195805 | 8dfda6368e8f566ac2b727980ad4278a43326e60 | /fc.py | ed4768fc7a6fe70bd3dd91084c5eee29d1abb4cd | [] | no_license | Stupidd-Pumpkin/Python-Projects | d5d10652034f02b41e8d7328310630adc55f81a7 | 0b88daed705504d455463690d209509ed8215b9c | refs/heads/main | 2023-03-26T10:45:03.655376 | 2021-03-27T07:13:09 | 2021-03-27T07:13:09 | 352,001,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py |
#file_name="dict.txt"
#file_list=[]
#fin = open(file_name)
#for eachline in fin:
# file_list.append(eachline.strip())
#
#print file_list
#file_set=set(file_list)
import string
#from difflib import *
f=open('dict.txt','r')
open('lists.txt','w').close()
q=open('lists.txt','a')
open('back.txt','w').close()
z=open('back.txt','a')
word_set = set(word.strip().upper() for word in f)
start_word="NOSE"
end_word="CHIN"
maxsize=10
length=len(start_word)
alphabet_list=list(string.ascii_uppercase)
'''
list1=[]
for i in range(0,4):
for a in alphabet_list:
temp=list(start_word)
temp[i]=a
st="".join(temp)
if(st in word_set):
if st != start_word:
list1.append(st)
print ( list1,'\n',file=q)
print ("\n")
list_set=set(list1)
'''
count=0
parentlist=[]
parentlist.append([start_word])
for i in range (1,maxsize):
parentlist.append([])
for st1 in parentlist[i-1]:
for a in alphabet_list:
for j in range(0,length):
temp=list(st1)
temp[j]=a
st="".join(temp)
if(st in word_set):
if(st not in parentlist[i]):
parentlist[i].append(st)
print (i,'\n\n\n\n',parentlist[i],'\n',file=q)
if(end_word in parentlist[i]) and (count ==0 ):
count=i
print (count)
count+=1
backtrack=[]
backtrack.append([end_word])
print ('\n',backtrack[0])
count+=1
for i in range (1,count):
backtrack.append([])
for st1 in backtrack[i-1]:
for a in alphabet_list:
for j in range(0,length):
temp=list(st1)
temp[j]=a
st="".join(temp)
if(st in parentlist[count-i-1]):
if(st not in backtrack[i]) and (st not in backtrack[i-1]):
backtrack[i].append(st)
print('\n',backtrack[i])
print (i,'\n\n\n\n',backtrack[i],'\n',file=z)
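# --- Added helper sketch (illustrative, not used above): the neighbour
# relation this word-ladder search relies on. Two words are neighbours when
# they have equal length and differ in exactly one position.
def one_letter_apart(w1, w2):
    return len(w1) == len(w2) and sum(a != b for a, b in zip(w1, w2)) == 1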
| [
"[email protected]"
] | |
82cbd2304696415df1c92ba0cedca7acc29983b8 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/mdlyud002/question2.py | ebdc1dede1c8a1ab523e6c9a607a685c9867f7a7 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | # Yudhi Moodley
# Assignment 6 - Vector Calculator
# 23/04/2014
import math
vectorA = []
vectorB = []
addition = []
dotProduct = []
normalization = []
def vector_calculator():
vector1 = input("Enter vector A:\n")
vectorA = vector1.split(' ') # splits the input
vector2 = input("Enter vector B:\n")
vectorB = vector2.split(' ') # splits the input
# addition funtion
for i in range (3):
addNum = eval(vectorA[i]) + eval(vectorB[i])
addition.append(addNum)
print("A+B = [" + str(addition[0]) + ", " + str(addition[1]) + ", " + str(addition[2]) + "]")
# calculates the funtion of the vector
for i in range (3):
multNum = eval(vectorA[i]) * eval(vectorB[i])
dotProduct.append(multNum)
product = 0
for i in range (3):
product += dotProduct[i]
print("A.B = " + str(product))
    # computes the magnitude (Euclidean norm) of each vector
aSum = eval(vectorA[0])**2 + eval(vectorA[1])**2 + eval(vectorA[2])**2
aRoot = ("{0:.2f}".format(math.sqrt(aSum)))
print("|A| =",aRoot)
bSum = eval(vectorB[0])**2 + eval(vectorB[1])**2 + eval(vectorB[2])**2
bRoot = ("{0:.2f}".format(math.sqrt(bSum)))
print("|B| =",bRoot)
vector_calculator() | [
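# --- Added note (illustrative): the same dot product can be written more
# idiomatically with zip(), assuming two equal-length numeric lists a and b:
#   dot = sum(x * y for x, y in zip(a, b))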
"[email protected]"
] | |
c1aecb5f5ba8e76b8436c371feeb5d0732281585 | 2f7a05fe584a948d203f0236b6f139220865aabc | /plugins/extractit/icon_extractit/actions/mac_extractor/schema.py | f38e6445d4d28a79313deafb2f2c1e1d6495cd13 | [
"MIT"
] | permissive | Kano69/insightconnect-plugins | 6671995e9c3127137f59939369ab79bdd152127d | 4dc54260470cd8a3d1cb31ae1c48ad3e8ec75194 | refs/heads/master | 2023-08-25T19:14:14.919805 | 2021-10-15T15:01:18 | 2021-10-15T15:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Extracts all MAC addresses from a string or file"
class Input:
FILE = "file"
STR = "str"
class Output:
MAC_ADDRS = "mac_addrs"
class MacExtractorInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"type": "string",
"title": "File",
"displayType": "bytes",
"description": "Input file as bytes",
"format": "bytes",
"order": 2
},
"str": {
"type": "string",
"title": "String",
"description": "Input string",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class MacExtractorOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"mac_addrs": {
"type": "array",
"title": "MAC Addresses",
"description": "List of extracted MAC Addresses",
"items": {
"type": "string"
},
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"[email protected]"
] | |
152fe3ddd50b67a56fdbcc413e150071d290d729 | 36b1e387555398e942ccb7ed44b71ea2a036fee4 | /accounting.py | af315222d74f79dc1fd177042413722ffa956e5e | [] | no_license | RomanKhudobei/Accounting-Project | 8e99df581d3a75c182a330ad2913a8ec831c88b1 | 45a95e535578b0fa84fcb7f216c0b351886ba740 | refs/heads/master | 2021-07-18T19:01:54.389635 | 2017-10-26T21:04:27 | 2017-10-26T21:04:27 | 105,196,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,488 | py | import time
from pprint import pprint
DATABASE = {} # Data base to store accounting information
# account is special, if it doesn't have sub_account
SPECIAL_ACCOUNTS = ('01', '03', '05', '06', '08', '09', '22', '23', '24',
'25', '26', '27', '38', '39', '43', '46', '54', '55',
'69', '76', '79', '84', '85', '91', '92', '93', '98')
# valid accounts of accounting (3 digits)
# to avoid entering accounts like '999' or any mistakes in entering
# and it's a lot easier to validate account
VALID_ACCOUNTS = ('100', '101', '102', '103', '104', '105', '106', '107',
'108', '109', '111', '112', '113', '114', '115', '116',
'117', '121', '122', '123', '124', '125', '127', '131',
'132', '133', '134', '135', '141', '142', '143', '151',
'152', '153', '154', '155', '161', '162', '163', '164',
'165', '166', '181', '182', '183', '184', '191', '193',
'201', '202', '203', '204', '205', '206', '207', '208',
'209', '211', '212', '213', '281', '282', '283', '284',
'285', '286', '301', '302', '311', '312', '313', '314',
'315', '316', '331', '332', '333', '334', '335', '341',
'342', '351', '352', '361', '362', '363', '364', '371',
'372', '373', '374', '375', '376', '377', '378', '379',
'401', '402', '403', '404', '411', '412', '413', '414',
'421', '422', '423', '424', '425', '441', '442', '443',
'451', '452', '453', '471', '472', '473', '474', '475',
'476', '477', '478', '481', '482', '483', '484', '491',
'492', '493', '494', '495', '496', '501', '502', '503',
'504', '505', '506', '511', '512', '521', '522', '523',
'531', '532', '601', '602', '603', '604', '605', '606',
'611', '612', '621', '622', '631', '632', '633', '641',
'642', '643', '644', '651', '652', '654', '655', '661',
'662', '663', '671', '672', '680', '681', '682', '683',
'684', '685', '701', '702', '703', '704', '705', '710',
'711', '712', '713', '714', '715', '716', '717', '718',
'719', '721', '722', '723', '731', '732', '733', '740',
'741', '742', '744', '745', '746', '791', '792', '793',
'801', '802', '803', '804', '805', '806', '807', '808',
'809', '811', '812', '813', '814', '815', '816', '821',
'824', '831', '832', '833', '901', '902', '903', '904',
'940', '941', '942', '943', '944', '945', '946', '947',
'948', '949', '951', '952', '961', '962', '963', '970',
'971', '972', '974', '975', '976', '977', '021', '021',
'022', '023', '024', '025', '041', '042', '071', '072')
def check_valid_account(account):
    '''Checks the account. If it is not valid, raises the corresponding exception.'''
assert type(account) == str, 'Account has to be str type.'
assert account.isdigit() == True, 'Account has to be a number str type.'
assert account in SPECIAL_ACCOUNTS or account in VALID_ACCOUNTS, 'You entered invalid account.'
def check_in(database, account, start_remainder=0):
    '''Checks whether the account is already in the database
    and, if it is not, creates the data structure for it.
Returns:
number of account, sub_account (str type).
'''
check_valid_account(account)
if account in SPECIAL_ACCOUNTS:
if account not in database:
database[account] = {
'start_remainder': start_remainder,
'debit': {},
'credit': {}
}
return account, None
else:
        # e.g. if the given account is '301', then
# account = 30, sub_account = 301
sub_account, account = account, account[0:2]
if account not in database:
database[account] = {
sub_account: {
'start_remainder': start_remainder,
'debit': {},
'credit': {}
}
}
elif sub_account not in database[account]:
database[account].update({
sub_account: {
'start_remainder': start_remainder,
'debit': {},
'credit': {}
}
})
return account, sub_account
def set_start_remainder(database, account, start_remainder):
    '''Rewrites start_remainder, even if it already exists.'''
account, sub_account = check_in(database, account)
if sub_account == None:
database[account]['start_remainder'] = start_remainder
else:
database[account][sub_account]['start_remainder'] = start_remainder
def add_debit_operation(database, number, debit, amount, description):
    '''Adds an operation to the debit side of an account.
    If it already exists, rewrites it.'''
account, sub_account = check_in(database, debit)
if sub_account == None:
database[account]['debit'].update({
number: {
'amount': amount,
'description': description
}
})
else:
database[account][sub_account]['debit'].update({
number: {
'amount': amount,
'description': description
}
})
def add_credit_operation(database, number, credit, amount, description):
    '''Adds an operation to the credit side of an account.
    If it already exists, rewrites it.'''
account, sub_account = check_in(database, credit)
if sub_account == None:
database[account]['credit'].update({
number: {
'amount': amount,
'description': description
}
})
else:
database[account][sub_account]['credit'].update({
number: {
'amount': amount,
'description': description
}
})
def add_operation(database, number, debit, credit, amount, description=None):
'''Adds operation to accounting database.'''
assert type(number) == str, 'Number of operation has to be a str type.'
assert type(description) == str or description == None, 'Description has to be a str type.'
assert type(amount) == int or type(amount) == float, 'Amount should be int or float type.'
assert amount >= 0, 'Amount has to be greater or equal to zero.'
assert debit.isdigit() == True, 'Number of account has to be number str type.'
assert credit.isdigit() == True, 'Number of account has to be number str type.'
add_debit_operation(database, number, debit, amount, description)
add_credit_operation(database, number, credit, amount, description)
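# --- Added usage sketch (illustrative; the account numbers are just examples
# taken from VALID_ACCOUNTS). Not executed on import; call it manually.
def _example_usage():
    db = {}
    # one double-entry operation: debit cash ('301'), credit a customer ('361')
    add_operation(db, '1', '301', '361', 500.0, 'cash received from a customer')
    submit_turnover(db)
    submit_end_remainder(db)
    return db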
def calculate_debit_turnover(database, account, sub_account=None):
    '''The sum of all debit/credit operations is called turnover.
    This function calculates the debit turnover.'''
    if account in database: # additional check for when the function is invoked on its own
turnover = 0
if account in SPECIAL_ACCOUNTS:
operations = database[account]['debit']
for operation in operations:
turnover = turnover + operations[operation]['amount']
return turnover
else:
if sub_account in database[account]:
operations = database[account][sub_account]['debit']
for operation in operations:
turnover = turnover + operations[operation]['amount']
return turnover
def calculate_credit_turnover(database, account, sub_account=None):
    '''The sum of all debit/credit operations is called turnover.
    This function calculates the credit turnover.'''
    if account in database: # additional check for when the function is invoked on its own
turnover = 0
if account in SPECIAL_ACCOUNTS:
operations = database[account]['credit']
for operation in operations:
turnover = turnover + operations[operation]['amount']
return turnover
else:
if sub_account in database[account]:
operations = database[account][sub_account]['credit']
for operation in operations:
turnover = turnover + operations[operation]['amount']
return turnover
def submit_turnover(database):
    '''Calculates and sets the debit/credit turnover for each account.'''
    for account in database:
        if account in SPECIAL_ACCOUNTS:
            database[account]['debit']['turnover'] = calculate_debit_turnover(database, account, None)
            database[account]['credit']['turnover'] = calculate_credit_turnover(database, account, None)
        else:
            for sub_account in database[account]:
                database[account][sub_account]['debit']['turnover'] = calculate_debit_turnover(database, account, sub_account)
                database[account][sub_account]['credit']['turnover'] = calculate_credit_turnover(database, account, sub_account)
def submit_end_remainder(database):
    '''Calculates and sets the end remainder for each account.'''
    for account in database:
        if account in SPECIAL_ACCOUNTS:
            assert 'turnover' in database[account]['debit'], 'You have to invoke submit_turnover first.'
            start_remainder = database[account]['start_remainder']
            debit_turnover = database[account]['debit']['turnover']
            credit_turnover = database[account]['credit']['turnover']
            end_remainder = start_remainder + debit_turnover - credit_turnover
            database[account]['end_remainder'] = end_remainder
        else:
            for sub_account in database[account]:
                assert 'turnover' in database[account][sub_account]['debit'], 'You have to invoke submit_turnover first.'
                start_remainder = database[account][sub_account]['start_remainder']
                debit_turnover = database[account][sub_account]['debit']['turnover']
                credit_turnover = database[account][sub_account]['credit']['turnover']
                end_remainder = start_remainder + debit_turnover - credit_turnover
                database[account][sub_account]['end_remainder'] = end_remainder
""" If you want to look up how it works, you can use following code """
def test_check_in(database):
accounts = ['101', '131', '201', '207', '23', '26', '301', '311', '372', '377',
'39', '401', '441', '471', '601', '631', '641', '651', '661', '685']
start_remainders = [590000, 120000, 95000, 6000, 10000, 7000, 150, 10350, 500,
10000, 5000, 540000, 15000, 2000, 14000, 17000, 5000, 4000,
15000, 2000]
assert len(accounts) == len(start_remainders)
for index in range(0, len(accounts)):
check_in(database, accounts[index], start_remainders[index])
def test_add_operation(database):
operations = [str(x) for x in range(1, 50)]
deb_accounts = ['101', '92', '131', '972', '311', '79', '701', '79', '201', '201',
'201', '311', '23', '23', '23', '92', '23', '23', '92', '23',
'92', '471', '661', '92', '92', '92', '26', '361', '701', '311',
'311', '901', '701', '301', '661', '372', '98', '311', '641',
'631', '631', '651', '685', '601', '684', '79', '79', '79', '79']
cred_accounts = ['401','131','101','101','701','972','79','441','631','631',
'631','601','201','201','201','201','471','661','661','651',
'651','661','641','207','372','39','23','701','641','361',
'377','26','79','311','301','301','641','641','311','311',
'311','311','311','311','311','92','98','901','441']
amounts = [30000,6700,500,1000,2000,1000,2000,1000,82500,30000,7500,27000,
105000,35000,2000,15000,3000,30000,12000,11250,4500,2000,4500,
500,200,1200,177400,265213,34594,265213,3000,177400,230619,43000,
42000,200,10644,6800,4500,120000,12500,4000,1000,27000,2700,40100,
10644,177400,0]
assert len(operations) == len(deb_accounts) == len(cred_accounts) == len(amounts)
for index in range(0, len(operations)):
add_operation(database, operations[index], deb_accounts[index], cred_accounts[index], amounts[index])
def test_function(database): # compares the sums of all debit and credit turnovers; they have to match
    deb_turnover = 0
    cred_turnover = 0
    for account in database:
        if account in SPECIAL_ACCOUNTS:
            deb_turnover += database[account]['debit']['turnover']
            cred_turnover += database[account]['credit']['turnover']
        else:
            for sub_account in database[account]:
                deb_turnover += database[account][sub_account]['debit']['turnover']
                cred_turnover += database[account][sub_account]['credit']['turnover']
    print((deb_turnover, cred_turnover))
t = time.time()
test_check_in(DATABASE)
test_add_operation(DATABASE)
submit_turnover(DATABASE)
submit_end_remainder(DATABASE)
test_function(DATABASE)
print(time.time() - t)
pprint(DATABASE)
| [
"[email protected]"
] | |
4dfb6d16bcfe5cdc4bec9b9739e75f6880607027 | ac7abb68539f884b477c19c80d76ff1e1ef3b4cc | /Runner/Principal.py | 255a84bad72ee88bfec1f56abba2f023060cf01e | [] | no_license | Retravel/Retravel-Fin | 81f28791a926d4b6abba91711ef013c86be29aca | a58287708fb3e2032cda5e96094cb576c2fb761c | refs/heads/master | 2020-03-17T14:35:02.967645 | 2018-05-21T10:01:38 | 2018-05-21T10:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,277 | py | import pygame as pg
import random
from Runner.Options import *
from Runner.Classes import *
class Jeu :
def __init__(self) :
        # initialise the window, clock and game state
pg.init()
pg.mixer.init()
self.fenetre = pg.display.set_mode((LARGEUR,HAUTEUR))
pg.display.set_caption(TITRE)
self.clock = pg.time.Clock()
self.running = True
self.font_name = pg.font.match_font(FONT_NAME)
self.load_data()
def load_data(self):
        # load the game's sound effects
self.jump_son = pg.mixer.Sound('Runner/son/Jump15.wav')
self.boost_son = pg.mixer.Sound('Runner/son/Randomize87.wav')
self.hurt_son = pg.mixer.Sound('Runner/son/Hit_Hurt5.wav')
self.list_fond = []
        # load the background images
for i in range (1, 6):
self.list_fond.append(pg.image.load('Runner/img/fond' + str(i) + '.png').convert())
self.list_fond2 = [self.list_fond[1], self.list_fond[2]]
self.list_fond3 = [self.list_fond[3], self.list_fond[4]]
self.fond = self.list_fond[0]
def new(self) :
        # start a new game
self.score = 0
self.gem_score = 0
self.mob_timer = 0
self.boss_timer = 0
self.portal_timer = 0
self.current_frame = 0
self.last_update = 0
self.spawned_portal = False
self.pass_portal = False
self.spawned_portal2 = False
self.pass_portal2 = False
self.spawn_sol = False
self.spawned_boss = False
self.combat = False
self.boss_died = False
self.all_sprites = pg.sprite.LayeredUpdates()
self.platforms = pg.sprite.Group()
self.objects = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.portals = pg.sprite.Group()
self.obstacles = pg.sprite.Group()
self.boss = pg.sprite.Group()
self.player = Player(self)
for plat in PLATFORM_LIST :
Platform(self, *plat)
pg.mixer.music.load('Runner/son/Chagrin.ogg')
self.run()
def run(self):
        # game loop
pg.mixer.music.play(loops=-1)
self.playing = True
self.win = False
while self.playing == True :
self.clock.tick(FPS)
self.events()
self.update()
self.display()
if self.win :
self.victory_screen()
pg.mixer.music.fadeout(500)
def update(self):
        # update step of the game loop
self.all_sprites.update()
self.animation_fond()
        # enemy spawning
now = pg.time.get_ticks()
if now - self.mob_timer > MOQ_FREQ + random.choice([-1000, -500, 0, 500, 1000]) :
self.mob_timer = now
if self.score <= SCORE_LIMIT :
Mob_ship(self)
        # enemy collisions - phase 1
mob_hits = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
mob_died = False
for mob in self.mobs :
if not self.player.invincible :
if (mob.rect.left <= self.player.rect.centerx <= mob.rect.right and \
mob.rect.top-5 <= self.player.rect.bottom <= mob.rect.centery) and self.player.jumping :
mob_died = True
mob.kill()
if not self.spawned_portal :
self.score += 1
if mob_hits and not mob_died :
self.hurt_son.play()
self.player.vie -= 1
self.player.invincible = True
        # obstacle collisions - phase 2
obst_hits = pg.sprite.spritecollide(self.player, self.obstacles, False, pg.sprite.collide_mask)
if obst_hits :
if not self.player.invincible :
self.hurt_son.play()
self.player.vie -= 1
self.player.invincible = True
        # check whether the player lands on a platform (only while falling)
if self.player.vit.y > 0 :
hits = pg.sprite.spritecollide(self.player, self.platforms, False)
if hits:
lowest = hits[0]
for hit in hits :
if hit.rect.bottom > lowest.rect.bottom :
lowest = hit
if lowest.rect.left-10 < self.player.pos.x < lowest.rect.right+10 :
if self.player.pos.y < lowest.rect.bottom+5 :
self.player.pos.y = lowest.rect.top+0.3
self.player.vit.y = 0
self.player.jumping = False
#si le joueur arrive au 2/3 de la largeur de l'écran
if self.player.rect.x >= LARGEUR/3:
if not self.pass_portal and not self.pass_portal2:
self.player.pos.x -= max(abs(self.player.vit.x), 2)
for mob in self.mobs :
mob.rect.x -= max(abs(self.player.vit.x),2)
for plat in self.platforms :
plat.rect.right -= max(abs(self.player.vit.x),2)
for portal in self.portals :
portal.rect.right -= max(abs(self.player.vit.x),2)
        # collision between a collectable object and the player
object_hits = pg.sprite.spritecollide(self.player, self.objects, True)
for object in object_hits :
if object.type == 'boost':
self.boost_son.play()
self.player.vit.x = SPEED_BOOST
self.player.vit.y = -JUMP_BOOST
self.player.walking = False
if object.type == 'gem':
self.gem_score += 1
        # spawn new platforms
if self.spawned_portal2 == False :
while len(self.platforms) < 8 :
if self.spawned_portal == False :
Platform(self, random.randrange(LARGEUR, LARGEUR+240),
random.randrange(150, HAUTEUR-20))
else :
Platform(self, random.randrange(LARGEUR, LARGEUR+240),
random.choice([150, 300, 450 ]))
else :
if not self.spawn_sol :
Platform(self, LARGEUR + 240, HAUTEUR-50)
self.spawn_sol = True
        # trigger phase 2
if self.score > SCORE_LIMIT:
if now - self.portal_timer > 5000 and not self.spawned_portal and not self.spawned_portal2:
self.portal_timer = now
self.portal1 = Portal(self, 'portal1')
self.spawned_portal = True
        # trigger phase 3
if self.gem_score > SCORE_LIMIT:
if now - self.portal_timer > 5000 and not self.spawned_portal2:
self.portal_timer = now
self.portal2 = Portal(self, 'portal2')
self.spawned_portal2 = True
for portal in self.portals :
            # portal crossing
if portal.type == 'portal1' :
if self.player.rect.right > portal.rect.centerx+10 :
self.pass_portal = True
else :
self.pass_portal = False
if portal.type == 'portal2' :
if self.player.rect.right > portal.rect.centerx+10 :
self.pass_portal2 = True
else :
self.pass_portal2 = False
if self.pass_portal and not self.pass_portal2 :
            # speed is reduced so the player does not outrun the scrolling
self.player.vit.x *= 0.75
            # scrolling independent of the player during phase 2
if self.player.vit.x <= 0 :
self.player.pos.x -= VIT_SCROLLING
for plat in self.platforms :
if plat.rect.right <= 0 :
plat.kill()
else :
plat.rect.right -= VIT_SCROLLING
for portal in self.portals :
portal.rect.right -= VIT_SCROLLING
if self.pass_portal2 :
for plat in self.platforms :
if plat.num_image == 4 :
if plat.rect.right <= -240 :
plat.kill()
else :
plat.rect.right -= VIT_SCROLLING
if plat.num_image == 1 :
if plat.rect.right-20 > LARGEUR :
plat.rect.x -= VIT_SCROLLING
for portal in self.portals :
portal.rect.right -= VIT_SCROLLING
if portal.rect.left < 1 and not self.spawned_boss:
Boss(self, 700, HAUTEUR-48)
self.spawned_boss = True
if self.spawned_boss :
            # the fight starts with the animation change
if self.player.rect.x > LARGEUR*0.6 :
self.combat = True
if self.combat :
            # boss fight
for boss in self.boss :
if boss.rect.x < self.player.rect.x :
boss.vit.x = 2
if boss.rect.x > self.player.rect.x :
boss.vit.x = -2
if self.player.rect.x-1 <= boss.rect.x <= self.player.rect.x+1 :
boss.vit.x = 0
                # boss collisions - phase 3
boss_hit = pg.sprite.spritecollide(self.player, self.boss, False, pg.sprite.collide_mask)
if not self.player.invincible and not boss.protection:
if (boss.rect.left+5 <= self.player.rect.centerx <= boss.rect.right-5 and \
boss.rect.top-5 <= self.player.rect.bottom <= boss.rect.centery) and self.player.jumping :
boss.vie -= 1
boss.protection = True
if boss_hit and not self.boss_died:
self.hurt_son.play()
self.player.vie -= 1
self.player.invincible = True
for boss in self.boss :
            # if the boss has run out of lives
if boss.vie <= 0 :
self.boss_died = True
boss.image = boss.died_img
boss.vit.x = 0
if boss.rect.bottom < HAUTEUR -30 :
boss.vit.y = 1
if boss.rect.top > HAUTEUR :
self.win = True
        # if the player falls off the bottom of the screen
if self.player.rect.top > HAUTEUR :
self.playing = False
        # if the player has no lives left
if self.player.vie <= 0 :
self.playing = False
        # phase 2 - if the player can no longer keep up
if self.player.rect.right < -5 :
self.playing = False
def animation_fond(self):
        # switch the background according to the current phase
now = pg.time.get_ticks()
if not self.pass_portal and not self.pass_portal2 :
self.fond = self.list_fond[0]
else :
if self.pass_portal and not self.pass_portal2 :
if now - self.last_update > 2000 :
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.list_fond2)
self.fond = self.list_fond2[self.current_frame]
if self.pass_portal2 :
if now - self.last_update > 2000 :
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.list_fond3)
self.fond = self.list_fond3[self.current_frame]
def events(self) :
        # actions / events
for event in pg.event.get() :
if event.type == pg.QUIT :
if self.playing == True :
self.playing = False
self.running = False
if event.type == pg.KEYDOWN :
if event.key == pg.K_SPACE :
self.player.jump()
if event.type == pg.KEYUP :
if event.key == pg.K_SPACE :
self.player.jump_cut()
def display(self) :
        # draw loop of the game
self.fenetre.blit(self.fond, (0, 0))
self.all_sprites.draw(self.fenetre)
if self.player.invincible and self.player.vie > 0:
self.fenetre.blit(self.player.shield, (self.player.rect.x-10, self.player.rect.y-3))
for portal in self.portals :
if self.pass_portal == True :
self.fenetre.blit(portal.image, portal.rect)
if not self.pass_portal and not self.pass_portal2 :
self.affiche_text(str(self.score), 30, BLANC, LARGEUR-20, 20)
if self.pass_portal and not self.pass_portal2 :
self.affiche_text(str(self.gem_score), 30, VERT, LARGEUR-20, 20)
for i in range (self.player.vie):
self.fenetre.blit(self.player.coeur,(10+35*i, 10))
for boss in self.boss :
if self.combat :
if boss.vie >= 1 :
self.fenetre.blit(boss.head,(597, 10))
for i in range (boss.vie):
self.fenetre.blit(boss.coeur,(625+35*i, 10))
        # after drawing every element, refresh the screen
pg.display.flip()
def affiche_text(self, text, size, color, x, y) :
        # helper that renders a text string on screen (e.g. the phase-1 kill count)
font = pg.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.fenetre.blit(text_surface, text_rect)
def start_screen(self):
        # start screen
pg.mixer.music.load('Runner/son/Son_start_screen.ogg')
pg.mixer.music.play(loops=-1)
self.fenetre.fill(COULEUR_FOND)
self.affiche_text('RUNNER', 48, JAUNE, LARGEUR/2, HAUTEUR/6 - 20)
self.affiche_text("FLECHES pour BOUGER, ESPACE pour SAUTER", 22 , JAUNE, LARGEUR/2, HAUTEUR*(2/6))
self.affiche_text("Phase 1 : tuer 5 ennemis", 22 , JAUNE, LARGEUR/2, HAUTEUR/2)
self.affiche_text("Phase 2 : ramasser 5 gemmes vertes", 22 , JAUNE, LARGEUR/2, HAUTEUR/2 + 25)
self.affiche_text("Phase 3 : affronter le boss", 22 , JAUNE, LARGEUR/2, HAUTEUR/2 + 50)
self.affiche_text("APPUYEZ sur ENTER pour JOUER", 22 , JAUNE, LARGEUR/2, HAUTEUR*3/4)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def game_over_screen(self):
        # screen shown when the player loses
if self.running == False :
return
pg.mixer.music.load('Runner/son/Son_game_over.ogg')
pg.mixer.music.play(loops=-1)
self.fenetre.fill(COULEUR_FOND)
self.affiche_text('GAME OVER', 48, ROUGE, LARGEUR/2, HAUTEUR/4)
self.affiche_text("APPUYEZ sur ENTER pour REESAYER", 22 ,
ROUGE, LARGEUR/2, HAUTEUR/2)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def victory_screen(self):
        # final victory screen
self.fenetre.fill(COULEUR_FOND)
        self.affiche_text('YOU WIN - CONGRATULATIONS', 48, ORANGE, LARGEUR/2, HAUTEUR/4)
        self.affiche_text("PRESS the window's CLOSE button to QUIT the game", 22 , ORANGE, LARGEUR/2, HAUTEUR/2)
pg.display.flip()
self.wait_for_key()
if self.running == False :
pg.quit()
def wait_for_key(self):
waiting = True
while waiting :
self.clock.tick(FPS)
for event in pg.event.get() :
if event.type == pg.QUIT :
waiting = False
self.running = False
if event.type == pg.KEYUP :
if event.key == pg.K_RETURN :
waiting = False
| [
"[email protected]"
] | |
5b9b16d3f350192012b8a8d223b402d78902b5c8 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Spacy/source2.7/spacy/lang/id/tokenizer_exceptions.py | 3bba57e4cbd39db28e872da9aa8cb1051962e24a | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,722 | py | # coding: utf8
from __future__ import unicode_literals
import regex as re
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS
from ..tokenizer_exceptions import URL_PATTERN
from ...symbols import ORTH
_exc = {}
for orth in ID_BASE_EXCEPTIONS:
_exc[orth] = [{ORTH: orth}]
orth_title = orth.title()
_exc[orth_title] = [{ORTH: orth_title}]
orth_caps = orth.upper()
_exc[orth_caps] = [{ORTH: orth_caps}]
orth_lower = orth.lower()
_exc[orth_lower] = [{ORTH: orth_lower}]
if '-' in orth:
orth_title = '-'.join([part.title() for part in orth.split('-')])
_exc[orth_title] = [{ORTH: orth_title}]
orth_caps = '-'.join([part.upper() for part in orth.split('-')])
_exc[orth_caps] = [{ORTH: orth_caps}]
for orth in [
"'d", "a.m.", "Adm.", "Bros.", "co.", "Co.", "Corp.", "D.C.", "Dr.", "e.g.",
"E.g.", "E.G.", "Gen.", "Gov.", "i.e.", "I.e.", "I.E.", "Inc.", "Jr.",
"Ltd.", "Md.", "Messrs.", "Mo.", "Mont.", "Mr.", "Mrs.", "Ms.", "p.m.",
"Ph.D.", "Rep.", "Rev.", "Sen.", "St.", "vs.",
"B.A.", "B.Ch.E.", "B.Sc.", "Dr.", "Dra.", "Drs.", "Hj.", "Ka.", "Kp.",
"M.Ag.", "M.Hum.", "M.Kes,", "M.Kom.", "M.M.", "M.P.", "M.Pd.", "M.Sc.",
"M.Si.", "M.Sn.", "M.T.", "M.Th.", "No.", "Pjs.", "Plt.", "R.A.", "S.Ag.",
"S.E.", "S.H.", "S.Hut.", "S.K.M.", "S.Kedg.", "S.Kedh.", "S.Kom.",
"S.Pd.", "S.Pol.", "S.Psi.", "S.S.", "S.Sos.", "S.T.", "S.Tekp.", "S.Th.",
"a.l.", "a.n.", "a.s.", "b.d.", "d.a.", "d.l.", "d/h", "dkk.", "dll.",
"dr.", "drh.", "ds.", "dsb.", "dst.", "faks.", "fax.", "hlm.", "i/o",
"n.b.", "p.p." "pjs.", "s.d.", "tel.", "u.p.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = _exc
| [
"[email protected]"
] | |
0e89ff008a7a7e5cdc2d5f004fae605af78a9091 | 56c9e8f4f28400fc42bb1ee9ad1615a1520854c3 | /Area.py | bb6893e43a3ece38a19467b9040e979187465b5a | [] | no_license | IbnAzeez/Python-Area | 157a9bf333dfc27ed409d4ec123056eb7b735e7a | a60d046dbdd821c4ef76a2391588a15651bf223e | refs/heads/master | 2021-04-24T05:59:30.872779 | 2020-03-25T20:53:38 | 2020-03-25T20:53:38 | 250,088,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | import math
print ('Welcome User')
print ('This program accepts the radius of a circle and returns the area as an output')
print('--------------------------------')
print('Please input radius')
radius = float(input())
Area = math.pi * radius * radius
print('Calculating Area of the Circle, wait a minute!')
print('--------------------------------')
print('Area of Circle is: ')
print(Area)
print('--------------------------------')
print('No need to try using a calculator, the answer is spot on')
print('Thank you')
| [
"[email protected]"
] | |
ee1cc126f996d7479b571b66119ece007be82d74 | 21511fdabe3f0f76bba25d4d6f62fd964d090d15 | /TD02_Bitcoin_Today_practice.py | 3cffde3080ab5c72fc7e705f0f5c8e3757e3a401 | [] | no_license | lilyanB/BP-TD2-BitcoinSeed | efbc2fdb8adc67c60581bce36468327827fd9101 | 9a32f8da6e228ec8ef624054014889380c1c8782 | refs/heads/main | 2023-08-16T00:55:48.972306 | 2021-10-10T20:03:40 | 2021-10-10T20:03:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,339 | py | import secrets
import hashlib
import binascii
import unicodedata
import hmac
import ecdsa
import struct
import base58
from ecdsa.curves import SECP256k1
from ecdsa.ecdsa import int_to_string, string_to_int
from mnemonic import Mnemonic
import bip32utils
from bip32utils import BIP32Key
from bip32utils import BIP32_HARDEN
##############
# Create a random integer that can securely serve as a wallet seed
##############
bits = secrets.randbits(128)
bits_hex = hex(bits)
private_key = bits_hex[2:].zfill(32) # pad to 32 hex chars so unhexlify below always gets 16 bytes
##############
# Represent this seed in binary and split it into 11-bit chunks
##############
bits_bin = bin(bits)
bits_bin = bits_bin[2:]
data = binascii.unhexlify(private_key)
h = hashlib.sha256(data).hexdigest()
b = bin(int(binascii.hexlify(data),16))[2:].zfill(len(data)*8)
checksum = bin(int(h,16))[2:].zfill(256)[: len(data)* 8//32]
tab=[]
word=""
cpt=0
if(len(str(b))<128):
for i in range(0, 128-len(str(b))):
word+="0"
cpt+=1
for j in b:
word=str(word)+str(j)
cpt+=1
if cpt==11:
cpt=0
tab.append(word)
word=""
word+=str(checksum)
tab.append(word)
##############
# Assign each 11-bit chunk a word from the BIP-39 list and display the mnemonic seed
##############
with open("english.txt", "r") as f:
wordlist = [w.strip() for w in f.readlines()]
seed = []
for k in range(len(tab)):
for i in range(len(tab[k])//11):
indx = int(tab[k][11*i:11*(i+1)],2)
seed.append(wordlist[indx])
phrase = " ".join(seed)
##############
# Allow importing a mnemonic seed
##############
seed_temp = str(input("\nDo you want to import your own seed? (y/n)"))
if(seed_temp=="y"):
    phrase = str(input("\nEnter your own seed: "))
print(phrase)
normalized_mnemonic = unicodedata.normalize("NFKD", phrase)
password = ""
normalized_passphrase = unicodedata.normalize("NFKD", password)
passphrase = "mnemonic" + normalized_passphrase
mnemonic = normalized_mnemonic.encode("utf-8")
passphrase = passphrase.encode("utf-8")
bin_seed = hashlib.pbkdf2_hmac("sha512", mnemonic, passphrase, 2048)
hex_bin = binascii.hexlify(bin_seed[:64])
mnemon = Mnemonic('english')
seed_mnemonic = mnemon.to_seed(mnemonic)
##############
# Extract the master private key and the chain code
##############
seed_bytes = binascii.unhexlify(hex_bin)
I = hmac.new(b"Bitcoin seed", seed_bytes, hashlib.sha512).digest()
L, R = I[:32], I[32:]
master_private_key = int.from_bytes(L, 'big')
master_chain_code = R
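# Per BIP32, the left 32 bytes of HMAC-SHA512(key=b"Bitcoin seed", msg=seed)
# become the master private key and the right 32 bytes the master chain code.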
##############
#Extract the master public and private keys
##############
seed = binascii.unhexlify(hex_bin)
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
secret = Il
chain = Ir
xprv = binascii.unhexlify("0488ade4")
xpub = binascii.unhexlify("0488b21e")
depth = b"\x00"
fpr = b'\0\0\0\0'
index = 0
child = struct.pack('>L', index)
k_priv = ecdsa.SigningKey.from_string(secret, curve=SECP256k1)
K_priv = k_priv.get_verifying_key()
data_priv = b'\x00' + (k_priv.to_string())
if K_priv.pubkey.point.y() & 1:
data_pub= b'\3'+int_to_string(K_priv.pubkey.point.x())
else:
data_pub = b'\2'+int_to_string(K_priv.pubkey.point.x())
raw_priv = xprv + depth + fpr + child + chain + data_priv
raw_pub = xpub + depth + fpr + child + chain + data_pub
hashed_xprv = hashlib.sha256(raw_priv).digest()
hashed_xprv = hashlib.sha256(hashed_xprv).digest()
hashed_xpub = hashlib.sha256(raw_pub).digest()
hashed_xpub = hashlib.sha256(hashed_xpub).digest()
raw_priv += hashed_xprv[:4]
raw_pub += hashed_xpub[:4]
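# Sanity check (a sketch, assuming bip32utils' ExtendedKey() signature): the
# hand-serialized xprv should match the library's serialization of the same
# seed, e.g.:
# assert base58.b58encode(raw_priv).decode() == BIP32Key.fromEntropy(seed).ExtendedKey(private=True, encoded=True)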
#######################
#Full information root key (master public key, master private key...)
######################
root_key = bip32utils.BIP32Key.fromEntropy(seed)
root_address = root_key.Address()
root_public_hex = root_key.PublicKey().hex()
root_private_wif = root_key.WalletImportFormat()
print("\n--------------------------------")
print('Root key:')
print(f'\t{root_key.dump()}')
#######################
#Generate a child key
######################
child_key = root_key.ChildKey(0).ChildKey(0)
child_address = child_key.Address()
child_public_hex = child_key.PublicKey().hex()
child_private_wif = child_key.WalletImportFormat()
print("\n--------------------------------")
print('Child key m/0/0:')
print(f'\t{child_key.dump()}')
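# BIP32 path note: ChildKey(0).ChildKey(0) is the non-hardened path m/0/0;
# a hardened child would be requested as ChildKey(index + BIP32_HARDEN),
# which is why BIP32_HARDEN is imported above.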
#######################
#Generate a child key at index N
######################
t = str(input("\nVoulez vous utiliser un index (sans niveau d'indexation) ? (y/n)"))
if (t=="y"):
n = int(input("\nVeuillez choisir le niveau d'indexation ? "))
print("Index choisi : ",n)
i = 0
for x in range(n):
i=i+1
child_key_son = root_key.ChildKey(0).ChildKey(i)
child_address_son = child_key_son.Address()
child_public_hex_son = child_key_son.PublicKey().hex()
child_private_wif_son = child_key_son.WalletImportFormat()
print("--------------------------------")
print('Child key m/0/',i)
print(f'\tAddress: {child_address_son}')
print(f'\tPublic : {child_public_hex_son}')
print(f'\tPrivate: {child_private_wif_son}\n')
print(i)
#######################
#Generate a child key at index N and derivation level M
######################
else:
n = int(input("\nVeuillez choisir le niveau d'indexation ? "))
print("Index choisi : ",n)
m = int(input("\nVeuillez choisir le niveau de dérivation ? "))
print("Dérivation choisi : ",m)
i = 0
for x in range(n):
i=i+1
child_key_son = root_key.ChildKey(m).ChildKey(i)
child_address_son = child_key_son.Address()
child_public_hex_son = child_key_son.PublicKey().hex()
child_private_wif_son = child_key_son.WalletImportFormat()
print("--------------------------------")
print('Child key m/',m,'/',i)
print(f'\tAddress: {child_address_son}')
print(f'\tPublic : {child_public_hex_son}')
print(f'\tPrivate: {child_private_wif_son}\n')
print(i)
#######################
#Custom information output
######################
print("-------------------------------------")
print("Vous allez choisir toutes les informations que vous souhaitez récupérer.")
step1 = str(input("\nVoulez vous récupérer la private key? (y/n)"))
if(step1=="y"):
print("private key : ",private_key)
print("-------------------------------------")
step2 = str(input("\nVoulez vous afficher la seed en lot de 11 bites? (y/n)"))
if(step2=="y"):
print("Seed en lot : ",tab)
print("-------------------------------------")
step3 = str(input("\nVoulez vous afficher la phrase en mnémonique? (y/n)"))
if(step3=="y"):
print("Phrase : ",phrase)
print("-------------------------------------")
step4 = str(input("\nVoulez vous afficher la seed BIP39? (y/n)"))
if(step4=="y"):
print(f'BIP39 Seed: {seed_mnemonic.hex()}\n')
print("-------------------------------------")
step5 = str(input("\nVoulez vous afficher la master publique key et la master private key? (y/n)"))
if(step5=="y"):
print("\nOnly public and private root keys:")
    print(f'\tPrivate : {base58.b58encode(raw_priv)}')
    print(f'\tPublic : {base58.b58encode(raw_pub)}')
print(f'master chain code (bytes): {master_chain_code}')
print("-------------------------------------")
print("Merci pour votre confiance.")
| [
"[email protected]"
] | |
875b61123dd93a948ac250e15ea5b998a1923731 | 68947387d77e0c869494dd8e7539594a3d1a2ed9 | /Sean_Mitchell_CS_317_Extra_Credit.py | 75af29007b6e66934a52fb1e71785e43f3a3b07e | [] | no_license | SeanMitchell1994/CS317_ExtraCredit | 375702aaefe58a7baf6005b71c9feee6fb19b640 | 526ed1b173fb6490944f05cb2d1d459560057f43 | refs/heads/master | 2020-05-16T19:12:35.240076 | 2019-04-29T01:24:28 | 2019-04-29T01:24:28 | 183,251,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | # ====================================================
# Sean Mitchell
# CS 317-20 Spring 2019
# Extra Credit
#
# Creates a series of petals and rings using turtle
# The total shape has a color gradient that starts low
# and goes high as the shape size increases
# This base color is randomly chosen at runtime
#
# This version includes tail_recursion.py
# ====================================================
from turtle import *
import colorsys
import time
from random import randint
from tail_recursion import tail_recursive, recurse # tail_recursion.py is not mine; it's just an
                                                   # interesting trick to speed up the program because
                                                   # of sequential recursive calls.
                                                   # Full credit is provided in tail_recursion.py.
                                                   # The program runs fine without it, just slower;
                                                   # with this added, it ran roughly 40% faster.
color_lut = [] # color lookup table
@tail_recursive
def quarter_circle(steps,length,side,base_color):
# steps = number of times to run
# length = length to move forward
# side = which side is the petal (coming or leaving origin?)
# base_color = value of randomly chosen base color
#
# Draws a quarter circle
# exit condition
if (steps <= 0):
return
# determines if the petal is coming or leaving the origin
if (side == 1):
color(color_lut[base_color - (steps) + 90])
elif (side == -1):
color(color_lut[base_color + (steps)])
    # move forward by length and turn left by length degrees to trace the arc
forward(length)
right(-length)
# recursive call
quarter_circle(steps-1,length,side,base_color)
@tail_recursive
def inner_circle(steps,base_color):
# steps = number of times to run
# base_color = value of randomly chosen base color
#
# Draws the inner geometry using quarter_circle()
# exit condition
if (steps <= 0):
return
# Draws a full petal
quarter_circle(90,1,1,base_color)
right(270)
quarter_circle(90,1,-1,base_color)
# shifts to the right by 5 pixels
right(5)
# recursive call
inner_circle(steps-1,base_color)
@tail_recursive
def petal_ring(steps,base_color):
# steps = number of times to run
# base_color = value of randomly chosen base color
#
# Draws the outer geometry using quarter_circle()
# exit condition
if (steps <= 0):
return
# Draws a full petal
quarter_circle(90,1,1,base_color+90)
right(270)
quarter_circle(90,1,-1,base_color+90)
# shifts the position to follow the outline of the circle
forward(9)
right(-84)
# recursive call
petal_ring(steps-1,base_color)
def Main():
start = time.time()
# populates the color lookup table
for i in range(1000):
color_lut.append(colorsys.hsv_to_rgb(i/1000, 1.0, 1.0))
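    # For reference: colorsys.hsv_to_rgb maps hue in [0, 1) around the color
    # wheel, e.g. hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0) (pure red), so
    # this 1000-entry LUT sweeps the full rainbow once.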
# generates the random base color
base_color = randint(0, 800)
# run settings
pensize(2)
bgcolor('black')
speed(0)
hideturtle()
# draws the first circle
color(color_lut[base_color + 90])
circle(85)
up()
setpos(0, 85)
down()
# draws the inner petals
inner_circle(19,base_color)
#draws the outer circle
color(color_lut[base_color+180])
up()
setpos(-15,-75)
down()
circle(160)
# draws the outer petals
up()
setheading(0)
setpos(85,90)
down()
petal_ring(60,base_color)
end = time.time()
print(end - start)
done()
Main()
| [
"[email protected]"
] | |
35c12f8dd042fa9e218f0f82d1ced393a6413f71 | e839d7d13689529c945cfd923fa460b3a1fcd1f6 | /invariant3b.py | 58472fd2f3e0139e275e8344a2cc544051671055 | [] | no_license | KaplanLab/Invariants | 3a81765cf9debcc15faed425dc966ae3be5c7eec | b4432ec4639b0d08c0a90630fb1e32a13dfffebf | refs/heads/master | 2020-03-11T08:47:20.151603 | 2018-04-23T06:26:32 | 2018-04-23T06:26:32 | 129,892,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,781 | py |
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import argparse
import sys
def main():
parser=argparse.ArgumentParser(description='Calculates smoothness',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-in',help='input file',dest='infile',type=str,required=True)
parser.add_argument('-out',help='output file prefix',dest='outprefix',type=str,required=True)
parser.add_argument('-d',help='x,y distances to compare (x<y ; compare interactions of i+x with i vs i+y with i)',dest='xy',type=int,nargs=2,default=[1,10],metavar=('X','Y'))
args=parser.parse_args()
infile=args.infile
outprefix=args.outprefix
xy=args.xy
x,y = xy[0],xy[1]
print ("loading npz...\n",file=sys.stderr)
with np.load(infile) as i:
d=i['d']
chr_bin_range=i['chr_bin_range']
chrs=i['chrs']
bin_pos=i['bin_pos']
n=i['n']
nonan=lambda x: x[~np.isnan(x)]
print ("calculating smoothness...",file=sys.stderr)
d[(range(n),range(n))]=np.nan
inv3b=np.zeros(n)
inv3b[:]=np.nan
np.seterr(divide='ignore', invalid='ignore')
for i in range(0,n-y):
c = bin_pos[i,0]
same_chr_bins = (bin_pos[:,0]==c) # bins that are in same chr as i
rng = ( chr_bin_range[c,0], chr_bin_range[c,1] ) # consider only cis bins
distf = lambda x1,x2: np.nanmean(np.abs(x1-x2)) # mean absolute difference
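        # e.g. distf(np.array([1., 2., np.nan]), np.array([2., 2., 5.])) == 0.5,
        # since nanmean ignores the pair containing NaN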
diff_x = distf( d[i+x,rng[0]:rng[1]], d[i,rng[0]:rng[1]] ) # diff_x is the mean absolute difference between the cis interactions of i and the cis interactions of i+x
diff_y = distf( d[i+y,rng[0]:rng[1]], d[i,rng[0]:rng[1]] ) # diff_y is the mean absolute difference between the cis interactions of i and the cis interactions of i+y
inv3b[i] = diff_y - diff_x
print ("saving and plotting...",file=sys.stderr)
np.save(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'.npy',inv3b)
np.savetxt(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_stats.tab',[np.median(nonan(inv3b))])
plt.figure(figsize=(3,10))
vp=plt.violinplot(nonan(inv3b),showextrema=False,widths=0.8)
for pc in vp['bodies']:
pc.set_alpha(0.8)
vp['bodies'][0].set_facecolor('red')
plt.savefig(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_hist.png',dpi=300)
plt.figure(figsize=(20,3))
plt.plot(inv3b,'.',color='red')
plt.title("median: "+str(np.median(nonan(inv3b))))
plt.vlines(chr_bin_range[:,0],0,np.nanmax(inv3b))
plt.savefig(outprefix+'_inv3b_'+str(x)+'-'+str(y)+'_plot.png',dpi=300)
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
9b209805bbc3e5381db705ee82f66c38d2e5ef39 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.15/_downloads/plot_compute_rt_average.py | fd3b17129bcbbdb519a78a19a35ccce09b59e38c | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 1,912 | py | """
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to connect to an MNE Real-time server
using the RtClient and use it together with RtEpochs to compute
evoked responses using moving averages.
Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
has to be running on the same computer.
"""
# Authors: Martin Luessi <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked = mne.combine_evoked([evoked, ev], weights='nave')
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
| [
"[email protected]"
] | |
98abec5efcce801961012aeb66f7b575f4629f70 | 4ce948abfe57dbca767784294a804be6586b6c74 | /login_page/login.py | 2d0d6760e771f8cd1aca33d2ed03237a2c84aed3 | [] | no_license | XampleV/Password-Ch3cker | 275ee1a8be1424e1ecc9257060f605324030b292 | 14dc9ce7732b671a5e35b2dbea181210d253ebc6 | refs/heads/main | 2023-04-17T04:54:43.800189 | 2021-04-03T21:40:48 | 2021-04-03T21:40:48 | 354,161,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtWidgets import *
import sys
from login_page.login_page import Ui_Form as login
from login_page.program_functions import login_functions
import tkinter as tk
import tkinter.messagebox
root = tk.Tk()
root.withdraw()
app = QApplication()
login_f = login_functions()
continue_app = {"start":False}
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = login()
self.ui.setupUi(self)
self.CustomSettings()
self.SetupButtons()
self.show()
def CustomSettings(self):
self.setWindowTitle("Password Ch3cker - Login")
self.ui.password_input.setEchoMode(QtWidgets.QLineEdit.Password)
self.ui.signup_password_input.setEchoMode(QtWidgets.QLineEdit.Password)
def SetupButtons(self):
self.ui.signup_button.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.signup_page))
self.ui.already_a_user_button.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.login_page))
self.ui.register_button.clicked.connect(lambda: self.register_func())
self.ui.login_button.clicked.connect(lambda: self.login_func())
self.ui.submit_auth_button.clicked.connect(lambda: self.check_code())
def register_func(self):
email, password = self.ui.signup_email_input.text(), self.ui.signup_password_input.text()
if ("@" not in email):
tkinter.messagebox.showerror("Invalid Email", "Please enter a valid email.")
return
if (password == ""):
tkinter.messagebox.showerror("Invalid Password", "Please enter a valid password.")
return
# actually signing up here now...
register = login_f.register_account(email, password)
if (type(register) == str):
tkinter.messagebox.showerror("Failure", f"Failed to create your account.\nError: {register}")
return
if (register == True):
tkinter.messagebox.showinfo("Success", "Successfully created your account!")
self.ui.stackedWidget.setCurrentWidget(self.ui.login_page)
return
tkinter.messagebox.showerror("Failed", "Failed to create your account!")
def login_func(self):
login = login_f.login_account(self.ui.email_input.text(), self.ui.password_input.text())
if (login == True):
self.ui.stackedWidget.setCurrentWidget(self.ui.auth_page)
return
tkinter.messagebox("Failure", "The credentials are incorrect.")
def check_code(self):
global continue_app
check = login_f.check_code(self.ui.email_input.text(), self.ui.auth_code_input.text())
if (check == True):
continue_app["start"] = True
tkinter.messagebox.showinfo('Success', "Successfully logged in!")
root.destroy()
return
tkinter.messagebox.showerror("Failure", "Wrong code entered. ")
| [
"[email protected]"
] | |
680d96b054b74302c31825e8d1fb6d117d66499b | a4515918f56dd7ab527e4999aa7fce818b6dd6f6 | /Data Structures/LinkedLists/Python/copy_random_pointer.py | b3ef4628d60d581eefa6d200d79fc56db9a8d61f | [
"MIT"
] | permissive | rathoresrikant/HacktoberFestContribute | 0e2d4692a305f079e5aebcd331e8df04b90f90da | e2a69e284b3b1bd0c7c16ea41217cc6c2ec57592 | refs/heads/master | 2023-06-13T09:22:22.554887 | 2021-10-27T07:51:41 | 2021-10-27T07:51:41 | 151,832,935 | 102 | 901 | MIT | 2023-06-23T06:53:32 | 2018-10-06T11:23:31 | C++ | UTF-8 | Python | false | false | 1,099 | py | """
A linked list is given such that each node contains an additional random
pointer which could point to any node in the list or null.
Return a deep copy of the list.
"""
from collections import defaultdict
class RandomListNode(object):
def __init__(self, label):
self.label = label
self.next = None
self.random = None
def copy_random_pointer_v1(head):
"""
:type head: RandomListNode
:rtype: RandomListNode
"""
dic = dict()
m = n = head
while m:
dic[m] = RandomListNode(m.label)
m = m.next
while n:
dic[n].next = dic.get(n.next)
dic[n].random = dic.get(n.random)
n = n.next
return dic.get(head)
# O(n)
def copy_random_pointer_v2(head):
"""
:type head: RandomListNode
:rtype: RandomListNode
"""
copy = defaultdict(lambda: RandomListNode(0))
copy[None] = None
node = head
while node:
copy[node].label = node.label
copy[node].next = copy[node.next]
copy[node].random = copy[node.random]
node = node.next
return copy[head]
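# Minimal usage sketch (hypothetical node values, not part of the original):
# build a two-node list whose random pointers target the head, then deep-copy
# it and check that the copy preserves structure without sharing nodes.
if __name__ == '__main__':
    head = RandomListNode(1)
    head.next = RandomListNode(2)
    head.random = head           # head's random pointer loops back to itself
    head.next.random = head      # second node's random pointer targets head
    new_head = copy_random_pointer_v2(head)
    assert new_head is not head                      # a genuinely new node
    assert new_head.label == 1 and new_head.next.label == 2
    assert new_head.random is new_head               # structure preserved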
| [
"[email protected]"
] | |
b9f3a49f7f1fe0e94be6a1066047c260b2555dcc | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/TauES_test/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851334/HTT_24Jul_newTES_manzoni_Down_Jobs/Job_18/run_cfg.py | 38dbc249e4f6a3beb3e7f9386fe60200d89f9895 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,054 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851334/HTT_24Jul_newTES_manzoni_Down_Jobs')
from base_cfg import *
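# Note: each lumisToProcess entry below is a CMSSW luminosity-block range of
# the form "run:firstLumi-run:lastLumi", typically flattened from a certified
# good-lumi JSON file into VLuminosityBlockRange strings.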
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_146.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_147.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_148.root')
)
| [
"[email protected]"
] | |
f35a1b5fd5ac6605e90666ff032d753126a89666 | 45db4a55c6bd5137b17bf8dfa54ed94f361c3bf6 | /ResonantCircuits/resonantCircuit.py | e092ec6a20e63812e33f488d85c4af3afa794def | [] | no_license | CatT-DancingDev/PythonProjects | 1be3e8f0b0528be1ccbe8aeadb76ac8a5f9961ae | 7b59d9b1843eaddb9254f980f178d6e8ba551106 | refs/heads/main | 2023-04-15T08:06:25.240981 | 2021-04-25T04:13:15 | 2021-04-25T04:13:15 | 361,327,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,609 | py | ###############################################################################################
#
# Program: Resonant Circuit Design
# Module: resonantCircuit.py
# Author: Catherine Trujillo
# Course: CSC 217-470
# Date: 7/07/2020
#
###############################################################################################
#
#
# Description: This module defines/implements the superclass ResonantCircuit, which stores the
# data needed to describe a resonant frequency response.
#
############################## CLASS METHODS LIST #############################################
#
# __init__(self)
# setRF(self, rf)
# setB(self, b)
# setK(self, k)
# getRF(self)
# getB(self)
# getK(self)
# display(self)
#
############################## CLASS DEFINITION ################################################
class ResonantCircuit:
############################## METHODS #########################################################
#
# Method: __init__(self)
#
# Parameters: self
# Return Value: ResonantCircuit object
#
# Purpose:      Instantiate a ResonantCircuit object with data fields for:
# _rf = Resonant Frequency in rad/s
# _b = Bandwidth in rad/s
# _k = Gain at RF
#
#################################################################################################
def __init__(self):
self._rf = 0
self._b = 0
self._k = 0
#################################################################################################
#
# Method: getRF(self)
#
# Parameters: self
# Return Value: self._rf
#
# Purpose: Returns the value of self._rf
#
#################################################################################################
def getRF(self):
return self._rf
#################################################################################################
#
# Method: getB(self)
#
# Parameters: self
# Return Value: self._b
#
# Purpose: Returns the value of self._b
#
#################################################################################################
def getB(self):
return self._b
#################################################################################################
#
# Method: getK(self)
#
# Parameters: self
# Return Value: self._k
#
# Purpose: Returns the value of self._k
#
#################################################################################################
def getK(self):
return self._k
#################################################################################################
#
# Method: setRF(self, rf)
#
# Parameters: self, float rf
# Return Value: None
#
# Purpose: Assigns the value of rf to self._rf
#
#################################################################################################
def setRF(self, rf):
self._rf = rf
#################################################################################################
#
# Method: setB(self, b)
#
# Parameters: self, float b
# Return Value: None
#
# Purpose: Assigns the value of b to self._b
#
#################################################################################################
def setB(self, b):
self._b = b
#################################################################################################
#
# Method: setK(self, k)
#
# Parameters: self, float k
# Return Value: None
#
# Purpose: Assigns the value of k to self._k
#
#################################################################################################
def setK(self, k):
self._k = k
#################################################################################################
#
# Method: display(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: Displays the description of the resonant frequency response
#
#################################################################################################
def display(self):
print("RESONANT FREQUENCY RESPONSE:")
print("Resonant Frequency = {} rad/s".format(self._rf))
print("Bandwidth = {} rad/s".format(self._b))
print("Gain At Resonant Frequency = {} \n".format(self._k))
##################################### END CLASS #################################################
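############################## USAGE EXAMPLE ###################################################
#
# A minimal illustrative sketch (not part of the original module):
#
#   circuit = ResonantCircuit()
#   circuit.setRF(5000)   # resonant frequency in rad/s
#   circuit.setB(1000)    # bandwidth in rad/s
#   circuit.setK(10)      # gain at the resonant frequency
#   circuit.display()
#
#################################################################################################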
| [
"[email protected]"
] | |
71688feb0681e39a08137247598481369fa9f252 | a08b5385e41fd4a99cc47e71df8310a6ce58721a | /flappy_bird.py | 9199d0f2f28bf0155f7aa3c92d55a6b808867a51 | [] | no_license | walg/NEAT-Flappy-Bird | 91ecbf17969bce6240225b3d5d6a180693cb5efd | 707dee098cfa39cf25e295e76015149f23b5da81 | refs/heads/master | 2020-07-02T22:44:01.620700 | 2019-08-09T00:40:29 | 2019-08-09T00:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,612 | py | """
The classic game of Flappy Bird, made with Python
and Pygame. Features pixel-perfect collision using masks :o
Date Modified: Jul 30, 2019
Author: Tech With Tim
Estimated Work Time: 5 hours (1 just for that damn collision)
"""
import pygame
import random
import os
import time
import neat
import visualize
import pickle
pygame.font.init() # init font
WIN_WIDTH = 600
WIN_HEIGHT = 800
FLOOR = 730
STAT_FONT = pygame.font.SysFont("comicsans", 50)
END_FONT = pygame.font.SysFont("comicsans", 70)
DRAW_LINES = False
WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption("Flappy Bird")
pipe_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","pipe.png")).convert_alpha())
bg_img = pygame.transform.scale(pygame.image.load(os.path.join("imgs","bg.png")).convert_alpha(), (600, 900))
bird_images = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","bird" + str(x) + ".png"))) for x in range(1,4)]
base_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","base.png")).convert_alpha())
gen = 0
class Bird:
"""
Bird class representing the flappy bird
"""
MAX_ROTATION = 25
IMGS = bird_images
ROT_VEL = 20
ANIMATION_TIME = 5
def __init__(self, x, y):
"""
Initialize the object
:param x: starting x pos (int)
:param y: starting y pos (int)
:return: None
"""
self.x = x
self.y = y
self.tilt = 0 # degrees to tilt
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
"""
make the bird jump
:return: None
"""
self.vel = -10.5
self.tick_count = 0
self.height = self.y
def move(self):
"""
make the bird move
:return: None
"""
self.tick_count += 1
# for downward acceleration
displacement = self.vel*(self.tick_count) + 0.5*(3)*(self.tick_count)**2 # calculate displacement
# terminal velocity
if displacement >= 16:
displacement = (displacement/abs(displacement)) * 16
if displacement < 0:
displacement -= 2
self.y = self.y + displacement
if displacement < 0 or self.y < self.height + 50: # tilt up
if self.tilt < self.MAX_ROTATION:
self.tilt = self.MAX_ROTATION
else: # tilt down
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
"""
draw the bird
:param win: pygame window or surface
:return: None
"""
self.img_count += 1
# For animation of bird, loop through three images
if self.img_count <= self.ANIMATION_TIME:
self.img = self.IMGS[0]
elif self.img_count <= self.ANIMATION_TIME*2:
self.img = self.IMGS[1]
elif self.img_count <= self.ANIMATION_TIME*3:
self.img = self.IMGS[2]
elif self.img_count <= self.ANIMATION_TIME*4:
self.img = self.IMGS[1]
elif self.img_count == self.ANIMATION_TIME*4 + 1:
self.img = self.IMGS[0]
self.img_count = 0
# so when bird is nose diving it isn't flapping
if self.tilt <= -80:
self.img = self.IMGS[1]
self.img_count = self.ANIMATION_TIME*2
# tilt the bird
blitRotateCenter(win, self.img, (self.x, self.y), self.tilt)
def get_mask(self):
"""
gets the mask for the current image of the bird
:return: None
"""
return pygame.mask.from_surface(self.img)
class Pipe():
"""
represents a pipe object
"""
GAP = 200
VEL = 5
def __init__(self, x):
"""
initialize pipe object
:param x: int
        :return: None
"""
self.x = x
self.height = 0
# where the top and bottom of the pipe is
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(pipe_img, False, True)
self.PIPE_BOTTOM = pipe_img
self.passed = False
self.set_height()
def set_height(self):
"""
set the height of the pipe, from the top of the screen
:return: None
"""
self.height = random.randrange(50, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
"""
move pipe based on vel
:return: None
"""
self.x -= self.VEL
def draw(self, win):
"""
draw both the top and bottom of the pipe
:param win: pygame window/surface
:return: None
"""
# draw top
win.blit(self.PIPE_TOP, (self.x, self.top))
# draw bottom
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird, win):
"""
returns if a point is colliding with the pipe
:param bird: Bird object
:return: Bool
"""
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top - round(bird.y))
bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask,top_offset)
if b_point or t_point:
return True
return False
class Base:
"""
    Represents the moving floor of the game
"""
VEL = 5
WIDTH = base_img.get_width()
IMG = base_img
def __init__(self, y):
"""
Initialize the object
:param y: int
:return: None
"""
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
"""
move floor so it looks like its scrolling
:return: None
"""
self.x1 -= self.VEL
self.x2 -= self.VEL
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
"""
Draw the floor. This is two images that move together.
:param win: the pygame surface/window
:return: None
"""
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
def blitRotateCenter(surf, image, topleft, angle):
"""
Rotate a surface and blit it to the window
:param surf: the surface to blit to
:param image: the image surface to rotate
:param topLeft: the top left position of the image
:param angle: a float value for angle
:return: None
"""
rotated_image = pygame.transform.rotate(image, angle)
new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center)
surf.blit(rotated_image, new_rect.topleft)
def draw_window(win, birds, pipes, base, score, gen, pipe_ind):
"""
draws the windows for the main game loop
:param win: pygame window surface
:param bird: a Bird object
:param pipes: List of pipes
:param score: score of the game (int)
:param gen: current generation
:param pipe_ind: index of closest pipe
:return: None
"""
if gen == 0:
gen = 1
win.blit(bg_img, (0,0))
for pipe in pipes:
pipe.draw(win)
base.draw(win)
for bird in birds:
# draw lines from bird to pipe
if DRAW_LINES:
try:
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_TOP.get_width()/2, pipes[pipe_ind].height), 5)
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_BOTTOM.get_width()/2, pipes[pipe_ind].bottom), 5)
except:
pass
# draw bird
bird.draw(win)
# score
score_label = STAT_FONT.render("Score: " + str(score),1,(255,255,255))
win.blit(score_label, (WIN_WIDTH - score_label.get_width() - 15, 10))
# generations
score_label = STAT_FONT.render("Gens: " + str(gen-1),1,(255,255,255))
win.blit(score_label, (10, 10))
# alive
score_label = STAT_FONT.render("Alive: " + str(len(birds)),1,(255,255,255))
win.blit(score_label, (10, 50))
pygame.display.update()
def eval_genomes(genomes, config):
"""
runs the simulation of the current population of
birds and sets their fitness based on the distance they
reach in the game.
"""
global WIN, gen
win = WIN
gen += 1
# start by creating lists holding the genome itself, the
# neural network associated with the genome and the
# bird object that uses that network to play
nets = []
birds = []
ge = []
for genome_id, genome in genomes:
genome.fitness = 0 # start with fitness level of 0
net = neat.nn.FeedForwardNetwork.create(genome, config)
nets.append(net)
birds.append(Bird(230,350))
ge.append(genome)
base = Base(FLOOR)
pipes = [Pipe(700)]
score = 0
clock = pygame.time.Clock()
run = True
while run and len(birds) > 0:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
break
pipe_ind = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width(): # determine whether to use the first or second
pipe_ind = 1 # pipe on the screen for neural network input
for x, bird in enumerate(birds): # give each bird a fitness of 0.1 for each frame it stays alive
ge[x].fitness += 0.1
bird.move()
# send bird location, top pipe location and bottom pipe location and determine from network whether to jump or not
output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
if output[0] > 0.5: # we use a tanh activation function so result will be between -1 and 1. if over 0.5 jump
bird.jump()
base.move()
rem = []
add_pipe = False
for pipe in pipes:
pipe.move()
# check for collision
for x, bird in enumerate(birds):
if pipe.collide(bird, win):
ge[x].fitness -= 1
birds.remove(bird)
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
rem.append(pipe)
            # mark the pipe as passed once the (remaining) birds reach it; using
            # birds[0] instead of the leaked collision-loop variable guards the
            # case where every bird was just removed above
            if birds and not pipe.passed and pipe.x < birds[0].x:
                pipe.passed = True
                add_pipe = True
if add_pipe:
score += 1
# can add this line to give more reward for passing through a pipe (not required)
for genome in ge:
genome.fitness += 5
pipes.append(Pipe(WIN_WIDTH))
for r in rem:
pipes.remove(r)
remove = []
for x, bird in enumerate(birds):
if bird.y + bird.img.get_height() - 10 >= FLOOR or bird.y < -50:
remove.append((bird,nets[x],ge[x]))
        for r in remove:  # remove birds, associated genome and nets if required
ge.remove(r[2])
nets.remove(r[1])
birds.remove(r[0])
draw_window(WIN, birds, pipes, base, score, gen, pipe_ind)
# break if score gets large enough
'''if score > 20:
pickle.dump(nets[0],open("best.pickle", "wb"))
break'''
def run(config_file):
"""
runs the NEAT algorithm to train a neural network to play flappy bird.
:param config_file: location of config file
:return: None
"""
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
#p.add_reporter(neat.Checkpointer(5))
# Run for up to 50 generations.
winner = p.run(eval_genomes, 50)
# show final stats
print('\nBest genome:\n{!s}'.format(winner))
if __name__ == '__main__':
# Determine path to configuration file. This path manipulation is
# here so that the script will run successfully regardless of the
# current working directory.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward.txt')
run(config_path)
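# A hedged sketch, not part of the original: if the commented-out pickle.dump
# inside eval_genomes is enabled, the saved network can be reloaded and queried.
# The file name "best.pickle" and the 3-tuple of inputs mirror the training code
# above; everything else here is an assumption for illustration only.
def load_best_net(path="best.pickle"):
    with open(path, "rb") as f:
        return pickle.load(f)  # a neat.nn.FeedForwardNetwork instance
# Example (hypothetical values for the three network inputs):
# net = load_best_net()
# output = net.activate((bird_y, dist_to_top_pipe, dist_to_bottom_pipe))
# if output[0] > 0.5, the bird should jump, as in eval_genomes above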
| [
"[email protected]"
] | |
03c44bbcc3cb986d47719e3b53198dc3ce277e67 | 3d752bef425e906cf0d44bd6ec1683faf53b9ff5 | /Arithmetic Game.py | 456bb95f4f24f345eb93f7bed194aac057013e2f | [] | no_license | MuhamadNawzad/Beginner-projects | 4abe2bf70eb494c4996e44413a12617a7b50d317 | 189842e0c66c36919cb4af7284509dd09c6ae06f | refs/heads/main | 2023-04-07T19:25:50.145710 | 2021-04-19T07:53:27 | 2021-04-19T07:53:27 | 359,373,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | import random
import time
class ArithmeticGame:
def __init__(self, num_questions):
self.num_questions = num_questions
def generate_questions(self):
        operand1 = random.randint(0, 30)
        operand2 = random.randint(0, 30)
        operator = random.choice(['+', '-', '*', '//'])
        if operator == '//' and operand2 == 0:
            operand2 = random.randint(1, 30)  # avoid division by zero
        if operator == '+':
            answer = operand1 + operand2
        elif operator == '-':
            answer = operand1 - operand2
        elif operator == '*':
            answer = operand1 * operand2
        else:
            answer = operand1 // operand2
        question = str(operand1) + ' ' + operator + ' ' + str(operand2)
        return question, answer
def play_game(self):
start_time = time.time()
correct_ans = 0
for i in range(self.num_questions):
question, answer = self.generate_questions()
print(question)
user_answer = int(input('What is your answer?: '))
if answer == user_answer:
print('Your answer is correct.')
correct_ans = correct_ans + 1
else:
print('Your answer is wrong!')
end_time = time.time()
print('You answered ' + str(correct_ans) + ' questions correctly.')
print('You answered in {0:0.1f} seconds'.format(end_time - start_time))
new_game = ArithmeticGame(2)
new_game.play_game() | [
"[email protected]"
] | |
3017eff3a8d21fac6867ed2bc8da08b705f9d229 | cfc415c9b247521b872bf86fd22b55b4a3ff2ee3 | /tensorflow/tools/compatibility/tf_upgrade_v2_test.py | 4b83d50036b6c4e9572b40d7b6377685f94dacc8 | [
"Apache-2.0"
] | permissive | chengmengli06/tensorflow | f7fdb51d709e87b302d60a6dc9391cb6bbaaa3e1 | e81d0c5499eab1ae2d301c5caa128e0b69b0289b | refs/heads/master | 2021-06-24T21:54:28.571878 | 2018-11-16T06:45:48 | 2018-11-16T06:45:48 | 157,813,648 | 0 | 0 | Apache-2.0 | 2018-11-16T04:42:57 | 2018-11-16T04:42:57 | null | UTF-8 | Python | false | false | 6,225 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.assert_near(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
    # str.find returns -1 (which is truthy) when the text is absent, so a bare
    # assertTrue can never fail; compare against -1 explicitly instead.
    self.assertNotEqual(report.find("Renamed function `tf.assert_near` to "
                                    "`tf.debugging.assert_near`"), -1)
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay"]:
text = "%s(a, b)\n" % decay
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % decay])
self.assertIn("%s has been changed" % decay, report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % ns])
self.assertIn("loss_reduction has been changed", report)
def testCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
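# A hedged usage note, not part of the original test file: the upgrader these
# tests exercise is normally driven from the command line. The flag names below
# follow the tf_upgrade_v2 entry point but should be treated as assumptions:
#
#   python tf_upgrade_v2.py --infile old_code.py --outfile upgraded_code.py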
| [
"[email protected]"
] | |
f36b312afc18e9f6b1941362c2dfbc66574e3deb | 98b63e3dc79c75048163512c3d1b71d4b6987493 | /tensorflow/python/keras/tests/memory_test.py | 465df84d6fef375a6f515ec1eb64815e4b74ec3f | [
"Apache-2.0"
] | permissive | galeone/tensorflow | 11a4e4a3f42f4f61a65b432c429ace00401c9cc4 | 1b6f13331f4d8e7fccc66bfeb0b066e77a2b7206 | refs/heads/master | 2022-11-13T11:56:56.143276 | 2020-11-10T14:35:01 | 2020-11-10T14:35:01 | 310,642,488 | 21 | 12 | Apache-2.0 | 2020-11-06T16:01:03 | 2020-11-06T16:01:02 | null | UTF-8 | Python | false | false | 2,599 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in eager execution.
It is possible that this test suite will eventually become flaky due to taking
too long to run (since the tests iterate many times), but for now they are
helpful for finding memory leaks since not all PyObject leaks are found by
introspection (test_util decorators). Please be careful adding new tests here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager.memory_tests import memory_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SingleLayerNet(keras.Model):
"""Simple keras model used to ensure that there are no leaks."""
def __init__(self):
super(SingleLayerNet, self).__init__()
self.fc1 = keras.layers.Dense(5)
def call(self, x):
return self.fc1(x)
class MemoryTest(test.TestCase):
def testMemoryLeakInSimpleModelForwardOnly(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape():
net(inputs)
memory_test_util.assert_no_leak(f)
def testMemoryLeakInSimpleModelForwardAndBackward(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = array_ops.zeros([32, 100], dtypes.float32)
net = SingleLayerNet()
def f():
with backprop.GradientTape() as tape:
result = net(inputs)
tape.gradient(result, net.variables)
del tape
memory_test_util.assert_no_leak(f)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
c614d728dc81ff7d988e8e31bef916f6660284a7 | 0b787275b7389f352da1b9cc38abba8ca42540a3 | /16 excepciones.py | 5653f0cb5eedb20d5bceb648edc0557db7c233e8 | [] | no_license | jorgerojaspython/clases_de_python | 76d9eb0b3233f75fae1279b04781393ce39aa33f | df9382cabfa02c3a11a315f77e10df15da3fe7c9 | refs/heads/master | 2020-12-13T22:29:09.958678 | 2020-02-01T17:17:48 | 2020-02-01T17:17:48 | 234,550,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | try:
x=int(input("Enter a number"))
y=1/x
print(y)
except ZeroDivisionError:
print("no puede dividir para cero")
except ValueError:
print("debe ser un entero")
print("the end")
import math
x = float(input("Enter a number: "))
assert x >= 0.0
x = math.sqrt(x)
print(x)
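# A follow-up sketch, not in the original file: the assert above raises
# AssertionError for negative input, and it can be caught like any exception.
def safe_sqrt(value):
    try:
        assert value >= 0.0
        return math.sqrt(value)
    except AssertionError:
        print("the number must be non-negative")
        return None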
| [
"[email protected]"
] | |
831b79a3092e0a4c027903556aa8d5a0da312dbb | dfa52338be02769ae4383ec81620e7c10a774dd1 | /temp.py | 3dd9d66d49ec3761c6ccfaaf7b35ecc438fecc26 | [] | no_license | shuvayan/EloquentJavascript | 345933eb7f41d1829134441d65e09053b7919391 | 99681ffe3375a45a35eaec056038d78823de76ed | refs/heads/master | 2020-12-02T21:20:00.787785 | 2017-09-08T08:58:28 | 2017-09-08T08:58:28 | 96,296,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,091 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pandas as pd
import seaborn as sns  # used by the kdeplot/pairplot calls below
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#matplotlib inline
# Import statements required for Plotly
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
#Import libraries for modelling:
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss
from imblearn.over_sampling import SMOTE
import xgboost
# Import and suppress warnings
import warnings
warnings.filterwarnings('ignore')
import os
os.chdir('C:/Users/shuvayan.das/Documents/AttritionModelling')  # chdir is a function, not an attribute
attrition = pd.read_csv('C:/Users/shuvayan.das/Documents/AttritionModelling/Attrition.csv')
attrition.head()
#Drop the employee code:
attrition.isnull().any()
#Only department has missing values,assign a seperate category to these records
attrition_df = attrition.fillna("unknown")
attrition_df.isnull().any()
attrition_df.columns.to_series().groupby(attrition_df.dtypes).groups
# The target column is in integer format; change it to categorical
attrition_df['Terminated'] = attrition_df['Terminated'].astype('category')
# There are some records where the Tenure is negative or the Tenure is less than LastPromoted Time;
# flag them row-wise (a plain `if` on a pandas Series raises "truth value is ambiguous")
attrition_df['Flag_Variable'] = np.where(
    (attrition_df['Tenure'] <= attrition_df['TimeLastPos']) | (attrition_df['Tenure'] <= 0), 1, 0)
attrition_df.to_csv("Attrition_processed.csv")
#Distribution of the dataset
# Plotting the KDEplots
f, axes = plt.subplots(3, 3, figsize=(10, 10), sharex=False, sharey=False)
# Defining our colormap scheme
s = np.linspace(0, 3, 10)
cmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Age'].values
y = attrition_df['Tenure'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])
axes[0,0].set( title = 'Age against Tenure')
cmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Age'].values
y = attrition_df['Annual Income'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,1])
axes[0,1].set( title = 'Age against Annual Income')
cmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['TimeLastPos'].values
y = attrition_df['Age'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,2])
axes[0,2].set( title = 'TimeLastPos against Age')
cmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Tenure'].values
y = attrition_df['Last Rating'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,1])
axes[1,1].set( title = 'Tenure against Last Rating')
cmap = sns.cubehelix_palette(start=2.0, light=1, as_cmap=True)
# Generate and plot
x = attrition_df['Tenure'].values
y = attrition_df['Annual Income'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[2,0])
axes[2,0].set( title = 'Years at company against Annual Income')
f.tight_layout()
# 3D Plots:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = attrition_df['Tenure']
y = attrition_df['TimeLastPos']
z = attrition_df['LastRating']
c = attrition_df['Terminated']
_ = ax.scatter(xs=x, ys=y, zs=z, c=c)
_ = ax.set_xlabel('Tenure')
_ = ax.set_ylabel('TimeLastPos') # label corrected: the y-axis plots TimeLastPos, not Annual Income
_ = ax.set_zlabel('LastRating')
_ = plt.title('Plot 1: Multivariate Visualization of Attrition by Color(red if left)')
plt.show()
# creating a list of only numerical values for correlation.
numerical = ['Tenure','TimeLastPos','Annual Income','Age','LastRating']
data = [
go.Heatmap(
z= attrition[numerical].astype(float).corr().values, # Generating the Pearson correlation
x=attrition[numerical].columns.values,
y=attrition[numerical].columns.values,
colorscale='Viridis',
reversescale = False,
text = True ,
opacity = 1.0
)
]
layout = go.Layout(
title='Pearson Correlation of numerical features',
xaxis = dict(ticks='', nticks=36),
yaxis = dict(ticks='' ),
width = 900, height = 700,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='labelled-heatmap')
# 'Terminated' was already numerically encoded (0/1) earlier in this script, so the
# original 'Yes'/'No' dictionary lookup would raise a KeyError; copy the values directly
attrition["Attrition_numerical"] = attrition_df["Terminated"].astype(int)
#Pairplot Visualisations
# Refining our list of numerical variables
g = sns.pairplot(attrition[numerical + ['Attrition_numerical']], hue='Attrition_numerical',
                 palette='seismic', diag_kind='kde', diag_kws=dict(shade=True)) # duplicate hue= removed; hue column added to the plotted frame
g.set(xticklabels=[])
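# ------------------------------------------------------------------
# Illustrative continuation (not in the original script): the modelling
# imports above (SMOTE, RandomForestClassifier) are never used below, so
# this is a minimal sketch of how they might be applied. The feature list
# reuses `numerical`; random_state and n_estimators are assumptions.
X = attrition_df[numerical]
y = attrition_df['Terminated'].astype(int)
X_res, y_res = SMOTE(random_state=42).fit_resample(X, y) # oversample the minority class
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_res, y_res)
print("resampled train accuracy:", accuracy_score(y_res, rf.predict(X_res)))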
| [
"[email protected]"
] | |
8ab7a399f4ab540a36f920fa8cdb90d8ca3db19b | 6b6c55c1631adb035dcf9cf92bb17eebbb738ff2 | /PAR III/update_stock.py | 07fdb1b648d0c84e8bb540b22dc4b6c4f1a4d708 | [
"MIT"
] | permissive | Anderson-VargasQ/mecatronicaUNT_Prog2_Digitalizaci-n_del_Sistema_de_Ventas.- | def210c9b124176372118c1b5f9d2138881bcd7b | a151f13da27040440eee7ae97520e34a9dc9f70c | refs/heads/main | 2023-03-13T06:46:51.434048 | 2021-03-05T08:49:37 | 2021-03-05T08:49:37 | 344,736,478 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import pymongo
def update_stock(codigo_producto,stock,stock_disp):
client = pymongo.MongoClient("mongodb+srv://grupo_hailpy:[email protected]/Proyecto?retryWrites=true&w=majority")
db = client.test
try:
print("MongoDB version is %s" %
client.server_info()['version'])
except pymongo.errors.OperationFailure as error:
print(error)
quit(1)
my_database = client.test
my_collection = my_database.bases
#To change fields inside an existing document
my_collection.update_one(
{ "_id": codigo_producto }, # query
{
"$set": { # new data
"stock":stock,
"stock_disp":stock_disp
}
}
) | [
"[email protected]"
] | |
0ba939c744fe84858b93f836679c134f43f4fe14 | 21ce30e043c07052a7fa39bb9fdd6cb7fda90aad | /algorithms hackerrank/cavity map.py | 35b32a34dc7171dfe4cebf4daebd0e4c3d1f0590 | [] | no_license | nikhildewoolkar/Competitive-coding | e41d5cc0ab5a45c9cf7177d3a1da553d45ccf6c3 | e45ba7e0c9fcc19da9fde2b0657eb7646f24147b | refs/heads/master | 2023-03-16T00:03:11.575553 | 2021-03-09T11:26:33 | 2021-03-09T11:26:33 | 340,855,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | def cavityMap(grid):
    grid1=[row[:] for row in grid] #copy each row as well: list.copy() is shallow, so writing "X" into grid1 would also corrupt the grid being compared
for i in range(1,len(grid)-1):
for j in range(1,len(grid)-1):
if(grid[i][j]>grid[i-1][j] and grid[i][j]>grid[i+1][j] and grid[i][j]>grid[i][j-1] and grid[i][j]>grid[i][j+1]):
grid1[i][j]="X"
for i in grid1:
print(''.join(i))
n = int(input())
grid = []
for _ in range(n):
grid_item = list(input())
grid.append(grid_item)
result = cavityMap(grid)
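# Worked example (illustrative): in the grid below the centre cell is strictly
# deeper than all four neighbours, so cavityMap marks it with "X":
#   111          111
#   191   -->    1X1
#   111          111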
| [
"[email protected]"
] | |
e30491020a6fd96d5b31d0024462ca68ec66ef9d | 5ab82eb488e973fe9e4020f91adbfe4c38e16158 | /média.py | a4ad4a2580b78565a17b75ae014dd3f1a3202035 | [] | no_license | Robertobappe/Introdu-o-Ci-ncia-da-Computa-o-com-Python-Parte-1 | b6251a32a9f1f9669aa471fdbd459ed977d34757 | dd6c5af437bb0c482415b824f75742fc35bb5d27 | refs/heads/main | 2023-08-10T17:28:48.226620 | 2021-10-02T17:23:35 | 2021-10-02T17:23:35 | 411,464,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | pn=float(input("Enter the first grade:"))
sn=float(input("Enter the second grade:"))
tn=float(input("Enter the third grade:"))
qn=float(input("Enter the fourth grade:"))
media=(pn+sn+tn+qn)/4
print("The arithmetic mean is",media)
| [
"[email protected]"
] | |
3f7b73f4572985238fd69887dbe034b6bdf3b83f | b61a47202ffe716826e3498461e1243f8694a3e7 | /hesapla-arg.py | eb6bdb13f4c5bfccaba2b33c5d1059aa3ad70f5b | [] | no_license | serhatyazicioglu/Data-Science-and-Machine-Learning-Bootcamp | f4b3e4ed58c511a9187a14e50a03ae8eb8de8372 | 6584f3a83459b5674cb11f1fc89e12f99bbceee0 | refs/heads/main | 2023-03-23T02:14:58.347805 | 2021-03-16T17:40:27 | 2021-03-16T17:40:27 | 331,138,928 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | # -*- coding: UTF-8 -*-
"""
Not every application we write has a graphical user interface.
Some applications are better suited to the command line, and such applications need a few parameters.
Argparse: lets us feed inputs to our code from the terminal.
Example usage for running the argparse script below from the terminal:
python <script_name.py> --sayi1 <value 1> --sayi2 <value 2> --islem <operation type>
python hesapla-arg.py --sayi1 5 --sayi2 10 --islem carp
"""
import argparse # load the library (pip install argparse if it is missing)
# get args
ap = argparse.ArgumentParser() # configure the argparse object
ap.add_argument("--sayi1", required=True, help="Enter sayi1! (--sayi1)") # required: marks the argument as mandatory
ap.add_argument("--sayi2", required=True, help="Enter sayi2! (--sayi2)") # help: informs the user
ap.add_argument("--islem", required=True, help="Enter the operation type! (--islem=topla|cikar|carp|bol)") # reads which operation the user wants
# example terminal usage: python hesapla-arg.py --sayi1 5 --sayi2 10 --islem carp
args = vars(ap.parse_args()) # gathers every input into args; the sayi1 input is read with args["sayi1"]
try:
    # set args to vars
    sayi1 = float(args["sayi1"]) # converts the sayi1 input to float and stores it as sayi1
    sayi2 = float(args["sayi2"]) # also float (the original used int here, making the two operands inconsistent)
    islem = args["islem"] # stores the islem (operation) input from the user
    print(islem + " operation result:") # prints which operation ran and, below, its result
    if islem == "topla": # topla = add: print the sum
        print(sayi1 + sayi2)
    elif islem == "cikar": # cikar = subtract: print the difference
        print(sayi1 - sayi2)
    elif islem == "carp": # carp = multiply: print the product
        print(sayi1 * sayi2)
    elif islem == "bol": # bol = divide: print the quotient
        print(sayi1 / sayi2)
    else:
        print("You entered an undefined operation type!") # shown when the user enters anything else
except Exception as e:
    print("Error! ==> " + str(e))
| [
"[email protected]"
] | |
36a255bdfa0ae4e48cd1170b522ea6ada667d61b | 07e03f73ea68b22d10c92a0e61a86a42ff32d6fd | /Gerador de Planilhas para Memorion v4.py | a423fc21ce700d40c5aaebd5ca4d8826c056e7bb | [] | no_license | Erick-Faster/Projeto-Tradutor | d76b69fcf3d774aeb21573fcdcce62ac4b05706a | e0c53086879d9d9eb99c5e659eba4b6a44bc773f | refs/heads/master | 2020-12-19T06:16:12.003793 | 2020-01-22T19:08:54 | 2020-01-22T19:08:54 | 235,645,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,208 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 2019
Finished on Tue Jun 11 2019
###########FEATURES#############
-- Builds an Excel spreadsheet containing
-- The German words entered
-- Translations of the words
-- Gender of the nouns
-- 2 usage examples
-- Type (noun, verb, etc...)
-- Data extraction from Pons and Reverso Context
-- Spreadsheet format ready to be imported into Memorion
-- Formatting so the sites can read umlauts and eszetts
-- Data extraction from xlsx and csv
-- Looks up the base file, taking its name as input
-- Chooses a name for the output file
@author: Faster-PC
"""
import openpyxl, os, re
import pandas as pd
from selenium import webdriver
from unidecode import unidecode
'''
###################################
Functions
##################################
'''
#Reads the word list from a source file in the given format
def Coleta(nomeBase,tipo):
    if tipo == 1: #if it is a csv
        base = pd.read_csv(nomeBase+'.csv',encoding='latin-1') #Latin-1 works around character-encoding errors
    elif tipo == 2: #if it is an Excel file
        base = pd.read_excel(nomeBase+'.xlsx')
    else:
        palavras = ['Tisch','Tasche','Auto'] #fallback test words when no source file exists
        return palavras
    palavras = base.iloc[:,0] #slice off the first column
    palavras = list(palavras) #convert the DataFrame column to a list
return palavras
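#Example (illustrative; the file name is an assumption): a file 'woerter.csv'
#whose first column holds one German word per row gives
#Coleta('woerter', 1) == ['Tisch', 'Tasche', 'Auto', ...]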
#Converts special characters
def Converte(palavras,idioma):
    regex = re.compile(r'[äöüÄÖÜß]') #rule: match any character inside the brackets
    if idioma == 'de':
        for i in range(len(palavras)):
            Verificador = False #criterion to keep the loop running
            while Verificador == False: #ensures every special character is found
                try:
                    mo = regex.search(palavras[i]) #search 'palavras' according to the rule
                    aux = mo.group() #the special character found
                    span = mo.span() #position of the special character
                    palavraAux = list(palavras[i]) #turn the string into a list
                    #convert the special character into its transliterated form
                    if aux == 'Ä':
                        palavraAux[span[0]] = 'Ae'
                    elif aux == 'Ö':
                        palavraAux[span[0]] = 'Oe'
                    elif aux == 'Ü':
                        palavraAux[span[0]] = 'Ue'
                    elif aux == 'ä':
                        palavraAux[span[0]] = 'ae'
                    elif aux == 'ö':
                        palavraAux[span[0]] = 'oe'
                    elif aux == 'ü':
                        palavraAux[span[0]] = 'ue'
                    elif aux == 'ß':
                        palavraAux[span[0]] = 'ss'
                    else:
                        print('ERROR')
                    palavras[i] = ''.join(palavraAux) #join the list back into a string
                    print('Conversion of %s succeeded!'%palavras[i])
                    palavraAux.clear() #discard the list
                except:
                    Verificador = True #stop searching
                    continue #if nothing is found, move on to the next case
    else: #for every other language
        for i in range(len(palavras)):
            palavras[i] = unidecode(palavras[i]) #strip accents and special characters
return palavras
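#Example (illustrative): Converte(['Müller', 'Straße'], 'de') yields
#['Mueller', 'Strasse'], a form the Pons and Reverso URLs accept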
#Fetches examples and translations from Reverso Context
def Reverso(palavras,idiomaBase):
    if idiomaBase == 'de':
        idiomaB = 'deutsch'
    elif idiomaBase == 'fr':
        idiomaB = 'franzosisch'
    elif idiomaBase == 'en':
        idiomaB = 'englisch'
    elif idiomaBase == 'es':
        idiomaB = 'spanisch'
    exemplos = [] #temporary list
    exemploFinal = [] #permanent list
    traducoes = []
    traducaoFinal = []
    for i in range (len(palavras)): #action for each word
        browser.get("https://context.reverso.net/%C3%BCbersetzung/"+idiomaB+"-portugiesisch/"+palavras[i]) #site the information is extracted from
        '''
        Examples
        '''
        try:
            frases = browser.find_elements_by_class_name('text') #finds every sentence element
            #convert the sentence data from Web elements to strings
            for j in range (len(frases)):
                exemplos.append(frases[j].text)
            #remove the empty entries from the temporary list
            for j in range (len(exemplos)):
                try:
                    exemplos.remove("") #removes every empty string
                except:
                    break
            #check whether there is a typo suggestion
            k = 0
            if exemplos[0] == 'Meinst Du:':
                k = 1
            #pick out the desired sentences
            exemplo = [exemplos[k],exemplos[k+1]," ~~ ",exemplos[k+2],exemplos[k+3]] #selects the first 2 sentence pairs
            #join the list into a single string
            stringExemplo = " | " #separator between the list elements
            stringExemplo = stringExemplo.join(exemplo) #turns the list into one string
            #append the string to the permanent list
            exemploFinal.append(stringExemplo)
            print("Example for %s processed!" %palavras[i])
            exemplos = [] #reset the temporary list
        except:
            exemploFinal.append("ERROR")
        '''
        Translations
        '''
        try:
            traducaoWEB = browser.find_elements_by_class_name('translation')
            for j in range (len(traducaoWEB)):
                traducoes.append(traducaoWEB[j].text)
            #remove the empty entries from the temporary list
            for j in range (len(traducoes)):
                try:
                    traducoes.remove("") #removes every empty string
                except:
                    break
            if len(traducoes) > 1:
                traducao = traducoes[0]+", "+traducoes[1]
            else:
                traducao = traducoes[0]
            traducaoFinal.append(traducao)
            print("Translation added: %s\n" %traducao)
            traducoes = []
        except:
            traducaoFinal.append("ERROR")
    return exemploFinal, traducaoFinal
#Fetches articles, word classes and spelling errors from the Pons site
def Pons (palavras,idiomaBase):
    for i in range (len(palavras)): #repeats once per word
        browser.get("https://de.pons.com/%C3%BCbersetzung?q="+palavras[i]+"&l="+idiomaBase+"en&in=&lf=de&qnac=") #opens the PONS site
        print(palavras[i])
        #look up the gender
        try:
            artigo = browser.find_element_by_class_name('genus') #looks up the gender
            if artigo.text == "m":
                artigos.append("Der")
            elif artigo.text == "f":
                artigos.append("Die")
            elif artigo.text == "nt":
                artigos.append("Das")
            else:
                artigos.append("ERROR")
            print("Article: %s" %artigo.text)
        except: #common when the word is not a noun
            artigos.append("") #returns no article at all
        #look up the class/type of the word (noun, verb, adjective, etc)
        try:
            classe = browser.find_element_by_class_name('wordclass') #looks up the class
            classes.append(classe.text) #add the class
            print("Class: %s\n" %classe.text)
        except:
            classes.append("ERROR")
        #check for possible spelling errors
        try:
            erro = browser.find_element_by_tag_name('strong') #searches inside the <strong> tag
            erro = erro.text #assigns the text to the variable
            regex = re.compile(r'(Meinten Sie vielleicht:)\s(\w+)') #rule for the pattern
            mo = regex.search(erro) #searches for the pattern
            auxErro = mo.group(1) #lookup that lands in the except branch when the pattern is not found
            auxSugestao = mo.group(2) #word suggestion given by Pons
            if auxErro == 'Meinten Sie vielleicht:': #if the error is confirmed
                erros.append("WARNING -> %s"%auxSugestao) #records the error with the suggestion
            else:
                erros.append("") #records nothing
        except:
            erros.append("")
    return artigos, classes, erros
#Puts everything into a final list and saves it to Excel in the Memorion FlashCards format
def SalvarExcel(nomeArquivo,palavrasFinais,traducoes,artigos,exemplos,classes,erros):
    vetorFinal = [] #information that will go into the Excel file
    for i in range(len(palavras)):
        vetorFinal.append([traducoes[i],palavrasFinais[i],artigos[i],exemplos[i],classes[i],erros[i]]) #add word, article, class and examples
    workbook = openpyxl.Workbook() #creates the Excel file
    for i in range (len(vetorFinal)): #number of elements in the final list
        workbook.active.append(vetorFinal[i]) #adds the list, row by row
    os.chdir('C:\\Users\\Faster-PC\\MyPythonFiles') #selects the directory
    #checks whether the file already exists
    savePoint = os.path.isfile('./'+nomeArquivo+'.xlsx')
    if savePoint == False: #if it does not exist, save under this name
        workbook.save(nomeArquivo+'.xlsx') #saves the Excel file
        print('%s.xlsx created successfully!'%nomeArquivo)
    else: #if it already exists
        save = 2 #number appended to the file name
        saveStg = str(save) #converts the int to a string
        #stop condition
        while savePoint == True: #while an identical file exists
            savePoint = os.path.isfile('./'+nomeArquivo+saveStg+'.xlsx') #looks for a file with the number appended
            if savePoint == False: #if it does not exist
                workbook.save(nomeArquivo+saveStg+'.xlsx') #saves the Excel file with the number
                savePoint = False #done
                print('%s%s.xlsx created successfully!'%(nomeArquivo,saveStg))
            else: #if it still exists
                save = save + 1 #increment the number on the file
                saveStg = str(save) #converts the number to a string
def GUI():
root.title("Gerador de FlashCards") #Titulo do programa
mainframe = ttk.Frame(root, padding="3 3 12 12") #Espacos extras nas 4 direcoes
mainframe.grid(column=0, row=0, sticky=(N, W, E, S)) #Dimensoes do frame principal
root.columnconfigure(0, weight=1) #coluna 0 possui 1 espaco garantido
root.rowconfigure(0, weight=1) #linha 0 possui um espaco garantido
#variaveis
nomeBase = StringVar()
nomeArquivo = StringVar()
idiomaBase = StringVar()
teste = StringVar()
nomeEntrada_entry = ttk.Entry(mainframe, width = 20, textvariable=nomeBase)
nomeEntrada_entry.grid(column=2,row=1,sticky=(W,E))
nomeSaida_entry = ttk.Entry(mainframe, width = 20, textvariable=nomeArquivo)
nomeSaida_entry.grid(column=2,row=3, sticky=(W,E))
ttk.Label(mainframe, text="Qual o nome do arquivo?").grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="Idioma:").grid(column=1, row=2, sticky=W)
ttk.Label(mainframe, text="Qual o nome da Saida?").grid(column=1, row=3, sticky=W)
ttk.Label(mainframe, textvariable=teste).grid(column=1, row=4, sticky=W)
ttk.Radiobutton(mainframe, text='De', variable=idiomaBase, value='de').grid(column=2, row=2, sticky=W)
ttk.Radiobutton(mainframe, text='Fr', variable=idiomaBase, value='fr').grid(column=2, row=2)
ttk.Radiobutton(mainframe, text='Es', variable=idiomaBase, value='es').grid(column=2, row=2, sticky=E)
ttk.Button(mainframe, text="Fechar", command=root.destroy).grid(column=2, row=5, sticky=E)
ttk.Button(mainframe, text="OK", command=funcaoTeste).grid(column=2, row=4, sticky=E)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5) #Para cada grid, deixa um espacinho
nomeEntrada_entry.focus() #Inicia comando na primeira caixa de entrada
root.bind('<Return>', funcaoTeste) #Ativa 'Enter' para o botao
'''
############################################################
THE MAIN PROGRAM STARTS HERE
############################################################
'''
'''
GUI
'''
#the tkinter imports and the callback must exist before Tk()/GUI() run;
#in the original file they appeared after the mainloop and crashed at startup
from tkinter import *
from tkinter import ttk
def funcaoTeste(*args):
    try:
        if idiomaBase.get() == 'de':
            teste.set('DEUTSCH')
        elif idiomaBase.get() == 'fr':
            teste.set('FRANÇAIS')
        elif idiomaBase.get() == 'es':
            teste.set('ESPAÑOL')
        else:
            value = nomeArquivo.get()
            teste.set(value)
    except:
        teste.set('ERROR')
root = Tk()
GUI()
root.mainloop()
nomeBase = nomeBase.get()
nomeArquivo = nomeArquivo.get()
idiomaBase = idiomaBase.get()
'''
Data types that will be extracted
'''
palavrasFinais = []
artigos = []
classes = []
exemplos = []
traducoes = []
erros = []
'''
Questionnaire
'''
while True:
    VerificaCSV = os.path.isfile('./'+nomeBase+'.csv')
    VerificaXLSX = os.path.isfile('./'+nomeBase+'.xlsx')
    if VerificaCSV == True and VerificaXLSX == False:
        tipo = 1
        break
    elif VerificaCSV == False and VerificaXLSX == True:
        tipo = 2
        break
    elif VerificaCSV == True and VerificaXLSX == True:
        tipo = int(input("What is the source format? [1]csv , [2]xlsx : "))
        break
    else:
        print("File not found. Falling back to built-in test words")
        tipo = 3
        break
'''
Word collection
'''
palavras = Coleta(nomeBase,tipo) #collects words from the csv[1] or excel[2] source
palavrasFinais = palavras[:] #keeps a copy of the unconverted words for the final table
palavras = Converte(palavras,idiomaBase) #strips umlauts and eszetts
'''
Pons and Reverso lookup
'''
browser = webdriver.PhantomJS() #launches the headless (phantom) browser
artigos, classes, erros = Pons(palavras,idiomaBase) #items scraped from Pons
exemplos, traducoes = Reverso(palavras,idiomaBase) #items scraped from Reverso Context
browser.close() #closes the headless browser
'''
Saving the file
'''
SalvarExcel(nomeArquivo,palavrasFinais,traducoes,artigos,exemplos,classes,erros)
'''
########################################
END OF CODE
########################################
'''
'''Notes'''
| [
"[email protected]"
] | |
5473b4dc8a460bd82fdbb8d63a294758359036eb | 8b6edb9665bf90fe93d224fd2903e879d6f92f1d | /scripts/helpers.py | 9c11e71d8fc1a86076a5736f16fd84a717c1251c | [] | no_license | Mandyli1996/Multi-modal-learning-for-Neural-Record-Linkage | 808836f8b9f059e7fcf01db0a202bb100f27a806 | d6ada3bbc226adfa5ef5cfaae9b648e9b426921a | refs/heads/master | 2022-01-31T12:18:05.429898 | 2019-08-16T01:43:46 | 2019-08-16T01:43:46 | 197,054,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,109 | py | import pandas as pd
import numpy as np
import os
import re
import matplotlib.pyplot as plt # added: plt is used by the hyperopt visualizations below but was never imported
# DATA HANDLING
def is_str_list(x):
"""
given a pd.Series of strings, return True if all elements
begin and end with square brackets
"""
return np.all(x.astype(str).str.startswith('[') & \
x.astype(str).str.endswith(']'))
def str_to_list(x):
"convert a string reprentation of list to actual list"
x = x[1:-1]
x = x.split(',')
return [int(i) for i in x]
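# quick check (illustrative): str_to_list("[1, 2, 3]") -> [1, 2, 3]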
def load_data(data_dir, filenames=['test_1', 'test_2', 'test_y',
'train_1', 'train_2', 'train_y',
'val_1', 'val_2', 'val_y']):
"""
returns a dictionary of test, train, and validation datasets with their
respective sources and targets. filenames serve as keys.
"""
data = dict()
for filename in filenames:
df = pd.read_csv(os.path.join(data_dir, filename+'.csv'), low_memory=False)
str_list_mask = df.apply(is_str_list, axis='rows')
df.loc[:, str_list_mask] = df.loc[:, str_list_mask].applymap(str_to_list)
data[filename] = df
return data
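# usage sketch (illustrative; the directory name is an assumption):
# data = load_data('data/processed')
# train_1, train_2, train_y = data['train_1'], data['train_2'], data['train_y']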
def str_to_list_df(x):
df = x.copy()
mask = df.apply(is_str_list, axis='rows')
df.loc[:, mask] = df.loc[:, mask].applymap(str_to_list)
return df
def str_to_num(x):
if type(x) == float:
return x
else:
return float(re.sub('[^0-9|^\.]', '', x))
def examine_data(set1, set2, columns, bool_mask, mapping):
df1 = set1.copy()
df2 = set2.copy()
def idx_to_word(x):
string = ''
for idx in x:
string += ' ' + mapping['idx2word'][idx]
return string
df1.loc[:, columns] = df1.loc[:, columns].applymap(idx_to_word)
df2.loc[:, columns] = df2.loc[:, columns].applymap(idx_to_word)
both = pd.concat([df1, df2], axis=1)
both = both.loc[bool_mask, :]
return both
# HYPEROPT VISUALIZATIONS
def hyperopt_val_diagnostic(val_name, trials):
    ts = [trial['tid'] for trial in trials.trials]
    vals = [trial['misc']['vals'][val_name] for trial in trials.trials] #extract the sampled values for this hyperparameter; `vals` was referenced below but never defined
    results = [trial['result']['loss'] for trial in trials.trials]
fig, axes = plt.subplots(1, 3, figsize = (16,4))
axes[0].scatter(ts, vals)
axes[0].set(xlabel='iteration', ylabel=val_name)
axes[1].hist(np.array(vals).squeeze())
axes[1].set(xlabel=val_name, ylabel='frequency')
axes[2].scatter(vals, results)
axes[2].set(xlabel=val_name, ylabel='loss')
plt.tight_layout()
def visualize_hyperparameters(trials):
for val in trials.trials[0]['misc']['vals'].keys():
hyperopt_val_diagnostic(val, trials)
# HELPERS FOR MODEL GENERATION
def get_document_frequencies(raw_data_dir, mapping, set1='set1', set2='set2'):
# read csv data from directory as pd.DataFrame
set1 = pd.read_csv(os.path.join(raw_data_dir, set1 + '.csv'), encoding='latin1')
set2 = pd.read_csv(os.path.join(raw_data_dir, set2 + '.csv'), encoding='latin1')
# select only columns whose values are lists embedded as strings
mask1 = set1.apply(is_str_list, axis='rows')
mask2 = set2.apply(is_str_list, axis='rows')
# convert strings back into lists
set1 = set1.loc[:, mask1].applymap(str_to_list)
set2 = set2.loc[:, mask2].applymap(str_to_list)
# concatenate columns so all relevant attributes become a single list
def concat_columns(x):
idx_list = list()
for lst in x.values:
idx_list += lst
return idx_list
set1 = set1.apply(concat_columns, axis='columns')
set2 = set2.apply(concat_columns, axis='columns')
# +1 because default value of DefaultDict not counted
doc_freqs_1 = np.zeros(len(mapping['idx2word'])+1)
doc_freqs_2 = np.zeros(len(mapping['idx2word'])+1)
for index, item in set1.iteritems():
uniq_indices = set(item)
for idx in uniq_indices:
doc_freqs_1[idx] += 1
for index, item in set2.iteritems():
uniq_indices = set(item)
for idx in uniq_indices:
doc_freqs_2[idx] += 1
return doc_freqs_1, doc_freqs_2
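# usage sketch (illustrative; the directory and the word-index mapping are assumptions):
# doc_freqs_1, doc_freqs_2 = get_document_frequencies('data/raw', mapping)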
| [
"[email protected]"
] | |
33708d51847a2c0f48609d985a8d8d1f806b84ba | 1f69ebe2eb1aa6214ba4bc0288940f2d4e580ab7 | /Assignment/assi1/harmonic.py | 70d44e00de90a4115ee6c3b1686d88795b276f9b | [] | no_license | Prathamesh-Mone/PPL19-20 | 68f8003760d62c782163def37fcc74050f9a8e4f | c192deff3e171099cca5ab6c880ef01ba149cb9c | refs/heads/master | 2022-10-05T09:30:16.835203 | 2020-06-11T06:21:50 | 2020-06-11T06:21:50 | 248,428,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | def sumreci(n) :
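    #sum of the reciprocals of the divisors of n, i.e. sigma(n)/n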
i = 1; new = 0
while i <= n :
if n%i == 0 :
new = new + 1/i
i = i + 1
return new
def numdivisors(n) :
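    #number of divisors of n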
i = 1; count = 0
while i <= n :
if n%i == 0 :
count = count + 1
i = i + 1
return count
if __name__ == "__main__" :
i = 1; l = 1
while i <= 8 :
p = sumreci(l)
q = numdivisors(l)
if q/p == int(q/p) :
print(l," is a harmonic number \n")
i = i + 1
l = l + 1
| [
"[email protected]"
] | |
eea24f51c349fb5fbbbc953d159fc360bb09cf38 | 6355e7024c047cc074637011dcb9f9934073dbf3 | /les/wsgi.py | c6a33d0418d8d7a3a6c3bdc38beba57846e729a6 | [] | no_license | guilhascorreia24/Componentes-de-user | a25f56e4cab8b45fb7ba185fc5722d5521235f2a | acd8a63ac0ef448704616a378b5bc08b1c84ffb3 | refs/heads/master | 2021-02-28T20:41:04.678911 | 2020-03-13T21:13:02 | 2020-03-13T21:13:02 | 245,731,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for les project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'les.settings')
application = get_wsgi_application()
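# Usage sketch (illustrative): any WSGI server can serve this module, e.g.
# `gunicorn les.wsgi:application` (assuming gunicorn is installed)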
| [
"[email protected]"
] | |
0b89423c220dcc5323af0513aba930115f2245b1 | d89e79cbabe985368645e12271419faf60b2f0cd | /forest_quiebra.py | 1ac0f0d398749fcf21abc3300581d81438ecc0da | [] | no_license | DiegoCelis33/CarranzaDiego_Ejercicio17 | 39e7f303c9a11685b1072a8a99c5d021bd770658 | ea0f2b7c24c33076b00a942ade4a35e621a1dc54 | refs/heads/master | 2021-04-23T20:23:48.557351 | 2020-03-25T16:58:56 | 2020-03-25T16:58:56 | 249,995,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | #!/usr/bin/env python
# coding: utf-8
# In[90]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd # to read the data
import sklearn.ensemble # for the random forest
import sklearn.model_selection # for the train-test split
import sklearn.metrics # to compute the f1-score
from scipy.io import arff
# In[169]:
data1 = arff.loadarff('1year.arff')
data2 = arff.loadarff('2year.arff')
data3 = arff.loadarff('3year.arff')
data4 = arff.loadarff('4year.arff')
data5 = arff.loadarff('5year.arff')
data1 = pd.DataFrame(data1[0])
data2 = pd.DataFrame(data2[0])
data3 = pd.DataFrame(data3[0])
data4 = pd.DataFrame(data4[0])
data5 = pd.DataFrame(data5[0])
#data = pd.concat([data1, data2,data3,data4,data5], axis=0)
data = pd.concat([data1, data2,data3,data4,data5])
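# 'class' is a reserved word in Python, so the column is pulled with getattr instead of attribute access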
sd = getattr(data, "class")
data['class']=sd.astype(int)
data = data.dropna()
predictors = list(data.keys())
predictors.remove('class')
#print(predictors, np.shape(np.array(predictors)))
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
data[predictors], data['class'], test_size=0.5)
X_test, X_validation, y_test, y_validation = sklearn.model_selection.train_test_split(
data[predictors], data['class'], test_size=0.2)
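# note (added): this second split re-samples the full dataset, so X_validation can
# overlap X_train; kept as in the original, but splitting X_test instead would avoid leakage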
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=10, max_features='sqrt')
n_trees = np.arange(1,100,25)
f1_train = []
f1_test = []
feature_importance = np.zeros((len(n_trees), len(predictors)))
for i, n_tree in enumerate(n_trees):
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_tree, max_features='sqrt')
clf.fit(X_train, y_train)
f1_train.append(sklearn.metrics.f1_score(y_train, clf.predict(X_train)))
f1_test.append(sklearn.metrics.f1_score(y_test, clf.predict(X_test)))
feature_importance[i, :] = clf.feature_importances_
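# pick the tree count whose test-set F1 was highest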
maximo = n_trees[np.argmax(f1_test)]
# In[158]:
#plt.scatter(n_trees, f1_test)
# In[186]:
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=maximo, max_features='sqrt')
clf.fit(X_validation, y_validation)
f1_validation = sklearn.metrics.f1_score(y_validation, clf.predict(X_validation))
# take the importances straight from the fitted model; the original wrote them into a
# stale row `i` of a fresh zero matrix, so averaging over rows returned mostly zeros
avg_importance = clf.feature_importances_
a = pd.Series(avg_importance, index=predictors)
print(a)
plt.figure()
a.nlargest().plot(kind='barh')
plt.xlabel('Average Feature Importance')
plt.title('M='+str(maximo))
plt.savefig("features.png")
# In[171]:
print(f1_validation) # a bare expression only displays inside a notebook cell; print it so the script shows it too
# In[ ]:
| [
"[email protected]"
] | |
8b672339668cec234c5434af9ba27be18894617a | ae74a7d1ef50f27cad108915d2879e01217934d4 | /01-QT/06-Dialog/InputDialog.py | 81afd8203dd0c98684bd8887c15158b2f83ec1b3 | [] | no_license | dyyzqz1015/python_learn | 0f9a585b782018559d8cb8a35f7b0bd1fc21a8ff | 62ee8c49e1e551d9d6bb881c553e8602f3a2c054 | refs/heads/master | 2021-06-10T18:50:36.761950 | 2019-12-11T08:40:14 | 2019-12-11T08:40:14 | 135,088,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : 01-QT
# @Time : 2018/5/26 17:12
# @Author : QinZai
# @File : InputDialog.py
# @Software: PyCharm
from PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit,
QInputDialog, QApplication)
import sys
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.btn = QPushButton('Dialog', self)
self.btn.move(20, 20)
self.btn.clicked.connect(self.showDialog)
self.le = QLineEdit(self)
self.le.move(130, 22)
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('Input dialog')
self.show()
def showDialog(self):
text, ok = QInputDialog.getText(self, 'Input Dialog',
'Enter your name:')
if ok:
self.le.setText(str(text))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
54320cc144accbbc19a2366c523173264961565a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02922/s040859855.py | c93462013926489db291dd42664757224e2579ba | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import sys
import math
from collections import deque
def input():
return sys.stdin.readline().rstrip()
def main():
A, B = map(int, input().split())
a = math.ceil((B-1)/(A-1))
print(a)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
85bb81e5e17872fc46fd3611bd88d10b73e77db0 | 297d426d5519c669b210e82d4aff479a51949e52 | /routines/flat_frames.py | 9cb1d822913380b9855f37540a8e5a5c7d5178d6 | [] | no_license | PulkitMalhotra15/Lunar-Eclipse-Analysis | 4722d6779bad00f107a553dccd4335051dde570b | d07f6b36eec79fea1f10e3129d8c6b72bc669521 | refs/heads/master | 2020-08-21T22:27:14.922373 | 2019-10-19T21:09:11 | 2019-10-19T21:09:11 | 216,261,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 31 11:23:59 2018
@author: sid
"""
import numpy as np
import astropy.io.fits as fit
import glob
wdir = '/home/atom/2018_07_27 TLE Jaisalmer/2018_07_28 TLE Jaisalmer/dark frames/'
lists = [[],[],[],[]]
lists[0] += glob.glob(wdir+'flat_bl*.fit')
lists[1] += glob.glob(wdir+'flat_IR*.fit')
lists[2] += glob.glob(wdir+'flat_HA*.fit')
lists[3] += glob.glob(wdir+'flat_HB*.fit')
flats=['bl','IR','HA','HB']
for i in range(len(lists)):
flat = np.zeros((1335,2003))
for j in range(len(lists[i])):
data=fit.open(lists[i][j])
hdr=data[0].header
img=data[0].data
flat += img
fit.writeto('/home/atom/2018_07_27 TLE Jaisalmer/Analysis/images/dark_and_flat/'
+'flat_'+flats[i]+'.fit',flat,header=hdr)
| [
"[email protected]"
] | |
1d7fcddad197b9c1e5b50b8573b0b569e645370a | 35a1593fbd15c8ef1a20971055774a1cdcd41bce | /test/test_rpc_fork.py | 5e2432f60183e5c5213ef1772931d6b7939ae669 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | sahils2607/pytorch | 884a2da2a613b525522a1615c8c5ecef013e4fb1 | 16454095e09eab2e737d99ad569cd44bb7910f03 | refs/heads/master | 2020-08-07T15:13:53.319183 | 2019-10-07T21:22:42 | 2019-10-07T21:24:34 | 213,499,886 | 1 | 0 | NOASSERTION | 2019-10-07T22:39:43 | 2019-10-07T22:39:43 | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
from rpc_test import RpcTest
from common_distributed import MultiProcessTestCase
from common_utils import run_tests
class RpcTestWithFork(MultiProcessTestCase, RpcTest):
def setUp(self):
super(RpcTestWithFork, self).setUp()
self._fork_processes()
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
] | |
e6b0e6837166020928a9bfbdf5bc302fa4f86ad8 | 7dfa21d74dae975082c6d5deaa01248bac1dcc26 | /.circleci/cimodel/data/pytorch_build_data.py | 09476a970b40045f3d53a7de2f01f11f71d683ae | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | mruberry/pytorch | 88cf536ed58d20a409c1e5119be4ec04ec960082 | 19f73180cfb39eb67110d2a1d541975a49211453 | refs/heads/master | 2022-02-03T16:25:31.070089 | 2019-04-22T17:52:28 | 2019-04-22T17:58:15 | 130,132,886 | 4 | 1 | NOASSERTION | 2020-01-16T16:51:39 | 2018-04-18T23:24:38 | C++ | UTF-8 | Python | false | false | 3,956 | py | #!/usr/bin/env python3
from cimodel.lib.conf_tree import ConfigNode, X
CONFIG_TREE_DATA = [
("trusty", [
(None, [
X("2.7.9"),
X("2.7"),
X("3.5"),
X("nightly"),
]),
("gcc", [
("4.8", [X("3.6")]),
("5.4", [("3.6", [X(False), X(True)])]),
("7", [X("3.6")]),
]),
]),
("xenial", [
("clang", [
("5", [X("3.6")]),
]),
("cuda", [
("8", [X("3.6")]),
("9", [
# Note there are magic strings here
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
# and
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L143
# and
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
X("2.7"),
X("3.6"),
]),
("9.2", [X("3.6")]),
("10", [X("3.6")]),
]),
("android", [
("r19c", [X("3.6")]),
]),
]),
]
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
self.subtree = subtree
self.init2(node_name)
def modify_label(self, label):
return label
def init2(self, node_name):
pass
def get_children(self):
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
def __init__(self, node_name, subtree):
super(TopLevelNode, self).__init__(None, node_name, subtree)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["distro_name"] = node_name
def child_constructor(self):
distro = self.find_prop("distro_name")
next_nodes = {
"trusty": TrustyCompilerConfigNode,
"xenial": XenialCompilerConfigNode,
}
return next_nodes[distro]
class TrustyCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
def child_constructor(self):
return TrustyCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class TrustyCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class PyVerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["pyver"] = node_name
self.props["abbreviated_pyver"] = get_major_pyver(node_name)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XlaConfigNode
class XlaConfigNode(TreeConfigNode):
def modify_label(self, label):
return "XLA=" + str(label)
def init2(self, node_name):
self.props["is_xla"] = node_name
class XenialCompilerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XenialCompilerVersionConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
| [
"[email protected]"
] | |
71dafe2db4bc761973d6704dc92903b815a5d803 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/channel/channel_metadata/tests/test__parse_video_quality_mode.py | 1d27462067dbc38950831d7cb97ceae62bdabb9d | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 594 | py | import vampytest
from ..preinstanced import VideoQualityMode
from ..fields import parse_video_quality_mode
def test__parse_video_quality_mode():
"""
Tests whether ``parse_video_quality_mode`` works as intended.
"""
for input_data, expected_output in (
({}, VideoQualityMode.auto),
({'video_quality_mode': VideoQualityMode.auto.value}, VideoQualityMode.auto),
({'video_quality_mode': VideoQualityMode.full.value}, VideoQualityMode.full),
):
output = parse_video_quality_mode(input_data)
vampytest.assert_eq(output, expected_output)
| [
"[email protected]"
] | |
0375e087942c2c2346f1f6076ecf3070bb98af45 | 3424fd15a6b4a079c24cb45c896277e3dd7cf0c8 | /plugins/modules/oneview_enclosure_group_facts.py | 15216f74b45c47563922cd7e99e1ea7af0c69bc1 | [
"Apache-2.0"
] | permissive | SHANDCRUZ/test-codecov | e7544c4616e281ada4858f34897b7a1e59be7748 | f7aca851423641570fa86d3f8233235d14c71756 | refs/heads/main | 2023-03-24T01:43:18.048625 | 2021-03-23T12:39:51 | 2021-03-23T12:39:51 | 350,258,321 | 0 | 0 | Apache-2.0 | 2021-03-23T12:47:30 | 2021-03-22T08:07:33 | Python | UTF-8 | Python | false | false | 4,355 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_enclosure_group_facts
short_description: Retrieve facts about one or more of the OneView Enclosure Groups.
description:
- Retrieve facts about one or more of the Enclosure Groups from OneView.
version_added: "2.3.0"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author:
- "Gustavo Hennig (@GustavoHennig)"
- "Bruno Souza (@bsouza)"
options:
name:
description:
- Enclosure Group name.
required: false
type: str
options:
description:
- "List with options to gather additional facts about Enclosure Group.
Options allowed:
C(configuration_script) Gets the configuration script for an Enclosure Group."
required: false
type: list
extends_documentation_fragment:
- hpe.oneview.oneview
- hpe.oneview.oneview.params
- hpe.oneview.oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
delegate_to: localhost
- debug: var=enclosure_groups
- name: Gather paginated, filtered and sorted facts about Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
params:
start: 0
count: 3
sort: 'name:descending'
filter: 'status=OK'
scope_uris: '/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'
- debug: var=enclosure_groups
- name: Gather facts about an Enclosure Group by name with configuration script
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
name: "Test Enclosure Group Facts"
options:
- configuration_script
delegate_to: localhost
- debug: var=enclosure_groups
- debug: var=enclosure_group_script
'''
RETURN = '''
enclosure_groups:
description: Has all the OneView facts about the Enclosure Groups.
returned: Always, but can be null.
type: dict
enclosure_group_script:
description: The configuration script for an Enclosure Group.
returned: When requested, but can be null.
type: dict
'''
from ansible_collections.hpe.oneview.plugins.module_utils.oneview import OneViewModule
class EnclosureGroupFactsModule(OneViewModule):
argument_spec = dict(
name=dict(required=False, type='str'),
options=dict(required=False, type='list'),
params=dict(required=False, type='dict')
)
def __init__(self):
super().__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.enclosure_groups)
def execute_module(self):
facts = {}
enclosure_groups = []
name = self.module.params.get("name")
if name:
if self.current_resource:
enclosure_groups = self.current_resource.data
if "configuration_script" in self.options:
facts["enclosure_group_script"] = self.current_resource.get_script()
else:
enclosure_groups = self.resource_client.get_all(**self.facts_params)
facts["enclosure_groups"] = enclosure_groups
return dict(changed=False, ansible_facts=facts)
def main():
EnclosureGroupFactsModule().run()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8e15123ac1006ef3d53de1573baf06184dd75c95 | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/plat-mac/carbon/lists.py | 4fc2252e450153f6e01beee179948af9cb5f9698 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,082 | py | # 2016.08.04 20:01:15 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/Lists.py
def FOUR_CHAR_CODE(x):
return x
listNotifyNothing = FOUR_CHAR_CODE('nada')
listNotifyClick = FOUR_CHAR_CODE('clik')
listNotifyDoubleClick = FOUR_CHAR_CODE('dblc')
listNotifyPreClick = FOUR_CHAR_CODE('pclk')
lDrawingModeOffBit = 3
lDoVAutoscrollBit = 1
lDoHAutoscrollBit = 0
lDrawingModeOff = 8
lDoVAutoscroll = 2
lDoHAutoscroll = 1
lOnlyOneBit = 7
lExtendDragBit = 6
lNoDisjointBit = 5
lNoExtendBit = 4
lNoRectBit = 3
lUseSenseBit = 2
lNoNilHiliteBit = 1
lOnlyOne = -128
lExtendDrag = 64
lNoDisjoint = 32
lNoExtend = 16
lNoRect = 8
lUseSense = 4
lNoNilHilite = 2
lInitMsg = 0
lDrawMsg = 1
lHiliteMsg = 2
lCloseMsg = 3
kListDefProcPtr = 0
kListDefUserProcType = kListDefProcPtr
kListDefStandardTextType = 1
kListDefStandardIconType = 2
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-mac\carbon\lists.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 20:01:15 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
36119431fd312a3e8902674067afbe6396c63da9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/219/29883/submittedfiles/testes.py | 4f4b4f48b93942c5a8eddaabeee18acfd3de9bd6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # -*- coding: utf-8 -*-
#START HERE BELOW
a=int(input('Enter a:'))
b=int(input('Enter b:'))
c=int(input('Enter c:'))
d=int(input('Enter d:'))
if a>=b and a>=c and a>=d:
    print(a)
if b<=c and b<=d:
    print(b)
elif b>=a and b>=c and b>=d: # missing colon added
    print(b) # the original line was cut off at "print("; printing b is the natural completion of this branch
| [
"[email protected]"
] | |
3c288b0c24feff89d6007028f01d23e3a6030c44 | fd3c30fe9afdf03fb2ff627fa19f6b7739075393 | /homeassistant/components/plugwise/__init__.py | f7986f915401c6c2dbc3df607fe1f60618f0245d | [
"Apache-2.0"
] | permissive | uvjustin/home-assistant | 8b761a56c7d98c22395367ce83eb41cf45ccf11a | 223000a9fbd2a46539054ad93a9dd29333205415 | refs/heads/dev | 2023-08-31T04:01:57.353481 | 2020-09-23T15:57:06 | 2020-09-23T15:57:06 | 234,028,164 | 3 | 2 | Apache-2.0 | 2023-01-13T06:03:32 | 2020-01-15T07:56:05 | Python | UTF-8 | Python | false | false | 6,308 | py | """Plugwise platform for Home Assistant Core."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict
from Plugwise_Smile.Smile import Smile
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
COORDINATOR,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
UNDO_UPDATE_LISTENER,
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
_LOGGER = logging.getLogger(__name__)
SENSOR_PLATFORMS = ["sensor"]
ALL_PLATFORMS = ["binary_sensor", "climate", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Plugwise platform."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Plugwise Smiles from a config entry."""
websession = async_get_clientsession(hass, verify_ssl=False)
api = Smile(
host=entry.data[CONF_HOST],
password=entry.data[CONF_PASSWORD],
port=entry.data.get(CONF_PORT, DEFAULT_PORT),
timeout=30,
websession=websession,
)
try:
connected = await api.connect()
if not connected:
_LOGGER.error("Unable to connect to Smile")
raise ConfigEntryNotReady
except Smile.InvalidAuthentication:
_LOGGER.error("Invalid Smile ID")
return False
except Smile.PlugwiseError as err:
_LOGGER.error("Error while communicating to device")
raise ConfigEntryNotReady from err
except asyncio.TimeoutError as err:
_LOGGER.error("Timeout while connecting to Smile")
raise ConfigEntryNotReady from err
update_interval = timedelta(
seconds=entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL[api.smile_type]
)
)
async def async_update_data():
"""Update data via API endpoint."""
try:
async with async_timeout.timeout(10):
await api.full_update_device()
return True
except Smile.XMLDataMissingError as err:
raise UpdateFailed("Smile update failed") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="Smile",
update_method=async_update_data,
update_interval=update_interval,
)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
api.get_all_devices()
if entry.unique_id is None:
if api.smile_version[0] != "1.8.0":
hass.config_entries.async_update_entry(entry, unique_id=api.smile_hostname)
undo_listener = entry.add_update_listener(_update_listener)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"api": api,
COORDINATOR: coordinator,
UNDO_UPDATE_LISTENER: undo_listener,
}
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, api.gateway_id)},
manufacturer="Plugwise",
name=entry.title,
model=f"Smile {api.smile_name}",
sw_version=api.smile_version[0],
)
single_master_thermostat = api.single_master_thermostat()
platforms = ALL_PLATFORMS
if single_master_thermostat is None:
platforms = SENSOR_PLATFORMS
for component in platforms:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
coordinator.update_interval = timedelta(
seconds=entry.options.get(CONF_SCAN_INTERVAL)
)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in ALL_PLATFORMS
]
)
)
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class SmileGateway(CoordinatorEntity):
"""Represent Smile Gateway."""
def __init__(self, api, coordinator, name, dev_id):
"""Initialise the gateway."""
super().__init__(coordinator)
self._api = api
self._name = name
self._dev_id = dev_id
self._unique_id = None
self._model = None
self._entity_name = self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the entity, if any."""
return self._name
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
device_information = {
"identifiers": {(DOMAIN, self._dev_id)},
"name": self._entity_name,
"manufacturer": "Plugwise",
}
if self._model is not None:
device_information["model"] = self._model.replace("_", " ").title()
if self._dev_id != self._api.gateway_id:
device_information["via_device"] = (DOMAIN, self._api.gateway_id)
return device_information
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._async_process_data()
self.async_on_remove(
self.coordinator.async_add_listener(self._async_process_data)
)
@callback
def _async_process_data(self):
"""Interpret and process API data."""
raise NotImplementedError
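# Illustrative sketch (not part of this file): a platform entity subclasses
# SmileGateway and fills in _async_process_data; the data-fetching helper
# named below is an assumption about the Plugwise_Smile API.
#
# class SmileThermostat(SmileGateway):
#     @callback
#     def _async_process_data(self):
#         data = self._api.get_device_data(self._dev_id)  # assumed helper
#         self._setpoint = data.get("setpoint")
#         self.async_write_ha_state()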
| [
"[email protected]"
] | |
ab512419d61466446b1eb72ac84831498db20e06 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_02_01/operations/express_route_circuits_operations.py | 3f54bb957557bb672da6c0a1511c65b6114abde7 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 46,037 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-02-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01"
self.config = config
def _delete_initial(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
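    # Usage sketch (illustrative; the client object and resource names are assumptions):
    # poller = network_client.express_route_circuits.delete('my-rg', 'my-circuit')
    # poller.wait()  # block until the long-running delete completes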
def get(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of express route circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuit or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
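    # usage sketch (hypothetical names):
    #   circuit = client.express_route_circuits.get('my-rg', 'my-circuit')
    #   print(circuit.name)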
def _create_or_update_initial(
self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to the create or update express
route circuit operation.
:type parameters:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ExpressRouteCircuit or
ClientRawResponse<ExpressRouteCircuit> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuit', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
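    # usage sketch (assumes `circuit_params` is an ExpressRouteCircuit model
    # built elsewhere; names are hypothetical):
    #   poller = client.express_route_circuits.create_or_update(
    #       'my-rg', 'my-circuit', circuit_params)
    #   circuit = poller.result()  # the deserialized ExpressRouteCircuit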
def _update_tags_initial(
self, resource_group_name, circuit_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, circuit_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates an express route circuit tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ExpressRouteCircuit or
ClientRawResponse<ExpressRouteCircuit> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuit', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
def _list_arp_table_initial(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_arp_table.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_arp_table(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the currently advertised ARP table associated with the express
route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitsArpTableListResult or
ClientRawResponse<ExpressRouteCircuitsArpTableListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsArpTableListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsArpTableListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_arp_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}
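    # usage sketch (hypothetical names): this POST-backed operation is also a
    # long-running one, so the ARP table arrives through the poller:
    #   poller = client.express_route_circuits.list_arp_table(
    #       'my-rg', 'my-circuit', 'AzurePrivatePeering', 'primary')
    #   arp_entries = poller.result().value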
def _list_routes_table_initial(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_routes_table.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_routes_table(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the currently advertised routes table associated with the express
route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitsRoutesTableListResult or
ClientRawResponse<ExpressRouteCircuitsRoutesTableListResult> if
raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_routes_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}
def _list_routes_table_summary_initial(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_routes_table_summary.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_routes_table_summary(
self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the currently advertised routes table summary associated with the
express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitsRoutesTableSummaryListResult or
ClientRawResponse<ExpressRouteCircuitsRoutesTableSummaryListResult> if
raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}
def get_stats(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitStats or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_stats.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitStats', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}
def get_peering_stats(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
"""Gets all stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitStats or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_peering_stats.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitStats', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the express route circuits in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteCircuit
:rtype:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the express route circuits in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteCircuit
:rtype:
~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}
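    # usage sketch: list() and list_all() both return a paged iterable that
    # fetches further pages lazily (hypothetical client):
    #   for circuit in client.express_route_circuits.list_all():
    #       print(circuit.name, circuit.location)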
# === File: GUI Based Projects (PyQt5)/Basics/CreatingWindowTitle.py (repo: pathakabhi24/Python-Projects) ===
import sys
import PyQt5.QtWidgets as qw
def window():
# application object
app = qw.QApplication([])
# application window
    win = qw.QMainWindow()
    # set the window geometry; the position is the location of the top-left corner of the window
    # (x_position, y_position, width, height)
win.setGeometry(200, 200, 200, 200)
# setting window title
win.setWindowTitle("I am window title")
# set label to the window
label = qw.QLabel(win)
label.setText("This is a label")
label.move(50, 60) # (x, y) from top left corner
# showing application window
win.show()
    # wrap app.exec_() in sys.exit() so the interpreter exits with the event loop's return code
sys.exit(app.exec_())
window()
# === File: config.py (repo: Nitinguptadu/Yugen-ai-Heroku, license: Apache-2.0) ===
from os import environ as env
import multiprocessing
PORT = int(env.get("PORT", 8000))
DEBUG_MODE = int(env.get("DEBUG_MODE", 1))
# Gunicorn config
bind = ":" + str(PORT)
workers = multiprocessing.cpu_count() * 2 + 1
threads = 2 * multiprocessing.cpu_count()
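# usage sketch (the app module name is hypothetical):
#   gunicorn -c config.py main:app
# gunicorn reads bind/workers/threads from this module at startup.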
# === File: ReportingTool.py (repo: Nsharma96/FullStack, license: MIT) ===
#!/usr/bin/env python2
import psycopg2
import pandas as pd
pd.set_option("display.colheader_justify", "center")
dbname = "news"
def execute_q(q):
    # run a query, close the connection, and return all rows
    # (the original closed the connection after `return`, which never ran)
    db = psycopg2.connect(database=dbname)
    c = db.cursor()
    c.execute(q)
    rows = c.fetchall()
    db.close()
    return rows
# The most popular three articles of all time.
result1 = execute_q("""select articles.title, artiView.num from articles ,
(select path , count(path)
as num from log where status
like '%200%' group by path order by num desc limit 4)
as artiView
where '/article/' || articles.slug = artiView.path
order by artiView.num desc;""")
# The most popular article authors of all time.
result2 = execute_q("""select authors.name,authorViewSums.authorView
from authors,authorViewSums
where authors.id=authorViewSums.author""")
# Days on which more than 1% of requests lead to errors.
result3 = execute_q("""select d as Day,m as Month,y as Year,
(err*1.0/total_Requests)*100 as Error
from error_Matrix
where (err*1.0/total_Requests)*100>1;""")
print("\nThe most popular three articles of all time.\n")
res = pd.DataFrame(data=result1, columns=['Article', 'Views'])
print(res)
print("\n")
print("The most popular article authors of all time.\n")
res = pd.DataFrame(data=result2, columns=['Author', 'Views'])
print(res)
print("\n")
print("Days on which more than 1 percent of requests lead to errors.\n")
res = pd.DataFrame(data=result3, columns=['Day', 'Month', 'Year', 'ERROR%'])
res = res.astype(int)
print(res)
print("\n")
# === File: html-png_code.py (repo: sketch2code-mit/html-png, license: MIT) ===
# this is the code for generating html screenshots
# written by Doodle2Code team for MIT 6.962 Applied Machine Learning
import time
from selenium import webdriver
import os
path = '/Users/Username/Desktop/filename/'
# reuse a single browser instead of launching one Chrome per file
driver = webdriver.Chrome()
driver.maximize_window()
# maximum waiting time for opening the html
driver.implicitly_wait(6)
for c in range(3):
    for n in range(10):
        for j in range(10):
            for i in range(10):
                # name of the file in format of 0000.html
                fn = str(c) + str(n) + str(j) + str(i) + ".html"
                tmpurl = 'file://{path}{mapfile}'.format(
                    path=path, mapfile=fn)  # build the local address of the html
                driver.get(tmpurl)  # open html
                time.sleep(1)
                driver.get_screenshot_as_file(
                    str(c) + str(n) + str(j) + str(i) + ".png")  # save a screenshot named like the html
driver.quit()
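# usage note (optional): headless Chrome avoids opening thousands of visible
# windows during batch capture. A sketch, assuming a chromedriver on PATH:
#   options = webdriver.ChromeOptions()
#   options.add_argument('--headless')
#   driver = webdriver.Chrome(options=options)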
# === File: CodeChef/CARPTUN.py (repo: rajatdiptabiswas/competitive-programming) ===
#!/usr/bin/env python3
def main():
t = int(input())
for testcase in range(t):
tolls = int(input())
toll_time = [int(toll) for toll in input().split()]
cars,distance,velocity = map(int, input().split())
        # the slowest toll plaza is the bottleneck: the last of the `cars`
        # cars clears it (cars - 1) service times after the first, so
        # distance and velocity drop out of the answer
        if cars >= 2:
            print("{:.8f}".format(max(toll_time) * (cars - 1)))
if __name__ == '__main__':
    main()

# === File: bin/pip (repo: samiaellin06/Khawoon) ===
#!/home/kruf/PycharmProjects/khawoon/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
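# note: this is the console-script wrapper that setuptools generates; running
# it resolves the 'pip' entry point of the pip==10.0.1 distribution installed
# in this virtualenv and exits with that function's return code.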
# === File: frames_orig.py (repo: MedSun/count_video) ===
import json
import os
import cv2
import requests
from app import ROOT_DIR
def frames_from_video(res):
response = json.loads(res)
video_name = response["video_name"]
video_file = response["video_file"]
video_cap = cv2.VideoCapture(video_file)
success, image = video_cap.read()
if success:
path = os.path.join(ROOT_DIR, 'frames_orig_images/' + video_name + '.jpg')
cv2.imwrite(path, image)
        # upload the frame and return the stored path; the with-block closes the file handle
        with open(path, "rb") as frame_file:
            response = requests.post("http://localhost:4000/api/upload-file", files={'file': frame_file})
        return json.dumps({"pic": response.json()["path"]})
    else:
        print("Error while creating the reference frame for video " + video_name)
return json.dumps("") | [
"[email protected]"
] | |
1bcbcbfe92659458a764c39a0f71f668340971fc | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /accounts/perms.py | bd00bb6b63018efa7cc39d7709ce8ee5829b7d04 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,451 | py | # -*- coding: utf-8 -*-
from simple_perms import PermissionLogic, register
from helpers.mixins import BasicPermissionLogicMixin
class UserPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, user_to_view, *args):
if user_to_view == user:
return True
if user.is_client or user.is_professional:
return False
if user.is_administrator or user.is_advisor or user.is_manager:
return True
return self.admin_permission(user, user_to_view, *args)
def change(self, user, user_to_modify, *args):
if user_to_modify == user:
return True
if user.is_client or user.is_professional:
return False
if user.is_administrator:
return True
# Allow same group modifications
if user_to_modify.group is not None and user_to_modify.group.is_member(user):
if user.is_advisor and user_to_modify.is_advisor:
return True
if user.is_manager and (
user_to_modify.is_advisor or user_to_modify.is_manager
):
return True
if (user.is_advisor or user.is_manager) and user_to_modify.is_client:
return True
if (
user.is_manager
and user_to_modify.is_advisor
and user_to_modify.group.admin_group == user.group
and user.group.is_admin
):
return True
if (
user.is_manager
and user_to_modify.is_manager
and user_to_modify.group == user.group
):
return True
return self.admin_permission(user, user_to_modify, *args)
def change_user_type(self, user, *args):
"""
Perm for user to change user_type for user_modified
Parameters
----------
user : User
args : Dict(user_modified, to_user_type)
"""
user_modified = args[0]["user_modified"]
to_user_type = args[0]["to_user_type"]
if user.is_client or user.is_professional:
return False
if user_modified.is_client or user_modified.is_professional:
return False
if to_user_type == "client" or to_user_type == "professional":
return False
if user.is_administrator:
return True
if user.is_manager:
if (
user_modified.is_advisor
or user_modified.is_superadvisor
or user_modified.is_manager
and user_modified.group.is_member(user)
):
if to_user_type in ["advisor", "superadvisor", "manager"]:
return True
if (
user.is_superadvisor
and to_user_type in ["advisor", "superadvisor"]
and user_modified.is_advisor
):
return True
return self.admin_permission(user, user_modified, *args)
register("user", UserPermissionLogic)
register("accounts/user", UserPermissionLogic)
class RgpdConsentPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, rgpdconsent, *args):
if rgpdconsent.user == user:
return True
return self.admin_permission(user, rgpdconsent, *args)
change = view
register("rgpdconsent", RgpdConsentPermissionLogic)
register("accounts/rgpdconsent", RgpdConsentPermissionLogic)
class GroupPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if user.is_advisor or user.is_manager:
return True
return self.admin_permission(user, group, *args)
def create(self, user, group, group_data, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if user.is_manager:
if not group_data:
return False
if user.group is not None:
if group is not None:
if group.admin_group.pk == user.group.pk:
return True
return self.admin_permission(user, None, *args)
def change(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_administrator:
return True
if (
user.is_manager
and user.group is not None
and group.admin_group == user.group
):
return True
return self.admin_permission(user, group, *args)
def partial_change(self, user, group, *args):
"""
change only some fiels on group
"""
if user.is_advisor and user.group is not None and group == user.group:
return True
return self.admin_permission(user, group, *args)
register("group", GroupPermissionLogic)
register("accounts/group", GroupPermissionLogic)
class GroupPlacePermissionLogic(BasicPermissionLogicMixin, PermissionLogic):
def view(self, user, group, *args):
if user.is_anonymous:
return False
if user.is_expert:
return True
return self.admin_permission(user, group, *args)
register("group_place", GroupPlacePermissionLogic)
register("accounts/group_place", GroupPlacePermissionLogic)
# === File: server_run.py (repo: electramite/RPi_dashboard) ===
from flask import render_template, url_for, request
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
trig = 17
echo = 27
GPIO.setup(trig, GPIO.OUT)
GPIO.setup(echo, GPIO.IN)
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
distance = sensor_1()
return render_template("sensor.html", distance=distance)
def sensor_1():
    # fire a 10 us trigger pulse on the HC-SR04
    GPIO.output(trig, True)
    time.sleep(0.00001)
    GPIO.output(trig, False)
    # timestamp the rising and falling edge of the echo pin; initialise both
    # so the subtraction cannot hit unbound variables on a missed reading
    pulse_s = pulse_e = time.time()
    while GPIO.input(echo) == 0:
        pulse_s = time.time()
    while GPIO.input(echo) == 1:
        pulse_e = time.time()
    pulse_d = pulse_e - pulse_s
    # distance in cm: speed of sound ~34000 cm/s, halved for the round trip
    d = 34000 * pulse_d / 2
    return int(d)
if __name__ == "__main__":
app.run(host = '0.0.0.0',port=4556,debug=True)
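# usage note: with an HC-SR04 sensor wired to BCM pins 17 (trig) and 27 (echo),
# opening http://<pi-address>:4556/ renders sensor.html with the latest
# distance reading in centimetres.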
# === File: .parts/lib/python2.7/test/outstanding_bugs.py (repo: ronkagan/Euler_1) ===
/home/action/.parts/packages/python2/2.7.6/lib/python2.7/test/outstanding_bugs.py

# === File: main.py (repo: airuibel/py_code) ===
# import package
import numpy as np
from pandas import DataFrame
import pandas as pd
import re
from dateutil import relativedelta
import datetime as dt
# 1.1
def df_groupby(df, groupkey, col, func, res_col_name, asint=False, dup=False):
"""
:param df: 一个df 征对 1+ 用户
:param groupkey: df中聚合分类的变量名
:param col: df中待聚合的变量名,字符串或者列表
:param func: 聚合方式,支持sum /max /min /avg /count/ distinct_count
:param res_col_name: 聚合结果列名,字符串或者列表
:param asint: if asint=True ,聚合结果转为int ;default asint=False;
:param dup: if dup=True ,变量取值去重 ;default dup=False;
:return:df_res df
"""
# dropna all row
df = df.dropna(axis=0, how='all')
# reformat type
try:
if func != 'count' and func != 'distinct_count':
df[col] = df[col].astype('float32')
except ValueError:
print('the col could not convert string to float!')
# duplicate the col
if dup:
df = df.drop_duplicates(df.columns)
# compatible str
if type(col) != list:
col = [col]
if type(res_col_name) != list:
res_col_name = [res_col_name]
if type(func) != list:
func = [func]
# agg index
df_res = DataFrame(df[groupkey].unique(), columns=[groupkey])
for i in func:
if i == 'sum':
df_res_ago = DataFrame(df.groupby(groupkey)[col].sum())
elif i == 'max':
df_res_ago = DataFrame(df.groupby(groupkey)[col].max())
elif i == 'min':
df_res_ago = DataFrame(df.groupby(groupkey)[col].min())
elif i == 'avg':
df_res_ago = DataFrame(df.groupby(groupkey)[col].mean())
elif i == 'std':
df_res_ago = DataFrame(df.groupby(groupkey)[col].std())
elif i == 'count':
df_res_ago = DataFrame(df.groupby(groupkey)[col].count())
elif i == 'distinct_count':
df_res_ago = DataFrame(df.groupby(groupkey)[col].nunique())
        else:
            # fail fast: the original only printed here, then crashed on an
            # unbound df_res_ago two lines later
            raise ValueError('unsupported func: %s' % i)
df_res_ago = df_res_ago.reset_index()
df_res = pd.merge(df_res, df_res_ago, how='left', on=groupkey)
columns_list = [groupkey]
columns_list.extend(res_col_name)
df_res.columns = columns_list
if asint:
df_res[res_col_name] = df_res[res_col_name].astype(int)
return df_res
# use example
# df_groupby(df,'appl_no', 'phone_gray_score', 'sum', 'phone_gray_score_sum', dup=False, asint=False)
# df_groupby(df,'appl_no', ['phone_gray_score'], ['sum'], ['phone_gray_score_sum'], dup=False, asint=False)
# df_groupby(df,'appl_no', ['register_cnt','phone_gray_score'], ['sum'], ['register_cnt_sum','phone_gray_score_sum'], dup=False, asint=False)
# df_groupby(df,'appl_no', ['register_cnt','phone_gray_score'], ['sum','avg','count'], ['register_cnt_sum','phone_gray_score_sum','register_cnt_avg','phone_gray_score_avg','register_cnt_count','phone_gray_score_count'], dup=False, asint=False)
# 1.2.1
def col_dummy(x, col, dummy_dict=[]):
"""
function about:变量编码功能函数集
by boysgs @20171103
:param x: 一个数值
:param col: df中需重新编码的变量名
:param dummy_dict: 列表,变量所有取值组成,示例['value_1','value_2']
:return:col_dummy_dict
"""
dummy_dict_sorted = sorted(dummy_dict)
dummy_dict_sorted_key = np.array(['_'.join(['if', col, i]) for i in dummy_dict_sorted])
dummy_dict_sorted_value = [0] * len(dummy_dict_sorted_key)
col_dummy_zip = zip(dummy_dict_sorted_key, dummy_dict_sorted_value)
col_dummy_dict = dict((a, b) for a, b in col_dummy_zip)
#
if x in dummy_dict_sorted:
col_dummy_dict['_'.join(['if', col, x])] = 1
return col_dummy_dict
# use example
# df = pd.DataFrame({'col1': [1, np.nan, 2, 3], 'col2': [3, 4, 5, 1], 'col3': ['s', 'a', 'c', 'd']})
# dummy_dict = ['a', 'b', 'c', 'd', 's']
# col = 'col3'
# DataFrame(list(df[col].apply(lambda x: col_dummy(x, col, dummy_dict))))
# 1.2.2
def col_dummy_lb(x, lb_trans, sorted_dummy_varname_list=[]):
"""
function about:变量编码功能函数集(使用LabelBinarizer方法)
by boysgs @20171103
:param x: 一个数值
:param lb_trans: 一个变量利用preprocessing.LabelBinarizer 方法生成的对象
:param sorted_dummy_varname_list: 列表,升序排列的变量所有取值组成,示例['value_1','value_2']
:return:col_dummy_dict 字典
"""
    dummy_value = lb_trans.transform([str(x)])  # transform expects a sequence of labels, not str([x])
col_dummy_dict = dict(zip(sorted_dummy_varname_list, dummy_value[0]))
return col_dummy_dict
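# use example (a sketch; assumes a LabelBinarizer fitted on the column values)
# from sklearn import preprocessing
# lb_trans = preprocessing.LabelBinarizer().fit(['a', 'b', 'c'])
# col_dummy_lb('b', lb_trans, ['if_col3_a', 'if_col3_b', 'if_col3_c'])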
# 2.1
def meetOneCondition(x,symbol = '=',threshold = ('None','b')):
"""
# 输入:
# 变量名:年龄
# 符号:=,!=,>,< , >=, <= , in , not in,like, not like
# 阈值:10,(10,11),'%10%'
# 输出
# 满足条件输出1,否则输出0
"""
if pd.isnull(x) or x == '':
        if symbol in ['!=','not in','not like'] and threshold!='None':
return 1
elif threshold=='None':
if symbol == '=':
return 1
elif symbol == '!=':
return 0
else:
return 0
elif symbol == '=':
if threshold=='None':
return 0
elif x == threshold:
return 1
else:
return 0
elif symbol == '!=':
if threshold=='None':
return 1
elif x != threshold:
return 1
else:
return 0
elif symbol == '>':
if x > threshold:
return 1
else:
return 0
elif symbol == '<':
if x < threshold:
return 1
else:
return 0
elif symbol == '>=':
if x >= threshold:
return 1
else:
return 0
elif symbol == '<=':
if x <= threshold:
return 1
else:
return 0
elif symbol == 'in':
if x in threshold:
return 1
else:
return 0
elif symbol == 'not in':
if x not in threshold:
return 1
else:
return 0
elif symbol == 'like':
if threshold[0] == '%' and threshold[-1] == '%':
if threshold[1:-1] in x:
return 1
else:
return 0
if threshold[0] == '%' and threshold[-1] != '%':
if threshold[1:] == x[len(x)-len(threshold[1:]):]:
return 1
else:
return 0
if threshold[0] != '%' and threshold[-1] == '%':
if threshold[0:-1] == x[0:len(threshold[0:-1])]:
return 1
else:
return 0
else:
            return 'you need to check your "like" threshold'
elif symbol == 'not like':
if threshold[0] == '%' and threshold[-1] == '%':
if threshold[1:-1] not in x:
return 1
else:
return 0
if threshold[0] == '%' and threshold[-1] != '%':
if threshold[1:] != x[len(x)-len(threshold[1:]):]:
return 1
else:
return 0
if threshold[0] != '%' and threshold[-1] == '%':
if threshold[0:-1] != x[0:len(threshold[0:-1])]:
return 1
else:
return 0
else:
            return 'you need to check your "not like" threshold'
elif symbol =='regex':
if re.search(threshold,x):
return 1
else:
return 0
else:
        return 'please contact the developer to add support for this symbol type'
# test:
# x = 'abcde'
# meetOneCondition(x,'=','abcd2')
# meetOneCondition(x,'like','abc%')
# meetOneCondition(x,'like','%abc')
# meetOneCondition(x,'regex','b|adz|z')
# 2.2
def meetMultiCondition(condition = ((),'and',())):
"""
# 输入
# 多个条件,单个条件参考meetOneCondition中的
# 例子 condition = ( ('age','>=',18), 'and', ( ('age','<=',40),'or',('gender','=','female') ) )
# 输出
# 满足条件输出1,否则输出0
"""
if 'and' in condition:
a = [k for k in condition if k!='and']
b = []
for l in range(len(a)):
b.append(meetMultiCondition(a[l]))
if 0 in b:
return 0
else:
return 1
if 'or' in condition:
a = [k for k in condition if k != 'or']
b = []
for l in range(len(a)):
b.append(meetMultiCondition(a[l]))
if 1 in b:
return 1
else:
return 0
else:
return meetOneCondition(condition[0],condition[1],condition[2])
# test
# zz ='abcde'
# yy = 10
# xx = 5
# meetMultiCondition(((zz,'=','abc'),'or',(yy,'>',7)))
# 2.3
def singleConditionalAssignment(conditon =('z','=',('None','b')),assig1=1, assig2=0):
"""
# 单条件赋值
# 输入
# 参考meetOneCondition的输入
# 例如:conditon = ('age','>=',18)
# 输出:
# 满足条件assig1
# 不满足条件assig2
"""
if meetOneCondition(conditon[0],conditon[1],conditon[2])==1:
return assig1
elif meetOneCondition(conditon[0], conditon[1], conditon[2]) == 0:
return assig2
else:
return meetOneCondition(conditon[0],conditon[1],conditon[2])
# test
# singleConditionalAssignment((x, '=', 'abcde'), 5, 1)
# 2.4
def multiConditionalAssignment(condition = (),assig1 = 1,assig2 = 0):
"""
# 多个条件赋值
###输入
##多个条件类似meetMultiCondition的输入
###输出:
##满足条件assig1
##不满足条件assig2
"""
if meetMultiCondition(condition)==1:
return assig1
else:
return assig2
# test
# xx=5
# multiConditionalAssignment(condition =((zz,'=','abcde'),'and',( (yy,'>',10), 'or', (xx,'=',5) )),assig1 = 999,assig2 = 0)
# 2.5
def multiConditionalMultAssignment(condition = ((('zz','not in', ('硕士','博士')),1),(('zz','not in', ('硕士','博士')),2)),assig = 0):
"""
####多个条件多个赋值
###输入
##多个条件类似meetMultiCondition的输入,再加一满足的取值
###输出:
##满足条件输出输入目标值
##不满足条件assig
"""
for l in condition:
if meetMultiCondition(l[0])==1:
return l[1]
return assig
# test
# multiConditionalMultAssignment((((zz,'=','abcdef'),1),((zz,'=','abcde'),2)),3)
# 3.1
def substring(string,length,pos_start=0):
"""
function about : 字符串截取
by dabao @20171106
:param string: 被截取字段
:param length: 截取长度
:param pos_start: 从第几位开始截取,defualt=0
:return: a string :substr
"""
pos_end = length + pos_start
if string is np.NaN:
return np.NaN
else:
str_type = type(string)
if str_type==str:
substr = string[pos_start:pos_end]
else:
string = str(string)
substr = string[pos_start:pos_end]
return substr
# test
# string=370321199103050629
# length=4
# pos_start=6
# substring(string,length,pos_start)
# string=np.NaN
# 3.2
def charindex(substr,string,pos_start=0):
"""
function about : 字符串位置查询
by dabao @20171106
:param substr
:param string: substr 在 string 起始位置
:param pos_start: 查找substr的开始位置,default=0
:return: a int :substr_index
"""
if string is np.NaN:
return np.NaN
else:
substr = str(substr)
string = str(string)
substr_index = string.find(substr,pos_start)
return substr_index
# test
# string='370321199103050629'
# substr='1991'
# charindex(substr,string)
# string.find(substr,0)
# 3.3
def trim(string,substr=' ',method='both'):
"""
function about : 删除空格或其他指定字符串
by dabao @20171106
:param string: a string
:param substr: 在string两端删除的指定字符串,default=' '
:param method: 删除方式:left 删除左边, right 删除右边, both 删除两边
:return: a string :string_alter
"""
if string is np.NaN:
return np.NaN
else:
substr = str(substr)
string = str(string)
if method in ['left','right','both']:
if method =='left':
string_alter = string.lstrip(substr)
elif method == 'right':
string_alter = string.rstrip(substr)
elif method == 'both':
string_alter = string.strip(substr)
else:
string_alter = string.strip(substr)
print("Warning: method must be in ['left','right','both']! If not, the function will be acting as 'both'")
return string_alter
# test:
# string=' OPPO,HUAWEI,VIVO,HUAWEI '
# trim(string)
# (4) string length: SQL's LEN() function; Python's built-in len()
# (5) upper/lower case: SQL's LOWER/UPPER; Python's built-ins string.upper(), string.lower()
# 3.4
def OnlyCharNum(s, oth=''):
    # keep only letters and digits (the oth parameter is unused)
    s2 = s.lower()
    allowed = 'abcdefghijklmnopqrstuvwxyz0123456789'
    for c in s2:
        if c not in allowed:
            s = s.replace(c, '')
    return s
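# test
# OnlyCharNum('Ab-3 de!')   # returns 'Ab3de' (uppercase letters are kept, punctuation and spaces dropped)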
# 4.1
def dateformat(date,symbol):
"""
输入:
变量名:时间,按照格式接收10位、19位
可选:'year','month','day','hour','minute','second'
输出
满足条件输出值,否则报错
"""
if pd.isnull(date):
return np.NaN
date = str(date)
if len(date)==10:
date=date+' 00:00:00'
date=dt.datetime.strptime(date,'%Y-%m-%d %H:%M:%S')
if symbol in ['year','month','day','hour','minute','second']:
if symbol =='year':
datetime_elect = date.year
elif symbol == 'month':
datetime_elect = date.month
elif symbol == 'day':
datetime_elect = date.day
elif symbol == 'hour':
datetime_elect = date.hour
elif symbol == 'minute':
datetime_elect = date.minute
elif symbol == 'second':
datetime_elect = date.second
else:
datetime_elect = np.NaN
print("Warning: symbol must be in ['year','month','day','hour','minute','second']! If not, the function will be acting as 'both'")
return datetime_elect
# test1:
# dateformat('2017-09-25 12:58:45','day')
# dateformat('2017-09-25 12:58:45','hour')
# dateformat('2017-09-25','day')
# dateformat(null,'hour')
# 4.2
def datediff(symbol,date_begin,date_end):
"""
输入:
变量名:时间,按照格式接收10位、19位
可选:'year','month','day','hour','minute','second'
输出
满足条件输出值,否则报错
"""
if pd.isnull(date_begin) or pd.isnull(date_end):
return np.NaN
date_begin = str(date_begin)
date_end = str(date_end)
if len(date_begin)==4:
date_begin=date_begin+'-01-01 00:00:00'
if len(date_end)==4:
date_end=date_end+'-01-01 00:00:00'
if len(date_begin)==7:
date_begin=date_begin+'-01 00:00:00'
if len(date_end)==7:
date_end=date_end+'-01 00:00:00'
if len(date_begin)==10:
date_begin=date_begin+' 00:00:00'
if len(date_end)==10:
date_end=date_end+' 00:00:00'
date_begin=dt.datetime.strptime(date_begin,'%Y-%m-%d %H:%M:%S')
date_end=dt.datetime.strptime(date_end,'%Y-%m-%d %H:%M:%S')
if symbol in ['year','month','day','hour','minute','second']:
r = relativedelta.relativedelta(date_end,date_begin)
if symbol =='year':
datetime_diff=r.years
elif symbol == 'month':
datetime_diff=r.years*12+r.months
elif symbol == 'day':
datetime_diff = (date_end-date_begin).days
elif symbol == 'hour':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff = datetime_seconds/3600+datetime_days*24
elif symbol == 'minute':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff=datetime_seconds/60+datetime_days*24*60
elif symbol == 'second':
datetime_days = (date_end-date_begin).days
datetime_seconds = (date_end-date_begin).seconds
datetime_diff=datetime_seconds+datetime_days*24*60*60
else:
datetime_diff = np.NaN
print("Warning: symbol must be in ['year','month','day','hour','minute','second']! If not, the function will be acting as 'both'")
return datetime_diff
# test
# datediff('month','2013','2017-09-25 12:58:45')
# datediff('day','2017-09-25','2017-12-30')
# datediff('hour','2017-09-15 10:58:45','2017-09-25 12:58:45')
# datediff('day','2017-09-25','2017-12-30 12:58:45') | [
"l"
] | l |
ea7af78e34c8acc6637fb1902a7c88c16081361f | 0daa78054f5d5b505047aaa28ecbbea1662f9c53 | /loop.py | 047a171e01830a7b9011b773017a51188177d3eb | [] | no_license | WindWalker19/Python_for_everybody | 22d8e616ce19a0259a56f4a30048700c142cbc63 | a2302f2ed4fcc334a096dda22b4ff6e7603c7c22 | refs/heads/master | 2022-07-11T16:39:38.038333 | 2020-05-13T21:17:40 | 2020-05-13T21:17:40 | 263,165,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #A while loop with break.
while True:
line = input("> ")
if line == "done":
print(line)
break
print("Blastoff")
while True:
line = input("> ")
    if line.startswith("#"):
continue # The continue would ask to go to the top of the loop without executing the code after it.
print("hello")
if line == "done":
break
print("Blastoff")
| [
"[email protected]"
] | |
b5abfe01419986db825a86397318c45516c2d8f0 | 814df4c836843382dc9aecc907da7e2d8e824b53 | /Decryption_Server.py | 96d28a363a72f441e1d8b007236ed04a4704894e | [] | no_license | Aditya-Ramachandran/RSA_Cryptography | ed6909dc359a6f949f0a91d24ed047354918df63 | 18f6b1a30250573286488244cc832d0883ebba10 | refs/heads/master | 2022-12-09T21:31:37.320591 | 2020-09-08T16:23:11 | 2020-09-08T16:23:11 | 289,639,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from __future__ import unicode_literals
import socket
from math import sqrt
import random
from random import randint as rand
import pickle
host = socket.gethostname()
port = 5000
s = socket.socket()
s.bind((host, port))
s.listen(2)
def gcd(a, b):
if b == 0:
return a
else:
return gcd(b, a % b)
def mod_inverse(a, m):
for x in range(1, m):
if (a * x) % m == 1:
return x
return -1
def isprime(n):
if n < 2:
return False
elif n == 2:
return True
else:
for i in range(1, int(sqrt(n)) + 1):
if n % i == 0:
return False
return True
#initial two random numbers p,q
p = rand(1, 1000)
q = rand(1, 1000)
def generate_keypair(p, q,keysize):
# keysize is the bit length of n so it must be in range(nMin,nMax+1).
# << is bitwise operator
# x << y is same as multiplying x by 2**y
# i am doing this so that p and q values have similar bit-length.
# this will generate an n value that's hard to factorize into p and q.
nMin = 1<<(keysize-1)
nMax = (1<<keysize) - 1
primes=[2]
# we choose two prime numbers in range(start, stop) so that the difference of bit lengths is at most 2.
start = 1<<(keysize//2-1)
stop = 1<<(keysize//2+1)
if start >= stop:
return []
for i in range(3, stop + 1, 2):
for p in primes:
if i % p == 0:
break
else:
primes.append(i)
while(primes and primes[0] < start):
del primes[0]
#choosing p and q from the generated prime numbers.
while primes:
p = random.choice(primes)
primes.remove(p)
q_values = [q for q in primes if nMin <= p * q <= nMax]
if q_values:
q = random.choice(q_values)
break
n = p * q
phi = (p - 1) * (q - 1)
    # generate public key e with 1 < e < phi(n);
    # as long as gcd(e, phi(n)) is not 1 (or e equals the private key), keep generating e
while True:
e = random.randrange(1, phi)
g = gcd(e, phi)
#generate private key
d = mod_inverse(e, phi)
if g==1 and e!=d:
break
#public key (e,n)
#private key (d,n)
return ((e, n), (d, n))
def decrypt(msg_ciphertext, package):
d, n = package
msg_plaintext = [chr(pow(c, d, n)) for c in msg_ciphertext]
# No need to use ord() since c is now a number
# After decryption, we cast it back to character
# to be joined in a string for the final result
return (''.join(msg_plaintext))
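# For reference, the matching client-side encryption (not part of this file)
# would be: ciphertext = [pow(ord(ch), e, n) for ch in message]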
public, private = generate_keypair(p, q, 8)
print(host)
conn, address = s.accept()
print("Connected to: " + str(address))
conn.send(str(public[0]).encode())
conn.send(str(public[1]).encode())
print("Public Key: ",public)
while True:
    data = conn.recv(1024*4)
    if not data:  # client closed the connection; recv returns b''
        break
    encoded_data = pickle.loads(data)
    for i in range(len(encoded_data)):
        encoded_data[i]=int(encoded_data[i])
#print(''.join(map(lambda x: str(x), encoded_data)))
decoded_data = decrypt(encoded_data, private)
print("Client : " + str(decoded_data))
conn.close() | [
"[email protected]"
] | |
c48387129d1b598576966c6cc19fb3e7bd4127ea | 4aee1b5faac38af7429d5a4e518f971b5e966cf6 | /MaximizationBias_Example6.7/MaximizationBias_QLearning_Example6.7.py | dd0b4ca11110dc48c9cac60cc593679f86fc8fd1 | [] | no_license | themagicsketchbook/RL_Sutton-Barto_Solutions | 5d35ea4524f06e7fc0002f9be861651fbb3acf04 | 9e0cc8696357c6f3cdbc4a662e5c6a062a5857ff | refs/heads/master | 2022-01-12T04:13:19.583974 | 2019-07-16T13:18:51 | 2019-07-16T13:18:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,507 | py | import numpy as np
import matplotlib.pyplot as plt
class simulation:
def __init__(self,action_num,method):
self.action_num = action_num
self._method = method
self.ACTIONS = [[0,1],[i for i in range(action_num)]]
if self._method == 'Q':
self.Q_values = [[0.0, 0.0], [0.0 for i in range(action_num)]]
else:
self.Q1_values = [[0.0,0.0],[0.0 for i in range(action_num)]]
self.Q2_values = [[0.0, 0.0], [0.0 for i in range(action_num)]]
def choose_action(self,state):
e = np.random.random()
if e < EPSILON:
action = np.random.choice(self.ACTIONS[state])
else:
if self._method == 'Q':
action = np.random.choice(np.flatnonzero(self.Q_values[state] == np.max(self.Q_values[state])))
else:
action_values = np.array(self.Q1_values[state])+np.array(self.Q2_values[state])
action = np.random.choice(np.flatnonzero(action_values == np.max(action_values)))
return action
def determine_transition(self,cur_state,action):
next_state = None
ended = True
if cur_state == 0:
reward = 0
if action == 0:
next_state = 1
ended = False
if cur_state == 1:
reward = np.random.normal(-0.1, 1)
return next_state,reward,ended
def update_QValues(self,curr_state,action,reward,next_state):
if self._method == 'Q':
if next_state == None:
self.Q_values[curr_state][action] += ALFA * (reward - self.Q_values[curr_state][action])
else:
max_nextQValue = np.max(self.Q_values[next_state])
self.Q_values[curr_state][action] += ALFA * (
reward + GAMMA * max_nextQValue - self.Q_values[curr_state][action])
else:
e = np.random.random()
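            # Double Q-learning: flip a fair coin to decide which table to update,
            # using the other table to evaluate the greedy action (reduces maximization bias)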
if e<0.5:
if next_state == None:
self.Q1_values[curr_state][action]+=ALFA*(reward-self.Q1_values[curr_state][action])
else:
max_nextQValue = self.Q2_values[next_state][np.argmax(self.Q1_values[next_state])]
self.Q1_values[curr_state][action] += ALFA * (reward + GAMMA*max_nextQValue- self.Q1_values[curr_state][action])
else:
if next_state == None:
self.Q2_values[curr_state][action]+=ALFA*(reward-self.Q2_values[curr_state][action])
else:
max_nextQValue = self.Q1_values[next_state][np.argmax(self.Q2_values[next_state])]
self.Q2_values[curr_state][action] += ALFA * (reward + GAMMA*max_nextQValue- self.Q2_values[curr_state][action])
def run_simulation(self):
episode_direction = []
for episode in range(EPISODES):
curr_state = 0
while True:
action = self.choose_action(curr_state)
next_state, reward, episode_ended= self.determine_transition(curr_state, action)
self.update_QValues(curr_state,action,reward,next_state)
if episode_ended:
episode_direction.append(1 if curr_state == 1 else 0)
break
curr_state = next_state
return 100*np.divide(np.cumsum(episode_direction),np.arange(1,EPISODES+1))
EPSILON = 0.1
B_ACTION_CHOICE = [1,2,5,10,100]
ALFA = 0.1
GAMMA = 1
EPISODES = 300
RUNS = 10000
Percentage_left_actions = np.zeros((len(B_ACTION_CHOICE),EPISODES))
method = 'DQ' # Use Q if using just Q and use 'DQ' if using Double-Q
for run in range(RUNS):
if run in np.arange(0,RUNS,RUNS/10):
print('Run number = {}'.format(run))
for i,action_num in enumerate(B_ACTION_CHOICE):
Sim = simulation(action_num,method)
Percentage_left_actions[i,:]+=Sim.run_simulation()
Percentage_left_actions/=RUNS
fig = plt.figure(figsize=(8,10))
Actions_Plot = plt.subplot()
for i,action_choice in enumerate(B_ACTION_CHOICE):
Actions_Plot.plot(np.arange(1,EPISODES+1),Percentage_left_actions[i],label = '{}'.format(action_choice))
Actions_Plot.set_xticks([1,100,200,300])
Actions_Plot.set_yticks([0,5,25,50,75,100])
Actions_Plot.set_ylabel('% left actions from A')
Actions_Plot.set_xlabel('Episodes')
Actions_Plot.legend(title = 'Number of actions in B') | [
"[email protected]"
] | |
4113853c20a7674a37b502b73ee6d10f9288b8e6 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/eventgrid/v20200101preview/get_event_subscription.py | 80b0097a55e3921372b7d0b2fecda5f6ee7bbc0b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 8,537 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetEventSubscriptionResult',
'AwaitableGetEventSubscriptionResult',
'get_event_subscription',
]
@pulumi.output_type
class GetEventSubscriptionResult:
"""
Event Subscription
"""
def __init__(__self__, dead_letter_destination=None, destination=None, event_delivery_schema=None, expiration_time_utc=None, filter=None, id=None, labels=None, name=None, provisioning_state=None, retry_policy=None, topic=None, type=None):
if dead_letter_destination and not isinstance(dead_letter_destination, dict):
raise TypeError("Expected argument 'dead_letter_destination' to be a dict")
pulumi.set(__self__, "dead_letter_destination", dead_letter_destination)
if destination and not isinstance(destination, dict):
raise TypeError("Expected argument 'destination' to be a dict")
pulumi.set(__self__, "destination", destination)
if event_delivery_schema and not isinstance(event_delivery_schema, str):
raise TypeError("Expected argument 'event_delivery_schema' to be a str")
pulumi.set(__self__, "event_delivery_schema", event_delivery_schema)
if expiration_time_utc and not isinstance(expiration_time_utc, str):
raise TypeError("Expected argument 'expiration_time_utc' to be a str")
pulumi.set(__self__, "expiration_time_utc", expiration_time_utc)
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if labels and not isinstance(labels, list):
raise TypeError("Expected argument 'labels' to be a list")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if retry_policy and not isinstance(retry_policy, dict):
raise TypeError("Expected argument 'retry_policy' to be a dict")
pulumi.set(__self__, "retry_policy", retry_policy)
if topic and not isinstance(topic, str):
raise TypeError("Expected argument 'topic' to be a str")
pulumi.set(__self__, "topic", topic)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="deadLetterDestination")
def dead_letter_destination(self) -> Optional['outputs.StorageBlobDeadLetterDestinationResponse']:
"""
The DeadLetter destination of the event subscription.
"""
return pulumi.get(self, "dead_letter_destination")
@property
@pulumi.getter
def destination(self) -> Optional[Any]:
"""
Information about the destination where events have to be delivered for the event subscription.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="eventDeliverySchema")
def event_delivery_schema(self) -> Optional[str]:
"""
The event delivery schema for the event subscription.
"""
return pulumi.get(self, "event_delivery_schema")
@property
@pulumi.getter(name="expirationTimeUtc")
def expiration_time_utc(self) -> Optional[str]:
"""
Expiration time of the event subscription.
"""
return pulumi.get(self, "expiration_time_utc")
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventSubscriptionFilterResponse']:
"""
Information about the filter for the event subscription.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def labels(self) -> Optional[Sequence[str]]:
"""
List of user defined labels.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the event subscription.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="retryPolicy")
def retry_policy(self) -> Optional['outputs.RetryPolicyResponse']:
"""
The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
"""
return pulumi.get(self, "retry_policy")
@property
@pulumi.getter
def topic(self) -> str:
"""
Name of the topic of the event subscription.
"""
return pulumi.get(self, "topic")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetEventSubscriptionResult(GetEventSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventSubscriptionResult(
dead_letter_destination=self.dead_letter_destination,
destination=self.destination,
event_delivery_schema=self.event_delivery_schema,
expiration_time_utc=self.expiration_time_utc,
filter=self.filter,
id=self.id,
labels=self.labels,
name=self.name,
provisioning_state=self.provisioning_state,
retry_policy=self.retry_policy,
topic=self.topic,
type=self.type)
def get_event_subscription(event_subscription_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventSubscriptionResult:
"""
Event Subscription
:param str event_subscription_name: Name of the event subscription
:param str scope: The scope of the event subscription. The scope can be a subscription, or a resource group, or a top level resource belonging to a resource provider namespace, or an EventGrid topic. For example, use '/subscriptions/{subscriptionId}/' for a subscription, '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for a resource group, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}' for a resource, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}' for an EventGrid topic.
"""
__args__ = dict()
__args__['eventSubscriptionName'] = event_subscription_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20200101preview:getEventSubscription', __args__, opts=opts, typ=GetEventSubscriptionResult).value
return AwaitableGetEventSubscriptionResult(
dead_letter_destination=__ret__.dead_letter_destination,
destination=__ret__.destination,
event_delivery_schema=__ret__.event_delivery_schema,
expiration_time_utc=__ret__.expiration_time_utc,
filter=__ret__.filter,
id=__ret__.id,
labels=__ret__.labels,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
retry_policy=__ret__.retry_policy,
topic=__ret__.topic,
type=__ret__.type)
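# Example usage (subscription name and scope are illustrative):
# result = get_event_subscription(
#     event_subscription_name="my-subscription",
#     scope="/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}")
# print(result.provisioning_state)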
| [
"[email protected]"
] | |
b539a324c93a3ce5b5b5feedc5d1287601d63ffd | 0b4957de738dd05f964ea838016b4b811feca970 | /tests/utils/test_utils_shell.py | fdae13b81ae7f8e06716a3e3f09b9ce5f7a76e6a | [
"MIT",
"Apache-2.0"
] | permissive | bossjones/ultron8 | bdb5db72ba58b80645ae417cdf97287cfadd325d | 09d69c788110becadb9bfaa7b3d2a2046f6b5a1c | refs/heads/master | 2023-01-13T06:52:45.679582 | 2023-01-03T22:25:54 | 2023-01-03T22:25:54 | 187,934,920 | 0 | 0 | Apache-2.0 | 2023-01-03T22:25:56 | 2019-05-22T00:44:03 | Python | UTF-8 | Python | false | false | 1,008 | py | """Test shell utils"""
# pylint: disable=protected-access
import logging
import pytest
from six.moves import zip
from ultron8.utils.shell import quote_unix
logger = logging.getLogger(__name__)
@pytest.mark.utilsonly
@pytest.mark.unittest
class TestShellUtilsTestCase:
def test_quote_unix(self):
arguments = ["foo", "foo bar", "foo1 bar1", '"foo"', '"foo" "bar"', "'foo bar'"]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
""",
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
assert actual_value == expected_value.strip()
| [
"[email protected]"
] | |
7f9fb102af433872f71ba987ff70a370de785e99 | 5e79820bf510e4bd5a5be1de22246cf9e54ecf30 | /logestic_regression/norm.py | cdc913cfa8eb98770b039869b9d5b1e49abd9d34 | [] | no_license | bojunf/machine-learning-project | 08258bb7675a3896e8df32d8ea1099baae2995d4 | f4b12ce76f26408b1707ca2b8116b58042780a8a | refs/heads/master | 2021-08-14T12:28:06.153629 | 2017-11-15T17:54:31 | 2017-11-15T17:54:31 | 110,864,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | import sys
import numpy as np
ftrain = str(sys.argv[1])
ftest = str(sys.argv[2])
fval = str(sys.argv[3]) # input file names
traindata = []
with open('{0}'.format(ftrain), 'r') as f: # read training data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
traindata.append(map(int, arr))
traindata = np.array(traindata, dtype=float)  # float dtype so the in-place normalization below is not truncated to int
mean, std = [], []
nfeat = len(traindata[0])
for i in range(nfeat): # find mean and std for each features of all training data
mean.append(np.mean(traindata[:, i]))
std.append(np.std(traindata[:, i]))
testdata, valdata = [], []
normtrain, normtest, normval = [], [], []
with open('{0}'.format(ftest), 'r') as f: # read test data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
testdata.append(map(int, arr))
with open('{0}'.format(fval), 'r') as f: # read validation data
nline = 0
for line in f.readlines():
nline = nline + 1
arr = line.replace('\n', '').split(',')
valdata.append(map(int, arr))
testdata = np.array(testdata, dtype=float)  # float dtype, as for the training data
valdata = np.array(valdata, dtype=float)
for i in range(nfeat): # normalize data based on mean and std of training data
if (std[i] != 0.0):
traindata[:, i] = (traindata[:, i] - mean[i]) / float(std[i])
testdata[:, i] = (testdata[:, i] - mean[i]) / float(std[i])
valdata[:, i] = (valdata[:, i] - mean[i]) / float(std[i])
np.savetxt('norm_train.txt', traindata)
np.savetxt('norm_test.txt', testdata)
np.savetxt('norm_val.txt', valdata)
np.savetxt('mean.txt', mean)
np.savetxt('std.txt', std) # save normalized data into files
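# usage (assumed invocation): python norm.py <train_file> <test_file> <val_file>
# where each file holds one comma-separated row of integer features per line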
| [
"[email protected]"
] | |
2df3e4723de73e9f011ea8c4dbbaf3d9347995df | b83b0cd0ceeaed79afbc8203dfc38336553b324f | /Python/loops/starpattern.py | 82a8545dbc27eb7076f3703cfb3c384bd50f5b43 | [] | no_license | SBartonJr3/ClassBarton | 2576bd3423676698a61185a25835c1ed2fdeb9c6 | 514093ec6e5d8990ba3452b2ff8e2b0c76259ee6 | refs/heads/master | 2020-04-27T18:24:39.005771 | 2019-06-10T17:21:29 | 2019-06-10T17:21:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | #Stephen Barton Jr
#Python Programming, star pattern
#22 APR 2019
def main():
for i in range(1,6):
for j in range(1,i+1):
print("*", end = " ")
print()
main()
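# Expected output:
# *
# * *
# * * *
# * * * *
# * * * * *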
| [
"[email protected]"
] | |
1f1e8010afc725d867016c4c3a0daab4b420b78b | 29801a4a09e6c97061e67e21fd7600317d86bd29 | /TIY2_4.py | 22ce625692bbaadfa8eded661600954ebfdcf6d6 | [] | no_license | EthanPassinoFWCS/Chapter2Anthis | 3fe1f0b501a67202686879e7b2ffdad196e02a44 | 1f21cee7b09979d47878dc76c891ca7a667fbedf | refs/heads/main | 2023-01-04T11:21:28.922019 | 2020-11-02T20:12:26 | 2020-11-02T20:12:26 | 309,483,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | name = "John Smith"
print(name.lower())
print(name.upper())
print(name.title()) | [
"[email protected]"
] | |
a811597869c088ec4c17da0719f6b9a3e9e8a9b8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_46/83.py | 728c1c577aee018ba646a8511a4f62a6e9af6751 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | import psyco
psyco.full()
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
if args not in self.memoized:
self.memoized[args] = self.function(*args)
return self.memoized[args]
def clear(self):
self.memoized = {}
def alloc(size, default = 0): return [default] * size
def alloc2(r, c, default = 0): return [alloc(c, default)] * r
def isset(a, bit): return ((a >> bit) & 1) > 0
def dig(c): return ord(c) - 48
def abs(x):
if x<0: return -x;
return x
def area(x1, y1, x2, y2, x3, y3):
return abs((x3-x1)*(y2-y1) - (x2-x1)*(y3-y1))/2
def bisection(f, lo, hi):
"""
finds the integer x where f(x)=0.
assumes f is monotounous.
"""
while lo < hi:
mid = (lo+hi)//2
midval = f(mid)
if midval < 0:
lo = mid+1
elif midval > 0:
hi = mid
else:
return mid
return None
def minarg(f, args):
min_val = None
min_arg = None
for a in args:
temp=f(a)
if min_arg==None or temp<min_val:
min_val=temp
min_arg=a
return min_arg, min_val
#mat[i] = lowest row for the row currently at position i
def solve():
c=0
for i in range(N):
#print mat, c
#print "i=", i
if mat[i]>i:
for j in range(i+1, N):
if mat[j]<=i:
#print "replace", i, " with ", j
mat.insert(i, mat[j])
#print mat
del mat[j+1]
#mat[j]=None
c+=j-i
break
return c
from time import time
if __name__ == "__main__":
def getInts(): return map(int, input.readline().rstrip('\n').split(' '))
def getFloats(): return map(float, input.readline().rstrip('\n').split(' '))
def getMatrix(rows): return [getInts() for _ in range(rows)]
input, output = open("d:/gcj/in", "r"), open('d:/gcj/output', 'w')
start_time=time()
for case in range(1, int(input.readline()) + 1):
N, = getInts()
mat=[[int(d) for d in input.readline().rstrip('\n')] for _ in range(N)]
for i in range(N):
j=N-1
while j>0 and mat[i][j]==0:
j-=1
mat[i]=j
s="Case #%d: %d\n" % (case, solve())
print s
output.write(s)
print time()-start_time
| [
"[email protected]"
] | |
1bae4554cc3411d5a5d0020cd8da45c46a8036f4 | 27b42507a4a6f122e545db06c9629c7693e5768d | /list even num.py | 0e22377fee378316ddaa2ff7a91b3cc1bfbb1d80 | [] | no_license | pooja89299/list | e4651a24f5ff78c37d2c8f93af35e1fd85be0031 | 99244365f920145ff638392d3183e97ae4ccdd2d | refs/heads/main | 2023-05-07T10:08:21.241095 | 2021-05-20T13:03:46 | 2021-05-20T13:03:46 | 369,207,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # a=[1,2,13,15,78,9,10,19,61,51,41,4]
a = [1, 2, 13, 15, 78, 9, 10, 19, 61, 51, 41, 4]
b = []
i = 0
count = 0  # how many even numbers were found
while i < len(a):
    k = a[i]
    if k % 2 == 0:
        b.append(k)
        count = count + 1
    i = i + 1
print(b)
print(count) | [
"[email protected]"
] | |
a9b78fbdc46c4c090b2ee84da3860c8721ba674b | a60133740a2097ccd90c37b5616e611e06025d1c | /evaluate.py | bb1cb184711eeaef49b0e4e5e6e9c0c36df94c34 | [] | no_license | kubumiro/CNN-Python-Framework | 078c42835554843e8af6c2564904f4c6061e9914 | 4b08b18c244601c444671cc96ed72e3863ae323f | refs/heads/master | 2020-09-22T06:15:26.402529 | 2019-11-30T23:42:51 | 2019-11-30T23:42:51 | 225,083,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py |
def model_predict(model, X, y):
    # The stub had no body in the source; a minimal sketch assuming a
    # scikit-learn style estimator with a .predict() method.
    predictions = model.predict(X)
    accuracy = (predictions == y).mean()
    return predictions, accuracy
| [
"[email protected]"
] | |
52389b5b2bff83aa9b999bd20397ad5a96cf1b26 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/601.py | 1c4900414caa5c3d523730cdea08f4e249066ea5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!/usr/bin/env python3
from fractions import gcd
from math import log
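# Reduce n/d to lowest terms; the case is solvable only if the reduced denominator
# is a power of 2. Then repeatedly decrement n and re-reduce until n == 1;
# the answer is log2 of the remaining denominator.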
rounds = int(input())
for i in range(rounds):
n, d = input().split('/')
n = int(n)
d = int(d)
g = gcd(n,d)
n = n//g
d = d//g
if log(d,2) != round( log(d,2)):
print("Case #{}: impossible".format(i+1))
continue;
while n!=1 :
n -= 1
g = gcd(n,d)
n = n // g
d = d // g
print("Case #{}: {}".format(i+1,int(log(d,2))))
| [
"[email protected]"
] | |
bf3ea11b9c446a4883cb22a7a78fb68a8f7dc894 | b9767eeeddd3d3e6f591cc96a24d2fabd4373749 | /helper.py | bd4e5cbe961767a6bb5621eefa87765679b6b355 | [] | no_license | artika-tech/Olympics-Data-Analysis | a578c0ca0878a97607c7ff9cfc33dff43180631c | 4304d1d33404ae25b8a904456bc16beb3d0721ae | refs/heads/main | 2023-08-04T20:05:24.751663 | 2021-09-04T16:10:03 | 2021-09-04T16:10:03 | 403,099,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | import numpy as np
def fetch_medal_tally(df, year, country):
medal_df = df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'])
flag = 0
if year == 'Overall' and country == 'Overall':
temp_df = medal_df
if year == 'Overall' and country != 'Overall':
flag = 1
temp_df = medal_df[medal_df['region'] == country]
if year != 'Overall' and country == 'Overall':
temp_df = medal_df[medal_df['Year'] == int(year)]
if year != 'Overall' and country != 'Overall':
temp_df = medal_df[(medal_df['Year'] == int(year)) & (medal_df['region'] == country)]
if flag == 1:
x = temp_df.groupby('Year').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Year').reset_index()
else:
x = temp_df.groupby('region').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Gold',
ascending=False).reset_index()
x['total'] = x['Gold'] + x['Silver'] + x['Bronze']
x['Gold'] = x['Gold'].astype('int')
x['Silver'] = x['Silver'].astype('int')
x['Bronze'] = x['Bronze'].astype('int')
x['total'] = x['total'].astype('int')
return x
def medal_tally(df):
medal_tally = df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'])
medal_tally = medal_tally.groupby('region').sum()[['Gold', 'Silver', 'Bronze']].sort_values('Gold',
ascending=False).reset_index()
medal_tally['total'] = medal_tally['Gold'] + medal_tally['Silver'] + medal_tally['Bronze']
medal_tally['Gold'] = medal_tally['Gold'].astype('int')
medal_tally['Silver'] = medal_tally['Silver'].astype('int')
medal_tally['Bronze'] = medal_tally['Bronze'].astype('int')
medal_tally['total'] = medal_tally['total'].astype('int')
return medal_tally
def country_year_list(df):
years = df['Year'].unique().tolist()
years.sort()
years.insert(0, 'Overall')
country = np.unique(df['region'].dropna().values).tolist()
country.sort()
country.insert(0, 'Overall')
return years, country
def data_over_time(df, col):
nations_over_time = df.drop_duplicates(['Year', col])['Year'].value_counts().reset_index().sort_values('index')
nations_over_time.rename(columns={'index': 'Edition', 'Year': col}, inplace=True)
return nations_over_time
def most_successful(df, sport):
temp_df = df.dropna(subset=['Medal'])
if sport != 'Overall':
temp_df = temp_df[temp_df['Sport'] == sport]
x = temp_df['Name'].value_counts().reset_index().head(15).merge(df, left_on='index', right_on='Name', how='left')[
['index', 'Name_x', 'Sport', 'region']].drop_duplicates('index')
x.rename(columns={'index': 'Name', 'Name_x': 'Medals'}, inplace=True)
return x
def yearwise_medal_tally(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'], inplace=True)
new_df = temp_df[temp_df['region'] == country]
final_df = new_df.groupby('Year').count()['Medal'].reset_index()
return final_df
def country_event_heatmap(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df.drop_duplicates(subset=['Team', 'NOC', 'Games', 'Year', 'City', 'Sport', 'Event', 'Medal'], inplace=True)
new_df = temp_df[temp_df['region'] == country]
pt = new_df.pivot_table(index='Sport', columns='Year', values='Medal', aggfunc='count').fillna(0)
return pt
def most_successful_countrywise(df, country):
temp_df = df.dropna(subset=['Medal'])
temp_df = temp_df[temp_df['region'] == country]
x = temp_df['Name'].value_counts().reset_index().head(10).merge(df, left_on='index', right_on='Name', how='left')[
['index', 'Name_x', 'Sport']].drop_duplicates('index')
x.rename(columns={'index':'Name','Name_x':'Medals'},inplace=True)
return x
def weight_v_height(df, sport):
athlete_df = df.drop_duplicates(subset=['Name','region'])
athlete_df['Medal'].fillna('No Medal',inplace=True)
if sport != 'Overall':
temp_df = athlete_df[athlete_df['Sport']==sport]
return temp_df
else:
return athlete_df
def men_vs_women(df):
athlete_df = df.drop_duplicates(subset=['Name', 'region'])
men = athlete_df[athlete_df['Sex']=='M'].groupby('Year').count()['Name'].reset_index()
women = athlete_df[athlete_df['Sex'] == 'F'].groupby('Year').count()['Name'].reset_index()
final = men.merge(women,on='Year',how='left')
final.rename(columns={'Name_x':'Male','Name_y':'Female'},inplace=True)
final.fillna(0,inplace=True)
return final
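# Example usage (assumes `df` is the merged athlete_events/regions dataframe
# these helpers are written against):
# tally = fetch_medal_tally(df, year='Overall', country='Overall')
# years, countries = country_year_list(df)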
| [
"[email protected]"
] |