blob_id
string
repo_name
string
path
string
length_bytes
int64
score
float64
int_score
int64
text
string
c631c0cfb2a18645a422092a706393cdb7b1e535
alvarov0907/python-scientific
/hellotensorflow.py
5,857
3.59375
4
''' Basic Tensorflow2 walkthrough and snippet cookbook.
You can start here and add whatever you want to get functional, running code.
Run this code with the ann3 environment.
Tensorflow is fast, and allows automatic differentiation.
# OpenMP sometimes raises coredumps, try export KMP_DUPLICATE_LIB_OK=TRUE
Sources:
* Deep Learning with Tensorflow 2 and Keras, Antonio Gulli et al, 2019
* Intro to Tensorflow and Deep Learning, Dr. Michael Fairbank
'''
# %%
# Basic Tensorflow model.
import tensorflow as tf

W = tf.Variable(tf.ones(shape=(2, 2)), name="W")
b = tf.Variable(tf.zeros(shape=(2)), name="b")

@tf.function
def model(x):
    return W * x + b

# TF has lazy evaluation. This means that the code is not executed until you finally need it.
out_a = model([1, 0])
print(out_a)

# %%
a = tf.constant([[5, 6], [8, 9]])
b = tf.constant([[1, 2], [3, 4]])
c = tf.multiply(a, b)  # Hadamard product
print(c)

# %%
a = tf.constant([[5, 6], [8, 9]])
b = tf.constant([[1, 2], [3, 4]])
c = tf.matmul(a, b)
print(c)

# %%
a = tf.constant([[5, 6], [8, 9]])
b = tf.constant([[1, 2], [3, 4]])
c = tf.greater(a, b)
print(c)

# %%
a = tf.constant([[1, 2], [3, -4]], tf.float32)
c = tf.cast(a, tf.int32)
print(c)

# %%
a = tf.constant(2, tf.float32)
c = tf.add(a, a)
print(c)

# %%
a = tf.constant([2, 3, 5, 3, 3], tf.float32)
c = tf.reduce_max(a)
d = tf.argmax(a)
print(c)
print(d)

# %%
# Automatic differentiation, wonder of tensorflow
x = tf.Variable(5.0, tf.float32)
with tf.GradientTape() as g:
    # g.watch(x)
    y = tf.multiply(x, x)
dydx = g.gradient(y, [x])
print(dydx[0].numpy())

# %%
# Automatic differentiation, wonder of tensorflow
x = tf.Variable(4.0, tf.float32)
y = tf.Variable(2.0, tf.float32)
with tf.GradientTape(persistent=True) as g:
    f = tf.pow(x, tf.constant(2.0, tf.float32)) * tf.constant(3.0, tf.float32) + y
dydx = g.gradient(f, [x, y])
print(f)
print(dydx[0].numpy(), dydx[1].numpy())

# %%
print("Let's move forward with the classic MNIST prediction.")
accuracies = []

import numpy as np
from tensorflow import keras

epochs = 30
batch_size = 128
verbose = 1
nb_classes = 10
n_hidden = 128
validation_split = 0.2

mnist = keras.datasets.mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()

# 60k rows, 28x28
reshaped = 784
X_train = X_train.reshape(60000, reshaped)
X_test = X_test.reshape(10000, reshaped)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], ' train samples')
print(X_test.shape[0], ' test samples')

Y_train = tf.keras.utils.to_categorical(Y_train, nb_classes)
Y_test = tf.keras.utils.to_categorical(Y_test, nb_classes)

# %%
# Sequential model
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(nb_classes, input_shape=(reshaped,),
                             name='dense_layer', activation='softmax'))

# Objective function
# MSE, binary_crossentropy, categorical_crossentropy, and so on
model.compile(optimizer='SGD',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=batch_size, epochs=epochs,
                    verbose=verbose, validation_split=validation_split)

import matplotlib.pyplot as plt

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()

test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
accuracies.append(test_acc)

# %%
# Change the network architecture and rebuild the model.
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(n_hidden, input_shape=(reshaped,),
                             name='dense_layer', activation='relu'))
model.add(keras.layers.Dense(n_hidden, name='dense_layer_2', activation='relu'))
model.add(keras.layers.Dense(nb_classes, name='dense_layer_3', activation='softmax'))
model.summary()

model.compile(optimizer='SGD',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=batch_size, epochs=epochs,
                    verbose=verbose, validation_split=validation_split)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()

test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
accuracies.append(test_acc)

# %%
# Let's use dropout to improve performance
dropout = 0.3

model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(n_hidden, input_shape=(reshaped,),
                             name='dense_layer', activation='relu'))
model.add(keras.layers.Dropout(dropout))
model.add(keras.layers.Dense(n_hidden, name='dense_layer_2', activation='relu'))
model.add(keras.layers.Dense(nb_classes, name='dense_layer_3', activation='softmax'))
model.summary()

model.compile(optimizer='SGD',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=batch_size, epochs=epochs,
                    verbose=verbose, validation_split=validation_split)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()

test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
accuracies.append(test_acc)
print(accuracies)

# %%
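A short hedged follow-up sketch (my addition, not part of the original file) showing how the last trained Keras model above could be used for a single prediction; model, X_test, and Y_test are assumed to be the objects defined in the walkthrough.

import numpy as np

probs = model.predict(X_test[:1])             # shape (1, 10): softmax scores for one test digit
predicted = int(np.argmax(probs, axis=1)[0])  # most likely class
actual = int(np.argmax(Y_test[0]))            # one-hot label back to a digit
print("predicted:", predicted, "actual:", actual)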
e938bb5eb9e742f8a97b04003c7806d476902162
RennanFelipe7/algoritmos20192
/Lista 3/lista03ex17.py
623
3.921875
4
QuantidadeDeTurmas = int(input("Qual a quantidade de turmas? "))
cont = 0
SomaDeAlunos = 0
while cont < QuantidadeDeTurmas:
    QuantidadeDeAlunos = int(input("Qual a quantidade de alunos dessa turma? "))
    while QuantidadeDeAlunos > 40:
        print("O maximo de alunos por turma é 40, por favor digite novamente a quantidade de alunos ")
        QuantidadeDeAlunos = int(input("Qual a quantidade de alunos dessa turma? "))
    SomaDeAlunos = SomaDeAlunos + QuantidadeDeAlunos
    cont = cont + 1
Media = int(SomaDeAlunos / QuantidadeDeTurmas)
print("Seu numero medio de alunos por turmas foi de", Media, "alunos")
447fe79bdc451ce7a0810187ca9d7319f10f61de
canwe/python3-course-advanced
/29_multiprocessing/multiprocessing_demo.py
1,765
4.09375
4
""" The multiprocessing module allows to spawn processes the same way the threading module allows to spawn threads. The advantage on multiprocessing module is that we can avoid the GIL (Global Interpreter Lock) and use in a real way multiple processors on a machine. """ import os from multiprocessing import Process, current_process def doubler(number): """ A doubling function that can be used by a process """ result = number * 2 # Using process id # proc = os.getpid() # we get the current process id by using the python module os # print ('{} doubled to {} by process id: {}'.format(number, result, proc)) # Using process name proc_name = current_process().name # grabs the name of the process calling the function print ('{} doubled to {} by process: {}'.format(number, result, proc_name)) if __name__ == '__main__': numbers = [5, 10, 15, 20, 25] procs = [] for index, number in enumerate(numbers): # Using process id proc = Process(target=doubler, args=(number,)) # creates a process for each number in the list procs.append(proc) proc.start() # Assigning process name proc = Process(target=doubler, name='Test', args=(2,)) procs.append(proc) proc.start() for proc in procs: proc.join() # tells Python to wait for the process to terminate # Using process id # 5 doubled to 10 by process id: 9544 # 10 doubled to 20 by process id: 9545 # 15 doubled to 30 by process id: 9546 # 20 doubled to 40 by process id: 9547 # 25 doubled to 50 by process id: 9548 # Using process name # 5 doubled to 10 by process: Process-1 # 10 doubled to 20 by process: Process-2 # 15 doubled to 30 by process: Process-3 # 20 doubled to 40 by process: Process-4 # 25 doubled to 50 by process: Process-5 # 2 doubled to 4 by process: Test
26b280a437aafe8ef6cf66d6b649180869f7df89
lastosellie/algorithm
/Programmers/12914.py
440
4
4
'''
Long Jump (멀리 뛰기)
https://programmers.co.kr/learn/courses/30/lessons/12914
Count the ways to cover a distance n with jumps of 1 or 2.
'''
def dfs(a, n):
    s = sum(a)
    if s == n:
        print(a)
        return 1
    elif s > n:
        return 0
    a.append(1)
    sum1 = dfs(a, n)
    a.pop()
    a.append(2)
    sum2 = dfs(a, n)
    a.pop()
    return sum1 + sum2

def solution(n):
    answer = dfs([], n)
    return answer

print(solution(4))
print(solution(3))
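The DFS above enumerates every jump sequence, which grows exponentially with n. A hedged sketch (my addition) of the usual iterative DP for the same count; the modulus 1234567 is assumed from the standard Programmers problem statement.

def solution_dp(n):
    # dp follows a Fibonacci recurrence: ways(n) = ways(n-1) + ways(n-2)
    a, b = 1, 1                      # ways to cover distance 0 and 1
    for _ in range(n - 1):
        a, b = b, (a + b) % 1234567
    return b

print(solution_dp(4))  # 5
print(solution_dp(3))  # 3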
27dbe29c01c2ded00473f8a86e79a574bd6583f3
SongJXin/ARTS
/leetcode/1-two-sum/two-sum.py
907
3.671875
4
# Runtime: 1160 ms, faster than 31.89% of Python3 online submissions for Two Sum.
# Memory Usage: 13.8 MB, less than 27.63% of Python3 online submissions for Two Sum.
class Solution1:
    def twoSum(self, nums, target):
        for num in nums:
            if target - num in nums[nums.index(num)-len(nums)+1:]:
                return [nums.index(num),
                        nums[nums.index(num)-len(nums)+1:].index(target - num) + nums.index(num) + 1]


# Runtime: 7816 ms, faster than 5.01% of Python3 online submissions for Two Sum.
# Memory Usage: 13.9 MB, less than 23.82% of Python3 online submissions for Two Sum.
class Solution2:
    def twoSum(self, nums, target):
        i = 0
        while i < len(nums):
            difference = target - nums[i]
            j = i + 1
            while j < len(nums):
                if nums[j] == difference:
                    return [i, j]
                j = j + 1
            i = i + 1
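Both submissions above are quadratic. A hedged sketch of the standard one-pass dictionary approach (my addition, same twoSum signature assumed), which runs in O(n).

class Solution3:
    def twoSum(self, nums, target):
        seen = {}                          # value -> index where it was seen
        for i, num in enumerate(nums):
            if target - num in seen:       # the complement was already visited
                return [seen[target - num], i]
            seen[num] = i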
307a927f5994b65a5d1a4587a845236ea81af730
edu-athensoft/stem1401python_student
/sj190912_python2/py1003/review_if3.py
349
3.828125
4
# flow control - if statement
level = int(input("Enter your current level: "))

# 0 - 19.99, 20 - 49.99, 50 +
if level >= 50:
    print("buy my sword of lvl 50")
    print("collecting mineral")
elif level >= 20:
    print("buy a stick of lvl 20")
    print("farming")
else:
    print("My current level is {}".format(level))
    print("farming")
7db633ba0f68fe8c10af5c45655b1924d4d36723
MarianaDrozd/cursor_my_homeworks
/HW8_Advanced_classes.py
4,621
3.953125
4
from __future__ import annotations
from typing import Dict, Union
from abc import ABC, abstractmethod
import uuid
import random
import time


class Animal(ABC):
    def __init__(self, power: int, speed: int):
        self.id = None
        self.max_power = power
        self.current_power = power
        self.speed = speed

    @abstractmethod
    def eat(self, forest: Forest):
        raise NotImplementedError


class Predator(Animal):
    def eat(self, forest: Forest):
        prey = random.choice(list(forest.animals.values()))
        if self.current_power == 0:
            return
        elif prey.current_power == 0:
            return
        else:
            if self.id == prey.id:
                print(f">>>>>The Predator {self.id} is hunting...<<<<<")
                self.current_power = round(self.current_power * 0.5)
                print(f"The Predator {self.id} was left without a dinner and its power is {self.current_power}!")
                if self.current_power == 0:
                    print(f"The Predator {self.id} is dead by starving!")
            else:
                print(f">>>>>The Predator {self.id} is hunting the Prey {prey.id}...<<<<<")
                if self.speed > prey.speed and self.current_power > prey.current_power:
                    self.current_power = min(round(self.current_power + (self.max_power * 0.5)), self.max_power)
                    print(f"The Predator {self.id} kills the Prey {prey.id}.",
                          f"The Predator's power is {self.current_power}")
                    prey.current_power = 0
                else:
                    self.current_power = max(round(self.current_power - self.max_power * 0.3), 0)
                    if self.current_power <= 0:
                        print(f"The Predator {self.id} loses the fight and dies.")
                    else:
                        print(f"The Predator's {self.id} power became {self.current_power}.")
                    prey.current_power = max(round(prey.current_power - prey.max_power * 0.3), 0)
                    print(f"The Prey's {prey.__class__.__name__} {prey.id} power became {prey.current_power}.")

    def __repr__(self):
        return f"{self.__class__.__name__} {self.id}"


class Herbivorous(Animal):
    def eat(self, forest: Forest):
        if self.current_power == 0:
            return
        else:
            self.current_power = min(round(self.current_power + (self.max_power * 0.5)), self.max_power)
            print(f"{self.__class__.__name__} {self.id} is eating. Its power became {self.current_power}.")

    def __repr__(self):
        return f"{self.__class__.__name__} {self.id}"


AnyAnimal = Union[Herbivorous, Predator]


class Forest:
    def __init__(self):
        self.animals: Dict[str, AnyAnimal] = dict()
        self.i = 0

    def __iter__(self):
        self.i = 0
        self.animal_i = list(self.animals.values())
        return self

    def __next__(self):
        self.i += 1
        if self.i <= len(self.animal_i):
            return self.animal_i[self.i - 1]
        else:
            raise StopIteration

    def add_animal(self, animal: AnyAnimal):
        print(f"=====Added new animal {animal.__class__.__name__} {animal.id}, power:{animal.current_power}, "
              f"speed: {animal.speed}=====")
        self.animals.update({animal.id: animal})

    def remove_animal(self, animal: AnyAnimal):
        if len(self.animals) == 0:
            return
        print(f"=====Removing dead animal {animal.__class__.__name__} {animal.id}...=====")
        self.animals.pop(animal.id)

    def any_predator_left(self):
        return not all(isinstance(animal, Herbivorous) for animal in self.animals.values())


def animal_generator():
    while True:
        anim = random.choice([Predator(random.randrange(25, 100, 1), random.randrange(25, 100, 1)),
                              Herbivorous(random.randrange(25, 100, 1), random.randrange(25, 100, 1))])
        anim.id = uuid.uuid4()
        yield anim


if __name__ == "__main__":
    n = 0
    nature = animal_generator()
    forest = Forest()
    for i in range(10):
        animal = next(nature)
        forest.add_animal(animal)
    while True:
        print(f"...............{n}...............")
        n += 1
        if not forest.any_predator_left():
            break
        for animal in forest:
            animal.eat(forest=forest)
            if animal.current_power == 0:
                forest.remove_animal(animal)
        time.sleep(1)
    print(f"These animals survived in the forest: {list(forest.animals)}.")
    print("Game over")
bf44e5f9f0985059c574875d9f303d77210338f5
Erivaldojelson/Calculadora
/day6.py
440
3.84375
4
x = int(1)        # x will be 1
y = int(2.8)      # y will be 2
z = int("3")      # z will be 3
w = float("4.2")  # w will be 4.2
print(x)
print(y)
print(z)
print(w)

x = float(1)      # x will be 1.0
y = float(2.8)    # y will be 2.8
z = float("3")    # z will be 3.0
w = float("4.2")  # w will be 4.2
print(x)
print(y)
print(z)
print(w)

x = str("sl")     # x will be 'sl'
y = str(2)        # y will be '2'
z = str(3.0)      # z will be '3.0'
print(x)
print(y)
print(z)
99defb0a781227421b12b4a315bba50f843d6929
Mosiv/Python-lissons
/game.py
596
3.875
4
'''Game: Guess the number'''
import random

random_number = random.randint(1, 50)
attempt_counter = 0
print('Игра, угадай число от 1 до 50')
while attempt_counter < 6:
    number = int(input('Введите число: '))
    attempt_counter += 1
    if number == random_number:
        print('Угадал')
        break
    if number > random_number:
        print('Меньше')
    if number < random_number:
        print('Больше')
    if number != random_number and attempt_counter == 6:
        print(f'Ты проиграл число было: {random_number}')
f79ae4edfa85555f6e93ff8a792160865278cdb9
carolynfischer/various_python_scripts
/is_unique.py
357
4
4
""" Determine if all characters in the sting are unique """ def is_unique(s): split_string = [] for i in s: split_string.append(i) unique_set = set(split_string) if len(s) == len(unique_set): return True else: return False if __name__ == "__main__": print is_unique("kalapala") print is_unique("aoeui")
defcc1face872ed1e02adcd9d8fedd5e9a63595d
nareshenoy/base
/src/script/project_euler/47.py
1,443
3.625
4
import math


def get_num_factors(n):
    factors = set()
    sq = n // 2 + 1
    for i in range(2, sq + 1):
        if n == 1:
            break
        while n % i == 0:
            factors.add(i)
            n = n // i
            # print(i)
    return len(factors)


def main():
    n_1 = get_num_factors(1)
    n_2 = get_num_factors(2)
    n_3 = get_num_factors(3)
    n_4 = get_num_factors(4)
    n = 4
    while (1):
        if n_1 == 4 and n_2 == 4 and n_3 == 4 and n_4 == 4:
            print(n - 3)
            exit()
        elif n_4 != 4:
            n = n + 4
            while get_num_factors(n) != 4:
                n = n + 4
            n_1 = get_num_factors(n - 3)
            n_2 = get_num_factors(n - 2)
            n_3 = get_num_factors(n - 1)
            n_4 = 4
        else:
            n = n + 1
            n_1 = n_2
            n_2 = n_3
            n_3 = n_4
            n_4 = get_num_factors(n)
            if n_4 != 4:
                n = n + 4
                while get_num_factors(n) != 4:
                    n = n + 4
                n_1 = get_num_factors(n - 3)
                n_2 = get_num_factors(n - 2)
                n_3 = get_num_factors(n - 1)
                n_4 = 4

    # print(get_num_factors(553))
    # print(get_num_factors(4))
    # print(get_num_factors(13))
    # print(get_num_factors(238202))


if __name__ == "__main__":
    main()
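For contrast, a hedged brute-force sketch (my addition) of the same Project Euler 47 search that simply scans consecutive integers with the get_num_factors helper above; it is slower but easier to follow than the stride-by-4 bookkeeping.

def first_consecutive_run(distinct_factors=4):
    # Scan upward until `distinct_factors` consecutive integers each have
    # exactly `distinct_factors` distinct prime factors.
    n = 2
    run = 0
    while True:
        if get_num_factors(n) == distinct_factors:
            run += 1
            if run == distinct_factors:
                return n - distinct_factors + 1
        else:
            run = 0
        n += 1

# For four factors this should eventually print 134043, the published Euler 47
# answer, though this naive scan takes a while.
print(first_consecutive_run())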
bc741497e4a8d4f012b7e11550ad1ed5a3fac44f
anlaganlag/Funcs
/有序流.py
1,669
3.625
4
from typing import List


class OrderedStream:
    def __init__(self, n: int):
        self.pointer = 1
        self.stream = (n + 1) * [""]

    def insert(self, id: int, value: str) -> List[str]:
        self.stream[id] = value
        # ans starts as [] on every insert, like doing the cleanup work first each time...
        ans = []
        # if id matches the pointer we have a hit; if it does not, we just return []
        if self.pointer == id:
            # use i as a working copy of id...
            i = id
            # starting from the matched id, i advances up to the first empty slot (at most to n)...
            while i < len(self.stream):
                # stop when the slot is empty... i.e. i is now the first gap
                if not self.stream[i]:
                    break
                # try one step at a time
                i += 1
            # i has scouted ahead like a mine-clearing robot... hand its position to the pointer...
            self.pointer = i
            # values from the starting id up to (but not including) i
            ans = self.stream[id:i]
        return ans


"""
1656. Design an Ordered Stream

There are n (id, value) pairs, where id is an integer between 1 and n and value is a string.
No two pairs share the same id.

Design a stream that receives the n (id, value) pairs in arbitrary order and, across multiple
calls, returns some values in increasing order of id.

Implement the OrderedStream class:
OrderedStream(int n) constructs a stream that can receive n values and sets the current pointer ptr to 1.
String[] insert(int id, String value) stores the new (id, value) pair in the stream. After storing it:
If the stream holds a pair with id = ptr, find the longest contiguous increasing sequence of ids
starting at id = ptr and return the list of values associated with those ids, in order. Then update
ptr to the last of those ids plus 1.
Otherwise, return an empty list.
"""
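A short hedged usage sketch (my addition), following the usual example flow from the LeetCode 1656 statement quoted above.

stream = OrderedStream(5)
print(stream.insert(3, "ccccc"))  # []                     ptr is still 1
print(stream.insert(1, "aaaaa"))  # ['aaaaa']              ptr moves to 2
print(stream.insert(2, "bbbbb"))  # ['bbbbb', 'ccccc']     ptr moves to 4
print(stream.insert(5, "eeeee"))  # []                     slot 4 is still empty
print(stream.insert(4, "ddddd"))  # ['ddddd', 'eeeee']     ptr moves past n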
aa04005069b22096f9676a94144309831d044972
iMikeT/MathProject2
/Postcode Task/Postcode.py
4,934
4.0625
4
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv(r'F:\Documents\University\Math Project\Project Year 4\Machine Learning\Job Example\Task Sheet\Postcode_Estimates_Table_1.csv')  # Create dataframe
print('Original Data')
print()
print(df.head())
print()

clist = list(df['Postcode'])
clist = [i[:3] for i in clist]  # Reduce Postcode to the first 3 characters
del df['Postcode']
df.insert(0, 'Postcode', clist)  # Replace old Postcode column with new column
print('Reduced Postcode Data')
print()
print(df.head())
print(df.shape)
print()

df = df.groupby('Postcode')['Total', 'Males', 'Females', 'Occupied_Households'].sum().reset_index()  # Group together rows with the same postcode
print('Grouped Postcode Data')
print()
print(df.head())
print(df.shape)
print()

max = len(df)
n = 20000  # This number is set so that we group together all the postcodes where the total is less than n
multiplier = 2.5  # This number controls the max value that Total Addition can sum up to (1 for 20,000 gives the issue in the report, 2.5 for 50,000 solves it)
dfSmall = df[df['Total'] <= n].reset_index(drop=True)  # Create new dataframe with all the postcodes where Total <= n
df = df[df['Total'] > n].reset_index(drop=True)  # Overwrite original dataframe to now only consist of postcodes with more than 20,000 residents
print('Grouped Data where Total <', n)
print()
print(dfSmall.head())
print(dfSmall.shape)
min = len(dfSmall)
print()
print('The percentage of postcodes less than', n, 'is: {:.1%}'.format((min/max)))
print()

dfSmall = dfSmall.sort_values(by=['Total'])  # Set the dataframe to ascend from the lowest value of Total
dfSmall = dfSmall.reset_index()
dfSmall['Total Addition'] = dfSmall['Total'].cumsum()  # Create new column that is the running sum of the column Total
print('Ordered Grouped Data')
print(dfSmall.head())
print()

cond = dfSmall[dfSmall['Total Addition'] <= n*multiplier]  # The first rows where Total Addition <= n*multiplier
index = dfSmall.index.get_loc(cond.iloc[-1].name) + 1  # Capture the highest index and add 1, which is the total number of rows
print('The first', index, 'rows where Total Addition <', n*multiplier)
print()
print(cond)
print()

l = 5  # Total number of columns to be created
lists = [[] for _ in range(l)]
t = True
while t:
    lists[0] += [dfSmall[0:index]['Postcode'].sum()]  # This is the main piece of code that calculates the groups of postcodes to be re-labeled
    lists[1] += [dfSmall[0:index]['Total'].sum()]     # to the new codes. It sums all columns for the total number of rows less than cond and
    lists[2] += [dfSmall[0:index]['Males'].sum()]     # stores the values in individual lists,
    lists[3] += [dfSmall[0:index]['Females'].sum()]
    lists[4] += [dfSmall[0:index]['Occupied_Households'].sum()]
    dfSmall = dfSmall.iloc[index:]  # removes these rows and resets the index values
    dfSmall = dfSmall.reset_index(drop=True)
    del dfSmall['Total Addition']  # resets the Total Addition column and repeats the process until all rows have been accounted for
    dfSmall.insert(6, 'Total Addition', dfSmall['Total'].cumsum())
    cond = dfSmall[dfSmall['Total Addition'] <= n*multiplier]
    if len(dfSmall.index) == 0:
        break
    # print(cond)
    index = dfSmall.index.get_loc(cond.iloc[-1].name) + 1

frames = list(zip(lists[0], lists[1], lists[2], lists[3], lists[4]))  # Creates the new dataframe using all the lists made in the loop
frames = pd.DataFrame(frames, columns=['Postcode', 'Total', 'Males', 'Females', 'Occupied_Households'])

list = frames['Postcode']  # Stores values of the groups of original postcodes and then deletes that column to replace with the new codes 001, 002, etc
del frames['Postcode']
code = [str(x).zfill(3) for x in range(len(frames))]
frames.insert(0, 'Postcode', code)
frames.insert(5, 'Original Postcodes', list)  # then inserts the groups of original postcodes as the final column
print('Sorted Total Data for Frames Dataframe')
print()
print(frames.sort_values(by=['Total']).head())
print()

pd.set_option('display.max_columns', None)
df['Original Postcodes'] = ['' for _ in range(0, len(df))]  # To prevent NaN values coming up in the Original Postcodes column
combine = [frames, df]
combine = pd.concat(combine, sort=False)  # Combine the new code groups and the rest of the postcodes greater than n into one dataframe.
print('The First 5 Rows of the Final Dataframe')
print()
print(combine.head())  # View the final results
print()
print('The Last 5 Rows of the Final Dataframe')
print(combine.tail())

# plt.bar(combine['Postcode'], combine['Total'])  # To view the bar charts of the final dataframe but takes a lot of processing power
# plt.xticks(rotation='vertical')
# plt.show()
7091eb0e3645669c18d0080c2ac9445d6423916d
ElvinKim/201803_ita_python
/20180325_2_practice_1.py
357
3.875
4
def average(a, b):
    return (a + b) / 2


print(average(10, 20))


def average_lst(lst):
    total = 0
    cnt = 0
    for num in lst:
        total += num
        cnt += 1
    return total / cnt


a = [1, 2, 3, 4, 5]
print(average_lst(a))
print(average_lst((1, 2, 3)))


def average_lst_v2(lst):
    return sum(lst) / len(lst)
966a9200a9cff8f58c7bf341d59cbbcd6a7be814
liyanhang/liyh-footmark
/python-footmark/code/if.py
283
3.828125
4
#!/usr/bin/python
# Filename: if.py

number = 23
guess = int(raw_input('Enter an integer:'))
if guess == number:
    print 'woaini'
elif guess < number:
    print 'niaiwo'
    # do whatever you want
else:
    print 'no, it is ugly'
    print 'DDDD'

# jieshaoyixia
if False:
    print 'haobuhao'
534f77ddf5d2706619925527a05cc48e28fd45fe
FelicianoSachilombo/psr_21-22
/Parte03/T/Ex2/class_example.py
653
3.890625
4
#!/usr/bin/python3
import colorama


class Person:
    def __init__(self, name, address, phone):
        self.name = name
        self.address = address
        self.phone = phone

    def __str__(self):
        return 'Name: ' + self.name + ' address: ' + self.address + ' phone: ' + str(self.phone)


def main():
    jose = Person('Jose', 'Aveiro', 93)
    miguel = Person('Miguel', 'Ilhavo', 91)
    print(jose)

    miguel.name += jose.name
    print(miguel)

    x = miguel.name + jose.name
    print(x)

    # lst_persons = [jose, miguel]
    #
    # for person in lst_persons:
    #     print(person.name)


if __name__ == '__main__':
    main()
2ad59aa36eb769291e2670902af4653be5e6352a
vladvlad23/UBBComputerScienceBachelor
/FundamentalsOfProgramming/Assignment 03-04/complexOperations.py
1,218
4.0625
4
def getReal(complexNumber):
    return complexNumber.real


def getImaginary(complexNumber):
    return complexNumber.imag


def turnStringToComplexNumber(number):
    '''
    Function will receive a string which will be turned into a complex number.
    The string is guaranteed to be a complex number.
    '''
    # a new string must be formed to be able to create the complex number with the complex() function
    # this means copying everything except the last character and appending 'j' to it if that character is 'i'
    if 'i' in number:
        newStringNumber = number[0:len(number) - 1] + 'j'
    else:
        newStringNumber = number
    complexNumber = complex(newStringNumber)
    return complexNumber


def isComplexNumber(number):
    '''
    Function will test if a number is a complex number by trying to call turnStringToComplexNumber;
    it will return True if that succeeded and False otherwise.
    '''
    try:
        number = turnStringToComplexNumber(number)
    except ValueError:
        return False
    return True


def isReal(complexNumber):
    return getImaginary(complexNumber) == 0


def modulo(complexNumber):
    '''
    Function returns the modulus of a complexNumber.
    '''
    return abs(complexNumber)
4f4994ff203923341ba6eac13727f4a4b2acfd03
green-fox-academy/florimaros
/week-3/friday/gyak/gyak21b.py
213
3.984375
4
u = 13
# if u is between 10 and 20 print Sweet, if less than 10 print More, if more than 20 print Less.

out = ""
if u > 10 and u <= 20:
    out = "Sweet"
elif u <= 10:
    out = "More"
else:
    out = "Less"
print(out)
ecb2cb457a26bbfa0804fbb689345585aaf76053
DeisherJohn/DailyCodingSolutions
/PythonSolutions/p021.py
3,096
3.796875
4
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Program: Room scheduler
# Daily Problem #: 21
# Author: John Deisher
# Date Started: 5/9/2019
# Date Finished: 5/9/2019
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

"""
This problem was asked by Snapchat.

Given an array of time intervals (start, end) for classroom lectures (possibly overlapping),
find the minimum number of rooms required.

For example, given [(30, 75), (0, 50), (60, 150)], you should return 2.
"""

from random import randint, seed
from time import time

seed(time())


def times_overlap(timeA, timeB):
    if timeA[0] < timeB[0] and timeA[1] > timeB[0]:
        return True
    elif timeA[0] < timeB[1] and timeA[1] > timeB[1]:
        return True
    else:
        return False
    pass


def time_overlaps_in_room(room, newTime):
    for time in room:
        if times_overlap(time, newTime):
            return True
    return False
    pass


def schedule_rooms(list_of_times):
    rooms = list(list())
    for time in list_of_times:
        added = False
        for room in rooms:
            if not time_overlaps_in_room(room, time):
                room.append(time)
                added = True
                break
        else:
            newRoom = [time]
            rooms.append(newRoom)
    return len(rooms)
    pass


def make_room_schedule(num_of_times=10):
    times = list()
    for _ in range(num_of_times):
        a = randint(0, 300)
        b = randint(a, a + 100)
        times.append((min(a, b), max(a, b)))
    return times
    pass


def main():
    testSchedule = make_room_schedule(100)
    print("Times a rooms is needed (size==100): ")
    print(testSchedule)
    print("\nNumber of rooms required: " + str(schedule_rooms(testSchedule)))
    pass


if __name__ == '__main__':
    main()


"""
OUTPUT:

Times a rooms is needed (size==100): 
[(174, 263), (87, 151), (105, 105), (185, 277), (65, 76), (162, 242), (99, 160), (5, 64), (6, 63), (124, 150), (102, 200), (87, 143), (145, 208), (136, 153), (24, 98), (153, 229), (61, 70), (67, 126), (36, 127), (195, 290), (43, 125), (165, 230), (23, 95), (285, 290), (116, 178), (154, 211), (235, 288), (87, 141), (76, 148), (147, 200), (261, 341), (54, 145), (50, 58), (284, 345), (163, 187), (64, 90), (130, 183), (234, 331), (221, 288), (68, 72), (200, 286), (201, 268), (49, 74), (237, 326), (27, 49), (87, 138), (185, 283), (123, 139), (127, 129), (39, 69), (35, 109), (212, 311), (24, 79), (37, 56), (161, 191), (244, 284), (126, 172), (253, 255), (181, 214), (220, 283), (168, 223), (36, 93), (193, 259), (179, 212), (286, 371), (123, 154), (39, 133), (87, 127), (197, 242), (52, 68), (193, 284), (71, 111), (65, 94), (167, 211), (293, 391), (147, 201), (210, 286), (230, 279), (223, 226), (114, 116), (106, 200), (108, 137), (299, 300), (129, 226), (277, 326), (272, 341), (51, 77), (196, 262), (206, 215), (239, 328), (158, 173), (105, 193), (36, 86), (251, 280), (1, 73), (14, 36), (288, 292), (277, 342), (119, 148), (281, 311)]

Number of rooms required: 18
[Finished in 0.1s]
"""
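The first-fit simulation above answers the question indirectly by packing lectures into rooms. A hedged sketch (my addition, not part of the original solution) of the common sweep/min-heap approach to the same problem, which computes the minimum number of rooms directly.

import heapq

def min_rooms(intervals):
    # Process lectures by start time; the heap holds end times of occupied rooms.
    ends = []
    for start, end in sorted(intervals):
        if ends and ends[0] <= start:      # the earliest-ending room is free again
            heapq.heapreplace(ends, end)   # reuse it for this lecture
        else:
            heapq.heappush(ends, end)      # every room is busy: open a new one
    return len(ends)

print(min_rooms([(30, 75), (0, 50), (60, 150)]))  # 2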
cc3cd55c42307d9fbffa1472432e2ce475dd9e8a
doct0rX/PythonMIT1x
/week2/ProblemSet/pset2.py
547
3.765625
4
""" Author: Mustafa Jamal Uncomment the code out to run it on your machine. """ # balance = 3329 changableBalance = balance # annualInterestRate = 0.2 monthlyInterestRate = annualInterestRate / 12.0 lowestPayment = 0 while changableBalance > 0: changableBalance = balance lowestPayment += 10 for i in range(12): changableBalance = changableBalance - lowestPayment changableBalance = changableBalance + (changableBalance * monthlyInterestRate) # print(changableBalance) print('Lowest Payment ' + str(lowestPayment))
21d1983aaa1599f59fefa2de8a4a38f27d884c4d
wilsonmakchina/small_scripts
/text_processing/regEx_usage.py
319
3.53125
4
# substitute ',' for '/' for my file
import re
import sys

file_name = sys.argv[1]
file = open(file_name)                        # read file
file2 = open(file_name + '_result.txt', 'w')  # write file

for line in file.readlines():
    line = line.rstrip()
    line = re.sub('/', ',', line, count=0)
    print(line, file=file2)

file.close()
file2.close()
071cc0fe9debfbc187f6939e8e82657ec648433e
Horeb294/hub
/mypython.py
161
3.515625
4
myname ="Esther Nyamekye" mynumber = 100 mybool = True def full_name(first, last): return first + " " + last full_name("Esther", "Nyamekye") #comments in python
d65ba7c053d31a081a2bb0f1f7ce2abda1b091b5
AK-1121/code_extraction
/python/python_24738.py
140
3.71875
4
# Delete list of elements from a list
listBig = [1, 2, 3, 4, 5, 6, 7, 8, 4, 7]
listSmall = [4, 7]
for item in listSmall:
    listBig.remove(item)
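Note that list.remove() deletes only the first matching element, so a 4 and a 7 still remain in listBig after the loop above. A hedged sketch (my addition) that drops every occurrence instead.

listBig = [1, 2, 3, 4, 5, 6, 7, 8, 4, 7]
listSmall = [4, 7]
# Keep only the items that are not in listSmall.
listBig = [item for item in listBig if item not in listSmall]
print(listBig)  # [1, 2, 3, 5, 6, 8]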
16a3e2766d05baa04c4dfb6cfd4475a6bbd26d35
URTK/Lab-2-2
/Main.py
268
3.890625
4
x, y = float(input('x = ')), float(input('y = '))
if (x >= 1 and x <= 7 and y >= 1 and y <= 7):  # Is the point inside the square?
    if (x + y >= 5 and x + y <= 11 and x - y >= -3 and x - y <= 3):  # Is it inside the rhombus?
        print('True')
    else:
        print('False')
else:
    print('False')
2e99a52dd8789b22ed35ddb288e354b1b4f2f6ff
ZanataMahatma/Python-Exercicios
/ex112/utilidadescev/moeda/__init__.py
1,826
4.3125
4
'''Python Exercise 112: inside the utilidadesCeV package created in challenge 111 there is a
module called dado. Create a function called leiaDinheiro() that works like the input() function,
but with data validation so that it only accepts monetary values.'''


def aumentar(preço=0, taxa=0, formato=False):
    """
    -> Calculates the increase on a given price, returning the result with or without formatting.
    :param preço: the price to be adjusted
    :param taxa: the percentage of the increase
    :param formato: should the output be formatted or not?
    :return: the adjusted value, with or without formatting
    """
    res = preço + (preço * taxa / 100)
    return res if formato is False else moeda(res)


def diminuir(preço=0, taxa=0, formato=False):
    res = preço - (preço * taxa / 100)
    return res if formato is False else moeda(res)


def dobro(preço=0, formato=False):
    res = preço * 2
    return res if not formato else moeda(res)


def metade(preço=0, formato=False):
    res = preço / 2
    return res if not formato else moeda(res)


def moeda(preço=0, moeda='R$'):
    """
    -> Formats a price as currency.
    :param preço: the value to format; the decimal point is replaced by a comma
    :param moeda: the currency symbol (Brazilian real by default)
    :return: the formatted string, e.g. moeda.moeda()
    """
    return f'{moeda}{preço:.2f}'.replace('.', ',')


def resumo(preço=0, taxaa=10, taxar=5):
    print('-' * 30)
    print('RESUMO DO VALOR'.center(30))
    print('-' * 30)
    print(f'Preço analisado:\t{moeda(preço)}')
    print(f'Dobro do preço:\t\t{dobro(preço, True)}')
    print(f'Metade do preço:\t{metade(preço, True)}')
    print(f'Com {taxaa}% de aumento:\t{aumentar(preço, taxaa, True)}')
    print(f'{taxar}% de redução:\t\t{diminuir(preço, taxar, True)}')
    print('-' * 30)
0133774a0c3ec40cfe359f73aa1b0e0fa690b7eb
dcarlyle/udacity_dlnd_image_classification_p2
/dlnd_image_classification.py
40,833
3.703125
4
# coding: utf-8 # # Image Classification # In this project, you'll classify images from the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images. # ## Get the Data # Run the following cell to download the [CIFAR-10 dataset for python](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz). # In[1]: """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('cifar-10-python.tar.gz'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 'cifar-10-python.tar.gz', pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open('cifar-10-python.tar.gz') as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path) # In[2]: import sys print(sys.version) print(sys.version_info) print("\nPython ver: {}".format( sys.version_info[0])) help("modules") # ## Explore the Data # The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc.. Each batch contains the labels and images that are one of the following: # * airplane # * automobile # * bird # * cat # * deer # * dog # * frog # * horse # * ship # * truck # # Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). # # * The `sample_id` is the id for an image and label pair in the batch. # # Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions. # In[3]: get_ipython().magic('matplotlib inline') get_ipython().magic("config InlineBackend.figure_format = 'retina'") import helper import numpy as np # Explore the dataset batch_id = 3 # Possible Batch Ids: [1, 2, 3, 4, 5] sample_id = 10 # 0 to max is 9,999 helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id) # ## Implement Preprocess Functions # ### Normalize # In the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`. # #Local Normalization versus local norms # 'from feedback from review' # # In neurobiology, there is a concept called **lateral inhibition**. This refers to the capacity of an excited neuron to subdue its neighbors. 
We basically want a significant peak so that we have a form of local maxima. # # The normalize function normalizes image data in the range of 0 to 1, inclusive. # # However there were minor errors which I madein my normalisation technique. # # The normalisation I implemented was **local normalisation** i.e. it would normalise based on image specific maximums. The problem with this method is - for any image, say a dog against a brighter object - the normalised pixel values of the dog would be lower as compared to an image of the same dog photographed against a darker background, though essentially the pixel values of a dog have remained the same. # # Instead of a local normalization we will take the maximum and minimum global values, so that irrespective of image intensities, the normalised values of an object are identical across all images. # # # # In[4]: def normalize(x): """ Normalize a list of sample image data in the range of 0 to 1 : x: List of image data. The image shape is (32, 32, 3) : return: Numpy array of normalize data """ # TODO: Implement Function # return None # x is a numpy.ndarray # print(x.shape) #(499, 32, 32, 3) #print(x) ''' [[[[ 87 112 177] [ 90 69 177] [ 15 9 87] ..., [189 174 255] [ 85 254 129] [ 37 183 222]] ''' # range of 0 to 1 a = 0 b = 1 # being an image I will assume 0 to 255 color_scale_min = 0 # np.amin(x) color_scale_max = 255 #np.amax(x) provides only the "Local maximum" for this image, so replaced with global maximum # store old shape of tensor dimensions = x.shape # <class 'tuple'>, len(x.shape) = 4 dimensions # reshape to a single vector array image_data = x.reshape(-1, 1) # normalize image_data = a + ( ( (image_data - color_scale_min)*(b - a) )/( color_scale_max - color_scale_min ) ) # convert back to tensor x = image_data.reshape(dimensions) return x """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_normalize(normalize) # ### One-hot encode # Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function. # # Hint: Don't reinvent the wheel. # In[5]: #replaced with one above def one_hot_encode(x): """ One hot encode a list of sample labels. Return a one-hot encoded vector for each label. : x: List of sample Labels : return: Numpy array of one-hot encoded labels """ # TODO: Implement Function # Please note: I was struggling with running this twice and the forum mentor (@akansha_803081651828) # kindly explained I needed to fix the array size to the size of the classes (dog, cat, etc...) # https://discussions.udacity.com/t/assertion-error-in-one-hot-encoding/226204/2?source_topic_id=224916 from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False) # there are 10 classes in the label feature in our array, # however if we rerun this we may find that not all features are included (e.g. maybe only 4 of them, # so we will get different results. 
lb.fit(np.array([[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0]])) return lb.transform(x) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_one_hot_encode(one_hot_encode) # ### Randomize Data # As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset. # ## Preprocess all the data and save it # Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation. # In[6]: """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode) # # Check Point # This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. # In[7]: """ DON'T MODIFY ANYTHING IN THIS CELL """ import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb')) # ## Build the network # For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project. # # >**Note:** If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup. # # >However, if you would like to get the most out of this course, try to solve all the problems _without_ using anything from the TF Layers packages. You **can** still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the `conv2d` class, [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d), you would want to use the TF Neural Network version of `conv2d`, [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d). # # Let's begin! # # ### Input # The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions # * Implement `neural_net_image_input` # * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) # * Set the shape using `image_shape` with batch size set to `None`. # * Name the TensorFlow placeholder "x" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder). # * Implement `neural_net_label_input` # * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) # * Set the shape using `n_classes` with batch size set to `None`. 
# * Name the TensorFlow placeholder "y" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder). # * Implement `neural_net_keep_prob_input` # * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) for dropout keep probability. # * Name the TensorFlow placeholder "keep_prob" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder). # # These names will be used at the end of the project to load your saved model. # # Note: `None` for shapes in TensorFlow allow for a dynamic size. # In[8]: import tensorflow as tf def neural_net_image_input(image_shape): """ Return a Tensor for a bach of image input : image_shape: Shape of the images : return: Tensor for image input. """ # TODO: Implement Function # 1. Return a TF Placeholder # 2. Set the shape using image_shape with batch size set to "None". # 3. Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder. # https://www.tensorflow.org/api_docs/python/tf/placeholder # print(image_shape) # (32, 32, 3) # print ([None] + list(image_shape)) # we want [None, 32, 32, 3] same as "shape=[batch_size, *image_shape]" # print(*image_shape) # unpack the tuple with the * pointer object # ref: http://stackoverflow.com/questions/41783136/tensorflow-batch-size-in-input-placholder batch_size = None # None means you can run the model with a variable number of inputs (one or more). shaped = [batch_size] + list(image_shape) # shaped is a type <list> # print("shaped {}".format(shaped)) # shaped [None, 32, 32, 3] #print(type(shaped)) # return tf.placeholder(tf.float32, shape=[batch_size, *image_shape], name = "x") return tf.placeholder(tf.float32, shape=shaped, name = "x") def neural_net_label_input(n_classes): """ Return a Tensor for a batch of label input : n_classes: Number of classes : return: Tensor for label input. """ # TODO: Implement Function # Return a TF Placeholder # Set the shape using n_classes with batch size set to None. # Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder. # print (type(n_classes)) # int of value 10 batch_size = None # None means you can run the model with a variable number of inputs (one or more). shaped = [batch_size] + [n_classes] # print("shaped {}".format(shaped)) # [None, 10] return tf.placeholder(tf.float32, shape=shaped, name = "y") def neural_net_keep_prob_input(): """ Return a Tensor for keep probability : return: Tensor for keep probability. """ # TODO: Implement Function # Return a TF Placeholder for dropout keep probability. # Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder. return tf.placeholder(tf.float32, name = "keep_prob") """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tf.reset_default_graph() tests.test_nn_image_inputs(neural_net_image_input) tests.test_nn_label_inputs(neural_net_label_input) tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input) # ### Convolution and Max Pooling Layer # Convolution layers have a lot of success with images. For this code cell, you should implement the function `conv2d_maxpool` to apply convolution then max pooling: # * Create the weight and bias using `conv_ksize`, `conv_num_outputs` and the shape of `x_tensor`. # * Apply a convolution to `x_tensor` using weight and `conv_strides`. # * We recommend you use same padding, but you're welcome to use any padding. 
# * Add bias # * Add a nonlinear activation to the convolution. # * Apply Max Pooling using `pool_ksize` and `pool_strides`. # * We recommend you use same padding, but you're welcome to use any padding. # # **Note:** You **can't** use [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) for **this** layer, but you can still use TensorFlow's [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn) package. You may still use the shortcut option for all the **other** layers. # In[9]: #ref to following article from https://github.com/tensorflow/tensorflow/issues/1122 #ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow?answertab=votes#tab-top #import numpy as np #import tensorflow as tf #from tensorflow.python #import control_flow_ops def batch_norm(x, n_out, phase_train, scope='bn'): """ Batch normalization on convolutional maps. Args: x: Tensor, 4D BHWD input maps n_out: integer, depth of input maps phase_train: boolean tf.Varialbe, true indicates training phase scope: string, variable scope Return: normed: batch-normalized maps """ with tf.variable_scope(scope): beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True) gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True) batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments') ema = tf.train.ExponentialMovingAverage(decay=0.5) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with tf.control_dependencies([ema_apply_op]): return tf.identity(batch_mean), tf.identity(batch_var) mean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3) return normed # In[10]: def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides): """ Apply convolution then max pooling to x_tensor :param x_tensor: TensorFlow Tensor :param conv_num_outputs: Number of outputs for the convolutional layer :param conv_ksize: kernal size 2-D Tuple for the convolutional layer :param conv_strides: Stride 2-D Tuple for convolution :param pool_ksize: kernal size 2-D Tuple for pool :param pool_strides: Stride 2-D Tuple for pool : return: A tensor that represents convolution and max pooling of x_tensor """ # TODO: Implement Function # The classification of a given patch in an image is determined by the weights # and biases corresponding to that patch. # lesson Convolutional networks/Parameters # the dimensions are (W−F+2P)/S+1 #image_width = x_tensor.get_shape()[2].value #image_height = x_tensor.get_shape()[3].value #padding = 1 #Valid or same, https://www.tensorflow.org/api_guides/python/nn#Convolution #strides = pool_strides[0] #((Image.width - Filter.width + 2 * Padding)/ Stride) +1 #new_width = ((image_width - filter_width + (2 * padding)) / (strides )) + 1.0 #print(new_width) # Lesson Convolutional networks/Quiz: Convolutional Output Shape # https://classroom.udacity.com/nanodegrees/nd101/parts/2a9dba0b-28eb-4b0e-acfa-bdcf35680d90/modules/ca299d5f-35f6-4520-a2b0-74c6878008b3/lessons/2fd24529-215c-47b5-a644-2c23650493f6/concepts/d0db3cab-ad70-46ec-9614-4dfc27dfc865 # 1. Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor. 
channels = x_tensor.get_shape()[3].value # RGB, the depth from the number of filters Y filter_width = conv_ksize[0] # kernal size 2-D Tuple for the convolutional layer filter_height = conv_ksize[1] '''experiment from http://cs231n.stanford.edu/slides/winter1516_lecture5.pdf slide 56''' # The shape of the filter weight is (height, width, input_depth, output_depth) filter_dimensions = tf.truncated_normal([filter_width, filter_height, channels, conv_num_outputs], stddev=0.01) filter_weights = tf.Variable(filter_dimensions, name="W") # The shape of the filter bias is (output_depth,) #in Tensor Board minst Dandellion Mané replaced with filter_bias = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B") filter_bias = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]), name="B") # Set the stride for each dimension (batch_size, height, width, depth) strides = [1, conv_strides[0], conv_strides[1], 1] # (batch, height, width, depth) # TODO: set the padding, either 'VALID' or 'SAME'. padding='SAME' # 'VALID' is not recommended for this project # To make life easier, the code is using tf.nn.bias_add() to add the bias. # Using tf.add() doesn't work when the tensors aren't the same shape. conv = tf.nn.bias_add(tf.nn.conv2d(x_tensor, filter_weights, strides, padding), filter_bias) '''Experiment from pro Tip from reviewer - adding batch normalzation after conv and before maxpool''' '''https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization''' ''' tf.nn.batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None) ''' '''before or after ReLU :) ducha-aiki/caffenet-benchmark says before ReLU''' #phase_train = tf.placeholder(tf.bool, name='phase_train') # normally the TensorFlow Session would be passed a dictionary but this is something not editable # in this notebook so I will set perminantly to True using the tf.bool object computed_val = tf.constant(10.0) constant_val = tf.constant(37.0) phase_train = tf.less(computed_val, constant_val) conv_bn = batch_norm(conv, conv_num_outputs, phase_train) #conv_bn = conv '''end experiment''' # RELU layer will apply an elementwise activation function, #relu_layer = tf.nn.relu(conv) relu_layer = tf.nn.relu(conv_bn) # strides=[1, 2, 2, 1] - [batch, height, width, channels]). # For both ksize and strides, the batch and channel dimensions are typically set to 1. new_strides = [1, pool_ksize[0], pool_ksize[1], 1] # performs max pooling with the ksize parameter as the size of the filter. max_pool = tf.nn.max_pool(relu_layer, ksize=new_strides, strides=new_strides, padding=padding) # A tensor that represents convolution and max pooling of x_tensor return max_pool """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_con_pool(conv2d_maxpool) # ### Flatten Layer # Implement the `flatten` function to change the dimension of `x_tensor` from a 4-D tensor to a 2-D tensor. The output should be the shape (*Batch Size*, *Flattened Image Size*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages. # In[11]: def flatten(x_tensor): """ Flatten x_tensor to (Batch Size, Flattened Image Size) : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions. : return: A tensor of size (Batch Size, Flattened Image Size). 
""" # TODO: Implement Function batch_size = x_tensor.get_shape()[0] if batch_size.value == None: batch_size = -1 else: batch_size = x_tensor.get_shape()[0].value width = x_tensor.get_shape()[1].value height = x_tensor.get_shape()[2].value channels = x_tensor.get_shape()[3].value image_dimensions = width * height * channels x2D_tensor = tf.reshape(x_tensor, [batch_size, image_dimensions]) return x2D_tensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_flatten(flatten) # ### Fully-Connected Layer # Implement the `fully_conn` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages. # In[12]: def fully_conn(x_tensor, num_outputs): """ Apply a fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function # ref lesson "convolutional networks / Tensorflow Convolutional Layer" filter_flattend_size = x_tensor.get_shape()[1].value #weight and bias - Use Dandelion Mané's way to do it from Tensor board video #fully_conn_weights = tf.Variable(tf.truncated_normal([filter_flattend_size, num_outputs])) #, mean=0.0, stddev=0.01 )) fully_conn_weights = tf.Variable(tf.truncated_normal([filter_flattend_size, num_outputs], mean=0.0, stddev=0.01 ), name="W") #fully_conn_bias = tf.Variable(tf.zeros([num_outputs]), name="B") fully_conn_bias = tf.Variable(tf.constant(0.1, shape=[num_outputs]), name="B") fully_conn_output = tf.matmul(x_tensor, fully_conn_weights) # To make life easier, the code is using tf.nn.bias_add() to add the bias. # Using tf.add() doesn't work when the tensors aren't the same shape. fully_conn_output_with_bias = tf.nn.bias_add(fully_conn_output, fully_conn_bias) fully_relu_output = tf.nn.relu(fully_conn_output_with_bias) #A 2-D tensor where the second dimension is num_outputs. #print(fully_relu_output) return fully_relu_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_fully_conn(fully_conn) # ### Output Layer # Implement the `output` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages. # # **Note:** Activation, softmax, or cross entropy should **not** be applied to this. # In[13]: def output(x_tensor, num_outputs): """ Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. 
""" # TODO: Implement Function #print("x_tensor:{}".format(x_tensor.get_shape()[1])) #(?, 128) so we only need second value #print("num_outputs:{}".format(num_outputs)) # 40 is the new dimension # get the dimensions for weights [128, 40] weight_dimensions = tf.truncated_normal([int(x_tensor.get_shape()[1]), num_outputs]) # weights place holder initialised weights = tf.Variable(weight_dimensions , name ="W") # bias place holder initialised bias = tf.Variable(tf.truncated_normal([num_outputs]) , name ="B") # To make life easier, the code is using tf.nn.bias_add() to add the bias. # Using tf.add() doesn't work when the tensors aren't the same shape. # ref: Lesson Convolutional networks/Convolutional Network in TensorFlow # where fc1 is the fully connected layer preceeding this # out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) op_output = tf.nn.bias_add(tf.matmul(x_tensor, weights), bias) return op_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_output(output) # ### Create Convolutional Model # Implement the function `conv_net` to create a convolutional neural network model. The function takes in a batch of images, `x`, and outputs logits. Use the layers you created above to create this model: # # * Apply 1, 2, or 3 Convolution and Max Pool layers # * Apply a Flatten Layer # * Apply 1, 2, or 3 Fully Connected Layers # * Apply an Output Layer # * Return the output # * Apply [TensorFlow's Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout) to one or more layers in the model using `keep_prob`. # In[14]: def conv_net(x, keep_prob): """ Create a convolutional neural network model : x: Placeholder tensor that holds image data. : keep_prob: Placeholder tensor that hold dropout keep probability. : return: Tensor that represents logits """ # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers # Play around with different number of outputs, kernel size and stride # Function Definition from Above: # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) # I think ksize should be bigger than the pool size conv_ksize = (4,4) #(3,3) conv_strides = (1,1) pool_ksize = (3,3) pool_strides = (1,1) # PROBLEM....!!!!!!!! # With the Print_stat we blow up if layer1 > 80 and layer2 > 96 (disable printstat and we are fine :) # might be because my implementation of conv_net is not efficient? So add a new conv layer # Layer 1 - 32*32*3 to 8*8*32 conv_num_outputs = 32#80 #64#80 #80 #1024 #128 #32 conv1 = conv2d_maxpool(x, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) # Layer 2 - 8*8*32 to 2*2*64 conv_num_outputs = 96#96 #70 #192 #96 #64 conv2 = conv2d_maxpool(conv1, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) # Layer 3 - conv_num_outputs = 288#128#50 #96 #64 conv3 = conv2d_maxpool(conv2, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) # We want to feed all of the nodes from the last conv layer into the first fully connected layer, # but the shapes are too complicated. Flattening the layer is simply to change its shape so it # is easier to work with # Fully connected layer - 2*2*64 to ? 
flatten_layer = flatten(conv3) flatten_layer = tf.nn.dropout(flatten_layer, keep_prob) #<---- dropout # TODO: Apply 1, 2, or 3 Fully Connected Layers # Play around with different number of outputs # Function Definition from Above: # fully_conn(x_tensor, num_outputs) ''' Fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. ''' num_outputs = 512 fc1 = fully_conn(flatten_layer, num_outputs) #fc1 = tf.nn.dropout(fc1, keep_prob) #<---- dropout num_outputs = 256 fc2 = fully_conn(fc1, num_outputs) #fc2 = tf.nn.dropout(fc2, keep_prob) #<---- dropout num_outputs = 128 fc3 = fully_conn(fc2, num_outputs) # TODO: Apply an Output Layer # Set this to the number of classes # Function Definition from Above: # output(x_tensor, num_outputs) num_outputs = 10 # to match the different image types we have....no more no less :) out = output(fc3, num_outputs) # TODO: return output return out """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ ############################## ## Build the Neural Network ## ############################## # Remove previous weights, bias, inputs, etc.. tf.reset_default_graph() # Inputs x = neural_net_image_input((32, 32, 3)) y = neural_net_label_input(10) keep_prob = neural_net_keep_prob_input() # Model logits = conv_net(x, keep_prob) # Name logits Tensor, so that is can be loaded from disk after training logits = tf.identity(logits, name='logits') # Loss and Optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.AdamOptimizer().minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') tests.test_conv_net(conv_net) # ## Train the Neural Network # ### Single Optimization # Implement the function `train_neural_network` to do a single optimization. The optimization should use `optimizer` to optimize in `session` with a `feed_dict` of the following: # * `x` for image input # * `y` for labels # * `keep_prob` for keep probability for dropout # # This function will be called for each batch, so `tf.global_variables_initializer()` has already been called. # # Note: Nothing needs to be returned. This function is only optimizing the neural network. # In[15]: def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch): """ Optimize the session on a batch of images and labels : session: Current TensorFlow session : optimizer: TensorFlow optimizer function : keep_probability: keep probability : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data """ # TODO: Implement Function #The train_neual_network should only have one session.run call , on the optimizer, see Lesson CNN/CNN in TensorFlow #and make sure the keep_probablity is passed in your cost and accuracy functions (in your print stats function) session.run(optimizer, feed_dict={ x: feature_batch, y: label_batch, keep_prob: keep_probability}) pass """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_train_nn(train_neural_network) # ### Show Stats # Implement the function `print_stats` to print loss and validation accuracy. Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy. 
Use a **keep probability of `1.0`** to calculate the loss and validation accuracy. # # Feedback from review # Thank you for your help :) This is my second attempt, I did not implement the **Print_stats** finction correctly. # # ### The print_stats function prints loss and validation accuracy. # Per the instructions, instead of printing the accuracy and loss on the training set you need to print the accuracy on the validation set. # # In **valid_acc variable** use the global variables **valid_features** and **valid_labels** instead of **feature_batch** and **label_batch**. # # Then retrain the network and resubmit. # In[16]: def print_stats(session, feature_batch, label_batch, cost, accuracy): """ Print information about loss and validation accuracy : session: Current TensorFlow session : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data : cost: TensorFlow cost function : accuracy: TensorFlow accuracy function """ # TODO: Implement Function # Calculate batch loss and accuracy loss = session.run(cost, feed_dict={ x: valid_features, y: valid_labels, keep_prob: 1}) # As per reviewer's suggestion I am going to use the global variables valid_features and valid_labels # instead of feature_batch and label_batch. valid_acc = session.run(accuracy, feed_dict={ x: valid_features, y: valid_labels, keep_prob: 1}) ''' Lesson: CNN/CNN in Tensorflow print('Epoch {:>2}, Batch {:>3} -' 'Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format( epoch + 1, batch + 1, loss, valid_acc)) ''' print('Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format( loss, valid_acc)) pass # ### Hyperparameters # Tune the following parameters: # * Set `epochs` to the number of iterations until the network stops learning or start overfitting # * Set `batch_size` to the highest number that your machine has memory for. Most people set them to common sizes of memory: # * 64 # * 128 # * 256 # * ... # * Set `keep_probability` to the probability of keeping a node using dropout # In[17]: # TODO: Tune Parameters epochs = 48 # model reached repeating accuracy at around epoch 48, so stop before over fitting batch_size = 1024 #1024 #64 # bigger batch size the better keep_probability = 0.4 #0.5 # ### Train on a Single CIFAR-10 Batch # Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section. # In[18]: """ DON'T MODIFY ANYTHING IN THIS CELL """ print('Checking the Training on a Single Batch...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) #sess.run(tf.initialize_all_variables()) # Training cycle for epoch in range(epochs): batch_i = 1 for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # ### Fully Train the Model # Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches. 
# In[19]: """ DON'T MODIFY ANYTHING IN THIS CELL """ save_model_path = './image_classification' print('Training...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) #sess.run(tf.initialize_all_variables()) # Training cycle for epoch in range(epochs): # Loop over all batches n_batches = 5 for batch_i in range(1, n_batches + 1): for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # Save Model saver = tf.train.Saver() save_path = saver.save(sess, save_model_path) # # Checkpoint # The model has been saved to disk. # ## Test Model # Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters. # In[20]: """ DON'T MODIFY ANYTHING IN THIS CELL """ get_ipython().magic('matplotlib inline') get_ipython().magic("config InlineBackend.figure_format = 'retina'") import tensorflow as tf import pickle import helper import random # Set batch size if not already set try: if batch_size: pass except NameError: batch_size = 64 save_model_path = './image_classification' n_samples = 4 top_n_predictions = 3 def test_model(): """ Test the saved model against the test dataset """ test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb')) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load model loader = tf.train.import_meta_graph(save_model_path + '.meta') loader.restore(sess, save_model_path) # Get Tensors from loaded model loaded_x = loaded_graph.get_tensor_by_name('x:0') loaded_y = loaded_graph.get_tensor_by_name('y:0') loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') loaded_logits = loaded_graph.get_tensor_by_name('logits:0') loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0') # Get accuracy in batches for memory limitations test_batch_acc_total = 0 test_batch_count = 0 for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size): test_batch_acc_total += sess.run( loaded_acc, feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0}) test_batch_count += 1 print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count)) # Print Random Samples random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples))) random_test_predictions = sess.run( tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions), feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0}) helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions) test_model() # ## Why 50-70% Accuracy? # You might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores [well above 70%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130). That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques. 
# ## Submitting This Project # When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_image_classification.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission. # In[ ]:
2a4aae8f42361dd0c1ae73ec7a7834b9cc9c11e2
ascurlock/Scurlock-Math361B
/Number Theory/N5_Divisors_Scurlock.py
573
3.921875
4
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 12:02:25 2019

@author: Ashley
"""
#%%
def divisor(n):
    div = []
    for i in range(1, int(n/2) + 1):
        if n % i == 0:
            div.append(i)
    print('The proper divisors of', n, 'are', div)

divisor(220)

#%%
def divisor_mod(n):
    div = []
    for i in range(1, int(n/2) + 1):
        if n % i == 0:
            div.append(i)
    print('The sum of proper divisors is', sum(div))
    print('The sum of proper divisors mod 100 is', sum(div) % 100)
    return sum(div) % 100

divisor_mod(220)
e262832a22b015a681d69fa141654333ffc59c10
prashantchanne12/Leetcode
/unique number of occurence.py
1,136
3.90625
4
''' Given an array of integers arr, write a function that returns true if and only if the number of occurrences of each value in the array is unique. Example 1: Input: arr = [1,2,2,1,1,3] Output: true Explanation: The value 1 has 3 occurrences, 2 has 2 and 3 has 1. No two values have the same number of occurrences. Example 2: Input: arr = [1,2] Output: false Example 3: Input: arr = [-3,0,1,-3,1,1,1,-3,10,0] Output: true ''' # Solution 1 class Solution(object): def uniqueOccurrences(self, arr): dict1 = {} dict2 = {} for i in arr: if i in dict1: dict1[i] = dict1[i] + 1 else: dict1[i] = 1 for i in dict1.values(): if i in dict2: return False else: dict2[i] = True return True # Solution 2 class Solution(object): def uniqueOccurrences(self, arr): dict = {} for i in arr: if i in dict: dict[i] = dict[i] + 1 else: dict[i] = 1 return len(dict.values()) == len(set(dict.values()))
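# Added illustration (not part of the original file): collections.Counter gives
# the occurrence counts directly, so "all counts are unique" reduces to
# comparing the length of the count list with the length of its set.
from collections import Counter

class SolutionCounter(object):
    def uniqueOccurrences(self, arr):
        counts = list(Counter(arr).values())
        return len(counts) == len(set(counts))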
b2c2e1fbc1d3b6806daf2afb800b636889023dd5
jcohen66/python-sorting
/questions/medium/keypad_string.py
2,985
4.21875
4
import string def get_key_to_letters(): ''' Create a map ''' possible_letters = string.ascii_lowercase possible_keys = string.digits key_to_letters = {} start_index = 0 for key in possible_keys: if key == '0': key_to_letters[key] = " " elif key == '1': key_to_letters[key] = "" else: num_letters = 3 if key in {"7", "9"}: num_letters = 4 letters = possible_letters[start_index: start_index + num_letters] key_to_letters[key] = letters start_index += num_letters return key_to_letters # Build the map from keys to letters KEY_TO_LETTERS = get_key_to_letters() def keypad_string(keys): ''' Given a string consisting of 0-9, find the string that is created using a standard phone keypad. | 1 | 2 (abc) | 3 (def) | | 4 (ghi) | 5 (jkl) | 6 (mno) | | 7 (pqrs) | 8 (tuv) | 9 (wxyz) | | * | 0 ( ) | # | You can ignore 1, and 0 corresponds to space >>> keypad_string("12345") 'adgj' >>> keypad_string("4433555555666") 'hello' >>> keypad_string("2022") 'a b' >>> keypad_string("") '' >>> keypad_string("111") '' >>> keypad_string("157") 'jp' >>> keypad_string("*") Traceback (most recent call last): ... AssertionError: Invalid Key ''' result = "" count = 0 prev_key = "" curr_key = "" valid_keys = set(string.digits) # Loop through the keys and add the for curr_key in keys: assert curr_key in valid_keys, "Invalid Key" if curr_key == "1": pass else: if not prev_key: # first key press prev_key = curr_key count = 1 else: # get the map curr_key_letters = KEY_TO_LETTERS[curr_key] # press same key if prev_key == curr_key: # press X times already if count == len(curr_key_letters): # get last key result += curr_key_letters[-1] count = 1 # hasn't pressed X times else: count += 1 # press different key else: prev_letters = KEY_TO_LETTERS[prev_key] result += prev_letters[count - 1] prev_key = curr_key count = 1 # Handle special case where key was pressed # multiple times but the key wouldnt have changed # so capture the last keypress. if curr_key: curr_key_letters = KEY_TO_LETTERS[curr_key] # check if there is a mapping for the # last key pressed. if len(curr_key_letters) > 0: result += curr_key_letters[count - 1] return result
acce6bc75b30f05b4d9e765f413c61b022d1ef76
PulkitSingh2008/Calculator-that-you-need
/Clacu.py
839
4.34375
4
# defining function (calculation); the operator is passed in explicitly so the
# function no longer depends on a global defined later in the script
def calculation(number1, number2, operator):
    # if statements to perform the calculation
    if operator == "+":
        return number1 + number2
    elif operator == "*":
        return number1 * number2
    elif operator == "^":
        return number1 ** number2
    elif operator == "**":
        return number1 ** number2
    elif operator == "-":
        return number1 - number2
    elif operator == "/":
        return number1 / number2
    else:
        return "Invalid operation"

number1 = float(input("please enter your first number: "))
print(number1)
number2 = float(input("please enter your second number: "))
print(number2)
operator = input("Please enter the arithmetic operator: ")
print(operator)

# calling function (calculation) and printing its result
print(calculation(number1, number2, operator))
41310e7d2cb40d3fbc1c5a8ae59ba0be7e08b34a
pixelblacksmith/megacode
/polynomial.py
1,888
3.828125
4
import sys


def eval(x, poly):
    '''Evaluate at x the polynomial with coefficients given in poly.
    The value p(x) is returned.'''
    sum = 0
    while 1:
        sum = sum + poly[0]   # Add the next coef.
        poly = poly[1:]       # Done with that one.
        if not poly: break    # If no more, done entirely.
        sum = sum * x         # Mult by x (each coef gets x right num times)
    return sum


def read(prompt = '', file = sys.stdin):
    '''Read a line of integers and return the list of integers.'''
    # Read a line: from the prompt if one is given, otherwise from the file.
    if prompt:
        line = input(prompt)
    else:
        line = file.readline()
    if line == 'quit':
        raise EOFError('Input quit on attempt to read polynomial.')
    retval = []
    for val in line.split():
        retval.append(int(val))
    return retval


#
# Create a string of the polynomial in sort-of-readable form.
def srep(p):
    '''Print the coefficient list as a polynomial.'''
    # Get the exponent of first coefficient, plus 1.
    exp = len(p)

    # Go through the coefs and turn them into terms.
    retval = ''
    while p:
        # Adjust exponent.  Done here so continue will run it.
        exp = exp - 1

        # Strip first coefficient
        coef = p[0]
        p = p[1:]

        # If zero, leave it out.
        if coef == 0: continue

        # If adding, need a + or -.
        if retval:
            if coef >= 0:
                retval = retval + ' + '
            else:
                coef = -coef
                retval = retval + ' - '

        # Add the coefficient, if needed.
        if coef != 1 or exp == 0:
            retval = retval + str(coef)
            if exp != 0: retval = retval + '*'

        # Put the x, if we need it.
        if exp != 0:
            retval = retval + 'x'
            if exp != 1: retval = retval + '^' + str(exp)

    # For zero, say that.
    if not retval: retval = '0'

    return retval
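# Quick usage check (added illustration, not part of the original file):
# the coefficient list [3, 0, 2] represents p(x) = 3*x^2 + 2, so evaluating
# at x = 2 gives 14, and srep() renders the same list in readable form.
if __name__ == '__main__':
    coeffs = [3, 0, 2]
    print(srep(coeffs))      # 3*x^2 + 2
    print(eval(2, coeffs))   # 14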
210c220a65b37348e504e208e97b63665635b964
friedlich/python
/19年7月/7.25/np.reshape.py
447
4.3125
4
# In the numpy module we frequently use resize and reshape. In practice, resize
# is usually used to change the size of an array, while reshape is used to
# change (e.g. increase) the number of dimensions.
# reshape gives the array a new shape without changing its data.
import numpy as np

X = np.array([1, 2, 3, 4, 5, 6, 7, 8])
X_2 = X.reshape((2, 4))     # returns a 2*4 2-dim array
X_3 = X.reshape((2, 2, 2))  # returns a 2*2*2 3-dim array
print("X:\n", X)
print("X_2:\n", X_2)
print("X_3:\n", X_3)
6459d257467ca13b66072a2e33831224780a9115
KONASANI-0143/Dev
/requirements/venky_task/AI/pyplot.py
270
3.546875
4
import matplotlib.pyplot as plt

x = [1, 2, 3]
y = [4, 5, 8]
x1 = [7, 8, 9]
y2 = [4, 5, 6]

plt.plot(x, y, label="First line")
plt.plot(x1, y2, label="Second line")
plt.xlabel("Plot Number")
plt.ylabel("Important var")
plt.title("Interesting graph, check it out")
plt.legend()
plt.show()
6c75b48db65713fe753428334e42dacef1c9a964
plasticroad/DataCamp
/Python/07.Cleaning-Data-in-Python/01.Exploring-your-data/05.Visualizing-multiple-variables-with-boxplots.py
823
4.25
4
'''
Visualizing multiple variables with boxplots

Histograms are great ways of visualizing single variables. To visualize
multiple variables, boxplots are useful, especially when one of the variables
is categorical.

In this exercise, your job is to use a boxplot to compare the 'initial_cost'
across the different values of the 'Borough' column. The pandas .boxplot()
method is a quick way to do this, in which you have to specify the column and
by parameters. Here, you want to visualize how 'initial_cost' varies by
'Borough'.

pandas and matplotlib.pyplot have been imported for you as pd and plt,
respectively, and the DataFrame has been pre-loaded as df.

INSTRUCTIONS
100 XP

Using the .boxplot() method of df, create a boxplot of 'initial_cost' across
the different values of 'Borough'.
Display the plot.
'''
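# A minimal sketch of the step described above (added for illustration; the
# exercise assumes a pre-loaded DataFrame df, so the tiny df built here with
# made-up 'Borough' and 'initial_cost' values is only a stand-in).
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'Borough': ['BRONX', 'BRONX', 'QUEENS', 'QUEENS'],
                   'initial_cost': [75000.0, 120000.0, 30000.0, 450000.0]})

# Create the boxplot of 'initial_cost' grouped by 'Borough'
df.boxplot(column='initial_cost', by='Borough', rot=90)

# Display the plot
plt.show()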
ebc756af54b04e24ea197aa227b64bf74af34e6d
why1679158278/python-stu
/python资料/day8.4/day04/exercise06.py
143
3.640625
4
""" 在终端中录入一个内容,循环打印每个文字的编码值。 """ for chr in input("请输入文字:"): print(ord(chr))
bc77036f6b96d947ee1f794a37cf45986d3f2f91
Davidxswang/leetcode
/easy/840-Magic Squares In Grid.py
1,730
4.0625
4
""" https://leetcode.com/problems/magic-squares-in-grid/ A 3 x 3 magic square is a 3 x 3 grid filled with distinct numbers from 1 to 9 such that each row, column, and both diagonals all have the same sum. Given an grid of integers, how many 3 x 3 "magic square" subgrids are there? (Each subgrid is contiguous). Example 1: Input: [[4,3,8,4], [9,5,1,9], [2,7,6,2]] Output: 1 Explanation: The following subgrid is a 3 x 3 magic square: 438 951 276 while this one is not: 384 519 762 In total, there is only one magic square inside the given grid. Note: 1 <= grid.length <= 10 1 <= grid[0].length <= 10 0 <= grid[i][j] <= 15 """ # time complexity: O(n), space complexity: O(1) class Solution: def numMagicSquaresInside(self, grid: List[List[int]]) -> int: if len(grid) < 3 or len(grid[0]) < 3: return 0 result = 0 def check(r, c): l = [] def suml(i, j, k): return l[i - 1] + l[j - 1] + l[k - 1] for row in range(-1, 2): for column in range(-1, 2): l.append(grid[r + row][c + column]) return sorted(l) == list(range(1, 10)) and ( suml(1, 5, 9) == suml(2, 5, 8) == suml(3, 5, 7) == suml(4, 5, 6) == suml(1, 2, 3) == suml(7, 8, 9) == suml( 1, 4, 7) == suml(3, 6, 9) == 15) and l[4] == 5 for center_row in range(1, len(grid) - 1): for center_column in range(1, len(grid[0]) - 1): if check(center_row, center_column): result += 1 return result
21c49bf72688125502f89b747e439b5fa85e445d
claukako/intermediate-python-course
/dice_roller.py
501
4.09375
4
import random


def main():
    dicerolls = int(input('How many dice would you like to roll? '))
    dice_size = int(input('How many sides are the dice? '))
    dicesum = 0
    for i in range(0, dicerolls):
        # Roll a die with the requested number of sides
        roll = random.randint(1, dice_size)
        dicesum += roll
        if roll == 1:
            print(f'you rolled a {roll}! Critical fail!')
        elif roll == dice_size:
            print(f'you rolled a {roll}! Critical success!')
        else:
            print(f'you rolled a {roll}')
    print(f'you rolled a total of {dicesum}')


if __name__ == "__main__":
    main()
1680dc420a819b35814a90848677a99846010662
DamienMolina/watermark
/watermark.py
786
3.5625
4
from PIL import Image
import os
import glob

mask = Image.open('mask.png')
area = (50, 50)

name_folder_in = input("What's the name of the folder of the photos you want to watermark?")
name_folder_out = "watermarked_photos"

if not os.path.exists(name_folder_out):
    os.makedirs(name_folder_out)
if not os.path.exists(name_folder_out + "/" + name_folder_in):
    os.makedirs(name_folder_out + "/" + name_folder_in)

list_files = []
file_type = ["*.jpg", "*.JPG"]
for i in file_type:
    param = "{}/{}".format(name_folder_in, i)
    list_files.extend(glob.glob(param))

for i in list_files:
    im_original = Image.open(i)
    im_original.paste(mask, area, mask)
    path_out = "{}/{}".format(name_folder_out, i)
    print(path_out)
    im_original.save(path_out)
8060e4e27bbdee7fe96a4fc0168fd69084732875
wise200/AdventOfCode
/2019/day01.py
234
3.71875
4
def fuel(n):
    return max(0, n // 3 - 2)


total = 0
with open('day01.dat', 'r') as file:
    for line in file:
        num = int(line)
        while fuel(num) > 0:
            total += fuel(num)
            num = fuel(num)
print(total)
53897c8c65d83255fa67af3c7f07343b767879fc
Harish1901/Deeplearning
/Linearregression.py
1,606
4
4
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# data imported in csv format
data = pd.read_csv("/Users/Santhosh/Downloads/50_Startups.csv")

# separating dependent and independent variables
X = data.iloc[:, :-1].values
Y = data.iloc[:, 4].values

# using a label encoder plus one-hot encoding since one of the independent variables is categorical
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
X = onehotencoder.fit_transform(X).toarray()

# splitting variables for train and test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)

# importing and initialising the model
learn = LinearRegression()

# training the model
learn.fit(X_train, Y_train)
Y_train_pred = learn.predict(X_train)
Y_test_pred = learn.predict(X_test)

# printing test and train data
df = pd.DataFrame(Y_test_pred, Y_test)
print(df)

# calculating error
mse = mean_squared_error(Y_test, Y_test_pred)
print(mse)
rmse = np.sqrt(mse)
print(rmse)

# Plotting
plt.scatter(Y_train_pred, Y_train_pred - Y_train, c='blue', marker='o', label='Training data')
plt.scatter(Y_test_pred, Y_test_pred - Y_test, c='lightgreen', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=40000, xmax=200000)
plt.plot()
plt.show()
7ee1619205f470af76e65d39fa0bd020390990f5
bagwanbulbul/pythonBasicQuestions
/if_else_Q8.py
189
4.03125
4
# input by user
user_input = int(input("enter the number"))
if user_input % 5 == 0 and user_input % 15 == 0:
    print("divisible by both")
else:
    print("not divisible by both")
ed23d1feee1682f6b42706401908f5e78f701a18
jsjimenez51/holbertonschool-higher_level_programming
/0x0B-python-input_output/7-save_to_json_file.py
389
4.09375
4
#!/usr/bin/python3 """ Module that defines save_to_json_file. """ import json def save_to_json_file(my_obj, filename): """ Writes an object to a text file using JSON representation Args: my_obj: the object that is written to the file filename: the file to write the object to """ with open(filename, 'w') as a_file: a_file.write(json.dumps(my_obj))
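# Hedged usage sketch (not part of the original module): write a small object
# to an illustrative file name and read the JSON text back to confirm it was
# serialized as expected.
if __name__ == "__main__":
    sample = {"id": 12, "tags": ["json", "io"]}
    save_to_json_file(sample, "sample.json")   # "sample.json" is a made-up name
    with open("sample.json") as f:
        print(f.read())                        # {"id": 12, "tags": ["json", "io"]}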
d3c8ac718e9d26362c259fad9ca0de903eaa0638
stefanosc/algorithms
/graph/clustering_large.py
1,832
3.53125
4
"""Simple implementation of clustering algorithm using bit hamming to determine verticies distance """ import os import sys misc_path = os.path.abspath(os.path.join('..', 'misc')) sys.path.append(misc_path) from bitmasks import bitmasks from union_find_clustering import UnionFind import numpy as np import itertools class Graph(): """simple Graph class to run clustering algorithm""" def __init__(self, file_name): self._verticies = np.zeros(2**24, dtype=bool) self.process_lines(file_name) self._bitmasks = [i for i in itertools.chain(bitmasks(24,2), bitmasks(24,1))] def process_lines(self, file_name): with open(file_name) as myfile: for line in myfile: self._verticies[int("".join(line.split()), base=2)] = True self._union_find = UnionFind(arr=self._verticies) def clustering(self): """ """ for vertex in self._verticies.nonzero()[0]: for mask in self._bitmasks: if self._verticies[vertex ^ mask]: self._union_find.union(vertex, vertex^mask) return self._union_find.sets def clustering_numpy(self): """ """ verticies = self._verticies.nonzero()[0] for mask in self._bitmasks: for v_index, connected_vertex in enumerate(verticies ^ mask): if self._verticies[connected_vertex]: if verticies[v_index] < connected_vertex: self._union_find.union(verticies[v_index], connected_vertex) return self._union_find.sets # import profile graph = Graph('../test_data/graph/clustering_big.txt') # profile.run("graph.clustering()") # profile.run("print(graph.clustering_numpy())") print(graph.clustering_numpy())
7aaaefa5a5332f0948dcd906fd6b9b1be407c52a
noveljava/study_leetcode
/completed/146. LRU Cache.py
1,596
3.671875
4
class LRUCache: def __init__(self, capacity: int): self.capacity = capacity self.lru_info = [] self.memory = {} def get(self, key: int) -> int: if key not in self.lru_info: return -1 self.lru_info.remove(key) self.lru_info.insert(0, key) return self.memory[key] def put(self, key: int, value: int) -> None: if len(self.lru_info) == self.capacity and key not in self.lru_info: self.lru_info.pop() if key in self.lru_info: self.lru_info.remove(key) self.lru_info.insert(0, key) self.memory[key] = value return None # Your LRUCache object will be instantiated and called as such: # obj = LRUCache(capacity) # param_1 = obj.get(key) # obj.put(key,value) if __name__ == '__main__': # lRUCache = LRUCache(2) # lRUCache.put(1, 1) # cache is {1 = 1} # lRUCache.put(2, 2) # cache is {1 = 1, 2 = 2} # assert 1 == lRUCache.get(1) # return 1 # lRUCache.put(3, 3) # LRU key was 2, evicts key 2, cache is {1=1, 3=3} # assert -1 == lRUCache.get(2) # returns - 1(not found) # lRUCache.put(4, 4) # LRU key was 1, evicts key 1, cache is {4=4, 3=3} # assert -1 == lRUCache.get(1) # return -1(not found) # assert 3 == lRUCache.get(3) # return 3 # assert 4 == lRUCache.get(4) # return 4 lRUCache = LRUCache(2) assert -1 == lRUCache.get(2) lRUCache.put(2, 6) assert -1 == lRUCache.get(1) lRUCache.put(1, 5) lRUCache.put(1, 2) assert 2 == lRUCache.get(1) assert 6 == lRUCache.get(2)
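# Added sketch (not the author's solution above): the list bookkeeping in
# LRUCache costs O(n) per get/put; collections.OrderedDict supports the same
# behaviour in O(1) via move_to_end() and popitem(last=False).
from collections import OrderedDict

class LRUCacheOrdered:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key: int) -> int:
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key)          # mark as most recently used
        return self.cache[key]

    def put(self, key: int, value: int) -> None:
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)   # evict the least recently used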
2cbf1d1a71bb05bd124905effc05e5255ecca781
crystal30/DataStructure
/PycharmProjects/BST/BinarySearch.py
4,201
3.796875
4
#!/usr/bin/env python3 # -*- coding: utf-8 -*- #在有序的数组中,查找目标元素target,若target在数组中,返回target的下标,若不在该数组中,返回-1 class BinarySearch(): #1. 用递归的方式实现 二分查找法 @classmethod def bS1(cls,arr,target): n = len(arr) #递归结束的条件 return cls.__bs(arr, 0, n-1, target) #搜索范围 arr[l,r],前闭后闭。 @classmethod def __bs(cls, arr, l, r, target): #当还剩一个元素时,此时不能再进行分割,所以,看一下此时这个元素是否等于target #等于,就就返回target的小标,不等于返回-1 if l == r: return l if target == arr[l] else -1 mid = l + (r-l)//2 #由于每个分支路下边都有return,所以,可以用if,if,if 也可以用if,elif, else,效果是一样的 if target < arr[mid]: return cls.__bs(arr, l, mid-1, target) elif target > arr[mid]: return cls.__bs(arr, mid+1, r, target) else: #target == arr[mid]: return mid # 2.用非递归的方式实现 二分查找法 # 查找的范围是arr[l,r], 前闭后闭的范围 @classmethod def bS2(cls, arr, target): n = len(arr) l = 0 r = n-1 while l<=r: mid = l + (r - l) // 2 if target < arr[mid]: r = mid-1 elif target > arr[mid]: l = mid+1 else: return mid return -1 #对于binarySearch1 binarySearch2,其对具有重复元素的有序数组,就不知道他到底输出的是第几个重复元素的下标 #eg:当arr = [3,3,4,5,6,7] ,target=3 时,输出 为0;当arr = [3,3,3,4,5,6,7],target=3时,输出为 1 #为什么 binarySearch1 binarySearch2中,对于重复的元素,其输出的下标不确定呢? ##因为,首先将检索查找范围中的中间元素,所以重复元素所处的位置不同,处在可能被作为mid的位置的重复元素,越先被检索到,输出其下标 #如果我们 要想输出重复元素的第一个小标或是最后一个下标该如何做呢? #见floor, ceil # 二分查找法, 在有序数组arr中, 查找target # 如果找到target, 返回第一个target相应的索引index # 如果没有找到target, 返回比target小的最大值相应的索引, 如果这个最大值有多个, 返回最大索引 # 如果这个target比整个数组的最小元素值还要小, 则不存在这个target的floor值, 返回 - 1 @classmethod def floor(cls, arr,target): n = len(arr) l = 0 r = n-1 while l<r: mid = l + (r - l) // 2 if target <= arr[mid]: r = mid else:#target > arr[mid] l = mid + 1 assert l==r, "That time l should be equal r" if target == arr[l]: return l else: return l-1 # 二分查找法, 在有序数组arr中, 查找target # 如果找到target, 返回最后一个target相应的索引index # 如果没有找到target, 返回比target大的最小值相应的索引, 如果这个最小值有多个, 返回最小索引 # 如果这个target比整个数组的最大元素值还要大, 则不存在这个target的floor值, 返回 - 1 @classmethod def ceil(cls, arr,target): n = len(arr) l = 0 r = n - 1 while l < r: mid = l + (r - l) // 2 if target < arr[mid]: r = mid else: #target >= arr[mid] l = mid+1 assert l == r, "This time l should be equal r" if target == arr[r]: return r else: return r+1 if r+1<n else -1 if __name__ == '__main__': # arr = [3,3,3,3,4,5,6,7] arr = [40,40,40,40, 41, 41, 41, 41,43, 43, 44] target = 45 # print(binarySearch1(arr, target)) # print(binarySearch2(arr, target)) BS = BinarySearch() re1 = BS.floor(arr, target) re2 = BS.ceil(arr, target) print(re1) print(re2)
116e24f632d6f60cd0c54b2e09021e55c370eee4
lmjamily/Git-Out
/notaesquecida.py
128
3.859375
4
A = int(input("Enter the first grade: "))
M = int(input("Enter the average: "))
print("This is your grade on the second exam: ", 2*M - A)
353d2f751abc0ce5173dac9cd0faad55328cef25
blueleen2/ML_Projects
/Navive_Bayes.py
2,286
3.9375
4
# Assigning features and label variables weather=['Sunny','Sunny','Overcast','Rainy','Rainy','Rainy','Overcast','Sunny','Sunny','Rainy','Sunny','Overcast','Overcast','Rainy'] temp=['Hot','Hot','Hot','Mild','Cool','Cool','Cool','Mild','Cool','Mild','Mild','Mild','Hot','Mild'] play=['No','No','Yes','Yes','Yes','No','Yes','No','Yes','Yes','Yes','Yes','Yes','No'] # Import LabelEncoder from sklearn import preprocessing #creating labelEncoder le = preprocessing.LabelEncoder() # Converting string labels into numbers. weather_encoded=le.fit_transform(weather) print(weather_encoded) # Converting string labels into numbers temp_encoded=le.fit_transform(temp) label=le.fit_transform(play) print("Temp:",temp_encoded) print("Play:",label) features = zip(weather_encoded,temp_encoded) features = list(features) print(features) #Import Gaussian Naive Bayes model from sklearn.naive_bayes import GaussianNB #Create a Gaussian Classifier model = GaussianNB() # Train the model using the training sets model.fit(features,label) #Predict Output predicted= model.predict([[0,2]]) # 0:Overcast, 2:Mild print("Predicted Value:", predicted) # 1: Yes ############################################################################################################### #Import scikit-learn dataset library from sklearn import datasets #Load dataset wine = datasets.load_wine() # print the names of the 13 features print("Features: ", wine.feature_names) # print the label type of wine(class_0, class_1, class_2) print("Labels: ", wine.target_names) # Import train_test_split function from sklearn.model_selection import train_test_split # Split dataset into training set and test set # 70% training and 30% test X_train, X_test, y_train, y_test = train_test_split(wine.data, wine.target, test_size=0.3, random_state=109) #Import Gaussian Naive Bayes model from sklearn.naive_bayes import GaussianNB #Create a Gaussian Classifier gnb = GaussianNB() #Train the model using the training sets gnb.fit(X_train, y_train) #Predict the response for test dataset y_pred = gnb.predict(X_test) #Import scikit-learn metrics module for accuracy calculation from sklearn import metrics # Model Accuracy, how often is the classifier correct? print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
fd4875b14e04df99341144fcb4f31de09ecd68c9
tannyboi/induction
/DataStructures, Modules, Exceptions,Classes/tuple_dict.py
410
3.5
4
d1 = dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
for (x, y) in d1.items():
    print(x, y)

a = (0, 1, 2)
b = 'b'
d2 = dict.fromkeys(a, b)
for (x, y) in d2.items():
    print(x, y)

d2.update({3: 'g'})
print(d2)

print(d2.setdefault(1, 'f'))  # key 1 already exists, so the existing value is returned
print(d2.setdefault(4, 'f'))  # new entry with key 4 made, assigned value returned
36378eb673f26b653fe8a5d43bc012e5e063f7e4
cocoon333/lintcode
/68_solution.py
443
3.5
4
class Solution: """ @param root: A Tree @return: Postorder in ArrayList which contains node values. """ def postorderTraversal(self, root): res = [] self.postorderTraversa(root, res) return res def postorderTraversa(self, node, res): if (node): self.postorderTraversa(node.left, res) self.postorderTraversa(node.right, res) res.append(node.val)
310e9f04dcbd51e6373e3e350470afe9b16d8ecb
A-Dragon-Bot/ReadingNotes
/myDeque.py
2,659
3.765625
4
class myDeque:
    # Constructor; the default queue size is 10
    def __init__(self, iterable=None, maxlen=10):
        if iterable is None:
            self._content = []
            self._current = 0
        else:
            self._content = list(iterable)
            self._current = len(iterable)
        self._size = maxlen
        if self._size < self._current:
            self._size = self._current

    # Destructor
    def __del__(self):
        del self._content

    # Change the queue size
    def setSize(self, size):
        if size < self._current:
            # When shrinking the queue, the trailing elements must be deleted as well
            for i in range(size, self._current)[::-1]:
                del self._content[i]
            self._current = size
        self._size = size

    # Enqueue on the right side
    def appendRight(self, v):
        if self._current < self._size:
            self._content.append(v)
            self._current = self._current + 1
        else:
            print('The queue is full')

    # Enqueue on the left side
    def appendLeft(self, v):
        if self._current < self._size:
            self._content.insert(0, v)
            self._current = self._current + 1
        else:
            print('The queue is full')

    # Dequeue from the left side
    def popLeft(self):
        if self._content:
            self._current = self._current - 1
            return self._content.pop(0)
        else:
            print('The queue is empty')

    # Dequeue from the right side
    def popRight(self):
        if self._content:
            self._current = self._current - 1
            return self._content.pop()
        else:
            print('The queue is empty')

    # Rotate the elements cyclically
    def rotate(self, k):
        if abs(k) > self._current:
            print('k must <= ' + str(self._current))
            return
        self._content = self._content[-k:] + self._content[:-k]

    # Reverse the elements
    def reverse(self):
        self._content = self._content[::-1]

    # Return the number of elements currently in the queue
    def __len__(self):
        return self._current

    # When the object is printed with print(), show the elements currently in the queue
    def __str__(self):
        return 'myDeque(' + str(self._content) + ', maxlen=' + str(self._size) + ')'

    # When the object name is evaluated as an expression, show the current elements as well
    __repr__ = __str__

    # Empty the queue
    def clear(self):
        self._content = []
        self._current = 0

    # Test whether the queue is empty
    def isEmpty(self):
        return not self._content

    # Test whether the queue is full
    def isFull(self):
        return self._current == self._size


if __name__ == '__main__':
    print('Please use me as a module.')
22283e3e1b1dbfa69471fd46141c11766bbcf52f
akashmmcode/python
/singlefuctionareas.py
758
3.859375
4
a = int(input("select an input : 1-circle , 2-triangle , 3-rectangle , 4-square \nselect one : "))


def geometry(shapes):
    if shapes == 1:
        r = int(input("input radius :"))
        area_of_circle = 3.14 * r ** 2
        return area_of_circle
    elif shapes == 2:
        h = int(input(" input height :"))
        b = int(input(" input base :"))
        area_of_triangle = (h * b / 2)
        return area_of_triangle
    elif shapes == 3:
        w = int(input(" input width"))
        l = int(input(" input length"))
        area_of_rectangle = (w * l)
        return area_of_rectangle
    elif shapes == 4:
        s = int(input(" input side"))
        area_of_square = s ** 2
        return area_of_square


print(geometry(a))
01c9cc19ec2062259bc69df24f839cd9d4d59d38
coderZsq/coderZsq.practice.data
/study-notes/py-collection/11_列表/02_in_练习.py
267
3.640625
4
month = int(input('Please enter a month: '))
if month in [3, 4, 5]:
    print('Spring')
elif month in [6, 7, 8]:
    print('Summer')
elif month in [9, 10, 11]:
    print('Autumn')
elif month in [12, 1, 2]:
    print('Winter')
else:
    print('Invalid input')
b068e9c22e7d930f4380f81917c9c42af8ebe9d8
kongyitian/coding_interviews
/LeetCode/Easy/Longest_Common_Prefix.py
800
3.640625
4
''' Write a function to find the longest common prefix string amongst an array of strings. ''' class Solution(object): def longestCommonPrefix(self, strs): """ :type strs: List[str] :rtype: str """ if(len(strs)==0): return "" shortest = len(strs[0]) for s in strs: shortest = len(s) if shortest > len(s) else shortest res = "" flag = True for i in range(shortest): temp = strs[0][i] res = res + temp for s in strs: if s[i] == temp: continue else: flag = False break if(not flag): break return res if flag else res[0:len(res)-1]
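# Added illustration (not part of the original solution): zip() lines up the
# i-th characters of every string, so the common prefix ends at the first
# position where the set of characters has more than one element.
def longest_common_prefix(strs):
    prefix = ""
    for chars in zip(*strs):
        if len(set(chars)) != 1:
            break
        prefix += chars[0]
    return prefix

assert longest_common_prefix(["flower", "flow", "flight"]) == "fl"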
fa72208f7fafec5304bdb4f3296f868279e0eeb0
hariniv22/Election_Analysis
/Python_practice.py
2,763
4.3125
4
print("Hello World") counties = ["Arapahoe", "Denver", "Jefferson"] if counties[1] == "Denver": print(counties[1]) if "El Paso" in counties: print("El Paso is in the list of counties") else: print("El Paso is not in the list of counties") if "Araphoe" and "El Paso" in counties: print("Arapahoe and El Paso are in counties list") else: print("Arapahoe or El Paso are not in the counties list") if "Arapahoe" or "El Paso" in counties: print("Arapahoe or El Paso in the list of counties") else: print("Arapahoe and El Paso not in the list of counties") for county in counties: print(county) numbers = [0,1,2,3,4] for n in numbers: print(n) for num in range(5): print (num) for num in range(len(counties)): print (counties[num]) counties_tuple = ("Alpahoe", "Denver", "Jefferson") print (len(counties_tuple)) for county in counties_tuple: print (county) print(counties_tuple[0]) counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438} print("DICTIONARY....") for county in counties_dict.keys(): print(county) for county in counties_dict: print(counties_dict.get(county)) print("KEY:VALUE PAIR") for key,value in counties_dict.items(): print(key + " county has " + str(value) + " voters") print(f"{key} county has {value:,} voters.........") print("----------------------------------------------------") voting_data = [{"county":"Arapahoe", "registered_voters": 422829}, {"county":"Denver", "registered_voters": 463353}, {"county":"Jefferson", "registered_voters": 432438}] for data in voting_data: print(data) for value in data.values(): print(value) for key, value in data.items(): print(f"{key} county has {value} registered voters............") for county_dict in voting_data: county = county_dict["county"] registered_voters = county_dict["registered_voters"] print(f"county {county} has {registered_voters:,} registered voters") my_votes = int(input("How many votes did you get in the election?")) total_votes = int(input("How many votes are there in total?")) percentage_votes = (my_votes/total_votes)*100 #print("I received " + str(percentage_votes) + "% of total votes") print(f"I received {percentage_votes} %of total votes") print("--------------------------------") candidate_votes = int(input("How many votes did the candidate get in the election? ")) total_votes = int(input("What is the total number of votes in the election? ")) candidate_messages = { f"You received {candidate_votes:,} votes in the election. " f"There are total of {total_votes:,} votes in the election. " f"You received {candidate_votes/total_votes * 100:.2f} percentage of votes" } print(candidate_messages)
1cc723dc2e4c4bd1cf29dd32f635af0dccdc123b
SpenserHardin/vending-machine
/src/vending_machine.py
884
3.5
4
from src.model.penny import Penny class VendingMachine(object): DISPLAY = 'Insert Coins' PAYMENT = 0.0 def __init__(self, item, validator, price=None): self.item = item self.price = price self.validator = validator def insert_coins(self, coin): identified_coin = self.validator.calculate_coin(coin) if not isinstance(identified_coin, Penny): self.PAYMENT += identified_coin.value if self._payment_is_sufficient(): self._update_display() return self._calculate_change() else: return identified_coin.value def _calculate_change(self): return round(self.PAYMENT - self.item.price, 2) def _payment_is_sufficient(self): return self.PAYMENT >= self.item.price def _update_display(self): self.DISPLAY = 'Thank you'
12782ddc994c8914541733e1c8f9dd39e38777fb
bagaspandu154/tugaaas-big-data-menggunakan-python
/hello.py
392
3.5
4
print ("======================================") print (" Biodata Sederhana Dengan Python ") print ("======================================") nama = input("Masukkan nama: Bagas Pandu Annursyah") kelas = input("Masukkan kelas: Teknik Informatika A") kampus = input("Inputkan kampus: Universitas Narotama") print ("nama, Saya %s, kelas saya %s kampus saya %s kampus" % (nama, kelas, kampus))
f9795fd1f5ef75f2a05785acfe1dac5167b8a339
Filin3/python
/2for6_2.py
203
3.640625
4
s = "" k = 0 for i in range(3): for j in range(4): x = input("Введите элемент {} строкой и {} столбцом: ".format(i+1, j+1)) s += x s += "\n" print(s)
fa800bb30c9b08e419f88db3e9ec41a0529f8ad2
kiransy015/Python-Frameworks
/venvScripts1/venvScripts/Pgm52.py
462
3.90625
4
# A set is an unordered data structure and it does not allow duplicate elements
b = {20, 40, 10, 50, 90, 10, 60, 10}
print(b)

# Adding elements into a set
b.add(200)
print(b)

# Merging elements of 2 different sets
b = {20, 40, 10, 50, 90}
c = {10, 30, 40}
b.update(c)
print("After merge :", b)
print(c)

# Removing elements from a set
b.remove(40)
print("After remove :", b)

# Updating elements
b.remove(30)
b.add(80)
print("After update :", b)

# A set does not support indexing, so b[0] raises a TypeError
try:
    print(b[0])
except TypeError as e:
    print("Indexing a set fails:", e)
c9df7600784325c1a468c405d473d3fe4970b94b
jghafa/archive
/CouncilSQL.py
2,597
3.6875
4
#!/usr/bin/python3 """ Create a SQLite database of items uploaded to Internet Archive This replaces a three pickle files and will add the abilty for update programs to run together """ print('Council SQL insertion') import pickle import sqlite3 from internetarchive import * #Define the database. The database is completely rebuilt every program run. SQLconn = sqlite3.connect('Council.sqlite') SQL = SQLconn.cursor() SQL.execute("""drop table if exists Video;""") SQL.execute("""create table Video (item text PRIMARY KEY, locked BOOL );""") SQL.execute("""drop table if exists Ordinance;""") SQL.execute("""create table Ordinance (item text PRIMARY KEY, locked BOOL );""") SQL.execute("""drop table if exists Proceeding;""") SQL.execute("""create table Proceeding (item text PRIMARY KEY, locked BOOL );""") print('Reading Videos') picklefile = 'CouncilVideo.pickle' try: CouncilVideo = pickle.load(open(picklefile, "rb")) except (OSError, IOError) as e: print ('Reading council video collection') CouncilVideo = [item.metadata['identifier'] for item in search_items('collection:(councilmeetings)').iter_as_items()] #pickle.dump(CouncilVideo, open(picklefile, "wb"), protocol=pickle.HIGHEST_PROTOCOL) print('Reading Ordinances') picklefile = 'CouncilOrdinance.pickle' try: CouncilOrdinance = pickle.load(open(picklefile, "rb")) except (OSError, IOError) as e: print ('Reading citycouncil ordinance collection') CouncilOrdinance = [item.metadata['identifier'] for item in search_items('collection:(citycouncilordinances)').iter_as_items()] #pickle.dump(CouncilOrdinance, open(picklefile, "wb"), protocol=pickle.HIGHEST_PROTOCOL) print('Reading Proceedings') picklefile = 'CouncilProceedings.pickle' try: CouncilProceedings = pickle.load(open(picklefile, "rb")) except (OSError, IOError) as e: print ('Reading citycouncil proceeding collection') CouncilProceedings = [item.metadata['identifier'] for item in search_items('collection:(citycouncilproceedings)').iter_as_items()] #pickle.dump(CouncilProceedings, open(picklefile, "wb"), protocol=pickle.HIGHEST_PROTOCOL) #Insert the items in the database print('Loading SQL') tups = [(element,0) for element in CouncilVideo] SQL.executemany('INSERT OR IGNORE into Video values (?,?)', tups ) tups = [(element,0) for element in CouncilOrdinance] SQL.executemany('INSERT OR IGNORE into Ordinance values (?,?)', tups ) tups = [(element,0) for element in CouncilProceedings] SQL.executemany('INSERT OR IGNORE into Proceeding values (?,?)', tups ) #Save the database SQLconn.commit() print('Done')
d3a1828226389c29f5d539885a2f2316441a1367
gretasimba/movieTrailerWebsite
/entertainment_center.py
4,724
3.75
4
import media # class Movie initates in media.py import fresh_tomatoes # class generates fresh_tomatoes.html """ creates 6 instances of class Movies to create "my favore movies" website. Each instance pass 4 attributes to class Movie: movie_title as string, movie_storyline as string, poster_url as string, youtube_trailer as string. Then creates list with all 6 instances included. Last it calls fresh_tomatoes.open_movies_page, which creates HTML page that displays movies with title, and poster images. Movie trailer from YouTube shows whem poster omage is clicked. Method takes movies (list) as argument""" # initialises movie "It" it = media.Movie("It", "A group of young kids are faced with their biggest fears", "http://cdn6.ihorror.com/app/uploads/17632177_1388557637831190_3930730815628374813_o-2.jpg", "https://www.youtube.com/watch?v=FnCdOQsX5kc") # initialises movie "Amadeus" amadeus = media.Movie("Amadeus", "Antonio Salieri believes that Wolfgang Amadeus" "Mozart's music is divine and miraculous", "http://t1.gstatic.com/images?q=tbn:ANd9GcRszKttX-DHHFm8ZRtTqUCrUYGS34C5clU2Shgw2nlEEle7heDN", "https://www.youtube.com/watch?v=yIzhAKtEzY0") # initialises movie "The Big Lebowski" lebowski = media.Movie("The Big Lebowski", "Jeff Bridges plays Jeff Lebowski who insists on being" "called ""the Dude,"" a laid-back, easygoing burnout" "who happens to have the same name as a millionaire" "whose wife owes a lot of dangerous people" "a whole bunch of money -- resulting in " "the Dude having his rug soiled,sending him" "spiraling into the Los Angeles underworld.", "http://t3.gstatic.com/images?q=tbn:ANd9GcRBYp315X-0pNvI-Dvqj8FR0AGdF39VCprXpurd0cQel__e17CP", "https://www.youtube.com/watch?v=cd-go0oBF4Y") # initialises movie "The Icredibles" incredibles = media.Movie("The Icredibles", "In this lauded Pixar animated film," " married superheroes Mr. Incredible" "(Craig T. Nelson) and Elastigirl (Holly Hunter)" "are forced to assume mundane lives as Bob and" "Helen Parr after all super-powered activities" "have been banned by the government." "While Mr. Incredible loves his " "wife and kids, he longs to return to" " a life of adventure, and he gets a" "chance when summoned to an island " "to battle an out-of-control robot." "Soon, Mr. Incredible is in trouble, and it's" "up to his family to save him", "https://upload.wikimedia.org/wikipedia/en/e/ec/The_Incredibles.jpg", "https://www.youtube.com/watch?v=eZbzbC9285I") # initialises movie "Apocalypse Now" apocalypse = media.Movie("Apocalypse Now", "In Vietnam in 1970, Captain Willard" " (Martin Sheen) takes a perilous and" " increasingly hallucinatory journey " "upriver to find and terminate Colonel Kurtz" "(Marlon Brando), a once-promising officer" " who has reportedly gone completely mad." "In the company of a Navy patrol boat filled" "with street-smart kids", "https://upload.wikimedia.org/wikipedia/en/c/c2/Apocalypse_Now_poster.jpg", "https://www.youtube.com/watch?v=IkrhkUeDCdQ") # initialises movie "Fight Club" fight_club = media.Movie("Fight Club", "A depressed man (Edward Norton) suffering" "from insomnia meets a strange" "soap salesman named Tyler Durden" "(Brad Pitt) and soon finds himself" "living in his squalid house" "after his perfect apartment is destroyed...", "https://upload.wikimedia.org/wikipedia/en/f/fc/Fight_Club_poster.jpg", "https://www.youtube.com/watch?v=J8FRBYOFu2w") # creates list movies movies = [it, amadeus, lebowski, incredibles, apocalypse, fight_club] # calls method from fresh_tomatoes fresh_tomatoes.open_movies_page(movies)
53605d3cebf52412744af54ce3cfaa9bec8da4de
passlonis/SUMMER_BOOTCAMP_2018_Python
/lesson4/task3/list_items.py
684
4.0625
4
print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::") animals = ['elephant', 'lion', 'tiger', "giraffe", "monkey", 'dog'] print(animals) animals[1:3] = ['cat'] print(animals) animals[1:3] = [] print(animals) animals = [] print(animals) print("----------------------------------------------------") lista = ["fabio", "roger", "hugo", " martin", "renata", "fabiola", "claudia", "maylee"] print(lista) lista[2:4] = ["wilson"] #reemplaza martin y hugo por wilson print(lista) lista[0:2] = [] #borra fabio,roger de la lista print(lista) lista = [] #borra los elementos de lista print(lista) print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
69f544673ba8d292af4fac97658bbcb8bdbb60cb
Quving/telegram-alfred
/alfred/material/user.py
1,346
3.6875
4
#!/usr/bin/env python3 class User: def __init__(self, user_dict=None): self.user_dict = user_dict self.id = self.__get_user_dict_attribute(key="id", default="") self.username = self.__get_user_dict_attribute(key="username", default="") self.first_name = self.__get_user_dict_attribute(key="first_name", default="") self.preferences = self.__get_user_dict_attribute(key="preferences", default={"region": "unknown", "rubrik": "unknown", "lokales": "unknown"}) def __get_user_dict_attribute(self, key, default=None): """ Return a value from the user_dict. If not exist, return default. :param key: :param default: :return: """ if not key in self.user_dict: return default else: return self.user_dict[key] def to_dict(self): """ Returns a dictionary object that represent the User object. :return: """ data = {"id": self.id, "username": self.username, "first_name": self.first_name, "preferences": self.preferences} return data
97a60f0a45eaadcf7a2b5328f9be10b83d4abab3
DevKheder/PythonCrashCourse
/introducing-lists/3-5 changing-guest-list.py
573
4.28125
4
# creating a list of guests guests = ["modar", "sara", "samer", "ahmed", "mouhammed"] # storing the name of the guest who can't make it to dinner popped_guest = guests.pop(0) # print a message and inform the name who can't make it print("I am afraid " + popped_guest.title() + " can't make it to the dinner") # replacing the popped guest with another person new_guest = guests.insert(0, 'tony') print(guests) # using the for loops to print a message for each person for guest in guests: print("Howdyy, " + guest.title() + " I would like to invite you for a dinner")
6c026d6d3fc9088c3e567780d5bf9f0018948688
yinlinzh/dive-into-python
/skipper.py
733
3.71875
4
class SkipIterator: def __init__(self, wrapper): self.wrapper = wrapper self.offset = 0 def __next__(self): if self.offset >= len(self.wrapper): raise StopIteration() else: res = self.wrapper[self.offset] self.offset += 2 return res class SkipObject: def __init__(self, wrapper): self.wrapper = wrapper def __iter__(self): return SkipIterator(self.wrapper) def main(): alpha = 'abcdef' skipper = SkipObject(alpha) i = iter(skipper) print(next(i), next(i), next(i), sep=' - ') for x in skipper: for y in skipper: print(x + y, end=' | ') if __name__ == '__main__': main()
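# Added aside (not part of the original example): the same skip-every-other
# behaviour can be written with a generator, which provides the iterator
# protocol automatically and replaces the hand-written SkipIterator.
class SkipObjectGen:
    def __init__(self, wrapper):
        self.wrapper = wrapper

    def __iter__(self):
        # Yield the items at even offsets: 0, 2, 4, ...
        for offset in range(0, len(self.wrapper), 2):
            yield self.wrapper[offset]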
d631d18f19831293d875556d0e3bc078eca2aa3e
jasongorman/codecraft_nov_2020
/source/propertybased_py/test/square_root_test.py
413
3.53125
4
import unittest from maths import sqrt from parameterized import parameterized class SquareRootTest(unittest.TestCase): @parameterized.expand( [(0,), (1,), (4,), (9,), (16,), (0.25,), (25,)] ) def test_square_root(self, input): self.assertEqual(input, sqrt(input) * sqrt(input)) if __name__ == '__main__': unittest.main()
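# Hedged sketch (not in the original test file): the same "square of the square
# root" property can be checked over generated inputs with the hypothesis
# library, assuming the local maths.sqrt accepts arbitrary non-negative floats.
# An approximate comparison replaces assertEqual because of floating-point
# rounding. Shown as a companion test class that a test runner importing the
# module (e.g. pytest) would collect alongside SquareRootTest.
from hypothesis import given, strategies as st

class SquareRootPropertyTest(unittest.TestCase):
    @given(st.floats(min_value=0, max_value=1e6, allow_nan=False, allow_infinity=False))
    def test_square_root_property(self, x):
        self.assertAlmostEqual(x, sqrt(x) * sqrt(x), places=6)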
176e51029490030a1850a61d55626c3f41bf90a7
dcpatti/Basic-Python
/py-bank/main.py
2,146
3.90625
4
#Load the CSV reader and the OS path interpreter import os import csv #Open and read the file. The first row is a header with open('budget_data.csv', 'r') as csvfile: next(csvfile) readCSV = csv.reader(csvfile, delimiter=',') num_lines = 0 total = 0 PLAverage = 0 PLHighest = 0 PLLowest = 0 HighDate = '' LowDate = '' for column in readCSV: #The total number of months included in the dataset num_lines += 1 #The net total amount of "Profit/Losses" over the entire period total += int(column[1]) #Average of the changes in the profit/losses over the entire period #Has to be done outside the iteration or else it recalculates on every row and gets too big #Check the value to see if it is the greatest increase in profits (date and amount) over the entire period #If not, check if it is the greatest decrease in losses (date and amount) over the entire period #If either condition is met, put the value into the variable along with the value of the month if int(column[1]) > PLHighest: PLHighest = int(column[1]) HighDate = str(column[0]) elif int(column[1]) < PLLowest: PLLowest = int(column[1]) LowDate = str(column[0]) #Do all the writing and printing #First open a file to hold the output f = open("budget_results.txt", "w") f.write("Budget Info\n") #clean up the formatting of the Average PLAverage = "{:.2f}".format((total/num_lines)) print("Number of Months: " + str(num_lines)) f.write("Number of Months: " + str(num_lines)+" \n") print ("Total Profit or Loss: " + str(total)) f.write ("Total Profit or Loss: " + str(total)+" \n") #the trailing " \n" are needed to get a line break in the output file print ("Average Profit/Loss " + str(PLAverage)) f.write ("Average Profit/Loss " + str(PLAverage)+" \n") print ("Highest Increase " + HighDate + " " + str(PLHighest)) f.write("Highest Increase " + HighDate + " " + str(PLHighest)+" \n") print ("Highest Decrease " + LowDate + " " + str(PLLowest) ) f.write ("Highest Decrease " + LowDate + " " + str(PLLowest)+" \n")
cf902a3752b4faf8cdb37ca3c81b6b10ba38af66
ethanfuller/python
/2014 tax bracket.py
1,935
3.65625
4
#!/usr/bin/python2.2 -tt # Copyright 2014 Point_Four. All Rights Reserved. import os def main(): os.system('clear') print '' # Chart variables personal_exemption = 3950 std_deduction_single = 6200 std_deduction_marriedj = 12400 std_deduction_marrieds = 6200 # Start/Filing status print 'Filing status' print 'a.) Single' print 'b.) Married filing jointly & surviving spouses' print 'c.) Married filing seperate returns' print '* Select by letter' status_select = raw_input('>> ') # Single (Background selection) if status_select == 'a': print 'Filing status: Single' salary = input('Anual salary: ') dependents = input('Number of dependents (including you): ') personal_exemption_dependents = personal_exemption * dependents algorithm = salary - personal_exemption_dependents - std_deduction_single print 'Bracket salary: %d' % algorithm # Married filing jointly & surviving spouses elif status_select == 'b': print 'Filing status: Married filing jointly & surviving spouses' salary = input('Anual salary: ') dependents = input('Number of dependents (including you): ') personal_exemption_dependents = personal_exemption * dependents algorithm = salary - personal_exemption_dependents - std_deduction_marriedj print 'Bracket salary: %d' % algorithm # Married filing seperate returns elif status_select == 'c': print 'Filing status: Married filing seperate returns' salary = input('Anual salary: ') dependents = input('Number of dependents (including you): ') personal_exemption_dependents = personal_exemption * dependents part_algorithm = salary - personal_exemption_dependents - std_deduction_marrieds algorithm = part_algorithm / 2 print 'Bracket salary: %d' % algorithm if __name__ == '__main__': main()
69c513a362f3a7fbe286b6a456b7f5348b872b6c
YongHoonJJo/Python
/Lang_J2P/if.py
777
4.1875
4
### if ... else ... ### money = 1 if money: print("by taxi") else: print('by walk') # 'by taxi' money = 2000 if money >=3000: print("by taxi") else: print('by walk') # 'by walk' ### and, or, not ### money = 2000 card = 1 if money >=3000 or card: print("by taxi") else: print('by walk') # 'by taxi' ### x in s, x not in s ### # s includes list, tuple ans string. if 1 in [1, 2, 3]: print('1 in list') if 'a' not in ('b', 'c', 'd'): print("'a' not in tuple") if 'j' not in 'python': print("'j' not in 'python'") pocket = ['paper', 'cellphone', 'money'] if 'money' in pocket: pass else: print('get card') ### elif ### pocket = ['paper', 'cellphone'] card = 1 if 'money' in pocket: print('by bus') elif card: print('by taxi') else: print('by walk')
c3db86107413657fcbd4775839f5eaef9ec8af37
Single430/pyecharts
/pyecharts/charts/parallel.py
2,425
3.546875
4
#!/usr/bin/env python # coding=utf-8 from pyecharts.base import Base class Parallel(Base): """ <<< Parallel chart >>> Parallel Coordinates is a common way of visualizing high-dimensional geometry and analyzing multivariate data. """ def __init__(self, title="", subtitle="", **kwargs): super(Parallel, self).__init__(title, subtitle, **kwargs) def add(self, *args, **kwargs): self.__add(*args, **kwargs) def config(self, schema=None, c_schema=None): """ :param schema: Dimension index of coordinate axis. a axis name list, like ['apple', 'orange', 'watermelon'] :param c_schema: User customize coordinate axis for parallel coordinate. dim: Dimension index of coordinate axis. name: Name of axis. type: Type of axis value: Numerical axis, suitable for continuous data. category: Category axis, suitable for discrete category data. Data should only be set via data for this type. min: The minimun value of axis. max: The maximum value of axis. inverse: Whether axis is inversed. nameLocation: Location of axis name. it can be 'start', 'middle', 'end'. :return: """ if schema: _schema = [{"dim": i, "name": v} for i, v in enumerate(schema)] self._option.update(parallelAxis=_schema) if c_schema: self._option.update(parallelAxis=c_schema) def __add(self, name, data, **kwargs): """ :param name: Series name used for displaying in tooltip and filtering with legend, or updating data and configuration with setOption. :param data: data array of series, it is represented by a two-dimension array -> [[],[]] :param kwargs: :return: """ self._option.update( parallel={ "left": "5%", "right": "13%", "bottom": "10%", "top": "20%" }) self._option.get('legend')[0].get('data').append(name) self._option.get('series').append({ "type": "parallel", "coordinateSystem": "parallel", "name": name, "data": data, }) self._config_components(**kwargs)
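# Hedged usage sketch (not part of this module): based only on the methods
# defined above, a Parallel chart is given an axis schema via config() and fed
# two-dimensional series data via add(). The names and values are illustrative.
if __name__ == '__main__':
    chart = Parallel("Parallel demo")
    chart.config(schema=["dim0", "dim1", "dim2"])
    chart.add("series A", [[1, 2, 3], [4, 5, 6]])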
e5f7614216ac5d429678010f7aa34fa0c5f908be
Israelmath/jobs
/scicrop-api-test-python.py
7,842
3.5
4
# Python 3.6.9

import json
import datetime
from typing import Dict

import requests


class Degrees:
    """
    Class responsible for organizing course and degree information.
    Note: some of the method implementations served as tests.
    """
    def __init__(self, instituicao: str, nome_curso: str, data_inicio: str, data_termino: str):
        self.institution_name = instituicao
        self.degree_name = nome_curso
        self.begin_date = data_to_epoch(data_inicio)
        self.end_date = data_to_epoch(data_termino)

    def to_dict(self) -> Dict:
        degrees_map = {
            'institution_name': self.institution_name,
            'degree_name': self.degree_name,
            'begin_date': self.begin_date,
            'end_date': self.end_date
        }
        return degrees_map

    def to_json(self):
        return json.dumps(self.to_dict())

    def __str__(self):
        return f'Instituição: {self.institution_name}\n' \
               f'Nome do curso: {self.degree_name}\n' \
               f'Data de início: {self.begin_date}\n' \
               f'Data de término: {self.end_date}\n'


class Candidato:
    """
    Class responsible for organizing a candidate's information.
    Note: some of the method implementations served as tests.
    """
    def __init__(self, nome, email, celular, idade, endereco, inicio_teste, oportunidade,
                 experiencia, graduacao, habilidades, banco_de_dados, hobbies, motivo, url_git):
        self.__full_name = nome
        self.__email = email
        self.__mobile_fone = celular
        self.__age = idade
        self.__home_address = endereco
        self.__start_date = inicio_teste
        self.__opportunity_tag = oportunidade
        self.__past_jobs_experience = experiencia
        self.__degrees = graduacao
        self.__programming_skills = habilidades
        self.__database_skills = banco_de_dados
        self.__hobbies = hobbies
        self.__why = motivo
        self.__git_url_repositories = url_git

    def to_dict(self) -> Dict:
        candidato_map = {
            "full_name": self.__full_name,
            "email": self.__email,
            "mobile_phone": self.__mobile_fone,
            "age": self.__age,
            "home_address": self.__home_address,
            "start_date": self.__start_date,
            "opportunity_tag": self.__opportunity_tag,
            "past_jobs_experience": self.__past_jobs_experience,
            "degrees": self.__degrees,
            "programming_skills": self.__programming_skills,
            "database_skills": self.__database_skills,
            "hobbies": self.__hobbies,
            "why": self.__why,
            "git_url_repositories": self.__git_url_repositories
        }
        return candidato_map

    def __str__(self):
        infos = f'''
            "full_name": {self.__full_name},
            "email": {self.__email},
            "mobile_phone": {self.__mobile_fone},
            "age": {self.__age},
            "home_address": {self.__home_address},
            "start_date": {self.__start_date},
            "opportunity_tag": {self.__opportunity_tag},
            "past_jobs_experience": {self.__past_jobs_experience},
            "degrees": {self.__degrees},
            "programming_skills": {self.__programming_skills},
            "database_skills": {self.__database_skills},
            "hobbies": {self.__hobbies},
            "why": {self.__why},
            "git_url_repositories": {self.__git_url_repositories}
        '''
        return infos


class HttpClient:
    """
    Class responsible for the HTTP requests.
    Note: some of the method implementations served as tests.
    """
    def __init__(self):
        self.__address_get = ''
        self.__address_post = ''

    @property
    def address_get(self):
        return self.__address_get

    @property
    def address_post(self):
        return self.__address_post

    @address_get.setter
    def address_get(self, new_address):
        self.__address_get = new_address

    @address_post.setter
    def address_post(self, new_address):
        self.__address_post = new_address

    def post(self, dados: str = '', cabecalho: dict = ''):
        """
        Method responsible for sending the HTTP request using the POST method.
        :param dados: optional payload, as a Dict or JSON, sent to the API
        :param cabecalho: optional Dict or JSON used to customize the headers
        :return: null
        """
        req = requests.post(self.__address_post, data=dados, headers=cabecalho)
        print(f'POST -> req.status_code: {req.status_code}')
        print(f'POST -> req.body: {req.text}')
        print(f'POST -> req.headers: {req.headers}')

    def get(self):
        req = requests.get(self.__address_get)
        print(f'GET -> req.status_code: {req.status_code}')
        print(f'GET -> req: {req}')
        print(f'GET -> req: {req.text}')

    def __str__(self):
        return f'''GET address -> {self.__address_get}\nPOST address -> {self.__address_post}'''


def data_to_epoch(date: str):
    """
    Function responsible for converting a date string in the ddmmyyyy format
    into the Unix epoch standard.
    :param date: date string in the ddmmyyyy format
    :return: Unix epoch timestamp
    """
    return int(datetime.datetime(year=int(date[4:8]), month=int(date[2:4]), day=int(date[:2])).timestamp())


def main():
    """
    Function responsible for assembling the degrees, the candidate's data and
    the HTTP address for the POST method, and for sending the data.
    :return: null
    """
    url_job = 'https://engine.scicrop.com/scicrop-engine-web/api/v1/jobs/post_resume'

    lic_matematica = Degrees('Universidade de Sao Paulo - USP', 'Licenciatura em Matematica', '01022020', '01012023')
    formacao_python = Degrees('Alura', 'Formacao Python', '05022020', '17022020')
    formacao_sql = Degrees('Alura', 'Formacao SQL com MySQL Server da Oracle', '20042020', '04072020')
    formacao_ds = Degrees('Alura', 'Formacao Data Science', '05022020', '12042020')
    degs = [lic_matematica.to_dict(), formacao_python.to_dict(), formacao_sql.to_dict(), formacao_ds.to_dict()]

    nome = 'Israel Alves Lucena Gomes'
    email = '[email protected]'
    celular = '+55 (11) 9.9800-7721'
    idade = 29
    endereco = 'Rua Doutor Angelo Vita, 180 - Sao Paulo'
    inicio_teste = data_to_epoch('07072020')
    oportunidade = 'Desenvolvedor Python'
    experiencia = 'Trabalhei quase dez anos na docencia, mas este ano resolvi mudar ' \
                  'integralmente minha carreira me voltando completamente para a ' \
                  'area do desenvolvimento.'
    certificados = degs
    habilidades = ['python', 'dart', 'C']
    banco_de_dados = ['mysql', 'mongodb', 'redis']
    hobbies = ['Jogar Magic: The Gathering', 'Assistir a filmes']
    motivo = 'Depois de tanto tempo na docencia, passando também pelo empreendedorismo ' \
             'e o gerenciamento de um site, percebi quanto o desenvolvimento de soluções ' \
             'digitais sao, cada vez mais, necessárias ao paradigma de sociedade neste e nos ' \
             'proximos seculos. Por isso resolvi fazer essa transformação na minha carreira, ' \
             'unindo a paixao pela matematica e o gosto por desenvolvimento.'
    url_git = 'https://github.com/Israelmath'

    content_type: Dict[str, str] = {
        'content-type': 'application/json'
    }

    israel = Candidato(
        nome, email, celular, idade, endereco, inicio_teste, oportunidade, experiencia,
        certificados, habilidades, banco_de_dados, hobbies, motivo, url_git
    )

    http_client_jobs = HttpClient()
    http_client_jobs.address_post = url_job
    http_client_jobs.post(dados=json.dumps(israel.to_dict()), cabecalho=content_type)


if __name__ == '__main__':
    main()
b00efe87b508e6da7e5968bdaaab2f88e2e77b4d
wanjinzhong/python_study
/venv/My_Script/SSM.py
8,034
3.6875
4
class Student:
    # Python does not support overloaded constructors, so a single __init__
    # with defaults covers both the empty form and the full form.
    def __init__(self, id=0, name='', score=0):
        self.id = id
        self.name = name
        self.score = score
        self.grade = ''

    def calc_grade(self):
        if self.score < 0:
            return "CHEAT"
        elif self.score < 60:
            return "D"
        elif self.score < 70:
            return "C"
        elif self.score < 80:
            return "B"
        elif self.score < 90:
            return "A"
        elif self.score <= 100:
            return "A+"
        else:
            return "Wrong"

    def get_table(self):
        return str(self.id) + "\t" + self.name + "\t" + str(self.score) + "\t" + self.calc_grade()


students = [Student(1, 'Neil', 100), Student(2, 'Bill', 100), Student(3, 'Leon', 100)]
grades = ((0, "FAIL"), (50, "D"), (60, "C"), (70, "B"), (80, "A"), (-1, "CHEAT!"))

hint = "=======Student Score Management=======\n"
hint += " 1. Add a new record\n"
hint += " 2. Delete a record\n"
hint += " 3. Query records\n"
hint += " 4. Update record\n"
hint += " 5. Exit\n"
hint += "======================================\n"


def add_record():
    global students
    while True:
        try:
            print("Student Id:")
            id = int(input(">>"))
        except ValueError:
            print("Id must be number")
        else:
            student = query_by_id(id)
            if student:  # query_by_id returns a (possibly empty) list
                print("This student already exists, overwrite it? (y/n)")
                cover = input(">>")
                if cover.upper() == 'Y':
                    student = student[0]
                    break
                else:
                    print("Cancel add a new record")
                    return
            else:
                student = Student()
                students.append(student)
                break
    print("Student Name:")
    name = input(">>")
    while True:
        try:
            print("Student Score:")
            score = float(input(">>"))
        except ValueError:
            print("Score must be number")
        else:
            break
    student.id = id
    student.name = name
    student.score = score
    print("Add record success")
    print_table(student)


def print_tables(stus):
    print(get_table_header())
    if stus is None or len(stus) == 0:
        print(" No Data")
    else:
        for s in stus:
            print(s.get_table())
    print(get_table_foot(), end="\n\n")


def print_table(student):
    print(get_table_header())
    if student is None:
        print(" No Data")
    else:
        print(student.get_table())
    print(get_table_foot(), end="\n\n")


def get_table_header():
    str = "Id\tName\tScore\tGrade\n"
    str += "========================="
    return str


def get_table_foot():
    str = "========================="
    return str


def delete_record():
    print("1. Delete by id")
    print("2. Delete by name")
    while True:
        try:
            choose = int(input(">>"))
        except ValueError:
            print("Error: Only accept number")
        else:
            if choose != 1 and choose != 2:
                print("Error: Range: 1-2")
            else:
                break
    if choose == 1:
        print("Id:")
        while True:
            try:
                id = int(input(">>"))
            except ValueError:
                print("Error: Only accept number")
            else:
                break
        stus = query_by_id(id)
        if not stus:
            print("No record found")
        else:
            print("Below record found, sure to delete? (y/n)")
            print_tables(stus)
            cover = input(">>")
            if cover.upper() == 'Y':
                delete_records(stus)
                print("Delete records success")
            else:
                print("Cancel delete a record")
    elif choose == 2:
        print("Name:")
        name = input(">>")
        stus = query_by_name(name)
        if not stus:
            print("No record found")
        else:
            print("Below record found, sure to delete? (y/n)")
            print_tables(stus)
            cover = input(">>")
            if cover.upper() == 'Y':
                delete_records(stus)
                print("Delete records success")
            else:
                print("Cancel delete a record")


def delete_records(toDelete):
    for j in toDelete:
        students.remove(j)


def query_record():
    print("1. Query by one")
    print("2. Query by all")
    while True:
        try:
            choose = int(input(">>"))
        except ValueError:
            print("Error: Only accept number")
        else:
            if choose != 1 and choose != 2:
                print("Error: Range: 1-2")
            else:
                break
    if choose == 1:
        query()
    elif choose == 2:
        query_all()


def update_record():
    print("1. Update by id")
    print("2. Update by name")
    while True:
        try:
            choose = int(input(">>"))
        except ValueError:
            print("Error: Only accept number")
        else:
            if choose != 1 and choose != 2:
                print("Error: Range: 1-2")
            else:
                break
    if choose == 1:
        while True:
            print("Id: ")
            try:
                id = int(input(">>"))
            except ValueError:
                print("Error: Only accept number")
            else:
                break
        student = query_by_id(id)
    elif choose == 2:
        print("Name: ")
        name = input(">>")
        student = query_by_name(name)
    if student is None or len(student) == 0:
        print("No record found")
        return
    elif len(student) > 1:
        print("Too many records found, please make your condition more accurate")
        print_tables(student)
        return
    else:
        stu = student[0]
        while True:
            try:
                print("Update Score:")
                score = float(input(">>"))
            except ValueError:
                print("Score must be number")
            else:
                stu.score = score
                print("Update score success")
                print_table(stu)
                break


def query():
    print("1. Query by id")
    print("2. Query by name")
    while True:
        try:
            choose = int(input(">>"))
        except ValueError:
            print("Error: Only accept number")
        else:
            if choose != 1 and choose != 2:
                print("Error: Range: 1-2")
            else:
                break
    if choose == 1:
        while True:
            print("Id: ")
            try:
                id = int(input(">>"))
            except ValueError:
                print("Error: Only accept number")
            else:
                break
        student = query_by_id(id)
        print_tables(student)
    elif choose == 2:
        print("Name: ")
        name = input(">>")
        student = query_by_name(name)
        print_tables(student)


def query_by_id(id):
    stus = []
    for s in students:
        if s.id == id:
            stus.append(s)
    return stus


def query_by_name(name):
    stus = []
    for s in students:
        if name.upper() in s.name.upper():
            stus.append(s)
    return stus


def query_all():
    global students
    print_tables(students)


while True:
    print(hint)
    while True:
        try:
            print("Please choose one: ")
            choose = int(input(">>"))
        except ValueError:
            print("Error: Only accept number")
        else:
            if choose < 1 or choose > 5:
                print("Error: Range: 1-5")
            else:
                break
    if choose == 1:
        add_record()
    elif choose == 2:
        delete_record()
    elif choose == 3:
        query_record()
    elif choose == 4:
        update_record()
    elif choose == 5:
        print("Bye")
        break
33f988ac5a9515a7e63a2663e253a0dcd47b70c4
minskeyguo/mylib
/python-edu/17-pygame-basic/02-geometry.py
1,223
3.796875
4
#!/usr/bin/python3

import sys, pygame
from pygame.locals import *

black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)

pygame.init()
pygame.display.set_caption("drawing")          # set the title of the window
surface = pygame.display.set_mode((400, 300))  # returns pygame.Surface
surface.fill(white)                            # <=== white the surface

# draw polygon
pygame.draw.polygon(surface, green, ((123, 0), (234, 132), (269, 211), (77, 66)), 0)

# draw line
pygame.draw.line(surface, red, (70, 200), (80, 100), 20)

pygame.draw.circle(surface, black, (30, 50), 15, 10)
pygame.draw.ellipse(surface, black, (30, 50, 100, 60), 10)
pygame.draw.rect(surface, blue, (30, 50, 100, 60))

# event loop: handle events, update game state (variables), render graphics
while True:
    for event in pygame.event.get():  # QUIT etc., defined in pygame.locals
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()           # render surface onto the screen

# Question:
#   what's the difference if we move the drawing calls (pygame.draw.*) into the
#   loop before pygame.display.update()?

"""
Surface: 2D rectangle
Color  : (r, g, b, a)
Rect   : (x0, y0, width, height)
"""
f9d1e890f8ee8b178841fc8533b580baa62209db
dustinboswell/daily-coding-problem
/prob45.py
397
3.984375
4
'''
Using a function rand5() that returns an integer from 1 to 5 (inclusive) with
uniform probability, implement a function rand7() that returns an integer from
1 to 7 (inclusive).
'''
import random

def rand5():
    return random.randint(1, 5)

def rand7():
    while True:
        rand25 = 5 * (rand5() - 1) + (rand5() - 1)  # 0 to 24
        if rand25 < 21:
            return 1 + (rand25 % 7)
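# A quick sanity check (not part of the original solution): sample rand7() many
# times and print the empirical frequency of each value. All seven should come
# out near 1/7, because only the 21 equally likely outcomes below 21 are kept.
from collections import Counter

counts = Counter(rand7() for _ in range(70000))
for value in range(1, 8):
    print(value, counts[value] / 70000)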
445669ad33d8221a605da97516acd333a1fd483b
stdai1016/NUEiP_Interview_Test_2021_11
/test_3.py
1,029
4.46875
4
#!/usr/bin/env python3
""" Question 3: data processing - arrays """


def minus_sum_of_array(nums1, nums2):
    """ Return the sum of one array minus the sum of another array.
    @param `nums1`
    @param `nums2`
    @return
    """
    return sum(nums1) - sum(nums2)


def separate_numbers_by_parity(numbers):
    """ Split a list of numbers into even and odd lists and return them.
    @param `list` `numbers`
    @return `tuple[list, list]` two lists containing the even numbers and the
            odd numbers respectively
    """
    even = []
    odd = []
    for val in numbers:
        if val % 2:
            odd.append(val)
        else:
            even.append(val)
    return even, odd


if __name__ == '__main__':
    nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    even_nums, odd_nums = separate_numbers_by_parity(nums)
    diff = minus_sum_of_array(odd_nums, even_nums)
    print(f'3.1 Sum of odd numbers minus sum of even numbers: {diff}')
    print('3.2 Split the array')
    print(f'\t Even-valued array: {even_nums}')
    print(f'\t Odd-valued array: {odd_nums}')
eb4367cb1dddd2f1467068f0737d888fb4492a7c
sqho/python-projects
/assignments/disemvoweler.py
424
3.90625
4
def removeVowels(input_string):
    letters = []
    for x in input_string:
        letters.append(x)
    # iterate over a copy so that removing items doesn't skip consecutive vowels
    for letter in letters[:]:
        if letter == "a" or letter == "e":
            letters.remove(letter)
        elif letter == "i" or letter == "o":
            letters.remove(letter)
        elif letter == "u":
            letters.remove(letter)
    final_string = ""
    for leftovers in letters:
        final_string = final_string + leftovers
    print final_string

removeVowels("Hello")
c755282673eb73b92edc1cb3d5c29d8c1699af96
ivenpoker/Python-Projects
/Projects/Online Workouts/w3resource/Basic - Part-I/program-87.py
908
4.1875
4
# !/usr/bin/env python3
#######################################################################################
#                                                                                     #
#   Program purpose: Get the size of a file.                                          #
#   Program Author : Happi Yvan <[email protected]>                                    #
#   Creation Date  : August 29, 2019                                                  #
#                                                                                     #
#######################################################################################

import os

if __name__ == "__main__":
    try:
        file_path = input("Enter path to file: ")
        print(f"\nThe size of {file_path} is: {os.path.getsize(file_path)} bytes")
    except FileNotFoundError as fileNotFound:
        print(f"Path is not a valid file.\n{fileNotFound}")
5e86e45678379767821f91ea7e764ddc2f0ebf34
dalaAM/month-01
/day16_all/day16/exercise04.py
596
4.21875
4
""" 写出for元组的原理 写出for字典的原理(不使用for,获取字典键值对) """ tuple01 = (3, 54, 5, 56, 6, 7, 8) # for item in tuple01: # print(item) iterator = tuple01.__iter__() while True: try: item = iterator.__next__() print(item) except StopIteration: break dict01 = {"a": "A", "b": "B", "c": "C"} # for key in dict01: # print(key) # print(dict01[key]) iterator = dict01.__iter__() while True: try: key = iterator.__next__() print(key) print(dict01[key]) except StopIteration: break
feeac25008c5fcffa0eb42e9eec779c4ca226ee2
MrTamas/PCAP
/Module 4/Introduction to Functions/bmi_with_unit_conversion.py
574
3.734375
4
def feet_m(feet, inches):
    m_per_feet = 0.3048
    m_per_inches = m_per_feet / 12
    meters_from_feet = feet * m_per_feet
    meters_from_inches = inches * m_per_inches
    return meters_from_feet + meters_from_inches


def lb_kg(lbs):
    kg_per_lb = 0.4536
    return lbs * kg_per_lb


def bmi(height1, height2, weight, system):
    if system == 'imperial':
        metric_height = feet_m(height1, height2)
        metric_weight = lb_kg(weight)
    else:
        metric_height = height1
        metric_weight = weight
    # BMI = weight (kg) / height (m) ** 2
    return metric_weight / metric_height ** 2
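# A short usage sketch (not in the original exercise file); the sample values
# below are made up for illustration.
print(bmi(1.76, 0, 62.3, 'metric'))    # metric: height in metres, weight in kg
print(bmi(5, 7, 140, 'imperial'))      # imperial: 5 ft 7 in, 140 lb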
5affe1906df6bda3bddac594dd1fa054bb5231a4
LanghuaYang/origin
/pythoncode/operation.py
255
3.578125
4
i = 2 * 5
print(i)
i = 2 ** 4
print(i)
j = 5 / 2
print(j)
j = 5 // 2
print(j)
j = 5 % 2
print(j)
k = ~5
print(k)            # bitwise NOT: ~x = -(x+1)
k = True
k = not k
k = not 1 == 2      # logical NOT (like ! in C)
print(k)
m = True
print(k and m)      # logical AND (&&)
print(k or m)       # logical OR (||)
28e8c0babce3c87300e36cab093c628dbf2d323d
aobakwemmokwa/ampackage
/ampackage/sorting.py
2,649
4.21875
4
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 20:37:06 2019

@author: Aobakwe Mmokwa
"""
from random import randint


def bubble_sort(items):
    """ Sort the items of a list using bubble sort.

    Bubble sort runs through a list checking items and swapping them if required:
    example
        [4, 1, 2, 3]
        [1, 4, 2, 3]
        [1, 2, 4, 3]
        [1, 2, 3, 4]

    Args:
        items: a list of items to sort
    Returns:
        A sorted list in ascending order
    """
    for i in range(len(items) - 1, 0, -1):
        for j in range(i):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
    return items


def merge_sorted_lists(list1, list2):
    """ Merge list1 and list2 and return one sorted list.

    Args:
        list1 (list): first sorted list
        list2 (list): second sorted list
    Returns:
        (list): a sorted list built from the two lists
    """
    sorted_list = []
    while list1 and list2:
        if list1[0] < list2[0]:
            sorted_list.append(list1.pop(0))
        else:
            sorted_list.append(list2.pop(0))
    sorted_list.extend(list1)
    sorted_list.extend(list2)
    return sorted_list


def merge_sort(items):
    """ Sort the items of a list using merge sort.

    Merge sort divides the list into multiple sublists and continuously merges
    them back together, dividing further until the list is sorted.

    Args:
        items: a list of items to sort
    Returns:
        A sorted list in ascending order
    """
    if len(items) <= 1:
        return items
    left = items[0: int(len(items) / 2)]   # split the list in half
    right = items[int(len(items) / 2):]    # each variable holds one half
    left = merge_sort(left)
    right = merge_sort(right)
    return merge_sorted_lists(left, right)


def quick_sort(items):
    """ Sort the items of a list using quick sort.

    Like merge sort, quick sort is a divide-and-conquer algorithm. It picks an
    element as the pivot and partitions the given list around that pivot.

    Args:
        items: a list of items to sort
    Returns:
        A sorted list in ascending order
    """
    if len(items) <= 1:
        return items
    pivot_index = randint(0, len(items) - 1)  # choose a random element as the pivot
    pivot = items[pivot_index]
    left = []
    right = []
    for index, value in enumerate(items):
        if index == pivot_index:
            continue  # keep the pivot out of both partitions so recursion always shrinks
        if value < pivot:
            left.append(value)
        else:
            right.append(value)
    left = quick_sort(left)
    right = quick_sort(right)
    return merge_sorted_lists(left, [pivot] + right)
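# A small usage check (not part of the original module): all three sorts should
# agree on the same shuffled input.
data = [7, 3, 9, 1, 4, 8, 2]
print(bubble_sort(list(data)))
print(merge_sort(list(data)))
print(quick_sort(list(data)))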
5abbd95b986a25737ded0b5868177f67fb6046ed
rsreevishal/A-December-of-Algorithms-2019
/December-13/d13.py
256
3.59375
4
n = int(input('Enter no of switches:'))
res = [False for _ in range(n + 1)]
for i in range(1, n + 1):
    for j in range(1, n + 1):
        if j % i == 0:
            res[j] = not res[j]
print('No of switches in the \'on\' state at the end:', res[1:n + 1].count(True))
9b88c85c7ef7f484effa5b5dd0d1d41082025a27
Rim-El-Ballouli/Python-Crash-Course
/solutions/chapter10/ex_10_4.py
250
3.859375
4
while True:
    print('enter q to quit any time ')
    name = input('Enter your name ')
    if name == 'q':
        break
    else:
        print('Hello ' + name)
        with open('guest_book.txt', 'a') as file:
            file.write(name + '\n')
2fdd1c8256d6d9564cbc3c8e2c3cdaa47c7bb411
NJonas184/CS_portfolio
/CSC 356 Machine Learning/Exercise 4.py
788
3.734375
4
import pandas as pd
import numpy as np


def main():
    print("Hello World!")
    nanExercise = np.random.randint(30, size=(10, 3))
    preGen = np.array([
        [12, 15, 15],
        [23, np.nan, 25],
        [14, 17, 21],
        [np.nan, np.nan, np.nan],
        [np.nan, 25, 1],
        [17, 29, 26],
        [5, 19, np.nan],
        [9, 9, 2],
        [np.nan, np.nan, 5],
        [16, 5, 9]])
    print(repr(nanExercise))
    column_names = ["col 1", "col 2", "col 3"]
    df = pd.DataFrame(preGen, columns=column_names)
    print(repr(df))
    # How many NaN in each column
    print(df.isna().sum())
    # How many NaN in each row
    print(df.isnull().sum(axis=1))
    copyOne = df.copy()
    copyTwo = df.copy()


if __name__ == "__main__":
    main()
960de1b5d12a7400556d49c408b40a81aa7a3d4f
balloman/lpthw
/examples/EX43/ex43_classes.py
6,437
3.890625
4
from sys import exit
from random import randint
from time import sleep


class Scene(object):

    def enter(self):
        print "This scene is not yet configured. Subclass it and implement enter()."
        exit(1)


class Engine(object):

    def __init__(self, scene_map):
        self.scene_map = scene_map

    def play(self):
        current_scene = self.scene_map.opening_scene()

        while True:
            print "\n----------"
            next_scene_name = current_scene.enter()
            current_scene = self.scene_map.next_scene(next_scene_name)


class Death(Scene):

    def enter(self):
        print "Sadly your terrible life has come to an end."
        print "Your ghost leaves your body only to be killed by the ghost-hunters they have brought with them."
        sleep(5)
        exit(1)


class Finished(Scene):

    def enter(self):
        print "Congratulations on beating the game."
        sleep(30)
        exit(0)


class CentralCorridor(Scene):

    def enter(self):
        print "There is a Gothon standing here and the only way to defeat him is with a joke."
        self.prompt = "> "
        print "Tell him a joke."
        joke = raw_input(self.prompt)

        if len(joke) > 10:
            print "Success!!"
            print "The Gothon laughs at your joke and you blast him with your gun before he can react."
            return 'laser_weapon_armory'
        else:
            print "Joke was not good enough"
            print "You die for bad joke!!!"
            return 'death'


class LaserWeaponArmory(Scene):

    def enter(self):
        sleep(5)
        prompt = ">>> "
        print "You walk in to see a huge room of weapons."
        sleep(1)
        print "There are quite a lot of weapons here."
        sleep(1)
        print 'You see a door that says "Top Secret!" that has a keypad beside it.'
        sleep(1)
        print "It looks like you need a code to get into the room"
        sleep(1)
        print "However if you miss the code 6 times, you can't enter it anymore"
        sleep(1)
        print "The code is from 1-100."
        guesses = 1
        code = randint(1, 100)
        ucode = int(raw_input(prompt))
        print "."
        sleep(1)
        print "."
        sleep(1)
        print "."
        sleep(1)

        while ucode != code and guesses < 6:
            print "That is incorrect"
            if ucode < code:
                print "The code you guessed is lower than the code!"
            elif ucode > code:
                print "The code you guessed is higher than the code!"
            ucode = int(raw_input(prompt))
            guesses += 1

        if guesses == 6 and ucode != code:
            print "You hear the lock fuse as alarms start blaring."
            print "You accept defeat as you slump down to the ground."
            print "Seconds later you are killed by a Gothon's blaster."
            return 'death'

        if ucode == code:
            print "Yay, the code was correct!"
            print "The door opens to a large room filled with Neutron Bombs, the strongest bombs in the known universe."
            print "You grab one and run away as fast as you can (these bombs may look small, but they pack a BIG punch!)"
            print "The first place you go to is the bridge."
            return 'the_bridge'


class TheBridge(Scene):

    def enter(self):
        sleep(7)
        print "You run onto the bridge with the bomb."
        print "There are some gothons there that don't see you."
        print "They hear you walk in and turn around, but are not using their guns to shoot"
        print ", afraid to detonate the bomb"
        print "You can either slowly drop the bomb at the end of the bridge or throw it and run away."
        action = raw_input(">>> ")

        if "throw it" in action:
            print "You throw it and run"
            print "What you do not know is that all Gothons on the ship are trained in bomb Undetonation."
            print "They quickly shoot you and then disarm the bomb"
            print "You die from your wounds."
            return 'death'
        elif "drop" in action:
            print "You point your blaster at the bomb and slowly walk to the end of the bridge."
            print "They hold their hands up and you drop the bomb at the end and run."
            print "You shoot the lock to lock them inside."
            print "They are too late to react and you get away"
            print "Let's blow this popsicle stand!"
            return 'escape_pod'


class EscapePod(Scene):

    def enter(self):
        sleep(10)
        print "You run faster than you ever have before to escape the bomb"
        print "You get to the escape pods and there are 5 there"
        print "You dont have time to look and choose one at random."
        ship = int(raw_input(">>> "))
        good_ship = randint(1, 5)

        if ship != good_ship:
            print 'You jump into ship %d and hit the "GO" button' % ship
            print "You slowly see the dials come to life and the vehicle starts shaking."
            print "The pod suddenly shoots off into the air and to space."
            print "You look behind you and see the ship blow into a million bits and pieces"
            sleep(5)
            print "MISSION ACCOMPLISHED!"
            sleep(5)
            return 'finished'
        elif ship == good_ship:
            print 'You jump into ship %d and hit the "GO" button' % ship
            print "The dials come to life and the vehicle starts shaking."
            print "..."
            sleep(2)
            print "Alarms in the pod start blaring as warnings flash on the HUD."
            print "The pod fails to lift and explodes."
            print "You die."
            return 'death'


class Map(object):

    scenes = {
        'central_corridor': CentralCorridor(),
        'laser_weapon_armory': LaserWeaponArmory(),
        'the_bridge': TheBridge(),
        'escape_pod': EscapePod(),
        'death': Death(),
        'finished': Finished()
    }

    def __init__(self, start_scene):
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        return Map.scenes.get(scene_name)

    def opening_scene(self):
        return self.next_scene(self.start_scene)


a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
f412518d42cc6fd4245ad78a17f10761156bb5db
fabriciocovalesci/ListOfBrazilPythonExercises
/SequentialStructure/number_9.py
528
4.40625
4
""" [PT] 9. Faça um Programa que peça a temperatura em graus Farenheit, transforme e mostre a temperatura em graus Celsius. Fórmula matemática: C = (5 * (F-32) / 9). [EN] 9. Make a Program that asks for the temperature in Farenheit degrees, transform and show the temperature in degrees Celsius. Mathematical formula: C = (5 * (F-32) / 9). """ def calc_temperature_F_C(): temp_Fare = int(input("The temperature in Farenheit degrees: ")) print(f"Temperature in degrees Celsius: {(5 * (temp_Fare-32) / 9):.2f}°")
de51803803160ac07c2ee85d967f2fd644e25b39
mauricioZelaya/QETraining_BDT_python
/DennisGamboa/Inheritance_test.py
761
3.875
4
class Person:
    def __init__(self, first, last, age, ci):
        self.first_name = first
        self.last_name = last
        self.ages = age
        self.cis = ci

    def Name(self):
        return self.first_name + " " + self.last_name + " " + self.ages + " " + self.cis


class Employee(Person):
    def __init__(self, first, last, age, ci, employe_id, department):
        Person.__init__(self, first, last, age, ci)
        self.employee_id = employe_id
        self.department = department

    def GetEmployee(self):
        return self.Name() + ", " + self.employee_id + ", " + self.department


x = Person("Marge", "Simpson", "28", "405260")
y = Employee("Homer", "Simpson", "30", "457829", "1001", "Tester")
print(x.Name())
print(y.GetEmployee())
4a24f36df7f69a2ab50c5ba6f12dd9ff45cc2f93
NiklasMM/podcast_stats
/podcast_stats/__init__.py
1,830
3.65625
4
"""podcast_stats - A script to pull the rss feed for a podcast and run some analysis on it.""" import feedparser from datetime import datetime, timedelta from time import mktime __version__ = '0.1.0' __author__ = 'Niklas Meinzer <[email protected]>' __all__ = [] def get_parsed_feed(feed_url): """ Pull a podcast rss feed from a given url and calculate time between episodes. :raises ValueError: If the feed does not have all of the required data """ parsed_feed = feedparser.parse(feed_url) if parsed_feed is None: raise ValueError("No feed could be found") last_datetime = None episodes = [] for index, entry in enumerate(reversed(parsed_feed["entries"])): struct_time = mktime(entry.published_parsed) published_datetime = datetime.fromtimestamp(struct_time) if index == 0: # use an empty timedelta for the first episode time_since_last = timedelta() else: time_since_last = published_datetime - last_datetime episodes.append( { "title": entry.title, "published_datetime": published_datetime, "time_since_last": time_since_last, "published": True } ) last_datetime = published_datetime # Add a dummy episode to measure the time from the last episode until now episodes.append({ "title": "[Next unpublished episode]", "published_datetime": datetime.now(), "time_since_last": datetime.now() - last_datetime, "published": False }) return episodes def weekday_distribution(feed): result = [0] * 7 for entry in feed: if entry["published"]: result[entry["published_datetime"].weekday()] += 1 return result
a9d206d586cc88581e3e74f04612e90a1db3748b
saraducks/Python_interview_prep
/LPHW/exercise_3.py
1,115
3.984375
4
def test_math_func(x, y):
    addition = x + y  # This is to test '+' addition
    print "This is addition", addition

    if x > y:  # check which number is greater and then perform '-' subtraction
        subraction = x - y
        print "This is subraction", subraction
    else:
        subraction = y - x
        print "This is subraction", subraction

    if x > y:  # check if numerator is greater than denominator
        division = x / y
        print "This is division", division
    else:
        division = y / x
        print "This is division", division

    multiplication = x * y  # This is to test '*'
    print "This is Multiplication", multiplication

    remainder = x % y  # test my modulo and get my remainder
    print "This is remainder", remainder

    temp = True if x < y else False  # test the '<' symbol; else evaluates to x '>' y
    print "Test < and > logic", temp

    print "what is 5 % 2", 5 % 2  # evaluating directly from the print statement
    print "if x <= y", 4 <= 2


test_math_func(4, 5)  # This will call my test_math_func function.
242635e6f7ded26f4ed0fbad9ff8da7fc9fc1c24
maxmahe1/python-eval
/Huffman/huffman.py
3,724
3.734375
4
# The Node class is used to set things up. The goal of the algorithm is to build the tree
# and then walk back up it, much like the previous program.
# The names dg and dd stand for the left and right leaves ("gauche"/"droite").


class Node:
    def __init__(self, valeur, dg, dd=None):
        self.valeur = valeur
        if dd is None:
            self.name = dg
            self.dg = None
            self.dd = None
        else:
            self.name = dg.name + dd.name
            self.dg = dg
            self.dd = dd

    def __repr__(self):
        return f"{self.valeur}, {self.name}"


# Now for the core: the class that builds the tree.
# First the letters are stored with their frequency of occurrence, counted by occur().
# tree() uses the construction method described in the Wikipedia article:
# the two nodes with the lowest weights are merged into a new node whose weight
# is the sum of the two.


class TreeBuilder:
    def __init__(self, text):
        self.text = text
        self.lst = list(self.text)
        self.l_feuille = []

    def occur(self):
        occur = {}
        for i in self.lst:
            if i not in occur:
                occur[i] = 1
            else:
                occur[i] += 1
        letters = sorted(occur.items(), key=lambda t: t[1])
        return letters

    def tree(self):
        Letters = self.occur()
        Tot_feuilles = []
        for i in Letters:
            Tot_feuilles.append(Node(i[1], i[0]))
        while len(Tot_feuilles) > 1:
            f1 = Tot_feuilles[0]
            f2 = Tot_feuilles[1]
            v = f1.valeur + f2.valeur
            new_feuilles = Node(v, f1, f2)
            del Tot_feuilles[1]
            del Tot_feuilles[0]
            j = 0
            while j < len(Tot_feuilles) and Tot_feuilles[j].valeur <= new_feuilles.valeur:
                j += 1
            # insert at position j keeps the list sorted (j == len appends)
            Tot_feuilles.insert(j, new_feuilles)
        return Tot_feuilles[0]


# The Codec class encodes and decodes the string. create() builds the code table,
# encode() builds the binary string, and decode() rebuilds the sentence from the
# binary string by checking whether each bit is 0 or 1, which tells us whether to
# go left or right in the tree.


class Codec:
    def __init__(self, racine):
        self.racine = racine
        self.dic = {}

    def create(self, noeud=None, code=""):
        if noeud is None:
            noeud = self.racine
        if len(noeud.name) == 1:
            self.dic[noeud.name] = code
        else:
            code1 = code + "0"
            code2 = code + "1"
            return self.create(noeud.dg, code1), self.create(noeud.dd, code2)

    def encode(self, text):
        codage = str()
        for i in text:
            codage += self.dic[i]
        return codage

    def decode(self, code):
        decodage = str()
        i = 0
        while i < len(code):
            noeud = self.racine
            while len(noeud.name) > 1:
                if code[i] == "0":
                    noeud = noeud.dg
                else:
                    noeud = noeud.dd
                i += 1
            decodage += noeud.name
        return decodage


text = "a dead dad ceded a bad babe a beaded abaca bed"
builder = TreeBuilder(text)
binary_tree = builder.tree()
codec = Codec(binary_tree)
codec.create()
encoded = codec.encode(text)
decoded = codec.decode(encoded)
print(f"{text}\n{encoded}")
if decoded != text:
    print("OOPS")
945f3769cb26025f4af271267704f722b3b0cec3
felipemaion/studying_python
/bmr.py
1,360
4.15625
4
'''This is a BMR (Basal Metabolic Rate) calculator using the Mifflin-St Jeor equation'''
#''' coded by Screw '''


def get_info():
    try:
        mass = float(input('What\'s your weight in kg?: '))
        height = float(input('What\'s your height in cm?: '))
        age = int(input('How old are you?: '))
        individual = input('Are you Male or Female?: ')
        return {'mass': mass, 'height': height, 'age': age, 'sex': individual}
    except ValueError:
        print('Can\'t take letters as input, only numbers are accepted!')
        return False


def calculate_bmr(person_info):
    if person_info:
        mass = person_info['mass']
        height = person_info['height']
        age = person_info['age']
        individual = person_info['sex']
        if individual.lower() == 'male':
            bmr = (float(10) * float(mass) + float(6.25) * float(height) - int(5.0) * int(age) + 5)
            print('Your daily calorie expenditure is ' + str(round(bmr)) + ' kcal per day')
            return bmr
        if individual.lower() == 'female':
            bmr = (float(10) * float(mass) + float(6.25) * float(height) - int(5.0) * int(age) - 161)
            print('Your daily calorie expenditure is ' + str(round(bmr)) + ' kcal per day')
            return bmr


person = get_info()
persons_bmr = calculate_bmr(person)
19672d3b46c559218813a64dbd1ec686b7055f75
mhiyer/sampling_methods_with_pandas
/bootstrapping_pandas_dataframe.py
4,079
4.0625
4
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 14:01:21 2019

@author: mh iyer

Description: Bootstrapping, or 'sampling with replacement'.
    Given a labelled dataset, generate multiple samples by sampling with replacement randomly.
    Equivalent to picking marbles out of a jar: pick the first one, put it back, pick the
    second one, put it back, and so on for N samples.

    Bootstrapping may result in:
        a) Over-representation of samples:
           As samples are randomly picked, some may appear several times,
           e.g. picking the same blue marble several times even though there are other blue
           marbles and other marbles with different colours.
        b) Under-representation of samples:
           Again, as samples are randomly picked, some may not appear at all,
           e.g. completely ignoring yellow marbles while blue, red and silver marbles may be
           picked (appear at least once).
    The above situations may be ameliorated by adjusting the number of trials - there isn't a
    golden rule to do this, it is dataset dependent and you may wish to do some trial and error.

Input:
    pandas dataframe containing desired data
    num_trials: how many sets of data you require

It is assumed that the number of generated examples in each generated dataframe is the same as
the number of examples in the original dataset, e.g. if you started out with 100 examples, each
of the generated datasets will contain 100 examples.
"""

import pandas as pd
import random


# create sample data if no data is input
def create_dataset():
    # I used this website for inspiration:
    # https://www.python-course.eu/Decision_Trees.php
    data = pd.DataFrame({"toothed": ["True", "True", "True", "False", "True", "True", "True", "True", "True", "False"],
                         "hair": ["True", "True", "False", "True", "True", "True", "False", "False", "True", "False"],
                         "breathes": ["True", "True", "True", "True", "True", "True", "False", "True", "True", "True"],
                         "legs": ["True", "True", "False", "True", "True", "True", "False", "False", "True", "True"],
                         "species": ["Mammal", "Mammal", "Reptile", "Mammal", "Mammal", "Mammal", "Reptile", "Reptile", "Mammal", "Reptile"]})
    return data


# create bootstrap class
class Bootstrap:
    def __init__(self, dataset, num_trials=2):
        self.dataset = dataset
        self.target_size = len(self.dataset)
        self.num_trials = num_trials
        # get the number of examples present in the dataset
        self.num_examples = len(self.dataset)
        # generate lists for storing the generated data
        self.bootstrapped_data = []

    def generate_data(self):
        # loop through the number of trials
        for i in range(0, self.num_trials):
            # generate a set of random numbers
            # these correspond to the INDICES of the examples which will be used in the current data generation trial
            # e.g. if 0 is one of the indices, the first example in the dataset will be used
            random_indices = [random.randint(0, self.num_examples - 1) for i in range(self.num_examples)]

            # initialize an empty pandas dataframe
            df = pd.DataFrame()

            # build the new dataset by invoking the examples using their indices
            for index in random_indices:
                df = df.append(self.dataset.iloc[index])

            # append the pandas dataframe
            self.bootstrapped_data.append(df)
        return self.bootstrapped_data


# an illustration to show how the class may be used in practice
if __name__ == "__main__":
    # create dataset if you don't have one ready
    dataset = create_dataset()

    # create bootstrap object
    b = Bootstrap(dataset, num_trials=3)

    # get your bootstrapped data!
    bootstrapped_data = b.generate_data()

    # look at the first dataframe generated
    print(bootstrapped_data[0])
f4ebf3d3aee06c92786121cd2fa22efa0e35d18c
czer01ne/Tensor_practice
/simple_linear_regression_gd_tf_test.py
715
3.5625
4
import tensorflow as tf

## collect the data
x_data = [1, 2, 3]
y_data = [1, 2, 3]

## define the prediction model
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

y = W * x_data + b

## define the cost function and the optimizer
cost = tf.reduce_mean(tf.square(y - y_data))
a = tf.Variable(0.1)  # learning rate, alpha
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

# initialize the variables
init = tf.global_variables_initializer()

## training
sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))
420a3c2ae09c59434975712320c0b34113291b28
ZhuYun97/product-classification
/utils/stopwordslist.py
808
3.59375
4
def stopwordslist(filepath):
    stopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
    stopwords.append("★")
    stopwords.append("[")
    stopwords.append("]")
    stopwords.append("【")
    stopwords.append("】")
    stopwords.append("(")
    stopwords.append(")")
    stopwords.append("/")
    stopwords.append("-")
    stopwords.append("(")
    stopwords.append(")")
    stopwords.append("/")
    stopwords.append("-")
    stopwords.append("+")
    stopwords.append("*")
    stopwords.append("|")
    stopwords.append("·")
    stopwords.append(":")
    stopwords.append("…")
    stopwords.append("’")
    stopwords.append("'")
    stopwords.append('"')
    stopwords.append('◆')
    # the space character must not be removed, otherwise accuracy drops sharply
    return stopwords
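# A usage sketch (not in the original module): 'stopwords.txt' is a placeholder
# path and the token list is made up; it just shows the filter being applied to
# already-segmented product-title tokens.
if __name__ == "__main__":
    stopwords = stopwordslist("stopwords.txt")
    tokens = ["正品", "★", "蓝牙", "耳机", "/", "无线"]
    print([t for t in tokens if t not in stopwords])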
45aadfde687414e060052b08f9578c201c31493c
tharunnayak14/python
/ch13.py
1,687
3.5
4
# data on the web
# xml
# json

# eXtensible Markup Language (XML)
# <people>
#   <person>
#     <name> tharun </name>
#     <phone> 231823 </phone>
#   </person>
#   <person>
#     <name> nayak </name>
#     <phone type = "intl"> 234342 </phone>
#     <email />
#   </person>
# </people>

# xml schema
import xml.etree.ElementTree as ET

data = '''<stuff>
    <people>
        <person>
            <name> chuck </name>
            <phone type = "intl"> 32132 </phone>
            <email hide = "yes"/>
        </person>
        <person>
            <name> bob </name>
            <phone type = "intl"> 1237623 </phone>
            <email hide = "yes"/>
        </person>
    </people>
</stuff>'''

# tree = ET.fromstring(data)
# print('Name:', tree.find('name').text)
# print('Attr:', tree.find('email').get('hide'))

stuff = ET.fromstring(data)
lst = stuff.findall('people/person')
print('person count:', len(lst))
for i in lst:
    print(i.find('name').text)
    print(i.find('phone').text)

# json
import json

data = '''{
    "name" : " tharun",
    "phone" : {
        "type" : "intl",
        "number" : "23891238"
    },
    "email" : {
        "hide" : "yes"
    }
}'''

info = json.loads(data)  # info is a dictionary
print(info["name"])
print(info["email"]["hide"])

yo = '''[
    { "id" : "01", "x" : "7", "name" : "tharun" },
    { "id" : "07", "x" : "4", "name" : "nayak" }
]'''

info = json.loads(yo)  # info is a list here
print(len(info))       # number of items in the list
for i in info:
    print(i["name"])
    print(i["id"])
    print(i["x"])

# application program interface
641eb82a7eabe47971297b22a86e878649cf538b
zhilyaev/ArchPC-SUAI
/src/1.py
2,322
3.5625
4
def toHex(v, bits):
    return hex((v + (1 << bits)) % (1 << bits))


def bin_add(*args):
    return bin(sum(int(x, 2) for x in args))[2:]


def toFloat(x):
    z = '0'
    print('X := ' + str(x))
    if x < 0:
        z = '1'
        x = (-1) * x
    if x > 8388607:  # int(111 1111 1111 1111 1111 1111, 2) => 8388607
        bits = 64
    else:
        bits = 32
    print("Sign = " + z)
    x = bin(x)[2:]
    print('x to bin -> ' + str(x))
    p = len(x)
    print('Exponent -> ' + str(p))
    p = bin(p)[2:]
    print('Exponent to bin -> ' + str(p))
    p = bin_add('10000000', p)
    print('Exponent = ' + str(p))
    m = x[1:]
    print('Mantissa = ' + str(m))
    res = z + p + m
    zero = bits - len(res)
    print('Appending ' + str(zero) + ' zeros')
    t = '0' * zero
    res += t
    print("=> " + res)
    return hex(int(res, 2))


V = 6    # variant
G = 6    # group
BIT = 128

print("Variant: " + str(V))
print("Group: " + str(G))

S = V + G
x1 = ((-1) ** V) * (S * 3)
x2 = ((-1) ** (V + 1)) * (S + 17)
x3 = ((-1) ** (V + 2)) * ((S + 29) ** 2)
x4 = ((-1) ** (V + 3)) * ((S + 23) ** 2)
x5 = x3 ** 2
x6 = (-1) * (x4 ** 2)
x7 = (-1) * (x5 * (2 ** 28))
x8 = (-1) * (x6 * (2 ** 20))
x9 = (x7 * (2 ** 52)) - 0xc
x = [x1, x2, x3, x4, x5, x6, x7, x8, x9]

print()
print("Item А) Decimal values:")
for i in x:
    print(i)

print()
print("Item Б) Converted to HEX:")
for i in x:
    print(toHex(i, BIT))

print()
print("Item Г) Data addresses:")
for i in range(len(x)):
    it = (G * V) + 10 * i
    print(str(it) + " => " + toHex(it, BIT))  # toHex() can be replaced with hex()

print()
print("Item Д) Data addresses:")
for i in range(len(x)):
    it = V + 100 + (i * 10)
    print(str(it) + " => " + toHex(it, BIT))  # toHex() can be replaced with hex()

print()
print("Е) Starting placement address:")
print(V * 10 + 200)

print()
print("Ж) Starting placement address:")
print(V * G + 230)

print()
print("З) Starting placement address:")
print(V * G + 300)

print()
print("In the floating-point system")
for i in x:
    print(toFloat(i))
print('=============================')
e3e3947589e9f212b228636dd385b242703b85b1
sakshi13-cmd/tathastu_week_of_code
/tathastuproject2/pattern3.py
170
3.796875
4
n = int(input("enter a value:"))
for i in range(n):
    print((str(n - i) + "*") * (n - 1 - i) + str(n - i))
for j in range(2, n + 1):
    print((str(j) + "*") * (j - 1) + str(j))
6d68492a2cf15a2524fdcc7e028df27a86cbc871
Anshul1196/leetcode
/challenges/2020/07/W2/D4.py
576
3.765625
4
""" Solution for July LeetCoding Challenges Week 2 Day 4: Subsets """ class Solution: """ Double the set iteratively - Number of nodes: N - Space Complexity: O(N 2^N) - Time Complexity: O(N 2^N) Runtime: 32 ms / 86.44% Memory Usage: 14 MB / 60.80% """ def subsets(self, nums: List[int]) -> List[List[int]]: power_set = [[]] for num in nums: # By doing this, `power_set` contains one set without `num` and one set with `num` power_set += [item+[num] for item in power_set] return power_set
bbf0d1a5b0b488e8e8a2c6b7b4fcf60d53a03c5e
udayom/PyPractice
/subclass_example.py
673
3.90625
4
class Student():
    def __init__(self, name):
        self.name = name

a = Student("Sam")
print a.name
b = Student("UDAY")
print b.name

print "New Example"

class Rectangle():
    def __init__(self, l, b):
        self.length = l
        self.breadth = b

    def getArea(self):
        return self.length * self.breadth

    def getPerimeter(self):
        return 2 * (self.length + self.breadth)

a = Rectangle(2, 4)
print a.getArea()
print a.getPerimeter()

print "New Example"

class Child():
    def __init__(self, name):
        self.name = name

class Student(Child):
    def __init__(self, name, roll):
        self.roll = roll
        Child.__init__(self, name)

a = Child("xyz")
print a.name
b = Student("abc", 12)
print b.name
print b.roll
4eb8e9a7668244a894bb00e7f55a08e41000924e
OZ-T/leetcode
/0/merge_two_lists.py
1,345
3.9375
4
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        if l1 is None and l2 is None:
            return None
        head = ListNode(None)
        prev = head
        while l1 and l2:
            if l1.val < l2.val:
                node = ListNode(l1.val)
                l1 = l1.next
            else:
                node = ListNode(l2.val)
                l2 = l2.next
            prev.next = node
            prev = node
        while l1:
            node = ListNode(l1.val)
            l1 = l1.next
            prev.next = node
            prev = node
        while l2:
            node = ListNode(l2.val)
            l2 = l2.next
            prev.next = node
            prev = node
        return head.next


l11 = ListNode(1)
l12 = ListNode(3)
l13 = ListNode(5)
l11.next = l12
l12.next = l13

l21 = ListNode(2)
l22 = ListNode(4)
l23 = ListNode(6)
l21.next = l22
l22.next = l23

s = Solution()
res = s.mergeTwoLists(l11, l21)
while res:
    print res.val
    res = res.next

l11 = None
l21 = ListNode(1)
res = s.mergeTwoLists(l11, l21)
while res:
    print res.val
    res = res.next
2b60f356fca203039c88b201316359c3c6181f28
allenchng/Bit-O-Code
/IQ/240.py
1,618
3.765625
4
# Suppose you are given two datasets as shown below:
#   - Data on the Gini coefficient (a proxy for income inequality).
#     This is a number between 0 and 1, where 0 corresponds to perfect equality
#     (e.g. everyone has the same income) and 1 corresponds to perfect inequality
#     (where one person has all the income and everyone else has no income).
#     You can read more about the Gini coefficient on Wikipedia.
#   - Data containing a Happiness Score from the World Happiness Report.
#     The score represents a weighted average across a number of variables and
#     ranges from 1-10, where 10 is perfectly happy. You can read more about the
#     World Happiness Report on Wikipedia.
# Given this data, determine if there is a correlation between income equality
# (field: 'current' in the Gini index dataset) and happiness (field: 'overall_score'
# in the Happiness dataset). You'll only want to keep records that exist in both
# datasets (there are many countries/regions in the World Happiness Report that do
# not have a measured Gini index).

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import pearsonr

gini_index = pd.read_csv('https://raw.githubusercontent.com/erood/interviewqs.com_code_snippets/master/Datasets/gini_index.csv')
happiness_index = pd.read_csv('https://raw.githubusercontent.com/erood/interviewqs.com_code_snippets/master/Datasets/happiness_index.csv')

sub = pd.merge(happiness_index, gini_index, how="inner",
               left_on="country_or_region", right_on="country").reset_index()

# Pearson correlation coefficient and p-value
print(pearsonr(sub.overall_score, sub.current))

sns.regplot(sub.overall_score, sub.current)
plt.show()