Column schema (name, dtype, observed range):

blob_id               stringlengths    40 to 40
directory_id          stringlengths    40 to 40
path                  stringlengths    5 to 261
content_id            stringlengths    40 to 40
detected_licenses     sequencelengths  0 to 45
license_type          stringclasses    2 values
repo_name             stringlengths    8 to 111
snapshot_id           stringlengths    40 to 40
revision_id           stringlengths    40 to 40
branch_name           stringclasses    72 values
visit_date            timestamp[us]
revision_date         timestamp[us]
committer_date        timestamp[us]
github_id             int64            530k to 616M
star_events_count     int64            0 to 102k
fork_events_count     int64            0 to 24.6k
gha_license_id        stringclasses    9 values
gha_event_created_at  timestamp[us]
gha_created_at        timestamp[us]
gha_language          stringclasses    40 values
src_encoding          stringclasses    10 values
language              stringclasses    1 value
is_vendor             bool             1 class
is_generated          bool             2 classes
length_bytes          int64            11 to 4.05M
extension             stringclasses    25 values
content               stringlengths    10 to 4.04M
authors               sequencelengths  1 to 1
author_id             stringclasses    578 values
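The block above appears to be a Hugging Face dataset-viewer column summary (the stringlengths / stringclasses type vocabulary is the viewer's). A minimal sketch of loading a dataset with this schema and peeking at one record; the dataset path is hypothetical, only the column names come from the schema:

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # "user/python-source-dump" is a hypothetical path, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("user/python-source-dump", split="train")
    row = ds[0]
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file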
88be1a8dbca36a3704310ed5d08336575231773d
09e57dd1374713f06b70d7b37a580130d9bbab0d
/data/cirq_new/cirq_program/startCirq_pragma263.py
bc6b11402b4ca6da63e2f0bf7495b508300d9153
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
2,624
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py

# qubit number=4
# total number=15

import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np


class Opty(cirq.PointOptimizer):
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        if (isinstance(op, cirq.ops.GateOperation) and
                isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
# thatsNoCode


def make_circuit(n: int, input_qubit):
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.CNOT.on(input_qubit[3], input_qubit[0]))  # number=5
    c.append(cirq.H.on(input_qubit[0]))  # number=12
    c.append(cirq.CZ.on(input_qubit[3], input_qubit[0]))  # number=13
    c.append(cirq.H.on(input_qubit[0]))  # number=14
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=9
    c.append(cirq.SWAP.on(input_qubit[3], input_qubit[0]))  # number=10
    c.append(cirq.SWAP.on(input_qubit[3], input_qubit[0]))  # number=11
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c


def bitstring(bits):
    return ''.join(str(int(b)) for b in bits)


if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 2820
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq_pragma263.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)

    writefile.close()
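This row's file targets an old Cirq release: the cirq.google module (home of optimized_for_sycamore) was later split out of the core package, so the script as stored only runs against the pinned legacy version. A minimal sketch of exercising the same circuit on a current Cirq, assuming make_circuit and bitstring from the file above are in scope and simply skipping the Sycamore optimization pass:

    # Assumes make_circuit and bitstring from the file above; skips cirq.google.
    import cirq

    qubits = [cirq.GridQubit(i, 0) for i in range(4)]
    circuit = make_circuit(4, qubits)
    result = cirq.Simulator().run(circuit, repetitions=1000)
    print(result.histogram(key='result', fold_func=bitstring))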
f736b231f33cffb070a9a4bb88108c52854ef72a
19a7959e21d430d6544aa6f8dd511902c83c6e91
/实验源代码/red_blue_game/dataProcessor.py
5a9519fc44e81658ea071adfa79f967c0f6a2d69
[]
no_license
xioajiumi/red_blue_game
b6280b07e2fde7ee46180a962e4ad4cc7653d25d
ba6d876e8d0375af97010a324a499a5c35251fab
refs/heads/main
2023-08-31T20:29:57.475151
2021-10-20T12:28:10
2021-10-20T12:28:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,420
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Site :
# @File : dataProcessor.py

import json
import openpyxl as ox
import matplotlib.pyplot as plt
import numpy as np
import math


def store_2_json(file_name, path, datas):
    json_datas = json.dumps(datas)
    file = path + file_name
    with open(file, "w") as fo:
        fo.write(json_datas)


def write_2_excel(file_name, path, datas, render=True):
    file_name = path + file_name
    # create a workbook
    wb = ox.Workbook()
    wb.create_sheet('detail')
    wb.create_sheet('means')
    # write the per-round details to the "detail" sheet and each game's combined score to the "means" sheet
    ds = wb.get_sheet_by_name("detail")
    ms = wb.get_sheet_by_name("means")
    render_data = [] if render else None  # when rendering, keep the data in memory so the xlsx file need not be read back
    # pre-populate the headers of both sheets
    ds.cell(1, 3).value = "最终得分"
    for i in range(1, 9):  # write the column headers
        ds.cell(1, 2 + 2 * i).value = f"第{i}轮策略"
        ds.cell(1, 3 + 2 * i).value = f"第{i}轮损益"
    ms.cell(1, 1).value = "第N局"
    ms.cell(1, 2).value = "双方总分"
    # write each game's data
    for game_num, game_data in datas.items():
        ms.cell(1 + game_num, 1).value = f"第{game_num}局"  # write the game number
        ds.cell(2 * game_num, 1).value = f"第{game_num}局"  # write the game number
        ds.cell(2 * game_num, 2).value = f"玩家A"  # label player A
        a_sco, b_sco = game_data["fin_score"][0], game_data["fin_score"][1]  # final scores of players A and B
        ms.cell(1 + game_num, 2).value = (a_sco + b_sco)  # combined score of both players for this game
        if render:
            render_data.append(a_sco + b_sco)  # collect the data needed for rendering
        ds.cell(2 * game_num, 3).value = f"{a_sco}"  # player A's total score
        ds.cell(2 * game_num + 1, 2).value = f"玩家B"  # label player B
        ds.cell(2 * game_num + 1, 3).value = f"{b_sco}"  # player B's total score
        for round_num, round_data in game_data["rounds"].items():
            a_dec, a_gain = round_data["decisions"][0], round_data["changed"][0]  # player A's decision and gain/loss
            b_dec, b_gain = round_data["decisions"][1], round_data["changed"][1]  # player B's decision and gain/loss
            # uncomment the lines below to record 背叛/合作 (defect/cooperate) in the sheet instead of False/True
            # a_dec = "合作" if a_dec else "背叛"
            # b_dec = "合作" if b_dec else "背叛"
            # uncomment the lines below to record 0/1 in the sheet instead of False/True
            # a_dec = 1 if a_dec else 0
            # b_dec = 1 if b_dec else 0
            ds.cell(2 * game_num, 2 + round_num * 2).value = f"{a_dec}"  # player A's decision
            ds.cell(2 * game_num, 3 + round_num * 2).value = f"{a_gain}"  # player A's gain/loss
            ds.cell(2 * game_num + 1, 2 + round_num * 2).value = f"{b_dec}"  # player B's decision
            ds.cell(2 * game_num + 1, 3 + round_num * 2).value = f"{b_gain}"  # player B's gain/loss
    wb.save(file_name)
    return render_data


def render(file_name, path, render_data):
    items = {}  # frequency of each distinct result
    for data in render_data:
        items[data] = items.get(data, 0) + 1.0  # build score -> frequency pairs
    x_y = [(x, y) for (x, y) in items.items()]  # convert to a list of pairs
    x_y.sort(key=lambda item: item[0])  # sort the pairs by score
    x, y_f, y_p = [], [], []
    for item in x_y:
        x.append(item[0])  # scores in ascending order
        y_f.append(item[1])  # frequency of each score
        y_p.append(item[1] / 1000)  # relative frequency of each score
    # frequency histogram
    plt.xlabel("Score")
    plt.ylabel("Frequency")
    plt.bar(x, y_f)  # per-game total score vs. frequency
    plt.plot(x, y_f)  # per-game total score vs. frequency
    plt.bar([_ / 2 for _ in x], y_f)  # per-player average score vs. frequency
    plt.show()
    area = area_2_xaxis(x, y_f)
    print("area:", area)
    print(f"得分结果数量:{len(x)}")
    # probability density plot
    plt.xlabel("Score")
    plt.ylabel("Probability")
    mean = np.mean(render_data)  # mean of the total scores
    var_f = np.var(render_data)  # variance of the frequency distribution
    pro = [x[i] * y_p[i] for i in range(len(x))]
    var_p = np.var(pro)  # variance of the relative-frequency distribution
    print(f"频数方差:{var_f}")
    print(f"频率方差:{var_p}")
    plt.plot(x, [y / area for y in y_f], linewidth=2)
    # attempt to fit a normal-distribution curve
    mu, sigma_f, sigma_p = mean, math.sqrt(var_f), math.sqrt(var_p)
    print("sigma_p:", sigma_p)
    print("sigma_f:", sigma_f)
    print("count 0:", render_data.count(0.0))
    print("non-neg total score rate:", sum([1 for data in render_data if data >= 0]) / 1000)
    for sig in [sigma_f, ]:
        print(f"mu:{mean}\nsigma:{sigma_f}")
        draw_normal_dis(mean, sig)
    plt.show()


def area_2_xaxis(xs, ys):
    # area between the frequency curve and the x axis (trapezoid rule)
    area = 0
    for i in range(len(xs)):
        if i == len(xs) - 1:
            return area
        else:
            bottom = xs[i + 1] - xs[i]
            height = (ys[i + 1] + ys[i]) / 2
            area += bottom * height


def draw_normal_dis(mu, sigma):
    x_ = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 50)
    y_ = np.exp(-(x_ - mu) ** 2 / (2 * sigma ** 2)) / (math.sqrt(2 * math.pi) * sigma)
    plt.plot(x_, y_, linewidth=2, color="coral")
d45b2df2ceb71ae350e9d6a317ee4e09741e503e
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_207/507.py
3881fca5b62b736452cde9286a5ba5618161c3b5
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,981
py
def validate(s):
    pass

def solver(line):
    n,r,o,y,g,b,v = line
    t1 = b - o
    t2 = y - v
    t3 = r - g
    if t1 < 0 or t2 < 0 or t3 < 0:
        return "IMPOSSIBLE"
    if 0 in [t1,t2,t3]:
        if line[1:].count(0) == 4:
            L = [(r,'R'),(o,'O'),(y,'Y'),(g,'G'),(b,'B'),(v,'V')]
            L.sort(key = lambda x: -x[0])
            if L[0][0] == L[1][0]:
                return (L[0][1] + L[1][1]) * L[0][0]
            else:
                return "IMPOSSIBLE"
        else:
            return "IMPOSSIBLE"
    L = [t1,t2,t3]
    if sum(L) < 2 * max(L):
        return "IMPOSSIBLE"
    else:
        L = [[t1,'B'],[t2,'Y'],[t3,'R']]
        s = '_'
        while sum(i[0] for i in L) > 3:
            #error: haven't enforced start != end
            L.sort(key = lambda x: -x[0])
            if L[0][1] != s[-1]:
                s += L[0][1]
                L[0][0] -= 1
            else:
                s += L[1][1]
                L[1][0] -= 1
                if L[1][0] < 0:
                    print "bad stuff"
        s = s[1:]
        if s:
            t = s[0] + s[-1]
        else:
            t = 'RR'
        d = {'RR' : 'BRY', 'RY' : 'BRY', 'RB' : 'YRB',
             'YR' : 'BYR', 'YY' : 'BYR', 'YB' : 'RYB',
             'BR' : 'YBR', 'BY' : 'RBY', 'BB' : 'RBY'}
        s += d[t]
        s = s.replace('B','BO' * o + 'B', 1)
        s = s.replace('Y','YV' * v + 'Y', 1)
        s = s.replace('R','RG' * g + 'R', 1)
        return s

#case testing needs to happen

fout = open('out.txt','w')
f = open('in.txt')
T = int(f.readline())
for case in range(1,T+1):
    line = f.readline()
    line = line.split()
    line = [int(i) for i in line]
    ans = solver(line)
    str = "Case #%d: %s\n" % (case, ans)
    print str,
    fout.write(str)
f.close()
fout.close()
23dd9f6560ebd7e9a36a1242ce006af8f3c1d1db
8eb2306b6fdd1dfd687cc399af9a5ccdd5cfdeea
/make_snr.py
c811a13b201085633641ba8f80a3666e65197bbf
[]
no_license
kangkulee/matplot-autoExcel-PPTX
12c3ab9bc5d7f56f89457f09142ceb3e1d6183be
9100e9882491c0ff90c8aa8860bd54b01bb604ad
refs/heads/master
2021-01-04T06:36:10.347447
2020-02-14T05:35:29
2020-02-14T05:35:29
240,432,964
0
0
null
null
null
null
UTF-8
Python
false
false
4,281
py
from matplotlib import pyplot as plt, font_manager, rc
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
import json
import openpyxl
import os
from pptx import Presentation
from pptx.util import Pt
from pptx.util import Inches
from datetime import date
import pptx
import boto3


# if __name__ == '__main__':
def make_snr():
    datenow = str(date.today())[2:4] + str(date.today())[5:7] + str(date.today())[8:10]
    name = 'tester'
    filename = 'createCSV/' + datenow + 'test.csv'
    excelname = 'Excel/' + datenow + 'test' + name + '.xlsx'
    picturename = 'chartPicture/avgSnrChart_'
    pptname = 'ppt/' + datenow + 'test' + name + '.pptx'
    titletext = 'title'
    f = open(filename, 'r')
    station_key = dict()
    station_col = 2  # initial column for the per-station cells
    xbar_row = 2  # initial row for the x-axis cells
    cnt_col = 2  # initial column for the count cells
    cnt_row = 2  # initial row for the count cells
    count = []  # empty list that receives the counts

    # load pptx
    prs = Presentation(pptname)
    title_slide_layout = prs.slide_layouts[0]
    slide = prs.slides.add_slide(title_slide_layout)
    title = slide.shapes.title
    subtitle = slide.placeholders[1]
    title.text = "title"
    subtitle.text = titletext

    slide_layout = prs.slide_layouts[1]
    slide = prs.slides.add_slide(slide_layout)
    slide_title_shape = slide.shapes.title
    slide_title_shape.text = "STATION LIST(" + titletext + ")"
    slide_body_shape = slide.placeholders[1]  # text box object
    slide_body_tf = slide_body_shape.text_frame

    # load excel
    write_wb = openpyxl.load_workbook(excelname)
    write_ws = write_wb.create_sheet('third')

    for line in f:
        rawdata = json.loads(line.replace("'", "\""))
        if rawdata['payload']['station'] in station_key:
            if rawdata['payload']['snr'] != 'null':
                station_key[rawdata['payload']['station']].append(float(rawdata['payload']['snr']))
        else:
            if rawdata['payload']['snr'] != 'null':
                station_key[rawdata['payload']['station']] = [float(rawdata['payload']['snr'])]

    write_ws.cell(row=1, column=1, value='x축범위')
    for items in station_key.items():
        avg = round(sum(items[1]) / len(items[1]), 2)
        ys, xs, patches = plt.hist(items[1], range=(0, 40), color='r', edgecolor='black',
                                   linewidth=1.2, rwidth=0.8, bins=40, alpha=0.4)
        count.append(ys)
        write_ws.cell(row=1, column=station_col, value='snr(station : ' + items[0] + ')')
        station_col = station_col + 1
        plt.xlabel('snr')
        plt.ylabel('개수')
        plt.title('snr 차트\n(station : ' + items[0] + ') 평균 : ' + str(avg))
        plt.grid()
        plt.axvline(avg, linestyle='--')
        plt.minorticks_on()
        check_picture = os.path.exists(picturename + items[0] + '.png')
        if check_picture == True:
            # delete picture
            os.unlink(picturename + items[0] + '.png')
            # save picture
            plt.savefig(picturename + items[0] + '.png', dpi=500)
        else:
            # save picture
            plt.savefig(picturename + items[0] + '.png', dpi=500)
        # show chart
        # plt.show()
        plt.clf()  # initialize

        # save pptx
        slide_p = slide_body_tf.add_paragraph()
        slide_p.text = items[0]
        slide_p.font.size = Pt(17)
        slide_layout = prs.slide_layouts[6]
        slide = prs.slides.add_slide(slide_layout)
        slide.shapes.add_picture(picturename + items[0] + '.png', Inches(0.5), Inches(0.5),
                                 width=Inches(9), height=Inches(6))
    prs.save(pptname)

    # insert excel
    for i in range(0, len(xs) - 1):
        write_ws.cell(row=xbar_row, column=1, value=xs[i])
        xbar_row = xbar_row + 1
    for i in range(len(count)):
        for j in range(len(count[i])):
            write_ws.cell(row=cnt_row, column=cnt_col, value=int(count[i][j]))
            cnt_row = cnt_row + 1
            if cnt_row == 42:
                cnt_row = 2
                cnt_col = cnt_col + 1
    # save excel
    write_wb.save(excelname)
832974b9068a90cd72f7987a17131faae3924d37
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
/xcp2k/classes/_cell3.py
c1150112a0c843de3db8f2c0d137662bf75a7671
[]
no_license
obaica/xcp2k
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
6e15c2c95658f545102595dc1783f5e03a9e6916
refs/heads/master
2020-07-15T17:27:43.378835
2019-02-11T16:32:24
2019-02-11T16:32:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,148
py
from xcp2k.inputsection import InputSection
from _cell_ref1 import _cell_ref1


class _cell3(InputSection):
    def __init__(self):
        InputSection.__init__(self)
        self.A = None
        self.B = None
        self.C = None
        self.Abc = None
        self.Alpha_beta_gamma = None
        self.Cell_file_name = None
        self.Cell_file_format = None
        self.Periodic = None
        self.Multiple_unit_cell = None
        self.Symmetry = None
        self.CELL_REF = _cell_ref1()
        self._name = "CELL"
        self._keywords = {'A': 'A', 'Cell_file_format': 'CELL_FILE_FORMAT', 'C': 'C', 'B': 'B',
                          'Symmetry': 'SYMMETRY', 'Alpha_beta_gamma': 'ALPHA_BETA_GAMMA',
                          'Multiple_unit_cell': 'MULTIPLE_UNIT_CELL', 'Periodic': 'PERIODIC',
                          'Abc': 'ABC', 'Cell_file_name': 'CELL_FILE_NAME'}
        self._subsections = {'CELL_REF': 'CELL_REF'}
        self._aliases = {'Angles': 'Alpha_beta_gamma'}

    @property
    def Angles(self):
        """
        See documentation for Alpha_beta_gamma
        """
        return self.Alpha_beta_gamma

    @Angles.setter
    def Angles(self, value):
        self.Alpha_beta_gamma = value
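These generated xcp2k section classes are attribute containers whose fields map to CP2K input keywords via _keywords. A hypothetical usage sketch (values made up; assumes the class is importable, which requires first fixing the file's Python-2-style relative import):

    # Hypothetical usage of the generated CELL section; values are made up.
    cell = _cell3()
    cell.Abc = [10.0, 10.0, 10.0]      # written out under the keyword ABC
    cell.Periodic = 'XYZ'              # written out under the keyword PERIODIC
    cell.Angles = [90.0, 90.0, 90.0]   # alias property for Alpha_beta_gamma
    print(cell.Alpha_beta_gamma)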
73ebe4706416324038c098bffa8ff1673ac13aab
96a08c181df0873ed61645c63cbb2870b713ec56
/lane.py
65c5343077343eeb0669dafd3d2ffb52b11db41a
[]
no_license
KnightFoxii/Lane-Detection-system
fe33cff4ba5a9b5c9a8c1a77a88633437b1a5555
5991e2b57fe9cf42cc4b7ce3f859ee5dbd7ca3bd
refs/heads/master
2022-12-01T22:16:59.298843
2020-08-20T15:59:58
2020-08-20T15:59:58
289,047,663
0
0
null
null
null
null
UTF-8
Python
false
false
2,940
py
# Import the required libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt

def canny_edge_detector(image):
    # Convert the image color to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Reduce noise from the image
    blur = cv2.GaussianBlur(gray_image, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny

def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([
        [(200, height), (1100, height), (550, 250)]
    ])
    mask = np.zeros_like(image)
    # Fill poly-function deals with multiple polygons
    cv2.fillPoly(mask, polygons, 255)
    # Bitwise operation between canny image and mask image
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image

def create_coordinates(image, line_parameters):
    slope, intercept = line_parameters
    y1 = image.shape[0]
    y2 = int(y1 * (3 / 5))
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])

def average_slope_intercept(image, lines):
    left_fit = []
    right_fit = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        # Fit a first-degree polynomial to get the slope and intercept
        parameters = np.polyfit((x1, x2), (y1, y2), 1)
        slope = parameters[0]
        intercept = parameters[1]
        if slope < 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))

    left_fit_average = np.average(left_fit, axis=0)
    right_fit_average = np.average(right_fit, axis=0)
    left_line = create_coordinates(image, left_fit_average)
    right_line = create_coordinates(image, right_fit_average)
    return np.array([left_line, right_line])

def display_lines(image, lines):
    line_image = np.zeros_like(image)
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image

# Path of dataset directory
cap = cv2.VideoCapture(r"datasets\test2.mp4")  # raw string keeps "\t" from becoming a tab character
while cap.isOpened():
    _, frame = cap.read()
    canny_image = canny_edge_detector(frame)
    cropped_image = region_of_interest(canny_image)

    lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100,
                            np.array([]), minLineLength=40, maxLineGap=5)

    averaged_lines = average_slope_intercept(frame, lines)
    line_image = display_lines(frame, averaged_lines)
    combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
    cv2.imshow("results", combo_image)

    # Break out of the loop when 'q' is pressed on the keyboard.
    # waitKey(0) would wait indefinitely between frames;
    # waitKey(1) waits only 1 ms between frames.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# close the video file
cap.release()
# destroy all the windows that are currently open
cv2.destroyAllWindows()
7414ad7eafe29973d4611d6f86ba80c8f4db9c97
6944f0aa994045e04d01445c70667f25184736de
/softmax.py
3e3448b10d5387abd321f6d71d0bf74275f9fc2e
[]
no_license
sumantsaxena/ML_Projects
af5918b01238977433cbe0d13587ec5b196924f6
37847f4c828be65392423f43196543f2f1769252
refs/heads/main
2023-07-17T23:58:50.442728
2021-09-05T18:26:29
2021-09-05T18:26:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,539
py
import numpy as np
from scipy.special import softmax
from scipy.special import log_softmax

data = np.genfromtxt("HamiltonCases.csv", delimiter=',')
X = data[:, 0:4]
y = data[:, 4]
z0 = (y < X.min(axis=1)).astype(int)
z2 = (y > X.max(axis=1)).astype(int)
z1 = ((z0 == 0) & (z2 == 0)).astype(int)
y = np.concatenate([z0[:, np.newaxis], z1[:, np.newaxis], z2[:, np.newaxis]], axis=1)
X = np.concatenate([X, np.ones(len(X))[:, np.newaxis]], axis=1)

m = 200
Xtrain = X[0:m, :]
ytrain = y[0:m, :]
target = np.argmax(ytrain, axis=1)

theta = np.random.randn(5, 3)
o = np.matmul(Xtrain, theta)
predict = np.argmax(o, axis=1)
before = np.sum(predict == target)

eta = 0.001
n_iterations = 1000
for i in range(n_iterations):
    # o = x.Θ
    o = np.matmul(Xtrain, theta)
    # y~ = softmax(o)
    p = softmax(o, axis=1)
    # NLL = -log(y~) -> -log(softmax(o, axis=1)) -> log_softmax(o)
    NLL = -log_softmax(o, axis=1)
    # L = y*(-log(y^)) = y*(NLL)
    L = np.tensordot(NLL, ytrain, axes=[[0, 1], [0, 1]]) / m
    print(L)
    # (y^ - y).x = Xtrain.(p - ytrain)
    gradient = np.matmul(Xtrain.T, (p - ytrain)) / m
    # theta = theta - eta * gradient
    theta = theta - eta * gradient

print(theta)
o = np.matmul(Xtrain, theta)
predict = np.argmax(o, axis=1)
print(before, np.sum(predict == target))

Xtest = X[m:, :]
ytest = y[m:, :]
o = np.matmul(Xtest, theta)
predict = np.argmax(o, axis=1)
act = np.argmax(ytest, axis=1)
print(np.sum(predict == act), np.sum(predict != act))
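For reference, the identity the training loop relies on (standard softmax-regression math, not spelled out in the file): with one-hot targets Y, parameters Θ, and predictions p = softmax(XΘ),

    \[
    L(\Theta) = -\frac{1}{m}\sum_{i=1}^{m} y_i^{\top}\log\operatorname{softmax}(x_i\Theta),
    \qquad
    \nabla_{\Theta}L = \frac{1}{m}\,X^{\top}\bigl(\operatorname{softmax}(X\Theta)-Y\bigr),
    \]

which is exactly what `gradient = np.matmul(Xtrain.T, (p - ytrain)) / m` computes.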
ae5b5e91cf43266b95ffaeb5f1795e03a00655ff
f445450ac693b466ca20b42f1ac82071d32dd991
/generated_tempdir_2019_09_15_163300/generated_part003803.py
26d534cc630c79581554130b81c7f37de6f38777
[]
no_license
Upabjojr/rubi_generated
76e43cbafe70b4e1516fb761cabd9e5257691374
cd35e9e51722b04fb159ada3d5811d62a423e429
refs/heads/master
2020-07-25T17:26:19.227918
2019-09-15T15:41:48
2019-09-15T15:41:48
208,357,412
4
1
null
null
null
null
UTF-8
Python
false
false
5,094
py
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *


class CommutativeMatcher62345(CommutativeMatcher):
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.2.0', 1, 1, S(0)), Add)
        ]),
        1: (1, Multiset({1: 1}), [
            (VariableWithCount('i2.2.3.0', 1, 1, S(0)), Add)
        ]),
        2: (2, Multiset({2: 1}), [
            (VariableWithCount('i2.2.1.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        if CommutativeMatcher62345._instance is None:
            CommutativeMatcher62345._instance = CommutativeMatcher62345()
        return CommutativeMatcher62345._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 62344
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.2.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 62346
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.2.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 62347
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst2
                subjects.appendleft(tmp2)
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.3.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 63479
            if len(subjects) >= 1:
                tmp5 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.3.1.0', tmp5)
                except ValueError:
                    pass
                else:
                    pass
                    # State 63480
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst2
                subjects.appendleft(tmp5)
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.2.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 65481
            if len(subjects) >= 1:
                tmp8 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.1.2.1.0', tmp8)
                except ValueError:
                    pass
                else:
                    pass
                    # State 65482
                    if len(subjects) == 0:
                        pass
                        # 2: x*d
                        yield 2, subst2
                subjects.appendleft(tmp8)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp10 = subjects.popleft()
            associative1 = tmp10
            associative_type1 = type(tmp10)
            subjects11 = deque(tmp10._args)
            matcher = CommutativeMatcher62349.get()
            tmp12 = subjects11
            subjects11 = []
            for s in tmp12:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp12, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 62350
                    if len(subjects) == 0:
                        pass
                        # 0: x*f
                        yield 0, subst1
                if pattern_index == 1:
                    pass
                    # State 63481
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst1
                if pattern_index == 2:
                    pass
                    # State 65483
                    if len(subjects) == 0:
                        pass
                        # 2: x*d
                        yield 2, subst1
            subjects.appendleft(tmp10)
        return
        yield


from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part003804 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
b9a35d9b23d72056681ea97f3befda8018e6c7fd
6611fc8f80e9d2ba6879ce6fae3069a66d34cc82
/python-39 objectoriented.py
83c9d8af9aa6bb896ec4c3fe127660d05ea1ac38
[]
no_license
mass234/broPython12hoursTutorials
6feae46e7f6a934b264e15e15dca1f6616958136
7d908752f4709519a9fc6885f25e8433434710ce
refs/heads/main
2023-07-09T04:16:43.939355
2021-08-16T08:04:03
2021-08-16T08:04:03
396,685,248
0
0
null
null
null
null
UTF-8
Python
false
false
383
py
from car import Car

car_1 = Car("Chevy", "Corvette", 2021, "blue")
car_2 = Car("Ford", "Mustang", 2022, "red")

# print(car_1.make)
# print(car_1.model)
# print(car_1.year)
# print(car_1.color)

car_1.drive()
car_1.stop()

# print(car_2.make)
# print(car_2.model)
# print(car_2.year)
# print(car_2.color)

car_2.drive()
car_2.stop()

Car.wheels = 2
print(Car.wheels)
ca948906b190235d198874d255c25307202a2b12
bee57d5be5e4d249275b6709b6f2582591b72036
/Dummy K_means.py
45f26929301c1d897515fe61aa784e2a3a9caa65
[]
no_license
0zymandias11/ML
a6b3d30cd5f469b817b51b0f1bf13657cde1b423
76f6d99549becc15f914595aaec210105fea7495
refs/heads/master
2023-03-19T15:26:22.291801
2019-02-10T21:41:03
2019-02-10T21:41:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,673
py
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 00:59:29 2019

@author: priya
"""
import numpy as np
import random
import matplotlib.pyplot as plt

l = []
for i in range(50):
    l.append([])
    for j in range(2):
        l[i].append(random.randint(1, 100))
X = np.array(l)

plt.scatter(X[:, 0], X[:, 1], s=150)
print("initial plot")
plt.show()

colors = 50 * ["g", "r", "c", "b", "k"]


class K_means:
    def __init__(self, k=3, tol=0.0001, max_iter=300):
        self.k = k
        self.tol = tol
        self.max_iter = max_iter

    def fit(self, data):
        self.centroids = {}
        for i in range(self.k):
            self.centroids[i] = data[i]
        for i in range(self.max_iter):
            self.classifications = {}
            for i in range(self.k):
                self.classifications[i] = []
            # assign each point to its nearest centroid
            for features in data:
                distances = [np.linalg.norm(features - self.centroids[i]) for i in self.centroids]
                i = distances.index(min(distances))
                self.classifications[i].append(features)
            prev_centroids = dict(self.centroids)
            # recompute each centroid as the mean of its assigned points
            for i in self.classifications:
                self.centroids[i] = np.average(self.classifications[i], axis=0)
            optimized = True
            for i in self.centroids:
                original_centroid = prev_centroids[i]
                current_centroid = self.centroids[i]
                if np.sum((current_centroid - original_centroid) / original_centroid * 100.0) > self.tol:
                    optimized = False
            if optimized:
                break

    def predict(self, data):
        classifications = []
        for features in data:
            distances = [np.linalg.norm(features - self.centroids[i]) for i in self.centroids]
            i = distances.index(min(distances))
            classifications.append(i)
        return np.array(classifications)


clf = K_means()
clf.fit(X)
y_pred = clf.predict(X)

for centroid in clf.centroids:
    plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],
                marker="o", color="y", s=150, linewidths=5)

for classification in clf.classifications:
    color = colors[classification]
    for featureset in clf.classifications[classification]:
        plt.scatter(featureset[0], featureset[1], marker="+", color=color, s=150, linewidths=5)

plt.show()
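A quick cross-check of the hand-rolled class against scikit-learn's KMeans on the same kind of random 2-D data (not part of the original file; assumes scikit-learn is installed):

    # Cross-check: scikit-learn's KMeans on comparable random 2-D points.
    import numpy as np
    from sklearn.cluster import KMeans

    X = np.random.randint(1, 100, size=(50, 2)).astype(float)
    labels = KMeans(n_clusters=3, n_init=10).fit_predict(X)
    print(labels[:10])  # cluster index assigned to the first ten points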
e66c08b871e76eeaf4e11f234bb1968803b42de1
3b6074316cbce6eebb13407f221759b2a47b5548
/othellologic.py
e3789b9576c01f332829ee97f13a364a700fcd97
[]
no_license
asiddiq1/Othello
a240b0eebbc883778e8537a584268da50f77f4b1
e9638149f3d95e8db130367a3461b83b37e31315
refs/heads/master
2021-09-11T16:51:08.945852
2018-04-10T00:45:48
2018-04-10T00:45:48
107,196,174
0
0
null
null
null
null
UTF-8
Python
false
false
6,408
py
#Aisha Siddiq 81047072 Project 4

class InvalidMoveError(Exception):
    '''Raises an error whenever an invalid move is made '''
    pass


class OthelloGame:
    #Public Functions
    def __init__(self, row, column, turn, top_player_piece, bottom_player_piece):
        '''Initializes all objects inserted by the user '''
        self.Black = 'B'
        self.White = 'W'
        self.NONE = '.'
        self.Tie = 'NONE'
        self._row = row
        self._column = column
        self._turn = turn
        self._topcorner = top_player_piece
        self._bottomcorner = bottom_player_piece

    def create_board(self):
        '''Create the board with the four pieces in the middle '''
        self.game_board = []
        for row in range(self._row):
            self.game_board.append([self.NONE] * self._column)
        self.game_board[int(self._row/2) - 1][int(self._column/2) - 1] = self._topcorner
        self.game_board[int(self._row/2) - 1][int(self._column/2)] = self._bottomcorner
        self.game_board[int(self._row/2)][int(self._column/2) - 1] = self._bottomcorner
        self.game_board[int(self._row/2)][int(self._column/2)] = self._topcorner

    def make_move(self, row, col):
        '''The user inputs the row/col; if the move is valid, update the board,
        and if invalid, raise an error '''
        if self._valid_move(row, col):
            self._flip_pieces(row, col)
            self._turn = self._switch_color()
            self.player_score()
        else:
            raise InvalidMoveError()

    def player_score(self):
        '''Counts the score of each player '''
        self.total_white = 0
        self.total_black = 0
        for row in self.game_board:
            for col in row:
                if col == self.Black:
                    self.total_black += 1
                elif col == self.White:
                    self.total_white += 1

    def winning_player(self) -> bool:
        '''Returns false if there is a valid move on the board; if there isn't,
        it checks whether the other player has a valid move. If neither player
        has a valid move available, there is a winning player (true).'''
        for row in range(self._row):
            for col in range(self._column):
                if self._valid_move(row, col):
                    return False
        self._turn = self._switch_color()
        for row in range(self._row):
            for col in range(self._column):
                if self._valid_move(row, col):
                    return False
        return True

    def winner_most_points(self):
        '''Winning option (player with the most points) '''
        if self.winning_player():
            if self.total_black > self.total_white:
                return self.Black
            elif self.total_white > self.total_black:
                return self.White
            else:
                return self.Tie

    def winner_least_points(self):
        '''Winning option (player with the least points) '''
        if self.winning_player():
            if self.total_black > self.total_white:
                return self.White
            elif self.total_white > self.total_black:
                return self.Black
            else:
                return self.Tie

    #Private functions
    def _switch_color(self):
        '''Switches the color of the players '''
        if self._turn == self.Black:
            return self.White
        elif self._turn == self.White:
            return self.Black

    def _check_valid(self, row, col, rowdelta, coldelta) -> bool:
        '''Returns true if the row/col selected is a valid position on the board '''
        starting_point = self.game_board[row][col]
        seen_opposite_color = False
        if starting_point != self.NONE:
            return False
        else:
            for i in range(1, max(self._row, self._column)):
                if self._valid_column_number(col + coldelta * i) and self._valid_row_number(row + rowdelta * i):
                    current_point = self.game_board[row + rowdelta * i][col + coldelta * i]
                    if current_point == self._switch_color():
                        seen_opposite_color = True
                        continue
                    elif current_point == self._turn and seen_opposite_color:
                        return True
                    else:
                        return False
            return False

    def _valid_move(self, row, col) -> bool:
        '''Returns true/false if there is a move on the board that is valid '''
        return self._check_valid(row, col, 0, 1)\
            or self._check_valid(row, col, 1, 1)\
            or self._check_valid(row, col, 1, 0)\
            or self._check_valid(row, col, 1, -1)\
            or self._check_valid(row, col, 0, -1)\
            or self._check_valid(row, col, -1, -1)\
            or self._check_valid(row, col, -1, 0)\
            or self._check_valid(row, col, -1, 1)

    def _flip_pieces(self, row, col):
        '''If the position selected is valid flip the pieces on the board '''
        delta = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
        for rowdelta, coldelta in delta:
            if self._check_valid(row, col, rowdelta, coldelta):
                for i in range(1, max(self._row, self._column)):
                    current_point = self.game_board[row + rowdelta * i][col + coldelta * i]
                    if current_point == self._switch_color():
                        self.game_board[row + rowdelta * i][col + coldelta * i] = self._turn
                        continue
                    else:
                        break
        self.game_board[row][col] = self._turn

    def _valid_column_number(self, valid_column) -> bool:
        '''Checks to see if the piece is in a valid column '''
        return 0 <= valid_column < self._column

    def _valid_row_number(self, valid_row) -> bool:
        '''Checks to see if the piece is in a valid row '''
        return 0 <= valid_row < self._row
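A minimal driver for the class above (hypothetical, assuming the file is importable as othellologic): create an 8x8 board with black to move, play one legal opening move, and print the board:

    from othellologic import OthelloGame, InvalidMoveError

    game = OthelloGame(8, 8, 'B', 'W', 'B')   # rows, cols, turn, top piece, bottom piece
    game.create_board()
    try:
        game.make_move(2, 3)                  # a legal opening move for black
    except InvalidMoveError:
        print("invalid move")
    for row in game.game_board:
        print(' '.join(row))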
e95625894d5cba62471ce44e67b02160ea805c8f
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/16_0_3/shangtai/codejamC.py
be3aad06742fb467f6074f6d0a440327d6d7dc65
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405091
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
999
py
T = int(raw_input())
N, J = map(int, raw_input().split())

def is_prime(n):
    if n == 2 or n == 3: return True
    if n < 2 or n % 2 == 0: return False
    if n < 9: return True
    if n % 3 == 0: return False
    r = int(n ** 0.5)
    f = 5
    while f <= r:
        if n % f == 0: return False
        if n % (f + 2) == 0: return False
        f += 6
    return True

def primefactors(x):
    loop = 2
    while loop <= x:
        if x % loop == 0:
            x /= loop
            return loop
        else:
            loop += 1

print "Case #1:"
j = 0
for candidate in xrange(2 ** (N - 2)):
    candidate = candidate << 1
    candidate += (1 + (1 << (N - 1)))
    candidate = "{0:b}".format(candidate)
    factorlist = [candidate]
    for base in xrange(2, 11):
        candidatebase = int(candidate, base)
        if is_prime(candidatebase):
            break
        else:
            factorlist.append(primefactors(candidatebase))
    if len(factorlist) == 10:
        j += 1
        for i in factorlist:
            print i,
        print
        if j == J:
            break
1c7f22e097b3a2d83172781b98634e53be397dc9
5a19bcfbdf57eff9863d3e958fd00674022c9065
/Meritt_Josh_Midterm/Part_2/Meritt_Josh_Midterm_part_2.py
613e1ee38d6e5c0d6b72c27a2a2ce5cf74246237
[]
no_license
M3ritt/ISC_215
06f57109f284d805a11406fb0792a5b20c3358a9
06d68c9cf88d264f65e5af7e8e8e04c3a68dbf10
refs/heads/main
2023-03-19T23:38:49.096647
2021-03-11T21:28:03
2021-03-11T21:28:03
340,162,873
0
0
null
null
null
null
UTF-8
Python
false
false
2,110
py
""" @Authors Josh Meritt : 805157393 Midterm part 2: Simple Linear Regression """ import os import numpy as np import matplotlib.pyplot as plt import pandas as pd from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression def get_file(): cwd = os.getcwd() file = 'Salary_Education_Data.csv' path = cwd + "\\" + file while True: try: dataset = pd.read_csv(path) read_data(dataset) return except FileNotFoundError: file = input("[Error] File was not found. Please enter a correct file: ") def read_data(dataset): x = dataset.iloc[:,:-1].values y = dataset.iloc[:,-1].values creating_sets(x,y) def creating_sets(x, y): x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 0) #print(x_train, x_test, y_train, y_test) regressor = LinearRegression() regressor.fit(x_train, y_train) y_predictions = regressor.predict(x_test) print("[Y_prediction]:", y_predictions) print("[Y_test]:", y_test) create_training_set(x_train, y_train, regressor) plt.clf() #Need to clear current plot or will be combined when plotting test set create_test_set(x_test, y_test, x_train, regressor) def create_training_set(x_train, y_train, regressor): plt.scatter(x_train, y_train, color = 'red') plt.plot(x_train, regressor.predict(x_train), color = 'blue') plt.xlabel('Years of Education (Training set)') plt.ylabel('Salary') plt.savefig('TrainingSet.png') plt.show() print('[Process complete] TrainingSet.png') def create_test_set(x_test, y_test, x_train, regressor): plt.scatter(x_test, y_test, color = 'green') plt.plot(x_train, regressor.predict(x_train), color = 'blue') plt.xlabel('Years of Education (Test set)') plt.ylabel('Salary') plt.savefig('TestSet.png') plt.show() print('[Process complete] TestSet.png') if __name__ == '__main__': get_file(); print("[Exiting program]")
8a9561159e82907417c9e0b374b3f8f11bf078ac
72e11a80587342b3f278d4df18406cd4ce7531e8
/pip-10.0.0.dev0-py3.6.egg/pip/_internal/index.py
7c1c210b697a85f199e819826dd94ed5eab4da72
[]
no_license
EnjoyLifeFund/Debian_py36_packages
740666f290cef73a4f634558ccf3fd4926addeda
1985d4c73fabd5f08f54b922e73a9306e09c77a5
refs/heads/master
2021-08-24T02:17:24.349195
2017-12-06T06:18:35
2017-12-06T06:18:35
113,167,612
1
0
null
null
null
null
UTF-8
Python
false
false
40,528
py
"""Routines related to PyPI, indexes""" from __future__ import absolute_import import cgi import itertools import logging import mimetypes import os import posixpath import re import sys import warnings from collections import namedtuple from pip._vendor import html5lib, requests, six from pip._vendor.distlib.compat import unescape from pip._vendor.packaging import specifiers from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import parse as parse_version from pip._vendor.requests.exceptions import SSLError from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip._internal.compat import ipaddress from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path from pip._internal.exceptions import ( BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, UnsupportedWheel, ) from pip._internal.models import PyPI from pip._internal.pep425tags import get_supported from pip._internal.utils.deprecation import RemovedInPip11Warning from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, cached_property, normalize_path, splitext, ) from pip._internal.utils.packaging import check_requires_python from pip._internal.wheel import Wheel, wheel_ext __all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] SECURE_ORIGINS = [ # protocol, hostname, port # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) ("https", "*", "*"), ("*", "localhost", "*"), ("*", "127.0.0.0/8", "*"), ("*", "::1/128", "*"), ("file", "*", None), # ssh is always secure. ("ssh", "*", "*"), ] logger = logging.getLogger(__name__) class InstallationCandidate(object): def __init__(self, project, version, location): self.project = project self.version = parse_version(version) self.location = location self._key = (self.project, self.version, self.location) def __repr__(self): return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format( self.project, self.version, self.location, ) def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, InstallationCandidate): return NotImplemented return method(self._key, other._key) class PackageFinder(object): """This finds packages. This is meant to match easy_install's technique for looking for packages, by reading pages and looking for appropriate links. """ def __init__(self, find_links, index_urls, allow_all_prereleases=False, trusted_hosts=None, process_dependency_links=False, session=None, format_control=None, platform=None, versions=None, abi=None, implementation=None): """Create a PackageFinder. :param format_control: A FormatControl object or None. Used to control the selection of source packages / binary packages when consulting the index and links. :param platform: A string or None. If None, searches for packages that are supported by the current system. Otherwise, will find packages that can be built on the platform passed in. 
These packages will only be downloaded for distribution: they will not be built locally. :param versions: A list of strings or None. This is passed directly to pep425tags.py in the get_supported() method. :param abi: A string or None. This is passed directly to pep425tags.py in the get_supported() method. :param implementation: A string or None. This is passed directly to pep425tags.py in the get_supported() method. """ if session is None: raise TypeError( "PackageFinder() missing 1 required keyword argument: " "'session'" ) # Build find_links. If an argument starts with ~, it may be # a local file relative to a home directory. So try normalizing # it and if it exists, use the normalized version. # This is deliberately conservative - it might be fine just to # blindly normalize anything starting with a ~... self.find_links = [] for link in find_links: if link.startswith('~'): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link self.find_links.append(link) self.index_urls = index_urls self.dependency_links = [] # These are boring links that have already been logged somehow: self.logged_links = set() self.format_control = format_control or FormatControl(set(), set()) # Domains that we won't emit warnings for when not using HTTPS self.secure_origins = [ ("*", host, "*") for host in (trusted_hosts if trusted_hosts else []) ] # Do we want to allow _all_ pre-releases? self.allow_all_prereleases = allow_all_prereleases # Do we process dependency links? self.process_dependency_links = process_dependency_links # The Session we'll use to make requests self.session = session # The valid tags to check potential found wheel candidates against self.valid_tags = get_supported( versions=versions, platform=platform, abi=abi, impl=implementation, ) # If we don't have TLS enabled, then WARN if anyplace we're looking # relies on TLS. if not HAS_TLS: for link in itertools.chain(self.index_urls, self.find_links): parsed = urllib_parse.urlparse(link) if parsed.scheme == "https": logger.warning( "pip is configured with locations that require " "TLS/SSL, however the ssl module in Python is not " "available." 
) break def get_formatted_locations(self): lines = [] if self.index_urls and self.index_urls != [PyPI.simple_url]: lines.append( "Looking in indexes: {}".format(", ".join(self.index_urls)) ) if self.find_links: lines.append( "Looking in links: {}".format(", ".join(self.find_links)) ) return "\n".join(lines) def add_dependency_links(self, links): # # FIXME: this shouldn't be global list this, it should only # # apply to requirements of the package that specifies the # # dependency_links value # # FIXME: also, we should track comes_from (i.e., use Link) if self.process_dependency_links: warnings.warn( "Dependency Links processing has been deprecated and will be " "removed in a future release.", RemovedInPip11Warning, ) self.dependency_links.extend(links) @staticmethod def _sort_locations(locations, expand_dir=False): """ Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls) """ files = [] urls = [] # puts the url for the given file path into the appropriate list def sort_path(path): url = path_to_url(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in locations: is_local_path = os.path.exists(url) is_file_url = url.startswith('file:') if is_local_path or is_file_url: if is_local_path: path = url else: path = url_to_path(url) if os.path.isdir(path): if expand_dir: path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif is_file_url: urls.append(url) elif os.path.isfile(path): sort_path(path) else: logger.warning( "Url '%s' is ignored: it is neither a file " "nor a directory.", url) elif is_url(url): # Only add url with clear scheme urls.append(url) else: logger.warning( "Url '%s' is ignored. It is either a non-existing " "path or lacks a specific scheme.", url) return files, urls def _candidate_sort_key(self, candidate): """ Function used to generate link sort key for link tuples. The greater the return value, the more preferred it is. If not finding wheels, then sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self.valid_tags) 3. source archives Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ support_num = len(self.valid_tags) build_tag = tuple() if candidate.location.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(candidate.location.filename) if not wheel.supported(self.valid_tags): raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." % wheel.filename ) pri = -(wheel.support_index_min(self.valid_tags)) if wheel.build_tag is not None: match = re.match(r'^(\d+)(.*)$', wheel.build_tag) build_tag_groups = match.groups() build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist pri = -(support_num) return (candidate.version, build_tag, pri) def _validate_secure_origin(self, logger, location): # Determine if this url used a secure transport mechanism parsed = urllib_parse.urlparse(str(location)) origin = (parsed.scheme, parsed.hostname, parsed.port) # The protocol to use to see if the protocol matches. # Don't count the repository type as part of the protocol: in # cases such as "git+ssh", only use "ssh". (I.e., Only verify against # the last scheme.) 
protocol = origin[0].rsplit('+', 1)[-1] # Determine if our origin is a secure origin by looking through our # hardcoded list of secure origins, as well as any additional ones # configured on this PackageFinder instance. for secure_origin in (SECURE_ORIGINS + self.secure_origins): if protocol != secure_origin[0] and secure_origin[0] != "*": continue try: # We need to do this decode dance to ensure that we have a # unicode object, even on Python 2.x. addr = ipaddress.ip_address( origin[1] if ( isinstance(origin[1], six.text_type) or origin[1] is None ) else origin[1].decode("utf8") ) network = ipaddress.ip_network( secure_origin[1] if isinstance(secure_origin[1], six.text_type) else secure_origin[1].decode("utf8") ) except ValueError: # We don't have both a valid address or a valid network, so # we'll check this origin against hostnames. if (origin[1] and origin[1].lower() != secure_origin[1].lower() and secure_origin[1] != "*"): continue else: # We have a valid address and network, so see if the address # is contained within the network. if addr not in network: continue # Check to see if the port patches if (origin[2] != secure_origin[2] and secure_origin[2] != "*" and secure_origin[2] is not None): continue # If we've gotten here, then this origin matches the current # secure origin and we should return True return True # If we've gotten to this point, then the origin isn't secure and we # will not accept it as a valid location to search. We will however # log a warning that we are ignoring it. logger.warning( "The repository located at %s is not a trusted or secure host and " "is being ignored. If this repository is available via HTTPS we " "recommend you use HTTPS instead, otherwise you may silence " "this warning and allow it anyway with '--trusted-host %s'.", parsed.hostname, parsed.hostname, ) return False def _get_index_urls_locations(self, project_name): """Returns the locations found via self.index_urls Checks the url_name on the main (first in the list) index and use this url_name to produce all locations """ def mkurl_pypi_url(url): loc = posixpath.join( url, urllib_parse.quote(canonicalize_name(project_name))) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's # behavior. if not loc.endswith('/'): loc = loc + '/' return loc return [mkurl_pypi_url(url) for url in self.index_urls] def find_all_candidates(self, project_name): """Find all available InstallationCandidate for project_name This checks index_urls, find_links and dependency_links. All versions found are returned as an InstallationCandidate list. See _link_package_versions for details on which files are accepted """ index_locations = self._get_index_urls_locations(project_name) index_file_loc, index_url_loc = self._sort_locations(index_locations) fl_file_loc, fl_url_loc = self._sort_locations( self.find_links, expand_dir=True) dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links) file_locations = ( Link(url) for url in itertools.chain( index_file_loc, fl_file_loc, dep_file_loc) ) # We trust every url that the user has given us whether it was given # via --index-url or --find-links # We explicitly do not trust links that came from dependency_links # We want to filter out any thing which does not have a secure origin. 
url_locations = [ link for link in itertools.chain( (Link(url) for url in index_url_loc), (Link(url) for url in fl_url_loc), (Link(url) for url in dep_url_loc), ) if self._validate_secure_origin(logger, link) ] logger.debug('%d location(s) to search for versions of %s:', len(url_locations), project_name) for location in url_locations: logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) formats = fmt_ctl_formats(self.format_control, canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links (Link(url, '-f') for url in self.find_links), search ) page_versions = [] for page in self._get_pages(url_locations, project_name): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( self._package_versions(page.links, search) ) dependency_versions = self._package_versions( (Link(url) for url in self.dependency_links), search ) if dependency_versions: logger.debug( 'dependency_links found: %s', ', '.join([ version.location.url for version in dependency_versions ]) ) file_versions = self._package_versions(file_locations, search) if file_versions: file_versions.sort(reverse=True) logger.debug( 'Local files found: %s', ', '.join([ url_to_path(candidate.location.url) for candidate in file_versions ]) ) # This is an intentional priority ordering return ( file_versions + find_links_versions + page_versions + dependency_versions ) def find_requirement(self, req, upgrade): """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ all_candidates = self.find_all_candidates(req.name) # Filter out anything which doesn't match our specifier compatible_versions = set( req.specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but setuptools isn't, Python will see # packaging.version.Version and # pkg_resources._vendor.packaging.version.Version as different # types. This way we'll use a str as a common data interchange # format. If we stop using the pkg_resources provided specifier # and start using our own, we can drop the cast to str(). [str(c.version) for c in all_candidates], prereleases=( self.allow_all_prereleases if self.allow_all_prereleases else None ), ) ) applicable_candidates = [ # Again, converting to str to deal with debundling. 
c for c in all_candidates if str(c.version) in compatible_versions ] if applicable_candidates: best_candidate = max(applicable_candidates, key=self._candidate_sort_key) else: best_candidate = None if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) else: installed_version = None if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, ', '.join( sorted( set(str(c.version) for c in all_candidates), key=parse_version, ) ) ) raise DistributionNotFound( 'No matching distribution found for %s' % req ) best_installed = False if installed_version and ( best_candidate is None or best_candidate.version <= installed_version): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( 'Existing installed version (%s) is most up-to-date and ' 'satisfies requirement', installed_version, ) else: logger.debug( 'Existing installed version (%s) satisfies requirement ' '(most up-to-date version is %s)', installed_version, best_candidate.version, ) return None if best_installed: # We have an existing version, and its the best version logger.debug( 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, ', '.join(sorted(compatible_versions, key=parse_version)) or "none", ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of versions: %s)', best_candidate.version, ', '.join(sorted(compatible_versions, key=parse_version)) ) return best_candidate.location def _get_pages(self, locations, project_name): """ Yields (page, page_url) from the given locations, skipping locations that have errors. """ seen = set() for location in locations: if location in seen: continue seen.add(location) page = self._get_page(location) if page is None: continue yield page _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') def _sort_links(self, links): """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] seen = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs def _package_versions(self, links, search): result = [] for link in self._sort_links(links): v = self._link_package_versions(link, search) if v is not None: result.append(v) return result def _log_skipped_link(self, link, reason): if link not in self.logged_links: logger.debug('Skipping link %s; %s', link, reason) self.logged_links.add(link) def _link_package_versions(self, link, search): """Return an InstallationCandidate or None""" version = None if link.egg_fragment: egg_info = link.egg_fragment ext = link.ext else: egg_info, ext = link.splitext() if not ext: self._log_skipped_link(link, 'not a file') return if ext not in SUPPORTED_EXTENSIONS: self._log_skipped_link( link, 'unsupported archive format: %s' % ext) return if "binary" not in search.formats and ext == wheel_ext: self._log_skipped_link( link, 'No binaries permitted for %s' % search.supplied) return if "macosx10" in link.path and ext == '.zip': self._log_skipped_link(link, 'macosx10 one') return if ext == wheel_ext: try: wheel = Wheel(link.filename) except InvalidWheelFilename: self._log_skipped_link(link, 'invalid wheel filename') return if canonicalize_name(wheel.name) != search.canonical: self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return if not 
wheel.supported(self.valid_tags): self._log_skipped_link( link, 'it is not compatible with this Python') return version = wheel.version # This should be up by the search.ok_binary check, but see issue 2700. if "source" not in search.formats and ext != wheel_ext: self._log_skipped_link( link, 'No sources permitted for %s' % search.supplied) return if not version: version = egg_info_matches(egg_info, search.supplied, link) if version is None: self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: self._log_skipped_link( link, 'Python version is incorrect') return try: support_this_python = check_requires_python(link.requires_python) except specifiers.InvalidSpecifier: logger.debug("Package %s has an invalid Requires-Python entry: %s", link.filename, link.requires_python) support_this_python = True if not support_this_python: logger.debug("The package %s is incompatible with the python" "version in use. Acceptable python versions are:%s", link, link.requires_python) return logger.debug('Found link %s, version: %s', link, version) return InstallationCandidate(search.supplied, version, link) def _get_page(self, link): return HTMLPage.get_page(link, session=self.session) def egg_info_matches( egg_info, search_name, link, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)): """Pull the version part out of a string. :param egg_info: The string to parse. E.g. foo-2.1 :param search_name: The name of the package this belongs to. None to infer the name. Note that this cannot unambiguously parse strings like foo-2-2 which might be foo, 2-2 or foo-2, 2. :param link: The link the string came from, for logging on failure. """ match = _egg_info_re.search(egg_info) if not match: logger.debug('Could not parse version from link: %s', link) return None if search_name is None: full_match = match.group(0) return full_match[full_match.index('-'):] name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') # project name and version must be separated by a dash look_for = search_name.lower() + "-" if name.startswith(look_for): return match.group(0)[len(look_for):] else: return None class HTMLPage(object): """Represents one page, along with its URL""" def __init__(self, content, url, headers=None): # Determine if we have any encoding information in our headers encoding = None if headers and "Content-Type" in headers: content_type, params = cgi.parse_header(headers["Content-Type"]) if "charset" in params: encoding = params['charset'] self.content = content self.parsed = html5lib.parse( self.content, transport_encoding=encoding, namespaceHTMLElements=False, ) self.url = url self.headers = headers def __str__(self): return self.url @classmethod def get_page(cls, link, skip_archives=True, session=None): if session is None: raise TypeError( "get_page() missing 1 required keyword argument: 'session'" ) url = link.url url = url.split('#', 1)[0] # Check for VCS schemes that do not support lookup as web pages. 
from pip._internal.vcs import VcsSupport for scheme in VcsSupport.schemes: if url.lower().startswith(scheme) and url[len(scheme)] in '+:': logger.debug('Cannot look at %s URL %s', scheme, link) return None try: if skip_archives: filename = link.filename for bad_ext in ARCHIVE_EXTENSIONS: if filename.endswith(bad_ext): content_type = cls._get_content_type( url, session=session, ) if content_type.lower().startswith('text/html'): break else: logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return logger.debug('Getting page %s', url) # Tack index.html onto file:// URLs that point to directories (scheme, netloc, path, params, query, fragment) = \ urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): # add trailing slash if not present so urljoin doesn't trim # final segment if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) resp = session.get( url, headers={ "Accept": "text/html", "Cache-Control": "max-age=600", }, ) resp.raise_for_status() # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every # url we cannot know ahead of time for sure if something is HTML # or not. However we can check after we've downloaded it. content_type = resp.headers.get('Content-Type', 'unknown') if not content_type.lower().startswith("text/html"): logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return inst = cls(resp.content, resp.url, resp.headers) except requests.HTTPError as exc: cls._handle_fail(link, exc, url) except SSLError as exc: reason = ("There was a problem confirming the ssl certificate: " "%s" % exc) cls._handle_fail(link, reason, url, meth=logger.info) except requests.ConnectionError as exc: cls._handle_fail(link, "connection error: %s" % exc, url) except requests.Timeout: cls._handle_fail(link, "timed out", url) else: return inst @staticmethod def _handle_fail(link, reason, url, meth=None): if meth is None: meth = logger.debug meth("Could not fetch URL %s: %s - skipping", link, reason) @staticmethod def _get_content_type(url, session): """Get the Content-Type of the given url, using a HEAD request""" scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) if scheme not in {'http', 'https'}: # FIXME: some warning or something? # assertion error? return '' resp = session.head(url, allow_redirects=True) resp.raise_for_status() return resp.headers.get("Content-Type", "") @cached_property def base_url(self): bases = [ x for x in self.parsed.findall(".//base") if x.get("href") is not None ] if bases and bases[0].get("href"): return bases[0].get("href") else: return self.url @property def links(self): """Yields all links in the page""" for anchor in self.parsed.findall(".//a"): if anchor.get("href"): href = anchor.get("href") url = self.clean_link( urllib_parse.urljoin(self.base_url, href) ) pyrequire = anchor.get('data-requires-python') pyrequire = unescape(pyrequire) if pyrequire else None yield Link(url, self, requires_python=pyrequire) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) def clean_link(self, url): """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).""" return self._clean_re.sub( lambda match: '%%%2x' % ord(match.group(0)), url) class Link(object): def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. """ # url can be a UNC windows share if url.startswith('\\\\'): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = ' (requires-python:%s)' % self.requires_python else: rp = '' if self.comes_from: return '%s (from %s)%s' % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return '<Link %s>' % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) name = posixpath.basename(path.rstrip('/')) or netloc name = urllib_parse.unquote(name) assert name, ('URL %r produced no filename' % self.url) return name @property def scheme(self): return urllib_parse.urlsplit(self.url)[0] @property def netloc(self): return urllib_parse.urlsplit(self.url)[1] @property def path(self): return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip('/'))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile( r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' ) @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) @property def is_wheel(self): return self.ext == wheel_ext @property def is_artifact(self): """ Determines if this points to an actual artifact 
(e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ from pip._internal.vcs import vcs if self.scheme in vcs.all_schemes: return False return True FormatControl = namedtuple('FormatControl', 'no_binary only_binary') """This object has two fields, no_binary and only_binary. If a field is falsy, it isn't set. If it is {':all:'}, it should match all packages except those listed in the other field. Only one field can be set to {':all:'} at a time. The rest of the time exact package name matches are listed, with any given package only showing up in one field at a time. """ def fmt_ctl_handle_mutual_exclude(value, target, other): new = value.split(',') while ':all:' in new: other.clear() target.clear() target.add(':all:') del new[:new.index(':all:') + 1] if ':none:' not in new: # Without a none, we want to discard everything as :all: covers it return for name in new: if name == ':none:': target.clear() continue name = canonicalize_name(name) other.discard(name) target.add(name) def fmt_ctl_formats(fmt_ctl, canonical_name): result = {"binary", "source"} if canonical_name in fmt_ctl.only_binary: result.discard('source') elif canonical_name in fmt_ctl.no_binary: result.discard('binary') elif ':all:' in fmt_ctl.only_binary: result.discard('source') elif ':all:' in fmt_ctl.no_binary: result.discard('binary') return frozenset(result) def fmt_ctl_no_binary(fmt_ctl): fmt_ctl_handle_mutual_exclude( ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary) Search = namedtuple('Search', 'supplied canonical formats') """Capture key aspects of a search. :attribute supplied: The user supplied package. :attribute canonical: The canonical package name. :attribute formats: The formats allowed for this package. Should be a set with 'binary' or 'source' or both in it. """
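# --- Illustrative sketch (added; not part of pip itself) ---
# The FormatControl docstring above describes how the no_binary and
# only_binary fields interact. A minimal, hedged demonstration using only
# the helpers defined in this module; the package names are made up.
if __name__ == '__main__':
    _fmt_ctl = FormatControl(no_binary=set(), only_binary=set())
    # Mimics a "--no-binary foo" style restriction:
    fmt_ctl_handle_mutual_exclude('foo', _fmt_ctl.no_binary, _fmt_ctl.only_binary)
    assert fmt_ctl_formats(_fmt_ctl, 'foo') == frozenset({'source'})
    # Packages not named keep both formats available:
    assert fmt_ctl_formats(_fmt_ctl, 'bar') == frozenset({'binary', 'source'})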
4161ab8e5b053e034447ffc09fd26f16745bd00c
a52c74a91f2de9ecf08559b1a4e6e8e07b34702d
/2_Loops_and_Lists/1_word_loop.py
7791bd34c847def50c2d35dd772884270974bb3c
[]
no_license
Demdillypickles/Python_Demos
63d5624817031437dba950d22e6a9149e0ae294d
899c25d14a1310bca227057d77713cc93c2356b3
refs/heads/main
2023-04-15T13:30:35.095365
2021-04-22T12:56:10
2021-04-22T12:56:10
360,518,026
0
0
null
null
null
null
UTF-8
Python
false
false
262
py
from time import sleep

separator = "#---------------#\n\n"

word = input("Give me a word!\n>>> ")

for x in word:
    print(x)
    sleep(0.5)

print('\n' + separator)

for i in range(0, 20):
    print(word[0: i % len(word) + 1])
    sleep(0.5)
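# --- Illustrative note (added) ---
# The modulo in the slice above cycles through growing prefixes of the word:
# for word = "cat", i % len(word) + 1 walks 1, 2, 3, 1, 2, 3, ... so the loop
# prints "c", "ca", "cat", "c", ... A sleep-free check of that behaviour:
def prefixes(word, n):
    """First n cycled prefixes, exactly as the loop above slices them."""
    return [word[0: i % len(word) + 1] for i in range(n)]

assert prefixes("cat", 4) == ["c", "ca", "cat", "c"]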
39c5075ed44cd2213f1a886f5908e0a89c393c13
f886c00fa022cc997f18e8d1228510624e274af8
/BlackJack.py
6ee9eda15954c7ceb8a820b76fecc59a6746e41f
[]
no_license
manugangadhar/Python-Bootcamp-Zero-to-Hero
9eaecfd557dca62c404cd9ed0e83fcf77d2ee58b
d7d75bc4789e20c4e655100257422f7bd9bd4183
refs/heads/master
2020-04-16T23:35:16.007948
2019-02-18T10:23:03
2019-02-18T10:23:03
166,017,373
0
0
null
null
null
null
UTF-8
Python
false
false
17,056
py
import random card_types = ['s', 'h', 'd', 'c'] global players_list players_list = {} global dealer_cards dealer_cards = [] global fiftytwo_cards fiftytwo_cards = set() class Blackjack: def __init__(self, name, bet, player_cards, gain_loss): self.name = name self.bet = bet self.player_cards = player_cards self.gain_loss = gain_loss def card_distribute(self, set_of_cards): self.player_cards.append(set_of_cards.pop()) def player_stand(self): pass def player_hit(self, set_of_cards): print(f"*****Player {self.name} will take a hit*****") self.player_cards.append(set_of_cards.pop()) def player_split(self): pass def calculate_wins(self): self.gain_loss = self.gain_loss + self.bet return self.gain_loss def calculate_loss(self): self.gain_loss = self.gain_loss - self.bet return self.gain_loss def value_of_cards(self, player_cards): card_values = [] for i in player_cards: if i[1:] in ['J','Q','K']: card_values.append(10) elif i[1:] in ['1','2','3','4','5','6','7','8','9','10']: card_values.append(int(i[1:])) else: if (21 - sum(card_values)) >= 11: card_values.append(11) else: card_values.append(1) return sum(card_values) def show_cards(self, player_cards): for i in self.player_cards: if i[0] == 's': print('------------') print(f'|{i[1:]} |') print('| |') print('| Spade - |') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'h': print('------------') print(f'|{i[1:]} |') print('| |') print('| Heart - |') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'd': print('------------') print(f'|{i[1:]} |') print('| |') print('| Diamond -|') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'c': print('------------') print(f'|{i[1:]} |') print('| |') print('| clubs - |') print('| |') print(f'| {i[1:]}|') print('------------') def card_suffle(fiftytwo_cards): random.shuffle(fiftytwo_cards) random.shuffle(fiftytwo_cards) return fiftytwo_cards def card_distribute_dealer(set_cards): global set_of_cards dealer_cards.append(set_of_cards.pop()) def show_cards_dealer(dealer_cards): for i in dealer_cards: if i[0] == 's': print('------------') print(f'|{i[1:]} |') print('| |') print('| Spade - |') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'h': print('------------') print(f'|{i[1:]} |') print('| |') print('| Heart - |') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'd': print('------------') print(f'|{i[1:]} |') print('| |') print('| Diamond -|') print('| |') print(f'| {i[1:]}|') print('------------') if i[0] == 'c': print('------------') print(f'|{i[1:]} |') print('| |') print('| clubs - |') print('| |') print(f'| {i[1:]}|') print('------------') def check_value_of_dealercards(dealer_cards): card_values = [] for i in dealer_cards: if i[1:] in ['J', 'Q', 'K']: card_values.append(10) elif i[1:] in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']: card_values.append(int(i[1:])) else: if (21 - sum(card_values)) >= 11: card_values.append(11) else: card_values.append(1) return sum(card_values) def populate_cards(card_types): cards_fiftytwo = [] for i in card_types: for j in range(1, 11): if j == 1: cards_fiftytwo.append(i + 'A') else: cards_fiftytwo.append(i+str(j)) cards_fiftytwo.append(i+'J') cards_fiftytwo.append(i+'Q') cards_fiftytwo.append(i+'K') return cards_fiftytwo def card_distribution(players_list, set_of_cards): for i in players_list.values(): i.card_distribute(set_of_cards) card_distribute_dealer(set_of_cards) for i in players_list.values(): i.card_distribute(set_of_cards) 
print("#######################################") print("#Player {} cards are displayed below#".format(i.name)) print("#######################################") i.show_cards(i.player_cards) print("Total value of your cards is {}".format(i.value_of_cards(i.player_cards))) card_distribute_dealer(set_of_cards) print("#######################################") print("#Dealer first card is displayed below#") print("#######################################") show_cards_dealer([dealer_cards[0]]) temp_cards = populate_cards(card_types) set_of_cards = card_suffle(temp_cards) print(set_of_cards) set_of_cards.reverse() number_players = int(input("Choose number of players who wish to play BlackJack\n")) for i in range(1, number_players + 1): name = input("Enter Player+{} name: ".format(str(i))) bet = int(input("Enter your betting amount: ")) player_cards = [] players_list[str(i)] = Blackjack(name, bet, player_cards,0) while 1: card_distribution(players_list, set_of_cards) if check_value_of_dealercards(dealer_cards) == 21: print("JackPot for Dealer he got 21") print("#######################################") print("#Dealer cards are displayed below#") print("#######################################") show_cards_dealer(dealer_cards) for i in players_list.values(): print(f"Player {i.name} loss or gain is {i.calculate_loss()}") if int(input("Do you wish to continue? Enter 0 or 1")): dealers_cards = [] for i in players_list.values(): i.player_cards = [] continue else: print("*********Bye***** Bye*****") quit() else: print("Dealer doesn't have 21") count_number_players_greater_21 = 0 players_on_stay = [] for i in players_list.values(): while 1: if i.value_of_cards(i.player_cards) == 21: print(f"Jackpot for player {i.name}") print("#######################################") print("#Player {} cards are displayed below#".format(i.name)) print("#######################################") i.show_cards(i.player_cards) print(f"Player {i.name} Total gains or loss is {i.calculate_wins()}") i.player_cards = [] break if i.value_of_cards(i.player_cards) < 21: hit_stand_split = int(input(f"Player {i.name}, press 1 for hit, press 2 for stand, press 3 for split")) if hit_stand_split == 1: i.card_distribute(set_of_cards) print("#######################################") print("#Player {} cards are displayed below#".format(i.name)) print("#######################################") i.show_cards(i.player_cards) print("Total value of your cards is {}".format(i.value_of_cards(i.player_cards))) continue if hit_stand_split == 2: players_on_stay.append(i) print(f"Player {i.name} has opted for stay on value {i.value_of_cards(i.player_cards)}") print("#######################################") print("#Player {} cards are displayed below#".format(i.name)) print("#######################################") i.show_cards(i.player_cards) break if hit_stand_split == 3: pass if i.value_of_cards(i.player_cards) > 21: count_number_players_greater_21 = count_number_players_greater_21 + 1 print(f"Player {i.name} cards value is more than 21") print("#######################################") print("#Player {} cards are displayed below#".format(i.name)) print("#######################################") i.show_cards(i.player_cards) print(f"Player {i.name} Total gains or loss is {i.calculate_loss()}") i.player_cards = [] break if count_number_players_greater_21 == number_players: print("All players lost and Dealer Won") dealers_cards = [] for i in players_list.values(): i.player_cards = [] if int(input("Do you wish to continue? 
Enter 0 or 1")): continue else: print("*********Bye***** Bye*****") quit() else: print("#######################################") print("#Dealer cards are displayed below######") print("#######################################") show_cards_dealer(dealer_cards) while 1: if check_value_of_dealercards(dealer_cards) > 21: print("Dealer lost the game") print("#######################################") print("#Dealer cards are displayed below######") print("#######################################") show_cards_dealer(dealer_cards) for i in players_on_stay: print(f"Player {i.name} Total Gains or Loss is {i.calculate_wins()}") i.player_cards = [] players_on_stay = [] dealers_cards = [] if int(input("Do you wish to continue? Enter 0 or 1")): break else: print("*********Bye***** Bye*****") quit() if check_value_of_dealercards(dealer_cards) == 21: print("JackPot for Dealer he got 21") print("#######################################") print("#Dealer cards are displayed below#") print("#######################################") show_cards_dealer(dealer_cards) dealers_cards = [] for i in players_on_stay: print(f"Player {i.name} loss or gain is {i.calculate_loss()}") i.player_cards = [] players_on_stay = [] if int(input("Do you wish to continue? Enter 0 or 1")): break else: print("*********Bye***** Bye*****") quit() if check_value_of_dealercards(dealer_cards) < 21: count_negative = 0 count_positive = 0 for i in players_on_stay: if check_value_of_dealercards(dealer_cards) - i.value_of_cards(i.player_cards) < 0: count_negative = count_negative + 1 if check_value_of_dealercards(dealer_cards) - i.value_of_cards(i.player_cards) > 0: count_positive = count_positive + 1 if (count_positive < count_negative) and (21 - check_value_of_dealercards(dealer_cards)) >= 6: card_distribute_dealer(set_of_cards) print("Dealer went for hit as he is having less than 21") print("#######################################") print("#Dealer cards are displayed below#") print("#######################################") show_cards_dealer(dealer_cards) continue if (count_positive < count_negative) and (21 - check_value_of_dealercards(dealer_cards)) < 6: print("Dealer decided to stay and no more hits") for i in players_on_stay: if i.value_of_cards(i.player_cards) > check_value_of_dealercards(dealers_cards): print(f"Player {i.name} loss or gain is {i.calculate_win()}") elif i.value_of_cards(i.player_cards) == check_value_of_dealercards(dealers_cards): print(f"No Loss or Gain for player {i.name}") else: print(f"Player {i.name} loss or gain is {i.calculate_loss()}") dealers_cards = [] for i in players_on_stay: print(f"Player {i.name} loss or gain is {i.calculate_loss()}") i.player_cards = [] players_on_stay = [] if int(input("Do you wish to continue? Enter 0 or 1")): break else: print("*********Bye***** Bye*****") quit() if (count_positive > count_negative) and (21 - check_value_of_dealercards(dealer_cards)) < 6: print("Dealer decided to stay and no more hits") for i in players_on_stay: if i.value_of_cards(i.player_cards) > check_value_of_dealercards(dealers_cards): print(f"Player {i.name} loss or gain is {i.calculate_win()}") elif i.value_of_cards(i.player_cards) == check_value_of_dealercards(dealers_cards): print(f"No Loss or Gain for player {i.name}") else: print(f"Player {i.name} loss or gain is {i.calculate_loss()}") dealers_cards = [] for i in players_on_stay: print(f"Player {i.name} loss or gain is {i.calculate_loss()}") i.player_cards = [] players_on_stay = [] if int(input("Do you wish to continue? 
Enter 0 or 1")): break else: print("*********Bye***** Bye*****") quit() if count_positive > count_negative and (21 - check_value_of_dealercards(dealer_cards)) >= 6: card_distribute_dealer(set_of_cards) print("Dealer went for hit as he is having less than 21") print("#######################################") print("#Dealer cards are displayed below#") print("#######################################") show_cards_dealer(dealer_cards) continue if count_positive == count_negative and (21 - check_value_of_dealercards(dealer_cards)) >= 6: card_distribute_dealer(set_of_cards) print("Dealer went for hit as he is having less than 21") print("#######################################") print("#Dealer cards are displayed below#") print("#######################################") show_cards_dealer(dealer_cards) continue if count_positive == count_negative and (21 - check_value_of_dealercards(dealer_cards)) < 6: print("Dealer decided to stay and no loss no gain for rest of the players") print(f"No Loss or Gain for player {i.name}") dealers_cards = [] for i in players_on_stay: i.player_cards = [] players_on_stay = [] if int(input("Do you wish to continue? Enter 0 or 1")): break else: print("*********Bye***** Bye*****") quit()
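# --- Illustrative sketch (added; not part of the original game) ---
# value_of_cards above decides each ace's value (11 or 1) at the moment the
# ace is seen, which is order dependent: in ['sA', 'd9', 'hK'] the ace is
# fixed at 11 before the king arrives, so the hand is scored 30 (bust)
# instead of 20. The standard fix is to count every ace as 11 first and
# demote aces to 1 while the hand busts:
def hand_value(cards):
    total, aces = 0, 0
    for card in cards:
        rank = card[1:]  # same "suit letter + rank" encoding as above
        if rank in ('J', 'Q', 'K'):
            total += 10
        elif rank == 'A':
            total += 11
            aces += 1
        else:
            total += int(rank)
    while total > 21 and aces:  # demote an ace from 11 to 1
        total -= 10
        aces -= 1
    return total

assert hand_value(['sA', 'hK']) == 21
assert hand_value(['sA', 'd9', 'hK']) == 20  # ace demoted to 1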
5046ab134fa58280291b52068b117f161cc3c023
69cc3a2d036f5a7d26844913a8ae6167a26c33c4
/univ_softplus.py
ad4aeb9711d3d7f39805910a2f127a9e0ee39323
[]
no_license
JakeSigwart/Curve_fitting
c6758ce69b1e22973940418f4ccc5333046038f8
67adbd2d51aff023379143e17fe154cb7e21b6ce
refs/heads/master
2020-03-22T01:59:03.302925
2018-07-01T15:57:29
2018-07-01T15:57:29
139,342,405
0
0
null
null
null
null
UTF-8
Python
false
false
2,591
py
# The purpose of this program is to show that a neural network is a universal approximator.
# In order to be a universal approximator, the network must have non-linearities, i.e. the softplus function.
# I am training the network to approximate sin x on [-2*pi, 2*pi]
# Results:
import os
import math
import time
import random
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

path = os.path.dirname(__file__)
save_path = path + "\\univ_softplus\\classifier.ckpt"

sess = tf.Session()
inputs = tf.placeholder(tf.float32, shape=[None, 1], name='inputs')
labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')
inputs_1 = tf.map_fn(lambda x_val: tf.fill([64], 1.000)*x_val, inputs)

W_1 = tf.Variable(tf.truncated_normal([64, 64], mean=0.0, stddev=0.088), name='W_1')
b_1 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_1')
h_1 = tf.matmul(inputs_1, W_1) + b_1

W_2 = tf.Variable(tf.truncated_normal([64, 64], mean=0.0, stddev=0.088), name='W_2')
b_2 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_2')
h_2 = tf.matmul(h_1, W_2) + b_2
h_actv_2 = tf.nn.softplus(h_2, name='h_actv_2')

W_3 = tf.Variable(tf.truncated_normal([64, 64], mean=0.0, stddev=0.088), name='W_3')
b_3 = tf.Variable(tf.constant(0.0005, shape=[64]), name='b_3')
h_3 = tf.matmul(h_actv_2, W_3) + b_3
h_actv_3 = tf.nn.softplus(h_3, name='h_actv_3')

W_4 = tf.Variable(tf.truncated_normal([64, 1], mean=0.0, stddev=0.088), name='W_4')
h_output = tf.matmul(h_actv_3, W_4)

loss = tf.reduce_mean(tf.square(h_output - labels))
Optimize = tf.train.AdamOptimizer(5e-3).minimize(loss)

saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if os.path.isfile(path + "\\univ_softplus\\checkpoint"):
    saver.restore(sess, save_path)
    print("Model restored.")
else:
    print('Building new model...')

# TRAINING
num_batches = 1024
batch_size = 1024
for i in range(num_batches):
    # get random sine values
    x = 4*math.pi*np.random.sample(batch_size) - 2*math.pi
    x = x.reshape([batch_size, 1])
    y = np.zeros(shape=[batch_size, 1], dtype=float)
    # y = sin of x[]
    for index in range(batch_size):
        y[index, 0] = math.sin(x[index])
    gradients, loss_out, output = sess.run([Optimize, loss, h_output], feed_dict={inputs: x, labels: y})
    print(str(x[0]) + ', ' + str(output[0]))
    print("Batch: " + str(i) + " MSE loss: " + str(loss_out))  # the loss is mean squared error, not cross entropy

save_path = saver.save(sess, path + "\\univ_softplus\\classifier.ckpt")
plt.scatter(x, output)
plt.xlabel('Independent variable x')
plt.ylabel('NN approximation of sin(x)')
plt.show()
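# --- Illustrative follow-up (added) ---
# The scatter plot above uses the last *random* training batch, so the curve
# looks noisy. Evaluating the trained network on an ordered grid (reusing the
# session and graph defined above) gives a cleaner picture of the fit:
x_grid = np.linspace(-2 * math.pi, 2 * math.pi, 512).reshape([512, 1])
y_grid = sess.run(h_output, feed_dict={inputs: x_grid})
plt.plot(x_grid, y_grid, label='network output')
plt.plot(x_grid, np.sin(x_grid), label='sin(x)')
plt.legend()
plt.show()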
e892918ab46f8e39c2e19115b98da50e402eff2c
72b8cedcfeb3b98abf2965efa5f5ba7b02563647
/train.py
4d917ecf1f2ae99e6a3d49be9bd8166ed2c77d53
[]
no_license
18205097282/DBFC-Net
48f0c1139d719c3769658dec16303a1bfafe5702
5166ffe45651595eb81fee6be0879173e12cec9b
refs/heads/master
2022-11-27T03:11:33.487739
2020-07-12T14:20:29
2020-07-12T14:20:29
260,202,702
0
0
null
null
null
null
UTF-8
Python
false
false
5,738
py
import os
import time

import torch
from torch.autograd import Variable

from util import AverageMeter, Log
from rankingloss import *


def train(train_loader, train_loader1, train_loader2, train_loader3, args, model, criterion, center_loss,
          optimizer, epoch, num_epochs):
    print(len(train_loader), len(train_loader1), len(train_loader2), len(train_loader3))
    """ (images) 1499, (videos) 1499, (audio) 1500, (text documents) 1000 """
    count = 0
    since = time.time()
    running_loss0 = AverageMeter()
    running_loss1 = AverageMeter()
    running_loss2 = AverageMeter()
    running_loss3 = AverageMeter()
    running_loss4 = AverageMeter()
    running_loss5 = AverageMeter()
    running_loss6 = AverageMeter()
    running_loss7 = AverageMeter()
    running_loss = AverageMeter()
    log = Log()
    model.train()

    image_acc = 0
    text_acc = 0
    video_acc = 0
    audio_acc = 0
    for (i, (input, target)), (j, (input1, target1)), (k, (input2, target2)), (p, (input3, target3)) in zip(
            enumerate(train_loader), enumerate(train_loader1), enumerate(train_loader2), enumerate(train_loader3)):
        """(i, j, k, p) advance together as (n, n, n, n) with n running from 0 to 999,
        so only 1000 iterations happen in total -- this is a problem!"""
        input_var = Variable(input.cuda())
        input_var1 = Variable(input1.cuda())
        input_var2 = Variable(input2.cuda())
        input_var3 = Variable(input3.cuda())

        targets = torch.cat((target, target1, target2, target3), 0)
        targets = Variable(targets.cuda())
        target_var = Variable(target.cuda())
        target_var1 = Variable(target1.cuda())
        target_var2 = Variable(target2.cuda())
        target_var3 = Variable(target3.cuda())

        outputs, feature = model(input_var, input_var1, input_var2, input_var3)

        size = int(outputs.size(0) / 4)
        img = outputs.narrow(0, 0, size)
        vid = outputs.narrow(0, size, size)
        aud = outputs.narrow(0, 2 * size, size)
        txt = outputs.narrow(0, 3 * size, size)

        _, predict1 = torch.max(img, 1)  # dim=0 takes the max down each column, dim=1 along each row
        _, predict2 = torch.max(vid, 1)
        _, predict3 = torch.max(txt, 1)
        _, predict4 = torch.max(aud, 1)
        image_acc += torch.sum(torch.squeeze(predict1.float() == target_var.float())).item() / float(
            target_var.size()[0])
        video_acc += torch.sum(torch.squeeze(predict2.float() == target_var1.float())).item() / float(
            target_var1.size()[0])
        audio_acc += torch.sum(torch.squeeze(predict4.float() == target_var2.float())).item() / float(
            target_var2.size()[0])
        text_acc += torch.sum(torch.squeeze(predict3.float() == target_var3.float())).item() / float(
            target_var3.size()[0])

        loss0 = criterion(img, target_var)
        loss1 = criterion(vid, target_var1)
        loss2 = criterion(aud, target_var2)
        loss3 = criterion(txt, target_var3)
        loss4 = loss0 + loss1 + loss2 + loss3
        loss5 = center_loss(feature, targets) * 0.001
        if (args.loss_choose == 'r'):
            loss6, _ = ranking_loss(targets, feature, margin=1, margin2=0.5, squared=False)
            loss6 = loss6 * 0.1
        else:
            loss6 = 0.0
        loss = loss4 + loss5 + loss6  # +loss7
        # print(loss)

        batchsize = input_var.size(0)
        running_loss0.update(loss0.item(), batchsize)
        running_loss1.update(loss1.item(), batchsize)
        running_loss2.update(loss2.item(), batchsize)
        running_loss3.update(loss3.item(), batchsize)
        running_loss4.update(loss4.item(), batchsize)
        running_loss5.update(loss5.item(), batchsize)
        # running_loss7.update(loss7.item(), batchsize)
        if (args.loss_choose == 'r'):
            running_loss6.update(loss6.item(), batchsize)
        running_loss.update(loss.item(), batchsize)

        optimizer.zero_grad()
        loss.backward()
        for param in center_loss.parameters():
            param.grad.data *= (1. / 0.001)
        optimizer.step()
        count += 1

        if (i % args.print_freq == 0):
            print('-' * 20)
            print('Epoch [{0}/{1}][{2}/{3}]'.format(epoch, num_epochs, i, len(train_loader)))
            print('Image Loss: {loss.avg:.5f}'.format(loss=running_loss0))
            print('Video Loss: {loss.avg:.5f}'.format(loss=running_loss1))
            print('Audio Loss: {loss.avg:.5f}'.format(loss=running_loss2))
            print('Text Loss: {loss.avg:.5f}'.format(loss=running_loss3))
            print('AllMedia Loss: {loss.avg:.5f}'.format(loss=running_loss4))
            print('Center Loss: {loss.avg:.5f}'.format(loss=running_loss5))
            # print('separate Loss: {loss.avg:.5f}'.format(loss=running_loss7))
            if (args.loss_choose == 'r'):
                print('Ranking Loss: {loss.avg:.5f}'.format(loss=running_loss6))
            print('All Loss: {loss.avg:.5f}'.format(loss=running_loss))
            # log.save_train_info(epoch, i, len(train_loader), running_loss)

    print("Training epoch %d:" % epoch)
    print("image:", image_acc / len(train_loader3))
    print("text:", text_acc / len(train_loader3))
    print("video:", video_acc / len(train_loader3))
    print("audio:", audio_acc / len(train_loader3))
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60),
          "trained %d batches" % count)
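# --- Illustrative sketch (added; not part of the original repository) ---
# As the translated comment above notes, zip() stops at the shortest of the
# four loaders (1000 batches), so roughly a third of the image/video/audio
# batches are never seen in an epoch. One common workaround is to cycle the
# shorter loaders so that one full pass is made over the longest loader:
import itertools


def zip_cycle_shorter(*loaders):
    """Yield batch tuples for one pass over the longest loader, cycling the rest."""
    longest = max(len(loader) for loader in loaders)
    iters = [itertools.cycle(loader) if len(loader) < longest else iter(loader)
             for loader in loaders]
    for _ in range(longest):
        yield tuple(next(it) for it in iters)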
9b655a2950c9d31a015a40bfc9dd87f0b5641102
9c47be5ddab07802e5812b8f1a2f6830b9323f10
/untitled/modules/arr_procedures.py
eef9193968c70b9d41514f0724e83fe31f4609c7
[]
no_license
hotriluc/pyth_diploma
db2c8d41dd20f69ac0028f8eb6dc08ab8efb9202
e2a92802487cd4d67d7ab05f3f19254e08ed746b
refs/heads/master
2021-07-10T06:05:34.582939
2020-10-30T20:24:04
2020-10-30T20:24:04
210,030,207
0
0
null
null
null
null
UTF-8
Python
false
false
12,223
py
import copy import itertools import pickle import numpy as np from modules.calculations import coprimes, printFullStat, getPair, getMax #==============================BASIC MANIPULATION WITH ARRAYS============================== # write string def writeListInFile(aList:list,filepath): f = open(filepath,"w") for i in range(0,len(aList)): f.write(str(i)+") "+str(aList[i])+"\n") f.close() # write binary mode def writeListInBinFile(aList:list,file_name): with open(file_name, 'wb') as F: # Dump the list to file pickle.dump(aList, F) F.close() def loadFromBinFile(file_name): with open(file_name, 'rb') as F: aList = pickle.load(F) F.close() return aList def print_2d_arr(aList): for row in aList: for item in row: print("{0:5d}".format(item), end="") print("") # print list of list(signals) def print_sig_in_list(aList): i = 0 for item in aList: print(i, ") ", item) i += 1 # using simple pop and insert we created Shifting list with zeroing def ShiftRight(aList: list, steps: int): # for negative steps if steps < 0: steps = abs(steps) for i in range(steps): # pop first element # everything is shifted to the left after we pop aList.pop(0) # adding to the end 0 aList.append(0) else: for i in range(steps): # insert zero to the 0 position aList.insert(0, 0) # poping last el # everything is shifted to right aList.pop() def CyclicShiftRight(aList: list, steps: int): # for negative steps if steps < 0: steps = abs(steps) for i in range(steps): # adding to the end popped 0th el aList.append(aList.pop(0)) else: for i in range(steps): # adding to the beginning popped(last) el aList.insert(0, aList.pop()) # def ShiftRight(aList:list,pos:int): # return aList[pos:len(aList):]*0 + aList[0:pos:] # for i in range(1,len(aList)-1): #============================== ADVANCED MANIPULATION WITH ARRAYS(SIGNALS============================== #============================== DECIMATION============================== def decimation(a_List: list, b_List: list, d: int): for i in range(0, len(a_List)): pos = (d + d * i) % len(a_List) b_List[i] = a_List[pos] # Getting all signals created with decimation # (number of signals = Euler totient function of signal length) def getDecimation(source_signal): #getting source singal length sig_len = len(source_signal) # Copy source signal (in order to not interfere source signal) sig1_ = copy.deepcopy(source_signal) # Creating list for storing signals decimation_list = list() #Geting coprimes of sign len e.g len= 256 coprimes = [1,3,5,7 ... 
255] # index 1 because of coprimes method return tuple of 2element # (the number of total coprimes, and list of coprimes) # coprime_list = coprimes(sig_len)[1] # For each coprime of source signal length we will create a signal using decimation for i in range(len(coprime_list)): # tmp for signal we are going to get with decimation sig2_ = [0 for i in range(sig_len)] # creating rest signals with decimation decimation(sig1_, sig2_, coprime_list[i]) # appending decimation_list with list # that contains decimated sig and # decimation coefficient that used to create that sig decimation_list.append([sig2_, coprime_list[i]]) return decimation_list # decimation list consist lists that have elements signals and its decimtion coefficient def get_decimated_signals(decimation_list): only_decimated_signals = list() for i in range(0, len(decimation_list)): only_decimated_signals.append(decimation_list[i][0]) return only_decimated_signals #============================== DERIVATIVE SIGNALS FORMATION============================== # ansambles must contain same number of signals # USING WITH HADAMAR DISCRETE SIGNALS in order to get derivative signals def derivativeSig(ansamble_sig1: list, ansamble_sig2: list): der_sig_list = [] for i in range(0, len(ansamble_sig1)): tmp = np.array(ansamble_sig1[i]) * np.array(ansamble_sig2[i]) der_sig_list.append(tmp.tolist()) return der_sig_list def derivativeSigALL(ansamble_sig1: list, ansamble_sig2: list): der_sig_list = [] sig_comb_list = [] for i in range(0, len(ansamble_sig1)): for j in range(0, len(ansamble_sig2)): der_sig_list.append(np.multiply(ansamble_sig1[i], ansamble_sig2[j]).tolist()) sig_comb_list.append((i, j)) return der_sig_list, sig_comb_list # Same as above but used starting and ending point of inner cycle (from to) # of course you can used the first mentioned by passing slice of an array(list) # but if you do that index of your arrays will stat from 0 for slice # but this from to allows us to identify which exactly hadamar sinal was used to form # derivative def derivativeSigFromTo(ansamble_sig1: list, hadamar_sig: list,hadam_from,hadam_to): der_sig_list = [] sig_comb_list = [] for i in range(0, len(ansamble_sig1)): for j in range(hadam_from, hadam_to): der_sig_list.append(np.multiply(ansamble_sig1[i], hadamar_sig[j]).tolist()) sig_comb_list.append((i, j)) return der_sig_list, sig_comb_list # You should get combinations using function above (derivativeSigFromTo) # def print_derivative(dersig, combinations): for i in range(0, len(dersig)): print("CS#{0} and HADAMAR#{1}".format(combinations[i][0], combinations[i][1])) print(dersig[i]) #============================== CORRELATION============================== def calculate_correlation_coef(sig1_: list, sig2_: list): R = 0 for i in range(0, len(sig1_)): tmp = sig1_[i] * sig2_[i] R += tmp return R # source_sig - source signal is NOT SHIFTED shifted_sig - copy of the signal WILL BE SHIFTED ( for pereodic auto # correlation)/ other signal (for pereodic cross correlation) flag = true for APEREODIC correaltion # else default for PEREODIC def getCorellation(source_sig: list, shifted_sig: list, flag: bool = False): # Creating copy of shifted signal to not trasform array from main program(for further usage) # because in Python list contains not val but reference to objects tmp_shifted_sig = copy.deepcopy(shifted_sig) correl_list = list() r = calculate_correlation_coef(source_sig, tmp_shifted_sig) correl_list.append(r) for i in range(0, len(source_sig)): if flag == False: CyclicShiftRight(tmp_shifted_sig, 1) else: 
ShiftRight(tmp_shifted_sig, 1) r = calculate_correlation_coef(source_sig, tmp_shifted_sig) correl_list.append(r) return correl_list # for i in range(0,len(sig1_)): # E.g you have ansamble of signals and you want to know all cross-correlation between all possible pairs # for derivative signals(with HADAMAR) # ONLY TO GET CROSS CORRELATION def cross_corel_btwn_pairs(list_with_signals: list, mode_name): aList = list() for pair in itertools.combinations(list_with_signals, 2): a_sig, b_sig = pair # print(pair) if mode_name == "PFVK": r = getCorellation(a_sig, b_sig) if mode_name == "AFVK": r = getCorellation(a_sig, b_sig, True) aList.append(r) return aList # the same logic as above function # but here you passing list pair # this one without usin itertools def cross_corel_btwn_pairs2(list_with_signals: list, pair_list: list, mode_name): aList = list() for x, y in pair_list: # print(pair) if mode_name == "PFVK": r = getCorellation(list_with_signals[x], list_with_signals[y]) if mode_name == "AFVK": r = getCorellation(list_with_signals[x], list_with_signals[y], True) aList.append(r) return aList # Having list with signals # getting list of list with signals' pereodic/apereodic auto correlation function def auto_corel_all(list_with_signals: list, mode_name): aList = list() sig_num_list = list() for item in list_with_signals: if mode_name == "PFAK": r = getCorellation(item, item) if mode_name == "AFAK": r = getCorellation(item, item, True) aList.append(r) return aList # исходный сигнал с другими сигналами # getting list of correlations of all signals def corel_source_and_rest(source_sig, list_with_signals: list, mode_name): aList = list() for item in list_with_signals: if mode_name == "PFVK": r = getCorellation(source_sig, item) if mode_name == "AFVK": r = getCorellation(source_sig, item, True) if mode_name == "PFAK": r = getCorellation(item, item) if mode_name == "AFAK": r = getCorellation(item, item, True) # print(r) aList.append(r) return aList #============================== TESTING CORRELATION============================== # Used for testing ensemble of signals and printed their statistics # using closure to keep DRY def ansamble_correlation(mode): def fak_stat(ansamble_of_sig): print(mode) sig_len = len(ansamble_of_sig[0]) if len(ansamble_of_sig) > 0: asnsam_fak_list = auto_corel_all(ansamble_of_sig, mode) printFullStat(asnsam_fak_list, 1, sig_len - 1, True) printFullStat(asnsam_fak_list, 1, sig_len - 1) def fvk_stat(ansamble_of_sig): print(mode) sig_len = len(ansamble_of_sig[0]) if len(ansamble_of_sig) > 0: pair_list = getPair([i for i in range(0, len(ansamble_of_sig))]) # cross_correl_btwn_pairs you can use anothe method correl_source_and_rest fvk_sig_list = cross_corel_btwn_pairs(ansamble_of_sig, mode) printFullStat(fvk_sig_list, 0, sig_len, True, list_of_num=pair_list) printFullStat(fvk_sig_list, 0, sig_len, list_of_num=pair_list) if (mode == 'PFAK') or (mode =='AFAK'): return fak_stat elif (mode == 'PFVK') or (mode =='AFVK'): return fvk_stat else: print("Something wrong there is no mode like that") # FOR 1 SIGNAL def test_auto_correl (source_sig): print("Signal: ",source_sig) start_point = 1 finish_point = len(source_sig)-1 sig1_ = copy.deepcopy(source_sig) sig2_ = copy.deepcopy(source_sig) print("PFAK") pereodic_auto_corel_list = getCorellation(sig1_, sig2_) print("R = ", pereodic_auto_corel_list) print("Rmax = ", getMax(pereodic_auto_corel_list, start_point, finish_point , True)) print("AFAK") apereodic_auto_corel_list = getCorellation(sig1_, sig2_, True) print("R = ", 
apereodic_auto_corel_list) print("Rmax = ", getMax(apereodic_auto_corel_list, start_point, finish_point)) return pereodic_auto_corel_list, apereodic_auto_corel_list # 1 source singal with each from ansamble def test_cross_correl (source_sig, ansamble_of_sig): sig1_ = copy.deepcopy(source_sig) ansamble_of_pereodic_cross_corel_list = corel_source_and_rest(sig1_, ansamble_of_sig, "PFVK") print("\nPFVK") printFullStat(ansamble_of_pereodic_cross_corel_list, 0, len(source_sig), True) printFullStat(ansamble_of_pereodic_cross_corel_list, 0, len(source_sig)) ansamble_of_apereodic_cross_corel_list = corel_source_and_rest(sig1_, ansamble_of_sig, "AFVK") print("\nAFVK") printFullStat(ansamble_of_apereodic_cross_corel_list, 0, len(source_sig), True) printFullStat(ansamble_of_apereodic_cross_corel_list, 0, len(source_sig))
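# --- Illustrative cross-check (added; not part of the original module) ---
# getCorellation computes periodic correlation by explicit cyclic shifts,
# which is O(N^2) per signal pair. For real-valued signals the same periodic
# autocorrelation can be obtained in O(N log N) via the FFT (Wiener-Khinchin
# relation); because the autocorrelation of a real sequence is even, these
# values match the first N entries of getCorellation(sig, sig) (whose last
# entry just repeats the zero-shift value).
import numpy as np

def periodic_autocorrelation(sig):
    """Circular autocorrelation of a real sequence via FFT."""
    spectrum = np.fft.fft(np.asarray(sig, dtype=float))
    return np.real(np.fft.ifft(spectrum * np.conj(spectrum)))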
afec745bb4473e5a96a1af30aa862110d6309a68
db0633d8ec23937f087bfdccf53afe8d584906c2
/datasets/harem/harem.py
68ca465314e3be3910a1c30b445f013349cacf59
[ "Apache-2.0" ]
permissive
pranavnt/datasets
8b7d671c9f9a9a28097e361d43494b779dd2117b
f2439a743012055b4ad8d5c1b758658afe246f3e
refs/heads/master
2023-02-23T01:14:13.066679
2021-01-27T02:03:36
2021-01-27T02:03:36
318,725,185
1
0
Apache-2.0
2021-01-27T02:03:38
2020-12-05T07:05:31
null
UTF-8
Python
false
false
11,684
py
# coding=utf-8 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """HAREM dataset""" from __future__ import absolute_import, division, print_function import json import logging import unicodedata from typing import List, Tuple import datasets _CITATION = """ @inproceedings{santos2006harem, title={Harem: An advanced ner evaluation contest for portuguese}, author={Santos, Diana and Seco, Nuno and Cardoso, Nuno and Vilela, Rui}, booktitle={quot; In Nicoletta Calzolari; Khalid Choukri; Aldo Gangemi; Bente Maegaard; Joseph Mariani; Jan Odjik; Daniel Tapias (ed) Proceedings of the 5 th International Conference on Language Resources and Evaluation (LREC'2006)(Genoa Italy 22-28 May 2006)}, year={2006} } """ _DESCRIPTION = """ The HAREM is a Portuguese language corpus commonly used for Named Entity Recognition tasks. It includes about 93k words, from 129 different texts, from several genres, and language varieties. The split of this dataset version follows the division made by [1], where 7% HAREM documents are the validation set and the miniHAREM corpus (with about 65k words) is the test set. There are two versions of the dataset set, a version that has a total of 10 different named entity classes (Person, Organization, Location, Value, Date, Title, Thing, Event, Abstraction, and Other) and a "selective" version with only 5 classes (Person, Organization, Location, Value, and Date). It's important to note that the original version of the HAREM dataset has 2 levels of NER details, namely "Category" and "Sub-type". The dataset version processed here ONLY USE the "Category" level of the original dataset. [1] Souza, Fábio, Rodrigo Nogueira, and Roberto Lotufo. "BERTimbau: Pretrained BERT Models for Brazilian Portuguese." Brazilian Conference on Intelligent Systems. Springer, Cham, 2020. 
""" _HOMEPAGE = "https://www.linguateca.pt/primeiroHAREM/harem_coleccaodourada_en.html" _LICENSE = "" _URLs = { "default": { "train": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-total-train.json", "dev": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-total-dev.json", "test": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/MiniHAREM-total.json", }, "selective": { "train": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-selective-train.json", "dev": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/FirstHAREM-selective-dev.json", "test": "https://raw.githubusercontent.com/neuralmind-ai/portuguese-bert/master/ner_evaluation/data/MiniHAREM-selective.json", }, } # method extracted from https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L77-L89 def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False # method extracted from https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L53-L62 def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. 
if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False class Token: """Info about a single token.""" def __init__(self, text: str, tail: str = ""): if not isinstance(text, str) or not text: raise TypeError("text should be a non-empty string.") self.text = text self.tail = tail def __len__(self): return len(self.text) + len(self.tail) def __add__(self, char): self.text += char return self def reconstruct_text_from_tokens(tokens: List[Token], include_last_tail: bool = False) -> str: """Concatenates the text of a sequence of tokens.""" def text_generator(tokens): for i, token in enumerate(tokens): yield token.text if i < len(tokens) - 1 or include_last_tail: yield token.tail return "".join(piece for piece in text_generator(tokens)) def tokenize(text: str) -> Tuple[List[Token], List[int]]: """ Perform whitespace and punctuation tokenization keeping track of char alignment""" doc_tokens = [] char_to_word_offset = [] new_word = True curr_token = None def begin_new_token(doc_tokens, text): token = Token(text=text) doc_tokens.append(token) return token for offset, c in enumerate(text): if _is_whitespace(c): new_word = True if curr_token: curr_token.tail += c else: if _is_punctuation(c): curr_token = begin_new_token(doc_tokens, c) new_word = True else: if new_word: curr_token = begin_new_token(doc_tokens, c) else: curr_token += c new_word = False # OBS: Whitespaces that appear before any tokens will have offset -1 # char_to_word_offset.append(len(doc_tokens) - 1) char_to_word_offset.append(max(0, len(doc_tokens) - 1)) return doc_tokens, char_to_word_offset class HAREM(datasets.GeneratorBasedBuilder): """HAREM dataset.""" VERSION = datasets.Version("1.0.0") BUILDER_CONFIGS = [ datasets.BuilderConfig( name="default", version=VERSION, description="All the tags (PESSOA, ORGANIZACAO, LOCAL, TEMPO, VALOR, ABSTRACCAO, ACONTECIMENTO, COISA, OBRA, OUTRO) will be used", ), datasets.BuilderConfig( name="selective", version=VERSION, description="Only a subset of the tags (PESSOA, ORGANIZACAO, LOCAL, TEMPO, VALOR) will be used", ), ] DEFAULT_CONFIG_NAME = "default" def _info(self): tags = [ "O", "B-PESSOA", "I-PESSOA", "B-ORGANIZACAO", "I-ORGANIZACAO", "B-LOCAL", "I-LOCAL", "B-TEMPO", "I-TEMPO", "B-VALOR", "I-VALOR", ] if self.config.name == "default": tags += [ "B-ABSTRACCAO", "I-ABSTRACCAO", "B-ACONTECIMENTO", "I-ACONTECIMENTO", "B-COISA", "I-COISA", "B-OBRA", "I-OBRA", "B-OUTRO", "I-OUTRO", ] features = datasets.Features( { "id": datasets.Value("string"), "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=tags)), } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, supervised_keys=None, homepage=_HOMEPAGE, citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" my_urls = _URLs[self.config.name] data_dir = dl_manager.download_and_extract(my_urls) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"], "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"], "split": "test"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["dev"], "split": "dev"}, ), ] def _generate_examples(self, filepath, split): """ Yields examples. 
""" logging.info("⏳ Generating examples from = %s", filepath) with open(filepath, "r", encoding="utf-8") as f: input_data = json.load(f) id_ = 0 for document in input_data: doc_text = document["doc_text"] doc_id = document["doc_id"] doc_tokens, char_to_word_offset = tokenize(doc_text) tags = ["O"] * len(doc_tokens) def set_label(index, tag): if tags[index] != "O": logging.warning( "Overwriting tag %s at position %s to %s", tags[index], index, tag, ) tags[index] = tag for entity in document["entities"]: entity_text = entity["text"] entity_type = entity["label"] start_token = None end_token = None entity_start_offset = entity["start_offset"] entity_end_offset = entity["end_offset"] start_token = char_to_word_offset[entity_start_offset] # end_offset is NOT inclusive to the text, e.g., # entity_text == doc_text[start_offset:end_offset] end_token = char_to_word_offset[entity_end_offset - 1] assert start_token <= end_token, "End token cannot come before start token." reconstructed_text = reconstruct_text_from_tokens(doc_tokens[start_token : (end_token + 1)]) assert ( entity_text.strip() == reconstructed_text ), "Entity text and reconstructed text are not equal: %s != %s" % ( entity_text, reconstructed_text, ) for token_index in range(start_token, end_token + 1): if token_index == start_token: tag = "B-" + entity_type else: tag = "I-" + entity_type set_label(token_index, tag) yield id_, { "id": doc_id, "tokens": [x.text for x in doc_tokens], "ner_tags": tags, } id_ += 1
db6aeb882a3c913eaa74cd0748b30eb5a94b2128
b21c5a443e4e30308f3a62cbd9043285e8f61623
/Algorithms/MedianOfTwoSortedArrays.py
ec8ec42922e48fd27d7964ba0820d283cad62384
[]
no_license
RobertoGuzmanJr/PythonToolsAndExercises
eec9053214ddcf43b661f4ce427d6eda5bf4b840
8ec61b002204ecc4b87abe09f8dffcfe3aaa07e6
refs/heads/master
2022-09-24T01:15:11.074889
2020-06-08T20:55:09
2020-06-08T20:55:09
257,700,241
0
0
null
null
null
null
UTF-8
Python
false
false
1,421
py
# Imports required by the snippet (List and mean are referenced below but
# were missing from the submission as pasted):
from statistics import mean
from typing import List


class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        m = len(nums1)
        n = len(nums2)
        p1 = p2 = 0
        mids = []
        s = 0
        # even case
        if (m + n) % 2 == 0:
            while s <= (m + n) / 2:
                val = 0
                if p1 == m:
                    val = nums2[p2]
                    p2 += 1
                elif p2 == n:
                    val = nums1[p1]
                    p1 += 1
                elif nums1[p1] <= nums2[p2]:
                    val = nums1[p1]
                    p1 += 1
                else:
                    val = nums2[p2]
                    p2 += 1
                if s in [((m + n) / 2), ((m + n) / 2) - 1]:
                    mids.append(val)
                s += 1
        # odd case
        else:
            while s <= (m + n) / 2:
                val = 0
                if p1 == m:
                    val = nums2[p2]
                    p2 += 1
                elif p2 == n:
                    val = nums1[p1]
                    p1 += 1
                elif nums1[p1] <= nums2[p2]:
                    val = nums1[p1]
                    p1 += 1
                else:
                    val = nums2[p2]
                    p2 += 1
                if s in [(m + n - 1) / 2]:
                    mids.append(val)
                s += 1
        return mean(mids)
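# --- Illustrative usage (added) ---
# The two pointers p1/p2 merge the arrays only up to the middle of the
# combined sequence, so the method runs in O(m + n) time with O(1) extra
# space beyond the one or two stored middle values:
solver = Solution()
assert solver.findMedianSortedArrays([1, 3], [2]) == 2.0     # odd total length
assert solver.findMedianSortedArrays([1, 2], [3, 4]) == 2.5  # even total length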
165151141ee57f68ac0c0c19274ab313a9f6137a
5ce59b426bbb13ade60aedba09e097672b100c56
/practice_setup.py
b8e097ea03ea79e3b6555b887038bb1554adfa3d
[]
no_license
connoralbrecht/AI-Final-Project
b9dced5b1fad2fa3ce0ff3dd4e900b6aab0d7747
0e5875b501d2ed86bbb60e723dcfdfc573b42908
refs/heads/master
2020-04-09T06:27:39.551499
2018-12-06T22:50:20
2018-12-06T22:50:20
160,113,461
0
0
null
2018-12-05T13:59:41
2018-12-03T01:07:26
Python
UTF-8
Python
false
false
15,270
py
# Created by Minbiao Han and Roman Sharykin
# AI fall 2018
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import MalmoPython
import json
import logging
import math
import os
import random
import sys
import time
import re
import uuid
from collections import namedtuple
from operator import add
from random import *
import numpy as np
import practice

EntityInfo = namedtuple('EntityInfo', 'x, y, z, name')

# Create one agent host for parsing:
agent_hosts = [MalmoPython.AgentHost()]

# Parse the command-line options:
agent_hosts[0].addOptionalFlag( "debug,d", "Display debug information.")
agent_hosts[0].addOptionalIntArgument("agents,a", "Number of agents to use, including observer.", 2)
agent_hosts[0].addOptionalStringArgument("map,m", "Name of map to be used", "practice")
agent_hosts[0].addOptionalIntArgument("port,p", "The port to start on", 10000)
agent_hosts[0].addOptionalFloatArgument("noise,n", "Enemy chance to randomly move", 0.3)

try:
    agent_hosts[0].parse( sys.argv )
except RuntimeError as e:
    print('ERROR:', e)
    print(agent_hosts[0].getUsage())
    exit(1)
if agent_hosts[0].receivedArgument("help"):
    print(agent_hosts[0].getUsage())
    exit(0)

DEBUG = agent_hosts[0].receivedArgument("debug")
INTEGRATION_TEST_MODE = agent_hosts[0].receivedArgument("test")
agents_requested = agent_hosts[0].getIntArgument("agents")
NUM_AGENTS = max(1, agents_requested)  # Will be NUM_AGENTS robots running around, plus one static observer.
map_requested = agent_hosts[0].getStringArgument("map")
PORT = agent_hosts[0].getIntArgument("port")
NOISE = agent_hosts[0].getFloatArgument("noise")

# Create the rest of the agent hosts - one for each robot, plus one to give a bird's-eye view:
agent_hosts += [MalmoPython.AgentHost() for x in range(1, NUM_AGENTS)]

# Set up debug output:
for ah in agent_hosts:
    ah.setDebugOutput(DEBUG)  # Turn client-pool connection messages on/off.

if sys.version_info[0] == 2:
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)  # flush print output immediately
else:
    import functools
    print = functools.partial(print, flush=True)

def safeStartMission(agent_host, my_mission, my_client_pool, my_mission_record, role, expId):
    used_attempts = 0
    max_attempts = 5
    print("Calling startMission for role", role)
    while True:
        try:
            # Attempt start:
            agent_host.startMission(my_mission, my_client_pool, my_mission_record, role, expId)
            break
        except MalmoPython.MissionException as e:
            errorCode = e.details.errorCode
            if errorCode == MalmoPython.MissionErrorCode.MISSION_SERVER_WARMING_UP:
                print("Server not quite ready yet - waiting...")
                time.sleep(2)
            elif errorCode == MalmoPython.MissionErrorCode.MISSION_INSUFFICIENT_CLIENTS_AVAILABLE:
                print("Not enough available Minecraft instances running.")
                used_attempts += 1
                if used_attempts < max_attempts:
                    print("Will wait in case they are starting up.", max_attempts - used_attempts, "attempts left.")
                    time.sleep(2)
            elif errorCode == MalmoPython.MissionErrorCode.MISSION_SERVER_NOT_FOUND:
                print("Server not found - has the mission with role 0 been started yet?")
                used_attempts += 1
                if used_attempts < max_attempts:
                    print("Will wait and retry.", max_attempts - used_attempts, "attempts left.")
                    time.sleep(2)
            else:
                print("Other error:", e.message)
                print("Waiting will not help here - bailing immediately.")
                exit(1)
        if used_attempts == max_attempts:
            print("All chances used up - bailing now.")
            exit(1)
    print("startMission called okay.")

def safeWaitForStart(agent_hosts):
    print("Waiting for the mission to start", end=' ')
    start_flags = [False for a in agent_hosts]
    start_time = time.time()
    time_out = 120  # Allow a two minute timeout.
    while not all(start_flags) and time.time() - start_time < time_out:
        states = [a.peekWorldState() for a in agent_hosts]
        start_flags = [w.has_mission_begun for w in states]
        errors = [e for w in states for e in w.errors]
        if len(errors) > 0:
            print("Errors waiting for mission start:")
            for e in errors:
                print(e.text)
            print("Bailing now.")
            exit(1)
        time.sleep(0.1)
        print(".", end=' ')
    if time.time() - start_time >= time_out:
        print("Timed out while waiting for mission to start - bailing.")
        exit(1)
    print()
    print("Mission has started.")

def getLayout(name):
    matrix = tryToLoad("layouts/" + name)
    return matrix

def tryToLoad(fullname):
    if (not os.path.exists(fullname)):
        return None
    f = open(fullname)
    Matrix = [line.strip() for line in f]
    f.close()
    return Matrix

level_mat = getLayout(map_requested + ".lay")

def drawItems(x, z):
    return '<DrawItem x="' + str(x) + '" y="56" z="' + str(z) + '" type="apple"/>'

def GenBlock(x, y, z, blocktype):
    return '<DrawBlock x="' + str(x) + '" y="' + str(y) + '" z="' + str(z) + '" type="' + blocktype + '"/>'

def GenPlayerStart(x, z):
    return '<Placement x="' + str(x + 0.5) + '" y="56" z="' + str(z + 0.5) + '" yaw="0"/>'

def GenEnemyStart(x, z):
    return '<Placement x="' + str(x + 0.5) + '" y="56" z="' + str(z + 0.5) + '" yaw="0"/>'

pStart = {'x': 0, 'z': 0}
eStart = {'x': 0, 'z': 0}
pCurr = {'x': 0, 'z': 0}
eCurr = {'x': 0, 'z': 0}
possible_dests = []

def mazeCreator():
    genstring = ""
    genstring += GenBlock(0, 65, 0, "glass") + "\n"
    for i in range(len(level_mat)):
        for j in range(len(level_mat[0])):
            if level_mat[i][j] == "%":
                genstring += GenBlock(i, 54, j, "diamond_block") + "\n"
                genstring += GenBlock(i, 55, j, "diamond_block") + "\n"
                genstring += GenBlock(i, 56, j, "diamond_block") + "\n"
            elif level_mat[i][j] == "P":
                pStart['x'] = i
                pStart['z'] = j
                pCurr['x'] = i
                pCurr['z'] = j
            elif level_mat[i][j] == ".":
                genstring += GenBlock(i, 55, j, "glowstone") + "\n"
                possible_dests.append((i, j))
            elif level_mat[i][j] == "G":
                eStart['x'] = i
                eStart['z'] = j
                eCurr['x'] = i
                eCurr['z'] = j
    return genstring

def invMake():
    xml = ""
    for i in range(0, 39):
        xml += '<InventoryObject type="diamond_axe" slot="' + str(i) + '" quantity="1"/>'
    return(xml)

def getXML(reset):
    # Set up the Mission XML:
    xml = '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
    <Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
      <About>
        <Summary>Hello world!</Summary>
      </About>
      <ServerSection>
        <ServerHandlers>
          <FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
          <DrawingDecorator>
            ''' + mazeCreator() + '''
          </DrawingDecorator>
          <ServerQuitFromTimeUp timeLimitMs="100000"/>
          <ServerQuitWhenAnyAgentFinishes/>
        </ServerHandlers>
      </ServerSection>
      <AgentSection mode="Survival">
        <Name>Player</Name>
        <AgentStart>
          ''' + GenPlayerStart(pStart['x'], pStart['z']) + '''
        </AgentStart>
        <AgentHandlers>
          <DiscreteMovementCommands/>
          <ObservationFromFullStats/>
          <ObservationFromGrid>
            <Grid name="floor3x3W">
              <min x="-1" y="0" z="-1"/>
              <max x="1" y="0" z="1"/>
            </Grid>
            <Grid name="floor3x3F">
              <min x="-1" y="-1" z="-1"/>
              <max x="1" y="-1" z="1"/>
            </Grid>
          </ObservationFromGrid>
        </AgentHandlers>
      </AgentSection>
      <AgentSection mode="Survival">
        <Name>Enemy</Name>
        <AgentStart>
          ''' + GenEnemyStart(eStart['x'], eStart['z']) + '''
          <Inventory>''' + invMake() + '''</Inventory>
        </AgentStart>
        <AgentHandlers>
          <DiscreteMovementCommands/>
          <ObservationFromFullStats/>
          <ObservationFromGrid>
            <Grid name="floor3x3W">
              <min x="-1" y="0" z="-1"/>
              <max x="1" y="0" z="1"/>
            </Grid>
            <Grid name="floor3x3F">
              <min x="-1" y="-1" z="-1"/>
              <max x="1" y="-1" z="1"/>
            </Grid>
          </ObservationFromGrid>
        </AgentHandlers>
      </AgentSection>
    </Mission>'''
    return xml

client_pool = MalmoPython.ClientPool()
for x in range(PORT, PORT + NUM_AGENTS + 1):
    client_pool.add( MalmoPython.ClientInfo('127.0.0.1', x) )

print("Running mission")
# Create mission xml - use forcereset if this is the first mission.
my_mission = MalmoPython.MissionSpec(getXML("true"), True)
experimentID = str(uuid.uuid4())

for i in range(len(agent_hosts)):
    safeStartMission(agent_hosts[i], my_mission, client_pool, MalmoPython.MissionRecordSpec(), i, experimentID)

safeWaitForStart(agent_hosts)
time.sleep(1)
running = True

# Everything prior to here is mostly boring setup stuff. After this is the more interesting bits
current_pos = [(0, 0) for x in range(NUM_AGENTS)]
# When an agent is killed, it stops getting observations etc. Track this, so we know when to bail.
timed_out = False
g_score = 0
selected_dest = (0, 0)
dest_reached = False
dest_scores = [0 for x in possible_dests]
#dest_probs = [1/len(possible_dests) for x in possible_dests]
selected_dest = choice(possible_dests)

# This while loop represents one iteration of the "game"
while not timed_out and not dest_reached:
    print('global score:', g_score)
    print("--------- START OF TURN -------------")
    for i in range(NUM_AGENTS):
        ah = agent_hosts[i]
        world_state = ah.getWorldState()
        if world_state.is_mission_running == False:
            timed_out = True
        if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
            msg = world_state.observations[-1].text
            ob = json.loads(msg)
            #print(current_pos[i])
            # Handles enemy movement
            if ob['Name'] == 'Enemy':
                if "XPos" in ob and "ZPos" in ob:
                    current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
                    #print("Enemy initial pos ", current_pos[i])
                print('enemy moving:')
                practice.enemyMoveDest(ah, current_pos[i], world_state, selected_dest, NOISE)
                ah = agent_hosts[i]
                world_state = ah.getWorldState()
                if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
                    msg = world_state.observations[-1].text
                    ob = json.loads(msg)
                    if "XPos" in ob and "ZPos" in ob:
                        current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
                        #print("Enemy updated pos ", current_pos[i])
                    eCurr['x'] = current_pos[i][0]
                    eCurr['z'] = current_pos[i][1]
                if (current_pos[i] == (pCurr['x'], pCurr['z'])):
                    g_score -= 100
                    timed_out = True
                    break
                if ((current_pos[i][0] - 0.5, current_pos[i][1] - 0.5) == selected_dest):
                    print("Enemy reached destination!")
                    dest_reached = True
                    g_score -= 30
                    break
                time.sleep(0.1)
            # Handles agent movement
            if ob['Name'] == 'Player':
                if "XPos" in ob and "ZPos" in ob:
                    current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
                    #print("Agent initial pos ", current_pos[i])
                if (current_pos[i] == (eCurr['x'], eCurr['z'])):
                    g_score -= 100
                    timed_out = True
                    break
                print('agent moving')
                dest_scores = practice.agentMove(ah, current_pos[i], world_state, possible_dests, (eCurr['x'], eCurr['z']), dest_scores)
                ah = agent_hosts[i]
                world_state = ah.getWorldState()
                if world_state.is_mission_running and world_state.number_of_observations_since_last_state > 0:
                    msg = world_state.observations[-1].text
                    ob = json.loads(msg)
                    if "XPos" in ob and "ZPos" in ob:
                        current_pos[i] = (ob[u'XPos'], ob[u'ZPos'])
                        #print("Agent updated pos ", current_pos[i])
                    if ((current_pos[i][0] - 0.5, current_pos[i][1] - 0.5) == selected_dest):
                        #print("Agent reached destination!")
                        dest_reached = True
                        g_score += 50
                        break
                    if (current_pos[i] == (eCurr['x'], eCurr['z'])):
                        g_score -= 100
                        timed_out = True
                        break
                    #g_score -= 1
                    pCurr['x'] = current_pos[i][0]
                    pCurr['z'] = current_pos[i][1]
                if ((pCurr['x']*10) % 5 != 0 or (pCurr['z']*10) % 5 != 0 or (eCurr['x']*10) % 5 != 0 or (eCurr['z']*10) % 5 != 0):
                    print(pCurr['x'], " ", pCurr['z'])
                    g_score -= 100
                    timed_out = True
                    print("TIMED OUT")
                    break
                time.sleep(0.05)

print(g_score)
print("Waiting for mission to end ", end=' ')
# Mission should have ended already, but we want to wait until all the various agent hosts
# have had a chance to respond to their mission ended message.
hasEnded = False
while not hasEnded:
    hasEnded = True  # assume all good
    print(".", end="")
    time.sleep(0.1)
    for ah in agent_hosts:
        world_state = ah.getWorldState()
        if world_state.is_mission_running:
            hasEnded = False  # all not good
time.sleep(2)
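For orientation, a minimal layout in the format mazeCreator consumes (a hypothetical practice.lay sketch inferred from its symbol handling, not a file included in this record): '%' cells become diamond_block walls, 'P' the player start, '.' a glowstone destination cell, and 'G' the enemy start.

# layouts/practice.lay (hypothetical example)
# %%%%%
# %P.G%
# %%%%%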
79d371c1b3c959cd4a3518ccc51afaf080344bfe
04293bd329a143a638bb0fc1291ada6f0192be24
/save_reload.py
2bab4a30f8aa8b3a556d40c00ec4886fe8fafbf4
[]
no_license
qq1065507891/pytorch
1b3b2e882652d3279e180dd754b7af5b414a4956
41f211313a2e38aae3a375d5abb032173967ad9f
refs/heads/master
2020-04-07T16:23:22.051506
2018-11-21T12:11:15
2018-11-21T12:11:15
158,527,088
0
0
null
null
null
null
UTF-8
Python
false
false
1,668
py
import torch
import matplotlib.pyplot as plt

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())

def save():
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for i in range(1000):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('NET1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

    torch.save(net1, 'net.pkl')
    torch.save(net1.state_dict(), 'net_params.pkl')

def restore_net():
    net2 = torch.load('net.pkl')
    prediction = net2(x)
    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

def restore_params():
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    # plot result
    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()

# save net1
save()
# restore entire net (may be slow)
restore_net()
# restore only the net parameters
restore_params()
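The two torch.save calls above support two distinct reload styles; a minimal sketch, using the same file names the script writes:

import torch
net = torch.load('net.pkl')            # whole pickled module, architecture included
params = torch.load('net_params.pkl')  # just an OrderedDict of parameter tensors
print(type(net).__name__, list(params.keys())[:2])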
cf2775cb365ae36dbb028cd97bbc7706b62a071c
d24f09c896f75a64abf04fb0b6b2c1702a35fce7
/P11_Grid_Search.py
0f11baa740381618a9055f380619318673d3f814
[]
no_license
sarvesh10491/Python-Hackerrank-Problems
16a7b566958dbd429b12d5b83346dd9c87eebab2
f797307a44b76612a6f11d71e335469a5acab7f3
refs/heads/master
2020-03-19T07:53:00.054963
2018-06-16T06:01:06
2018-06-16T06:01:06
136,156,694
0
0
null
null
null
null
UTF-8
Python
false
false
1,859
py
# https://www.hackerrank.com/challenges/the-grid-search/problem

import math
import os
import random
import re
import sys

# Function to find all possible starting indices of pattern substring in main string
def found_at(parentstr, substr):
    indices = []
    i = parentstr.find(substr)
    while i >= 0:
        indices.append(i)
        i = parentstr.find(substr, i+1)
    return indices

def gridSearch(G, P):
    gi = 0
    pi = 0
    # Check grid only till we can ensure pattern can fit to be found & not search entire grid to last line
    while gi < (len(G)-len(P)+1):
        idx = G[gi].find(P[pi])
        if idx != -1:  # 1st line of pattern found in one of the lines
            # Find indices of all matching patterns in line
            # ps = [pat.start() for pat in re.finditer(re.escape(P[pi]), G[gi])] <= This didn't work as it skips current successful pattern in next search
            ps = found_at(G[gi], P[pi])
            # print("Found in line", gi, "at", ps)
            for k in ps:  # For each index as starting point
                idx = k
                tgi = gi
                tpi = 0
                while tpi < len(P):
                    # Check all subsequent lines in grid to see if respective subsequent lines in pattern also exist in them
                    tidx = G[tgi+tpi].find(P[tpi], idx)
                    if tidx != idx:
                        break
                    else:
                        tpi += 1
                if tpi == len(P):
                    return ("YES")
            gi += 1
        else:
            gi += 1
    return ("NO")

# Input
if __name__ == '__main__':
    t = int(input())
    for t_itr in range(t):
        RC = input().split()
        R = int(RC[0])
        C = int(RC[1])
        G = []
        for _ in range(R):
            G_item = input()
            G.append(G_item)
        rc = input().split()
        r = int(rc[0])
        c = int(rc[1])
        P = []
        for _ in range(r):
            P_item = input()
            P.append(P_item)
        result = gridSearch(G, P)
        print(result)
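A quick sanity check of gridSearch on a toy grid (constructed for illustration; not one of the HackerRank samples):

G = ["1234", "5678", "9123", "4567"]
P = ["23", "67"]
print(gridSearch(G, P))  # YES: "23" at row 0 col 1, "67" directly below it at row 1 col 1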
9a6666ffe7fd9c01862329091ec04e6fb5b1e21a
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/rna-transcription/a033dd3f296e4281ad7fae26e02d0a4d.py
612ab04228445c3976c52b87575abb7b15c6a2a3
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
262
py
DNA_TO_RNA = {
    'G': 'C',
    'C': 'G',
    'T': 'A',
    'A': 'U',
}

def to_rna(dna):
    rna = ''
    for c in dna:
        if c not in DNA_TO_RNA:
            raise ValueError("illegal nucleotide '%s' in dna" % c)
        rna = rna + DNA_TO_RNA[c]
    return rna
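A worked call, matching the mapping table above:

print(to_rna("ACGTGGTCTTAA"))  # -> UGCACCAGAAUU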
db3e53c2289ed0be9042c4809d70477cec9c9646
4db0ecdaf1121c72a38feb3fe80c05467f8f9815
/plot_roc_multi.py
391ed21bd63ebb705d48b2bf95fdfa52c29306d3
[]
no_license
zhiyong-zhou/roc_utils
46d2f57e3cac7be3672a911873463720eae4177d
d747e72e0542b51b9cade370e2a3efad731740d0
refs/heads/master
2023-07-15T19:27:28.243152
2019-12-06T03:06:00
2019-12-06T03:06:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,027
py
""" plot_roc_multi: Plots multiple ROC curves on the same graph (useful for comparison). Note: for more detailed information on a single ROC see 'plot_roc'. Author: Keith Kenemer """ import os,sys import numpy as np import pickle import matplotlib.pyplot as plt from sklearn.metrics import auc # process command line if len(sys.argv) < 2: print("\nUsage: plot_roc_multi <roc1.pkl> <roc2.pkl> ... " ) print("roc<x>.pkl: pickled (tpr,fpr,thr) tuple output from sklearn roc_curve()" ) print("\n") exit() # setup plot plt.xlabel('FPR') plt.ylabel('TPR') plt.xscale('log') plt.title('ROC curve comparison') # load & plot saved roc data colors = ['b', 'g', 'r','m','c'] for k in range(1,len(sys.argv) ): with open(sys.argv[k],"rb") as f: roc = pickle.load(f, encoding = 'latin1') fpr = roc[0] tpr = roc[1] plt.plot(fpr,tpr, color = colors[k%len(colors)], linewidth = 1, label = sys.argv[k] ) # show completed plot plt.grid() plt.legend(loc='lower right') plt.show()
71119c97936e2b9ffd38515d8759e7a17e791b0f
3ec08df086670b0399a4455cea6e44076c389ad8
/tests/components/nest/test_sensor_sdm.py
b1dddcd94949ee159b88ab815db62514d2431f6f
[ "Apache-2.0" ]
permissive
misialq/home-assistant
1cb5c2a80e375c85cc0a36dc8c7a2734fb1940eb
af5fd74d6f936b159dd06cd19770110ea0dd3d7e
refs/heads/dev
2023-08-18T09:05:01.747456
2023-01-28T06:14:36
2023-01-28T06:14:36
230,528,524
0
0
Apache-2.0
2023-04-21T06:58:51
2019-12-27T22:45:56
Python
UTF-8
Python
false
false
9,373
py
""" Test for Nest sensors platform for the Smart Device Management API. These tests fake out the subscriber/devicemanager, and are not using a real pubsub subscriber. """ from typing import Any from google_nest_sdm.event import EventMessage import pytest from homeassistant.components.sensor import ( ATTR_STATE_CLASS, SensorDeviceClass, SensorStateClass, ) from homeassistant.const import ( ATTR_DEVICE_CLASS, ATTR_FRIENDLY_NAME, ATTR_UNIT_OF_MEASUREMENT, PERCENTAGE, STATE_UNAVAILABLE, UnitOfTemperature, ) from homeassistant.core import HomeAssistant from homeassistant.helpers import device_registry as dr, entity_registry as er from .common import DEVICE_ID, CreateDevice, FakeSubscriber, PlatformSetup @pytest.fixture def platforms() -> list[str]: """Fixture to setup the platforms to test.""" return ["sensor"] @pytest.fixture def device_traits() -> dict[str, Any]: """Fixture that sets default traits used for devices.""" return {"sdm.devices.traits.Info": {"customName": "My Sensor"}} async def test_thermostat_device( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ): """Test a thermostat with temperature and humidity sensors.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.1, }, "sdm.devices.traits.Humidity": { "ambientHumidityPercent": 35.0, }, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert temperature.state == "25.1" assert ( temperature.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS ) assert ( temperature.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TEMPERATURE ) assert temperature.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT assert temperature.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Temperature" humidity = hass.states.get("sensor.my_sensor_humidity") assert humidity is not None assert humidity.state == "35" assert humidity.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE assert humidity.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.HUMIDITY assert humidity.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT assert humidity.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Humidity" registry = er.async_get(hass) entry = registry.async_get("sensor.my_sensor_temperature") assert entry.unique_id == f"{DEVICE_ID}-temperature" assert entry.domain == "sensor" entry = registry.async_get("sensor.my_sensor_humidity") assert entry.unique_id == f"{DEVICE_ID}-humidity" assert entry.domain == "sensor" device_registry = dr.async_get(hass) device = device_registry.async_get(entry.device_id) assert device.name == "My Sensor" assert device.model == "Thermostat" assert device.identifiers == {("nest", DEVICE_ID)} async def test_thermostat_device_available( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ): """Test a thermostat with temperature and humidity sensors that is Online.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.1, }, "sdm.devices.traits.Humidity": { "ambientHumidityPercent": 35.0, }, "sdm.devices.traits.Connectivity": {"status": "ONLINE"}, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert temperature.state == "25.1" humidity = hass.states.get("sensor.my_sensor_humidity") assert humidity is not None assert humidity.state == "35" async def test_thermostat_device_unavailable( hass: HomeAssistant, 
create_device: CreateDevice, setup_platform: PlatformSetup ): """Test a thermostat with temperature and humidity sensors that is Offline.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.1, }, "sdm.devices.traits.Humidity": { "ambientHumidityPercent": 35.0, }, "sdm.devices.traits.Connectivity": {"status": "OFFLINE"}, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert temperature.state == STATE_UNAVAILABLE humidity = hass.states.get("sensor.my_sensor_humidity") assert humidity is not None assert humidity.state == STATE_UNAVAILABLE async def test_no_devices(hass: HomeAssistant, setup_platform: PlatformSetup): """Test no devices returned by the api.""" await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is None humidity = hass.states.get("sensor.my_sensor_humidity") assert humidity is None async def test_device_no_sensor_traits( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ) -> None: """Test a device with applicable sensor traits.""" create_device.create({}) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is None humidity = hass.states.get("sensor.my_sensor_humidity") assert humidity is None @pytest.mark.parametrize("device_traits", [{}]) # Disable default name async def test_device_name_from_structure( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ) -> None: """Test a device without a custom name, inferring name from structure.""" create_device.create( raw_traits={ "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.2, }, }, raw_data={ "parentRelations": [ {"parent": "some-structure-id", "displayName": "Some Room"} ], }, ) await setup_platform() temperature = hass.states.get("sensor.some_room_temperature") assert temperature is not None assert temperature.state == "25.2" async def test_event_updates_sensor( hass: HomeAssistant, subscriber: FakeSubscriber, create_device: CreateDevice, setup_platform: PlatformSetup, ) -> None: """Test a pubsub message received by subscriber to update temperature.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.1, }, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert temperature.state == "25.1" # Simulate a pubsub message received by the subscriber with a trait update event = EventMessage( { "eventId": "some-event-id", "timestamp": "2019-01-01T00:00:01Z", "resourceUpdate": { "name": DEVICE_ID, "traits": { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 26.2, }, }, }, }, auth=None, ) await subscriber.async_receive_event(event) await hass.async_block_till_done() # Process dispatch/update signal temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert temperature.state == "26.2" @pytest.mark.parametrize("device_type", ["some-unknown-type"]) async def test_device_with_unknown_type( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ) -> None: """Test a device without a custom name, inferring name from structure.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.1, }, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature is not None assert 
temperature.state == "25.1" assert temperature.attributes.get(ATTR_FRIENDLY_NAME) == "My Sensor Temperature" registry = er.async_get(hass) entry = registry.async_get("sensor.my_sensor_temperature") assert entry.unique_id == f"{DEVICE_ID}-temperature" assert entry.domain == "sensor" device_registry = dr.async_get(hass) device = device_registry.async_get(entry.device_id) assert device.name == "My Sensor" assert device.model is None assert device.identifiers == {("nest", DEVICE_ID)} async def test_temperature_rounding( hass: HomeAssistant, create_device: CreateDevice, setup_platform: PlatformSetup ) -> None: """Test the rounding of overly precise temperatures.""" create_device.create( { "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 25.15678, }, } ) await setup_platform() temperature = hass.states.get("sensor.my_sensor_temperature") assert temperature.state == "25.2"
9271866dce787c1407b384f9a384b7bdc0d9bf89
286043f55a36f9d5844986b0eea58674b1c37353
/src/stickNAUTA/__init__.py
0ba48412d9b2f62c4f7b4de0f52e14ea7e0f8c5d
[ "MIT" ]
permissive
blacknoize404/stickNAUTA
a88526463b563e54a28ca815fbd8ab5791885be9
30a5d5a178435844dbafe9743fb7317deb71195c
refs/heads/main
2023-04-15T23:15:39.461398
2021-04-11T19:00:58
2021-04-11T19:00:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
143
py
from .NautaSession import NautaSession

__version__ = '1.0.0'
__all__ = ['NautaSession']
__author__ = 'stickM4N [email protected]'
698615984a24120282d332cfef57d98cdf075fb5
0c325cf7a68ef51067ed8db566d525a20de5b635
/python/xlrd_and_xlwt/xlrd_test.py
ff5eb4eb3632e68644309cd097ce90b78ddb3c9c
[]
no_license
alinzel/NOTES
2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241
3e0594641a605580e920d0b08a251fbc99f34e2f
refs/heads/master
2023-01-08T22:48:30.762625
2020-01-17T09:14:47
2020-01-17T09:14:47
175,339,492
0
0
null
2022-12-27T15:01:19
2019-03-13T03:28:08
HTML
UTF-8
Python
false
false
3,865
py
# TODO xlrd -- a library for reading and formatting data from Excel files, whether xls or xlsx
import xlrd

# Open the Excel file; returns a Book instance - <xlrd.book.Book object at 0x000001ED41180898>
excel = xlrd.open_workbook(r"./excel/2017年人员电子档案.xlsx")  # r --> keep the raw string, no escaping

# Get the sheet names; returns a list of names - ['2017-6-22', '测试']
sheet_names = excel.sheet_names()

# Get the sheet objects; returns a list of objects - [<xlrd.sheet.Sheet object at 0x0000023A57014CC0>, <xlrd.sheet.Sheet object at 0x0000023A57014CF8>]
sheets = excel.sheets()

# Get the total number of sheets; returns a number - 2
sheet_num = excel.nsheets

# Get a single sheet object
sheet_index = excel.sheet_by_index(0)     # by index
sheet_name = excel.sheet_by_name("测试")  # by name

# Get information about the sheet object
name = sheet_index.name   # sheet name
rows = sheet_index.nrows  # number of rows
cols = sheet_index.ncols  # number of columns

# Fetch cell data in bulk
row_value = sheet_index.row_values(2, 0, 4)  # values of one row; returns a list. TODO params in order: row 3, from column 0, up to column 4
col_value = sheet_index.col_values(0, 0, 4)
row = sheet_index.row(2)  # values and types of one row; no slicing - [text:'123', text:'456', text:'789', text:'147', text:'11111111', text:'258', text:'']
col = sheet_index.col(1)
slice_row = sheet_index.row_slice(2, 0, 4)  # values and types of one row, with slicing
slice_col = sheet_index.col_slice(0, 0, 4)

# Get a specific cell
cell_value = sheet_index.cell(1, 2).value  # value at row 2, column 3
cell_value_ = sheet_index.cell_value(1, 2)

# Get cell-name information
print(xlrd.cellname(0, 1))
print(xlrd.cellnameabs(0, 1))
print(xlrd.colname(8))

# Write into the database
import pymysql

# Connect to the database
coon = pymysql.connect(
    host="192.168.200.10",
    db="test_zwl",
    user="bdsdata",
    password="357135",
    port=3306
)
cur = coon.cursor()

# TODO query
# sql = "select * from file"
# cur.execute(sql)
# result = cur.fetchone()
# print(result)

# TODO insert data
row_num = sheet_index.nrows
col_num = sheet_index.ncols

# Build the SQL statement for a bulk insert, values (),(),(), rather than inserting one row at a time
sql = "insert into file values"
for i in range(1, row_num):  # each row
    for j in range(0, col_num):  # each column
        item = sheet_index.cell_value(i, j)  # value of the given cell
        # TODO empty values take two forms in the database: an empty string (shown blank) or null (shown as NULL, and it must not be quoted)
        if item == "":
            item = "Null"
            value = str(item)
        else:
            value = '"' + str(item) + '"'
        if i != row_num-1:
            if j == 0:
                sql += "(" + str(i) + "," + value + ","  # TODO the inserted item must be wrapped in quotes, otherwise error 1064 is raised; null must not be wrapped
            elif j == col_num-1:
                sql += value + "),"
            else:
                sql += value + ","
        else:
            if j == 0:
                sql += "(" + str(i) + "," + value + ","
            elif j == col_num-1:
                sql += value + ")"
            else:
                sql += value + ","
    # break
# print(sql)
# try:
#     cur.execute(sql)
#     coon.commit()  # TODO do not forget to commit
# except:
#     coon.rollback()

value_list = []
for i in range(1, row_num):
    row_v = sheet_index.row_values(i)
    row_v = [None if row == "" else row for row in row_v]  # None shows up as NULL in the database
    value_list.append(row_v)

sql_many = "insert into file (name,area,department,job_state,phone,in_date,out_date)values(%s,%s,%s,%s,%s,%s,%s)"
try:
    cur.executemany(sql_many, value_list)
    coon.commit()  # TODO do not forget to commit
except Exception as e:
    print(e)
    coon.rollback()

cur.close()
coon.close()
a5fa4b57b83b141e36d8a93815e1e8d828b4aaba
4772576b2f7601fb3295cec7756c832c250ffbc2
/max.py
9816e594e10a7248d892a993d1deccedf0c7b493
[]
no_license
Dhineshkumarraveendiran/Guvi
db3a956025299fcb2fd06911cc322403c0027ca1
3904a980fa59dd079473a4d68c345ed5116160f1
refs/heads/master
2020-04-15T04:59:55.974890
2019-05-15T10:11:59
2019-05-15T10:11:59
164,405,170
0
0
null
null
null
null
UTF-8
Python
false
false
78
py
#r
n = int(input())
li = list(map(int, input().split()))
max1 = max(li)
print(max1)
69837db1e369e1f6ffa70b28dd26005a8cc1e844
d04e45045781d6c8c4f02f3b7f2c48418fbab330
/fornumber.py
c50c2497a2210c63f7b53de79a954aca8558a1fd
[]
no_license
alcal3/CSS-301-Portfolio
7825d99dc3841bfc8e611e11d6a624744ced2d8a
bec01c246b9f4a0662b64d0d134deca454a1f442
refs/heads/master
2020-05-09T20:31:04.642477
2019-06-11T21:07:32
2019-06-11T21:07:32
181,410,241
0
0
null
null
null
null
UTF-8
Python
false
false
198
py
#aleks calderon
#4.23.2019
#iterative program part a

for x in range(1, 20):  #for loop goes through numbers 1-20
    if x < 1:
        x = 1
    else:
        x = x * x - 1
    print(x)
65f5d5d7db31e03fff05009390b6ac2b06cc7f29
5d58fa1d54855f18bad5688de4459af8d461c0ac
/plugins/callback/yaml.py
40bc0191f254fdf8b7a04ea6c86e06ff50051353
[]
no_license
nasirhm/general
b3b52f6e31be3de8bae0414da620d8cdbb2c2366
5ccd89933297f5587dae5cd114e24ea5c54f7ce5
refs/heads/master
2021-01-04T07:03:21.121102
2020-02-13T20:59:56
2020-02-13T20:59:56
240,440,187
1
0
null
2020-02-14T06:08:14
2020-02-14T06:08:13
null
UTF-8
Python
false
false
4,855
py
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: yaml
    type: stdout
    short_description: yaml-ized Ansible screen output
    description:
        - Ansible output that can be quite a bit easier to read than the default JSON formatting.
    requirements:
      - set as stdout in configuration
    extends_documentation_fragment:
      - default_callback
'''

import yaml
import json
import re
import string
import sys

from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
from ansible.plugins.callback.default import CallbackModule as Default


# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
    """Returns true if string should be in block format"""
    for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
        if c in value:
            return True
    return False


def my_represent_scalar(self, tag, value, style=None):
    """Uses block style for multi-line strings"""
    if style is None:
        if should_use_block(value):
            style = '|'
            # we care more about readable than accuracy, so...
            # ...no trailing space
            value = value.rstrip()
            # ...and non-printable characters
            value = ''.join(x for x in value if x in string.printable)
            # ...tabs prevent blocks from expanding
            value = value.expandtabs()
            # ...and odd bits of whitespace
            value = re.sub(r'[\x0b\x0c\r]', '', value)
            # ...as does trailing space
            value = re.sub(r' +\n', '\n', value)
        else:
            style = self.default_style
    node = yaml.representer.ScalarNode(tag, value, style=style)
    if self.alias_key is not None:
        self.represented_objects[self.alias_key] = node
    return node


class CallbackModule(Default):

    """
    Variation of the Default output which uses nicely readable YAML instead
    of JSON for printing results.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'community.general.yaml'

    def __init__(self):
        super(CallbackModule, self).__init__()
        yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        if result.get('_ansible_no_log', False):
            return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))

        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
        abridged_result = strip_internal_keys(module_response_deepcopy(result))

        # remove invocation unless specifically wanting it
        if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
            del abridged_result['invocation']

        # remove diff information from screen output
        if self._display.verbosity < 3 and 'diff' in result:
            del abridged_result['diff']

        # remove exception from screen output
        if 'exception' in abridged_result:
            del abridged_result['exception']

        dumped = ''

        # put changed and skipped into a header line
        if 'changed' in abridged_result:
            dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
            del abridged_result['changed']
        if 'skipped' in abridged_result:
            dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
            del abridged_result['skipped']

        # if we already have stdout, we don't need stdout_lines
        if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
            abridged_result['stdout_lines'] = '<omitted>'

        # if we already have stderr, we don't need stderr_lines
        if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
            abridged_result['stderr_lines'] = '<omitted>'

        if abridged_result:
            dumped += '\n'
            dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))

        # indent by a couple of spaces
        dumped = '\n  '.join(dumped.split('\n')).rstrip()
        return dumped

    def _serialize_diff(self, diff):
        return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
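To route Ansible's screen output through this plugin, the callback is normally selected by name in configuration; a hedged ansible.cfg sketch (the name matches CALLBACK_NAME above):

# ansible.cfg (illustrative)
# [defaults]
# stdout_callback = community.general.yaml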
c1dd30a27620a1d021d4dd38f8ff19892a4ca76c
a3160604e980796bfc93ca0fbc647f9767022a14
/lstm.py
6d3df3d34441194159fea72d263e1008fb1ec197
[ "MIT" ]
permissive
JayeshKriplani/Sentiment-Analysis-on-Tweets
033b67fa902d833116a2deb512ea904b051dbd09
1a507c3a18f749f5fa615f72a85ff348d96a06aa
refs/heads/master
2023-08-18T19:05:57.255228
2021-10-07T17:53:21
2021-10-07T17:53:21
300,169,736
0
5
MIT
2020-10-15T10:35:03
2020-10-01T06:22:45
Jupyter Notebook
UTF-8
Python
false
false
4,535
py
import numpy as np
import sys
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import LSTM
import utils
from keras.preprocessing.sequence import pad_sequences

# Performs classification using LSTM network.

FREQ_DIST_FILE = '../train-processed-freqdist.pkl'
BI_FREQ_DIST_FILE = '../train-processed-freqdist-bi.pkl'
TRAIN_PROCESSED_FILE = '../train-processed.csv'
TEST_PROCESSED_FILE = '../test-processed.csv'
GLOVE_FILE = './dataset/glove-seeds.txt'
dim = 200


def get_glove_vectors(vocab):
    print 'Looking for GLOVE vectors'
    glove_vectors = {}
    found = 0
    with open(GLOVE_FILE, 'r') as glove_file:
        for i, line in enumerate(glove_file):
            utils.write_status(i + 1, 0)
            tokens = line.split()
            word = tokens[0]
            if vocab.get(word):
                vector = [float(e) for e in tokens[1:]]
                glove_vectors[word] = np.array(vector)
                found += 1
    print '\n'
    print 'Found %d words in GLOVE' % found
    return glove_vectors


def get_feature_vector(tweet):
    words = tweet.split()
    feature_vector = []
    for i in range(len(words) - 1):
        word = words[i]
        if vocab.get(word) is not None:
            feature_vector.append(vocab.get(word))
    if len(words) >= 1:
        if vocab.get(words[-1]) is not None:
            feature_vector.append(vocab.get(words[-1]))
    return feature_vector


def process_tweets(csv_file, test_file=True):
    tweets = []
    labels = []
    print 'Generating feature vectors'
    with open(csv_file, 'r') as csv:
        lines = csv.readlines()
        total = len(lines)
        for i, line in enumerate(lines):
            if test_file:
                tweet_id, tweet = line.split(',')
            else:
                tweet_id, sentiment, tweet = line.split(',')
            feature_vector = get_feature_vector(tweet)
            if test_file:
                tweets.append(feature_vector)
            else:
                tweets.append(feature_vector)
                labels.append(int(sentiment))
            utils.write_status(i + 1, total)
    print '\n'
    return tweets, np.array(labels)


if __name__ == '__main__':
    train = len(sys.argv) == 1
    np.random.seed(1337)
    vocab_size = 90000
    batch_size = 500
    max_length = 40
    filters = 600
    kernel_size = 3
    vocab = utils.top_n_words(FREQ_DIST_FILE, vocab_size, shift=1)
    glove_vectors = get_glove_vectors(vocab)
    tweets, labels = process_tweets(TRAIN_PROCESSED_FILE, test_file=False)
    embedding_matrix = np.random.randn(vocab_size + 1, dim) * 0.01
    for word, i in vocab.items():
        glove_vector = glove_vectors.get(word)
        if glove_vector is not None:
            embedding_matrix[i] = glove_vector
    tweets = pad_sequences(tweets, maxlen=max_length, padding='post')
    shuffled_indices = np.random.permutation(tweets.shape[0])
    tweets = tweets[shuffled_indices]
    labels = labels[shuffled_indices]
    if train:
        model = Sequential()
        model.add(Embedding(vocab_size + 1, dim, weights=[embedding_matrix], input_length=max_length))
        model.add(Dropout(0.4))
        model.add(LSTM(128))
        model.add(Dense(64))
        model.add(Dropout(0.5))
        model.add(Activation('relu'))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        filepath = "./models/lstm-{epoch:02d}-{loss:0.3f}-{acc:0.3f}-{val_loss:0.3f}-{val_acc:0.3f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor="loss", verbose=1, save_best_only=True, mode='min')
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, min_lr=0.000001)
        print model.summary()
        model.fit(tweets, labels, batch_size=128, epochs=5, validation_split=0.1, shuffle=True, callbacks=[checkpoint, reduce_lr])
    else:
        model = load_model(sys.argv[1])
        print model.summary()
        test_tweets, _ = process_tweets(TEST_PROCESSED_FILE, test_file=True)
        test_tweets = pad_sequences(test_tweets, maxlen=max_length, padding='post')
        predictions = model.predict(test_tweets, batch_size=128, verbose=1)
        results = zip(map(str, range(len(test_tweets))), np.round(predictions[:, 0]).astype(int))
        utils.save_results_to_csv(results, 'lstm.csv')
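An illustration of how get_feature_vector indexes a tweet against the vocab (toy values standing in for the real 90000-word vocab; words missing from the vocab are simply dropped):

vocab = {'good': 1, 'movie': 2}
print(get_feature_vector('good movie tonight'))  # [1, 2]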
5c49e0ec04fe15cf08be854625cc496120e28c5f
eb9f655206c43c12b497c667ba56a0d358b6bc3a
/python/helpers/typeshed/stubs/keyboard/keyboard/_keyboard_event.pyi
9c511fdccf59bc88a0fc4b133c00ab9036b835c7
[ "MIT", "Apache-2.0" ]
permissive
JetBrains/intellij-community
2ed226e200ecc17c037dcddd4a006de56cd43941
05dbd4575d01a213f3f4d69aa4968473f2536142
refs/heads/master
2023-09-03T17:06:37.560889
2023-09-03T11:51:00
2023-09-03T12:12:27
2,489,216
16,288
6,635
Apache-2.0
2023-09-12T07:41:58
2011-09-30T13:33:05
null
UTF-8
Python
false
false
827
pyi
from typing_extensions import Literal

from ._canonical_names import canonical_names as canonical_names, normalize_name as normalize_name

KEY_DOWN: Literal["down"]
KEY_UP: Literal["up"]

class KeyboardEvent:
    event_type: Literal["down", "up"] | None
    scan_code: int
    name: str | None
    time: float | None
    device: str | None
    modifiers: tuple[str, ...] | None
    is_keypad: bool | None
    def __init__(
        self,
        event_type: Literal["down", "up"] | None,
        scan_code: int,
        name: str | None = ...,
        time: float | None = ...,
        device: str | None = ...,
        modifiers: tuple[str, ...] | None = ...,
        is_keypad: bool | None = ...,
    ) -> None: ...
    def to_json(self, ensure_ascii: bool = ...) -> str: ...
    def __eq__(self, other: object) -> bool: ...
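A sketch of constructing the runtime object these stubs describe (assumes the keyboard package is installed; scan code 30 is just an illustrative value):

from keyboard._keyboard_event import KEY_DOWN, KeyboardEvent

evt = KeyboardEvent(event_type=KEY_DOWN, scan_code=30, name="a")
print(evt.to_json())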
4144585f59160e7268a01a9b954689f44dcc5460
44a6e88da453a2e368b014e403843b0c955f21f4
/utils/make_mock_solid_dir.py
49e00504ae9b25d4b9a7a94ae096e077cf8d7ffc
[ "Artistic-2.0" ]
permissive
golharam/genomics
a26b1f9366203ec059cc2e49281909bfc16e6ab4
ca0c7c239b0f04353e2f2fa897db9c24a1211596
refs/heads/master
2020-08-06T10:28:21.604129
2019-09-27T07:51:41
2019-09-27T07:51:41
212,943,378
0
0
Artistic-2.0
2019-10-05T04:25:24
2019-10-05T04:25:23
null
UTF-8
Python
false
false
1,728
py
#!/usr/bin/env python
#
#     make_mock_solid_dir.py: make mock SOLiD directory for test purposes
#     Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# make_mock_solid_dir.py
#
#########################################################################

"""make_mock_solid_dir.py

Makes a mock SOLiD run directory with run_definition and barcode statistic
files plus mock csfasta and qual files, which can be used to test other
programs and scripts with.

It uses the TestUtils class from the SolidData module to build and populate
the mock directory structure.

Usage: make_mock_solid_dir.py
"""

#######################################################################
# Import modules that this module depends on
#######################################################################

#
import os
import sys

# Put ../share onto Python search path for modules
SHARE_DIR = os.path.abspath(
    os.path.normpath(
        os.path.join(os.path.dirname(sys.argv[0]), '..', 'share')))
sys.path.append(SHARE_DIR)
try:
    from bcftbx.test.test_SolidData import TestUtils
except ImportError as ex:
    print("Error importing modules: %s" % ex)

if __name__ == "__main__":
    paired_end = False
    if '--paired-end' in sys.argv:
        paired_end = True
    elif len(sys.argv) > 1:
        print("Usage: %s [--paired-end]" % os.path.basename(sys.argv[0]))
        sys.exit(1)
    # Make mock solid directory
    if paired_end:
        solid_dir = TestUtils().make_solid_dir_paired_end('solid0123_20111014_PE_BC')
    else:
        solid_dir = TestUtils().make_solid_dir('solid0123_20111014_FRAG_BC')
    print("Constructed mock dir: %s" % solid_dir)
191671c9fb6640b6e84f4f70774511da1adad553
3c7b3948a33ff9c3478e90d9d531e2f25a02659f
/slackbot/run.py
63a3add248963ccfb0a1da59e0a352e60edca0c3
[]
no_license
h0uk1/RaspLock
3b1834908a6af999e8525c6999e32096036d72e4
498a7b2caab004c3313f5cd17540e6e25e0fde9b
refs/heads/master
2020-04-17T07:04:14.970052
2019-02-22T06:12:29
2019-02-22T06:12:29
166,352,227
0
0
null
null
null
null
UTF-8
Python
false
false
167
py
# coding: utf-8
from slackbot.bot import Bot

def main():
    bot = Bot()
    bot.run()

if __name__ == "__main__":
    print('start slackbot')
    main()
11754e433ee8f5985f0ae11f9bae4e8dc50213e1
6e8f2e28479566dbaa338300b2d61f784ff83f97
/.history/code/tensorboard_utils_20210411113117.py
69315f5c5b16b26260ed37152698eb1eba53cc5e
[]
no_license
eeng5/CV-final-project
55a7d736f75602858233ebc380c4e1d67ab2b866
580e28819560b86f6974959efb1d31ef138198fc
refs/heads/main
2023-04-09T21:28:21.531293
2021-04-21T19:57:22
2021-04-21T19:57:22
352,703,734
0
0
null
null
null
null
UTF-8
Python
false
false
7,424
py
""" Project 4 - CNNs CS1430 - Computer Vision Brown University """ import io import os import re import sklearn.metrics import numpy as np import tensorflow as tf from matplotlib import pyplot as plt import hyperparameters as hp def plot_to_image(figure): """ Converts a pyplot figure to an image tensor. """ buf = io.BytesIO() plt.savefig(buf, format='png') plt.close(figure) buf.seek(0) image = tf.image.decode_png(buf.getvalue(), channels=4) image = tf.expand_dims(image, 0) return image class ImageLabelingLogger(tf.keras.callbacks.Callback): """ Keras callback for logging a plot of test images and their predicted labels for viewing in Tensorboard. """ def __init__(self, logs_path, datasets): super(ImageLabelingLogger, self).__init__() self.datasets = datasets self.task = datasets.task self.logs_path = logs_path print("Done setting up image labeling logger.") def on_epoch_end(self, epoch, logs=None): self.log_image_labels(epoch, logs) def log_image_labels(self, epoch_num, logs): """ Writes a plot of test images and their predicted labels to disk. """ fig = plt.figure(figsize=(9, 9)) count = 0 for batch in self.datasets.test_data: # changed from train to test for i, image in enumerate(batch[0]): plt.subplot(5, 5, count+1) correct_class_idx = batch[1][i] probabilities = self.model(np.array([image])).numpy()[0] predict_class_idx = np.argmax(probabilities) image = np.clip(image, 0., 1.) plt.imshow(image, cmap='gray') is_correct = correct_class_idx == predict_class_idx title_color = 'g' if is_correct else 'r' plt.title( self.datasets.idx_to_class[predict_class_idx], color=title_color) plt.axis('off') count += 1 if count == 25: break if count == 25: break figure_img = plot_to_image(fig) file_writer_il = tf.summary.create_file_writer( self.logs_path + os.sep + "image_labels") with file_writer_il.as_default(): tf.summary.image("Image Label Predictions", figure_img, step=epoch_num) class ConfusionMatrixLogger(tf.keras.callbacks.Callback): """ Keras callback for logging a confusion matrix for viewing in Tensorboard. """ def __init__(self, logs_path, datasets): super(ConfusionMatrixLogger, self).__init__() self.datasets = datasets self.logs_path = logs_path def on_epoch_end(self, epoch, logs=None): self.log_confusion_matrix(epoch, logs) def log_confusion_matrix(self, epoch, logs): """ Writes a confusion matrix plot to disk. """ test_pred = [] test_true = [] count = 0 for i in self.datasets.test_data: test_pred.append(self.model.predict(i[0])) test_true.append(i[1]) count += 1 if count >= 1500 / hp.batch_size: break test_pred = np.array(test_pred) test_pred = np.argmax(test_pred, axis=-1).flatten() test_true = np.array(test_true).flatten() # Source: https://www.tensorflow.org/tensorboard/image_summaries cm = sklearn.metrics.confusion_matrix(test_true, test_pred) figure = self.plot_confusion_matrix( cm, class_names=self.datasets.classes) cm_image = plot_to_image(figure) file_writer_cm = tf.summary.create_file_writer( self.logs_path + os.sep + "confusion_matrix") with file_writer_cm.as_default(): tf.summary.image( "Confusion Matrix (on validation set)", cm_image, step=epoch) def plot_confusion_matrix(self, cm, class_names): """ Plots a confusion matrix returned by sklearn.metrics.confusion_matrix(). 
""" # Source: https://www.tensorflow.org/tensorboard/image_summaries figure = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Greens) plt.title("Confusion matrix") plt.colorbar() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names, rotation=45) plt.yticks(tick_marks, class_names) cm = np.around(cm.astype('float') / cm.sum(axis=1) [:, np.newaxis], decimals=2) threshold = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return figure class CustomModelSaver(tf.keras.callbacks.Callback): """ Custom Keras callback for saving weights of networks. """ def __init__(self, checkpoint_dir, task, max_num_weights=5): super(CustomModelSaver, self).__init__() self.checkpoint_dir = checkpoint_dir self.task = task self.max_num_weights = max_num_weights def on_epoch_end(self, epoch, logs=None): """ At epoch end, weights are saved to checkpoint directory. """ min_acc_file, max_acc_file, max_acc, num_weights = \ self.scan_weight_files() cur_acc = logs["val_sparse_categorical_accuracy"] # Only save weights if test accuracy exceeds the previous best # weight file if cur_acc > max_acc: save_name = "weights.e{0:03d}-acc{1:.4f}.h5".format( epoch, cur_acc) if self.task == '1': self.model.save_weights( self.checkpoint_dir + os.sep + "your." + save_name) else: # Only save weights of classification head of VGGModel self.model.head.save_weights( self.checkpoint_dir + os.sep + "vgg." + save_name) # Ensure max_num_weights is not exceeded by removing # minimum weight if self.max_num_weights > 0 and \ num_weights + 1 > self.max_num_weights: os.remove(self.checkpoint_dir + os.sep + min_acc_file) def scan_weight_files(self): """ Scans checkpoint directory to find current minimum and maximum accuracy weights files as well as the number of weights. """ min_acc = float('inf') max_acc = 0 min_acc_file = "" max_acc_file = "" num_weights = 0 files = os.listdir(self.checkpoint_dir) for weight_file in files: if weight_file.endswith(".h5"): num_weights += 1 file_acc = float(re.findall( r"[+-]?\d+\.\d+", weight_file.split("acc")[-1])[0]) if file_acc > max_acc: max_acc = file_acc max_acc_file = weight_file if file_acc < min_acc: min_acc = file_acc min_acc_file = weight_file return min_acc_file, max_acc_file, max_acc, num_weights
9442061d1c5d28bd09a835998a2e53cfa07e48e2
bb33e6be8316f35decbb2b81badf2b6dcf7df515
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
219372a39b6b37e617c2e86dffba37acfa9ed26a
[]
no_license
StranikS-Scan/WorldOfTanks-Decompiled
999c9567de38c32c760ab72c21c00ea7bc20990c
d2fe9c195825ececc728e87a02983908b7ea9199
refs/heads/1.18
2023-08-25T17:39:27.718097
2022-09-22T06:49:44
2022-09-22T06:49:44
148,696,315
103
39
null
2022-09-14T17:50:03
2018-09-13T20:49:11
Python
UTF-8
Python
false
false
736
py
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent

class EventBoardsAwardsOverlayMeta(BaseDAAPIComponent):

    def changeFilter(self, id):
        self._printOverrideError('changeFilter')

    def as_setHeaderS(self, data):
        return self.flashObject.as_setHeader(data) if self._isDAAPIInited() else None

    def as_setVehicleS(self, data):
        return self.flashObject.as_setVehicle(data) if self._isDAAPIInited() else None

    def as_setDataS(self, data):
        return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
6f1f8161ba95d3088ba7f50b93a121664fb1a322
57abd17391c6ef691509dae512c102f6635dab9b
/tensorflow_datasets/scripts/create_new_dataset.py
6e57f703111ebe42c66b6fd4f7d3415e908e0bac
[ "Apache-2.0" ]
permissive
SinghKislay/datasets
434e50eb3b8584849192f3cabe7305429cc62363
bc09dd59826975f57c861da4bea23fa5d63d61cf
refs/heads/master
2020-05-02T22:27:34.771036
2019-04-10T18:14:41
2019-04-10T18:14:41
176,097,632
0
0
Apache-2.0
2019-03-17T12:25:56
2019-03-17T12:25:55
null
UTF-8
Python
false
false
6,919
py
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Generate the minimal source code for a new dataset.

python -m tensorflow_datasets.scripts.create_new_dataset \
  --dataset dataset_name \
  --type dataset_type
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl import app
from absl import flags

from tensorflow.io import gfile

from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import py_utils

FLAGS = flags.FLAGS

_DATASET_TYPE = ['image', 'video', 'audio', 'text', 'structured', 'translate']

flags.DEFINE_string('tfds_dir', None, 'Root directory of tfds (auto-computed)')
flags.DEFINE_string('dataset', None, 'Dataset name')
flags.DEFINE_enum('type', None, _DATASET_TYPE, 'Dataset type')

_HEADER = """\
\"""{TODO}: Add a description here.\"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

"""

_DATASET_DEFAULT_IMPORTS = """\
import tensorflow_datasets as tfds\n
"""

_DATASET_TEST_DEFAULTS_IMPORTS = """\
from tensorflow_datasets import testing
from tensorflow_datasets.{dataset_type} import {dataset_name}

"""

_CITATION = """\
# {TODO}: BibTeX citation
_CITATION = \"""
\"""\n
"""

_DESCRIPTION = """\
# {TODO}:
_DESCRIPTION = \"""
\"""\n
"""

_DATASET_DEFAULTS = """\
class {dataset_cls}(tfds.core.GeneratorBasedBuilder):
  \"""{TODO}: Short description of my dataset.\"""

  # {TODO}: Set up version.
  VERSION = tfds.core.Version('0.1.0')

  def _info(self):
    # {TODO}: Specifies the tfds.core.DatasetInfo object
    return tfds.core.DatasetInfo(
        builder=self,
        # This is the description that will appear on the datasets page.
        description=_DESCRIPTION,
        # tfds.features.FeatureConnectors
        features=tfds.features.FeaturesDict({{
            # These are the features of your dataset like images, labels ...
        }}),
        # If there's a common (input, target) tuple from the features,
        # specify them here. They'll be used if as_supervised=True in
        # builder.as_dataset.
        supervised_keys=(),
        # Homepage of the dataset for documentation
        urls=[],
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    # {TODO}: Downloads the data and defines the splits
    # dl_manager is a tfds.download.DownloadManager that can be used to
    # download and extract URLs
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            # {TODO}: Tune the number of shards such that each shard
            # is < 4 GB.
            num_shards=10,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={{}},
        ),
    ]

  def _generate_examples(self):
    # {TODO}: Yields examples from the dataset
    yield {{}}\n
"""

_DATASET_TEST_DEFAULTS = """\
class {dataset_cls}Test(testing.DatasetBuilderTestCase):
  # {TODO}:
  DATASET_CLASS = {dataset_name}.{dataset_cls}
  SPLITS = {{
      "train": 3,  # Number of fake train example
      "test": 1,  # Number of fake test example
  }}

  # If you are calling `download/download_and_extract` with a dict, like:
  #   dl_manager.download({{'some_key': 'http://a.org/out.txt', ...}})
  # then the tests needs to provide the fake output paths relative to the
  # fake data directory
  # DL_EXTRACT_RESULT = {{'some_key': 'output_file1.txt', ...}}


if __name__ == "__main__":
  testing.test_main()

"""

_CHECKSUM_FILE = """\
# {TODO}: If your dataset downloads files, then the checksums will be
# automatically added here when running the download_and_prepare script
# with --register_checksums.
"""


def create_dataset_file(root_dir, data):
  """Create a new dataset from a template."""
  file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
  context = (
      _HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION + _DESCRIPTION +
      _DATASET_DEFAULTS
  )
  with gfile.GFile(file_path.format(**data), 'w') as f:
    f.write(context.format(**data))


def add_the_init(root_dir, data):
  """Append the new dataset file to the __init__.py."""
  init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
  context = (
      'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
      '{dataset_cls}  # {TODO} Sort alphabetically\n'
  )
  with gfile.GFile(init_file.format(**data), 'a') as f:
    f.write(context.format(**data))


def create_dataset_test_file(root_dir, data):
  """Create the test file associated with the dataset."""
  file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}_test.py')
  context = (
      _HEADER + _DATASET_TEST_DEFAULTS_IMPORTS + _DATASET_TEST_DEFAULTS)
  with gfile.GFile(file_path.format(**data), 'w') as f:
    f.write(context.format(**data))


def create_fake_data(root_dir, data):
  fake_examples_dir = os.path.join(
      root_dir, 'testing', 'test_data', 'fake_examples', '{dataset_name}')
  fake_examples_dir = fake_examples_dir.format(**data)
  gfile.makedirs(fake_examples_dir)
  fake_path = os.path.join(
      fake_examples_dir, 'TODO-add_fake_data_in_this_directory.txt')
  with gfile.GFile(fake_path, 'w') as f:
    f.write('{TODO}: Add fake data in this directory'.format(**data))


def create_checksum_file(root_dir, data):
  checksum_path = os.path.join(root_dir, 'url_checksums', '{dataset_name}.txt')
  with gfile.GFile(checksum_path.format(**data), 'w') as f:
    f.write(_CHECKSUM_FILE.format(**data))


def main(_):
  dataset_name = FLAGS.dataset
  dataset_type = FLAGS.type
  root_dir = FLAGS.tfds_dir
  if not root_dir:
    root_dir = py_utils.tfds_dir()

  data = dict(
      dataset_name=dataset_name,
      dataset_type=dataset_type,
      dataset_cls=naming.snake_to_camelcase(dataset_name),
      TODO='TODO({})'.format(dataset_name),
  )

  create_dataset_file(root_dir, data)
  add_the_init(root_dir, data)
  create_dataset_test_file(root_dir, data)
  create_fake_data(root_dir, data)
  create_checksum_file(root_dir, data)

  print(
      'Dataset generated in {}\n'
      'You can start with searching TODO({}).\n'
      'Please check this '
      '`https://github.com/tensorflow/datasets/blob/master/docs/add_dataset.md`'
      'for details.'.format(root_dir, dataset_name)
  )


if __name__ == '__main__':
  app.run(main)
2fedba0cbdc8a5b29280723b6940c2f71a7bda36
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03546/s442051818.py
0f991e8c96376c793a2bab1a38286d3a650be9e2
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
324
py
h, _ = map(int, input().split())
r = range(10)
c = [[int(i) for i in input().split()] for _ in r]
for k in r:
    for i in r:
        for j in r:
            c[i][j] = min(c[i][j], c[i][k] + c[k][j])
else:
    a = [[int(i) for i in input().split()] for _ in range(h)]
    print(sum(c[i][1] for i in sum(a, []) if i != -1))
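The triple loop above is Floyd-Warshall over the ten digits, so c[i][1] ends up holding the cheapest total cost of rewriting digit i into 1. The same relaxation on a tiny 3-node cost matrix (illustration only, not from the submission):

c = [[0, 5, 1],
     [5, 0, 1],
     [9, 2, 0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            c[i][j] = min(c[i][j], c[i][k] + c[k][j])
print(c[0][1])  # 3: going 0 -> 2 -> 1 beats the direct cost of 5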
3a2ef9fe9d1ee5ac10f8616a6a5a5e7696776e60
da85c029410ee7d0c7474269d68f568caabb7e5d
/PrimeNo.py
f5fcf0a49da74b5d4176944f5450ceb4d164fd59
[]
no_license
apoorvasrivastava98/Python
65f561c90a24c2a6c21cf0c31808a45c7561320c
1c206fd059d912f1037d5c2fa26b447c574daa6d
refs/heads/master
2021-01-15T00:59:37.465295
2021-01-09T08:47:46
2021-01-09T08:47:46
242,821,409
0
0
null
null
null
null
UTF-8
Python
false
false
208
py
def prime(num):
    c = 0
    for i in range(1, num+1):
        if (num % i == 0):
            c = c+1
    if (c == 2):
        print("Prime number")
    else:
        print("Not prime number")

prime(15)
2f23cbd42dee001993bc154511cf225da4760ce6
f07a42f652f46106dee4749277d41c302e2b7406
/Data Set/bug-fixing-5/caaa5d634f104a58a218ff663dfc926195e3acaf-<test_notify_sentry_app_and_plugin_with_same_slug>-bug.py
2bb03ab90a78aded197d894082a935887a555e3a
[]
no_license
wsgan001/PyFPattern
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
cc347e32745f99c0cd95e79a18ddacc4574d7faa
refs/heads/main
2023-08-25T23:48:26.112133
2021-10-23T14:11:22
2021-10-23T14:11:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
725
py
def test_notify_sentry_app_and_plugin_with_same_slug(self):
    event = self.get_event()
    self.create_sentry_app(organization=event.organization, name='Notify', is_alertable=True)
    plugin = MagicMock()
    plugin.is_enabled.return_value = True
    plugin.should_notify.return_value = True
    rule = self.get_rule(data={
        'service': 'notify',
    })
    with patch('sentry.plugins.plugins.get') as get_plugin:
        get_plugin.return_value = plugin
        results = list(rule.after(event=event, state=self.get_state()))
        assert (len(results) is 2)
        assert (plugin.should_notify.call_count is 1)
        assert (results[0].callback is notify_sentry_app)
        assert (results[1].callback is plugin.rule_notify)
27c6f04530538b5ac8c71908ab91361f20ecc16b
d554b1aa8b70fddf81da8988b4aaa43788fede88
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4013/codes/1671_1079.py
c3bb6e4c00135c7fac261439e8a41a85fc6fb9ce
[]
no_license
JosephLevinthal/Research-projects
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
60d5fd6eb864a5181f4321e7a992812f3c2139f9
refs/heads/master
2022-07-31T06:43:02.686109
2020-05-23T00:24:26
2020-05-23T00:24:26
266,199,309
1
0
null
null
null
null
UTF-8
Python
false
false
627
py
# When testing your solution, do not limit yourself to the example case.
from math import *

# Read the sides of the triangle: a, b and c
a = float(input("Lado 1: "))
b = float(input("Lado 2: "))
c = float(input("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)

# Check whether any of the inputs is negative
if ((a > 0) or (b > 0) or (c > 0)):
    # Check that the measurements correspond to those of a triangle
    if ((a < b + c) and (b < a + c) and (c < a + b)):
        s = (a + b + c) / 2.0
        area = sqrt(s * (s-a) * (s-b) * (s-c))
        area = round(area, 3)
        print("Area:", area)
    else:
        print("Area: invalida")
else:
    print("Area: invalida")
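A worked check of Heron's formula as used above: for the 3-4-5 right triangle, s = 6 and the area is sqrt(6*3*2*1) = 6.

from math import sqrt
a, b, c = 3.0, 4.0, 5.0
s = (a + b + c) / 2.0
print(sqrt(s * (s-a) * (s-b) * (s-c)))  # 6.0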
ecaba36b4b380ae04e3c98b69fa92b4a3c677435
0964a05c266d52c2840fe52a4a550c1e88d03b22
/scanner.py
e08ff775182f91a43ce9a846e3ee2cb1b4b8c57f
[]
no_license
ngriskauskas/CS4280
a0d5244e129589d97fa9a9924a41629ed0fc592c
ca5898f3877edb2ca18f61d5a80c9501de49a069
refs/heads/main
2023-02-02T20:43:51.505489
2020-12-14T06:13:03
2020-12-14T06:13:03
321,250,672
0
0
null
null
null
null
UTF-8
Python
false
false
1,285
py
from enum import Enum
from filter import Filter

keywords = ["start", "stop", "iter", "void", "int", "exit", "scanf",
            "printf", "main", "if", "then", "let", "data", "func"]

operators = ["=", "=>", "=<", "==", ":", "+", "-", "*", "/",
             "%", ".", "(", ")", ",", "{", "}", ";", "[", "]"]


class TokenID(Enum):
    IDENT_tk = 1
    NUM_tk = 2
    KW_tk = 3
    OP_tk = 4
    EOF_tk = 5


class Token:
    def __init__(self, id, instance, line):
        self.id = id
        self.instance = instance
        self.line = line


class Scanner:
    def __init__(self, fileName):
        self.Filter = Filter(fileName)

    def GetNextToken(self):
        (word, line) = self.Filter.GetNextString()
        if word == "EOF":
            return Token(TokenID.EOF_tk, word, line)
        if word in keywords:
            return Token(TokenID.KW_tk, word, line)
        elif word in operators:
            return Token(TokenID.OP_tk, word, line)
        elif word.isnumeric():
            return Token(TokenID.NUM_tk, word[0: 7], line)
        elif word[0].islower() and word.isalnum():
            return Token(TokenID.IDENT_tk, word[0: 7], line)
        else:
            raise Exception("Scanner Failed to read", word, line)
a629ff545360e6bd157e394d377cbc1f1330141e
98c6ea9c884152e8340605a706efefbea6170be5
/examples/data/Assignment_6/mtttaf002/question1.py
9e72945c0d743ddcf7d64cd2596254bb5b69226b
[]
no_license
MrHamdulay/csc3-capstone
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
refs/heads/master
2021-03-12T21:55:57.781339
2014-09-22T02:22:22
2014-09-22T02:22:22
22,372,174
0
0
null
null
null
null
UTF-8
Python
false
false
542
py
"""produce right aligned list of names tafara mtutu 20 apr 2014""" names = [] count = 0 aligned = [] sort = "" #ask user for names print("Enter strings (end with DONE):") name = input() while name.lower() != "done": if count < len(name): count = len(name) names.append(name) name = input() #make length of equal to the length of longest string for i in names: sort = " "*(count-len(i)) + i aligned.append(sort) print() print("Right-aligned list:") for j in aligned: print(j)
a145c3315ea277e5e90983ce02f14e0bc9163f53
b825709ddfe1394daa0579e53d69994cb680735b
/zad2_2.py
5d8ada69e05ff442c077304398ec5de5c3eeb5cc
[]
no_license
ZajMar/Python2017MZ
150873aa23688671b3cfab5df3e6eb684991db18
d8cd0197f34914397ba6c10180629b79b43b1732
refs/heads/master
2021-08-29T10:49:55.290201
2017-12-13T19:27:34
2017-12-13T19:27:34
114,157,742
0
0
null
null
null
null
UTF-8
Python
false
false
797
py
import math

class Complex():
    real = 0
    imag = 0

    def __init__(self, real, imag=0.0):
        self.real = real
        self.imag = imag

    def __str__(self):
        return '(%s, %s)' % (self.real, self.imag)

    def add(x, y):
        return Complex(x.real + y.real, x.imag + y.imag)

    def sub(x, y):
        return Complex(x.real - y.real, x.imag - y.imag)

    def mod(self):
        return math.sqrt(self.real*self.real + self.imag*self.imag)

    def __sub__(self, other):
        return Complex(self.real - other.real, self.imag - other.imag)

    def __add__(self, other):
        return Complex(self.real + other.real, self.imag + other.imag)

    def neg(self):
        return Complex(-self.real, -self.imag)

a = Complex(1.0, 1.0)
b = Complex(3.0, 4.0)
print("a = " + str(a))
print("b = " + str(b))
print("Sum: ")
print(b.add(a))
print("Mod: ")
print(a.mod())
print("Neg: ")
print(b.neg())
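With `__add__` and `__sub__` defined as proper dunder methods, instances support infix arithmetic; a short sketch reusing the `a` and `b` from above:

print(a + b)  # (4.0, 5.0) -- dispatches to __add__
print(b - a)  # (2.0, 3.0) -- dispatches to __sub__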
10d914f403ac5bfd4aacc7330c3db318947f429e
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
/generated-libraries/python/netapp/net/net_ifgrp_info.py
51fb53a5a5d184165370e0966a17a0a5662d4247
[ "MIT" ]
permissive
radekg/netapp-ontap-lib-gen
530ec3248cff5ead37dc2aa47ced300b7585361b
6445ebb071ec147ea82a486fbe9f094c56c5c40d
refs/heads/master
2016-09-06T17:41:23.263133
2015-01-14T17:40:46
2015-01-14T17:40:46
29,256,898
2
0
null
null
null
null
UTF-8
Python
false
false
6,702
py
from netapp.netapp_object import NetAppObject

class NetIfgrpInfo(NetAppObject):
    """
    Network interface group information
    When returned as part of the output, all elements of this typedef are
    reported, unless limited by a set of desired attributes specified by
    the caller.
    <p>
    When used as input to specify desired attributes to return, omitting
    a given element indicates that it shall not be returned in the output.
    In contrast, by providing an element (even with no value) the caller
    ensures that a value for that element will be returned, given that
    the value can be retrieved.
    <p>
    When used as input to specify queries, any element can be omitted in
    which case the resulting set of objects is not constrained by any
    specific value of that attribute.
    """

    _node = None
    @property
    def node(self):
        """
        Specifies the name of node.
        Attributes: key, required-for-create, non-modifiable
        """
        return self._node
    @node.setter
    def node(self, val):
        if val != None:
            self.validate('node', val)
        self._node = val

    _up_ports = None
    @property
    def up_ports(self):
        """
        Specifies all active ports of an ifgrp.
        Attributes: non-creatable, non-modifiable
        """
        return self._up_ports
    @up_ports.setter
    def up_ports(self, val):
        if val != None:
            self.validate('up_ports', val)
        self._up_ports = val

    _down_ports = None
    @property
    def down_ports(self):
        """
        Specifies all inactive ports of an ifgrp.
        Attributes: non-creatable, non-modifiable
        """
        return self._down_ports
    @down_ports.setter
    def down_ports(self, val):
        if val != None:
            self.validate('down_ports', val)
        self._down_ports = val

    _mac_address = None
    @property
    def mac_address(self):
        """
        Specifies the MAC address of the ifgrp.
        For example: '02:0c:29:78:e1:b7'
        Attributes: non-creatable, non-modifiable
        """
        return self._mac_address
    @mac_address.setter
    def mac_address(self, val):
        if val != None:
            self.validate('mac_address', val)
        self._mac_address = val

    _ifgrp_name = None
    @property
    def ifgrp_name(self):
        """
        Specifies the interface group name.
        Attributes: key, required-for-create, non-modifiable
        """
        return self._ifgrp_name
    @ifgrp_name.setter
    def ifgrp_name(self, val):
        if val != None:
            self.validate('ifgrp_name', val)
        self._ifgrp_name = val

    _mode = None
    @property
    def mode(self):
        """
        Specifies the link policy for the ifgrp. Possible values:
        <ul>
        <li> 'multimode - All links are simultaneously active',
        <li> 'multimode_lacp - Link state is managed by the switch using
             link aggregation control protocol (LACP) (IEEE 802.3ad)',
        <li> 'singlemode - Only one link is active at a time'
        </ul>
        Attributes: required-for-create, non-modifiable
        """
        return self._mode
    @mode.setter
    def mode(self, val):
        if val != None:
            self.validate('mode', val)
        self._mode = val

    _port_participation = None
    @property
    def port_participation(self):
        """
        Port participation state of the ifgrp.
        Attributes: non-creatable, non-modifiable
        Possible values:
        <ul>
        <li> "full"    - Indicates all the ifgrp ports are active,
        <li> "partial" - Indicates not all the ifgrp ports are active,
        <li> "none"    - Indicates none of the ifgrp ports is active
        </ul>
        """
        return self._port_participation
    @port_participation.setter
    def port_participation(self, val):
        if val != None:
            self.validate('port_participation', val)
        self._port_participation = val

    _ports = None
    @property
    def ports(self):
        """
        List of ports associated with this ifgrp.
        Attributes: non-creatable, non-modifiable
        """
        return self._ports
    @ports.setter
    def ports(self, val):
        if val != None:
            self.validate('ports', val)
        self._ports = val

    _distribution_function = None
    @property
    def distribution_function(self):
        """
        Specifies the traffic distribution function for the ifgrp.
        Attributes: required-for-create, non-modifiable
        Possible values:
        <ul>
        <li> "mac"        - Network traffic is distributed on the basis
             of MAC addresses,
        <li> "ip"         - Network traffic is distributed on the basis
             of IP addresses,
        <li> "sequential" - Network traffic is distributed round-robin
             to each interface,
        <li> "port"       - Network traffic is distributed by transport
             layer address 4-tuple
        </ul>
        """
        return self._distribution_function
    @distribution_function.setter
    def distribution_function(self, val):
        if val != None:
            self.validate('distribution_function', val)
        self._distribution_function = val

    @staticmethod
    def get_api_name():
        return "net-ifgrp-info"

    @staticmethod
    def get_desired_attrs():
        return [
            'node',
            'up-ports',
            'down-ports',
            'mac-address',
            'ifgrp-name',
            'mode',
            'port-participation',
            'ports',
            'distribution-function',
        ]

    def describe_properties(self):
        return {
            'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'up_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
            'down_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
            'mac_address': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'ifgrp_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'mode': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'port_participation': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
            'distribution_function': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
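A minimal usage sketch for this generated typedef. It assumes the `NetAppObject` base class provides a no-argument constructor and the `validate` hook the setters call; the attribute values below are invented, not taken from a real ONTAP response:

info = NetIfgrpInfo()
info.node = 'cluster1-01'           # hypothetical node name
info.ifgrp_name = 'a0a'             # hypothetical ifgrp name
info.mode = 'multimode_lacp'
info.distribution_function = 'ip'
print(NetIfgrpInfo.get_api_name())  # net-ifgrp-info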
b86128aee5418c0b7ac108bd068d443064cc3ec0
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_sermon.py
40a9742cbaf0299a9d7ec6767d646bfc24b37d57
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
537
py
# class header
class _SERMON():
    def __init__(self):
        self.name = "SERMON"
        self.definitions = [u'a part of a Christian church ceremony in which a priest gives a talk on a religious or moral subject, often based on something written in the Bible: ', u'a long talk in which someone advises other people how they should behave in order to be better people: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.specie = 'nouns'

    def run(self, obj1=[], obj2=[]):
        return self.jsondata