| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–281 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1–1 |
| author | string | length 0–175 |
60d6b3c1bcbeb94d4e9ef6ec3cae76c2813e40d3
|
e8c16328e22b8ccda2e3d2998d2ec1689d79914b
|
/exercise_2017/4th_week/assignment_3.py
|
0560ed43bded8ade0cf108b15ae00d92b918ec66
|
[
"MIT"
] |
permissive
|
Taewan-P/python_study
|
e2e68cc0f1467832dace22e27cc70d217560cf2c
|
f347e370aaa33aba5ab233252bcd759b94615348
|
refs/heads/master
| 2021-05-11T03:44:04.495186 | 2018-01-18T03:02:31 | 2018-01-18T03:02:31 | 117,923,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,426 |
py
|
# -*- coding: utf-8 -*-
# UTF-8 encoding when using Korean text
def get_number():
return input("수를 입력하세요.\n")
def print_result(srm,res):
print(srm,"의 제곱근은",res,"입니다.")
def isfloat(s):
(m,_,n) = s.partition(".")
return m.isdigit() and (n.isdigit() or n=="") or \
m == "" and n.isdigit()
# Evaluating the expression after "return" here yields True or False!
def stop():
cont = input('계속하시겠습니까? (y/n)')
while not (cont == 'y' or cont == 'n'):
cont = input('계속하시겠습니까? (y/n)')
    return cont == 'n' # returns True when cont is 'n', otherwise False (see the "if stop():" check in safe_sqrt)
def safe_sqrt():
import math
print("제곱근을 구해드립니다.")
print("0이상의 수를 입력하세요.")
while True:
srm = get_number()
if not isfloat(srm):
srm=get_number()
            while not isfloat(srm): # while the input is not a float (hence the "not"), ask again on the line below
srm=get_number()
res = math.sqrt(float(srm))
print_result(srm,round(res,4))
        if stop(): # stop() returns True or False; "if" runs the indented block only when the condition is True
            break  # i.e. "if <condition>:" executes its block when the condition holds, ending the loop
print("안녕히 가세요.")
safe_sqrt()
|
[
"[email protected]"
] | |
98f0028a74f0d0904417f230f3f664a432f033bb
|
d636f648520ad6fae258676c1ceb5d0102446347
|
/manage/lib/python3.7/site-packages/django_crontab/app_settings.py
|
4a5d97226606b8a46c72a088dd05e1c409648415
|
[
"MIT"
] |
permissive
|
judebues/softmanage
|
ac21bb5c17c7309eb8adea8082991d0826ab8892
|
3882534422c09cc3a6978890e51fff9ff465de24
|
refs/heads/master
| 2023-07-19T11:23:06.300066 | 2020-06-16T11:29:41 | 2020-06-16T11:29:41 | 262,804,390 | 1 | 1 |
MIT
| 2021-09-22T19:00:16 | 2020-05-10T14:26:04 |
Python
|
UTF-8
|
Python
| false | false | 2,086 |
py
|
from __future__ import print_function
import os
import re
import sys
from importlib import import_module
class Settings():
def __init__(self, settings):
self.CRONJOBS = getattr(settings, 'CRONJOBS', [])
self.CRONTAB_EXECUTABLE = getattr(settings, 'CRONTAB_EXECUTABLE', '/usr/bin/crontab')
self.CRONTAB_LINE_REGEXP = re.compile(r'^\s*(([^#\s]+\s+){5})([^#\n]*)\s*(#\s*([^\n]*)|$)')
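        # Illustrative match for the regexp above (hypothetical crontab line):
        # "*/5 * * * * /usr/bin/python /home/app/manage.py crontab run 4f6cdf6 # django-cronjobs for mysite"
        # captures: the five time fields, the command, and the trailing comment.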
self.CRONTAB_LINE_PATTERN = '%(time)s %(command)s # %(comment)s\n'
self.DJANGO_PROJECT_NAME = getattr(settings, 'CRONTAB_DJANGO_PROJECT_NAME', os.environ['DJANGO_SETTINGS_MODULE'].split('.')[0])
self.DJANGO_SETTINGS_MODULE = getattr(settings, 'CRONTAB_DJANGO_SETTINGS_MODULE', None)
if hasattr(settings, 'CRONTAB_DJANGO_MANAGE_PATH'):
            self.DJANGO_MANAGE_PATH = settings.CRONTAB_DJANGO_MANAGE_PATH
# check if it's really there
if not os.path.exists(self.DJANGO_MANAGE_PATH):
print('ERROR: No manage.py file found at "%s". Check settings.CRONTAB_DJANGO_MANAGE_PATH!' % self.DJANGO_MANAGE_PATH)
else:
def ext(fpath):
return os.path.splitext(fpath)[0] + '.py'
try: # Django 1.3
self.DJANGO_MANAGE_PATH = ext(import_module(self.DJANGO_PROJECT_NAME + '.manage').__file__)
except ImportError:
try: # Django 1.4+
self.DJANGO_MANAGE_PATH = ext(import_module('manage').__file__)
except ImportError:
print('ERROR: Can\'t find your manage.py - please define settings.CRONTAB_DJANGO_MANAGE_PATH')
self.PYTHON_EXECUTABLE = getattr(settings, 'CRONTAB_PYTHON_EXECUTABLE', sys.executable)
self.CRONTAB_COMMENT = getattr(settings, 'CRONTAB_COMMENT', 'django-cronjobs for %s' % self.DJANGO_PROJECT_NAME)
self.COMMAND_PREFIX = getattr(settings, 'CRONTAB_COMMAND_PREFIX', '')
self.COMMAND_SUFFIX = getattr(settings, 'CRONTAB_COMMAND_SUFFIX', '')
self.LOCK_JOBS = getattr(settings, 'CRONTAB_LOCK_JOBS', False)
|
[
"[email protected]"
] | |
b132654f95a630b812537c9e3a690566a368409c
|
cd4affc719de5a3376a1ca36f47c7d7c61b80a7e
|
/Hard/Reaching_Points/Reaching_Points.py
|
01c94922637e6bcdf15c0518cf8dd8dc26db2c21
|
[] |
no_license
|
vijayyevatkar/project_lc
|
cce88f2a3a79d70a04ec651b18a3853d9a97e4b7
|
150d035c93f6b18ff4dd815b82c99e34482e3ddf
|
refs/heads/main
| 2023-08-22T13:46:16.046499 | 2021-09-19T15:46:57 | 2021-09-19T15:46:57 | 358,313,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,376 |
py
|
class Solution:
def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        # Recursive solution:
        # return self.reachUsingRecursion(sx, sy, tx, ty)
        # Time-optimized solution:
        return self.reachUsingModulo(sx, sy, tx, ty)
def reachUsingRecursion(self, sx: int, sy: int, tx: int, ty: int) -> bool:
# Stop recursion base conditions
if sx==tx and sy==ty:
return True
if sx>tx or sy>ty:
return False
# Recurse on both children
        return self.reachUsingRecursion(sx + sy, sy, tx, ty) or self.reachUsingRecursion(sx, sx + sy, tx, ty)
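    # Worked example for the modulo method below (illustrative):
    # start (sx, sy) = (1, 1), target (tx, ty) = (3, 5).
    # ty > tx, so ty = 5 % 3 = 2 -> (3, 2); then tx > ty, so tx = 3 % 2 = 1 -> (1, 2);
    # now sx == tx, so the answer is (ty - sy) % sx == (2 - 1) % 1 == 0 -> True.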
def reachUsingModulo(self, sx: int, sy: int, tx: int, ty: int) -> bool:
# Loop until you reach the top of the tree -> the source, the root
while tx >= sx and ty >= sy:
if tx > ty:
# if x coordinate has reached its source, just check if y coordinate can be reached using x
if sy == ty:
return (tx-sx) % sy == 0
tx = tx % ty
else:
# if y coordinate has reached its source, just check if x coordinate can be reached using y
if sx == tx:
return (ty-sy) % sx == 0
ty = ty % tx
return False
|
[
"[email protected]"
] | |
64cbda61223f81c79910528502d17dcd24f4c131
|
19581ba1a714140b033dae0a7f625878bdd63b0c
|
/torchdiffeq/covar/fixed_grid.py
|
2b707d6571d63f6cfb0ac8817d11200f9196de51
|
[
"MIT"
] |
permissive
|
gaozhihan/torchdiffeq
|
4473cebeef24651e60ce86a5c2a911fb2a170a1c
|
414781617d595ba01cc3f23382e25ab890f4ca66
|
refs/heads/master
| 2023-01-29T05:02:39.447312 | 2020-12-06T10:26:58 | 2020-12-06T10:26:58 | 255,856,434 | 1 | 0 |
MIT
| 2020-11-06T09:29:03 | 2020-04-15T08:42:17 |
Python
|
UTF-8
|
Python
| false | false | 1,208 |
py
|
import torch
from torchdiffeq.covar.solvers import FixedGridODESolver_Covar
from torchdiffeq._impl.rk_common import rk4_alt_step_func
class Euler_Covar(FixedGridODESolver_Covar):
order = 1
def __init__(self, eps=0., **kwargs):
super(Euler_Covar, self).__init__(**kwargs)
self.eps = torch.as_tensor(eps, dtype=self.dtype, device=self.device)
def _step_func(self, func, t, dt, y):
return dt * func(t + self.eps, y)
class Midpoint_Covar(FixedGridODESolver_Covar):
order = 2
def __init__(self, eps=0., **kwargs):
super(Midpoint_Covar, self).__init__(**kwargs)
self.eps = torch.as_tensor(eps, dtype=self.dtype, device=self.device)
def _step_func(self, func, t, dt, y):
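        # Explicit midpoint rule: the step increment is dt * f(t + dt/2, y + (dt/2) * f(t, y))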
half_dt = 0.5 * dt
y_mid = y + func(t + self.eps, y) * half_dt
return dt * func(t + half_dt, y_mid)
class RK4_Covar(FixedGridODESolver_Covar):
order = 4
def __init__(self, eps=0., **kwargs):
super(RK4_Covar, self).__init__(**kwargs)
self.eps = torch.as_tensor(eps, dtype=self.dtype, device=self.device)
def _step_func(self, func, t, dt, y):
return rk4_alt_step_func(func, t + self.eps, dt - 2 * self.eps, y)
|
[
"[email protected]"
] | |
b35033ec98fe9219a88a33c5d879b2e4f1a887b3
|
fe7cd741a44249a46d8396e658c0edd70f1b7972
|
/Unit_A/logfile_viewer.py
|
79be1903b1e5c03504acfcd79d51e9a28e95f9c4
|
[] |
no_license
|
ArjunChauhan0910/SLAM
|
199b476ae4d9ae3b9ea284e83eb2fb2807e0ed57
|
531310018dbbffcb7733304fd1cacd0661936885
|
refs/heads/main
| 2023-02-24T15:26:24.467216 | 2021-02-04T06:02:44 | 2021-02-04T06:02:44 | 335,854,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,545 |
py
|
# Python routines to inspect an ikg LEGO robot logfile.
# Author: Claus Brenner, 28.10.2012
from tkinter import *
from tkinter import filedialog as tkFileDialog
from lego_robot import *
from math import sin, cos, pi
# The canvas and world extents of the scene.
# Canvas extents in pixels, world extents in millimeters.
canvas_extents = (600, 600)
world_extents = (2000.0, 2000.0)
# The extents of the sensor canvas.
sensor_canvas_extents = canvas_extents
# The maximum scanner range used to scale scan measurement drawings,
# in millimeters.
max_scanner_range = 2200.0
class DrawableObject(object):
    def draw(self, at_step):
        print("To be overwritten - will draw a certain point in time:", at_step)
    def background_draw(self):
        print("Background draw.")
class Trajectory(DrawableObject):
def __init__(self, points, canvas,
point_size2 = 2, background_color = "gray", cursor_color = "red"):
self.points = points
self.canvas = canvas
self.point_size2 = point_size2
self.background_color = background_color
self.cursor_color = cursor_color
self.cursor_object = None
self.cursor_object2 = None
def background_draw(self):
if self.points:
p_xy_only = []
for p in self.points:
self.canvas.create_oval(\
p[0]-self.point_size2, p[1]-self.point_size2,
p[0]+self.point_size2, p[1]+self.point_size2,
fill=self.background_color, outline="")
p_xy_only.append(p[0:2])
self.canvas.create_line(*p_xy_only, fill=self.background_color)
def draw(self, at_step):
if self.cursor_object:
self.canvas.delete(self.cursor_object)
self.cursor_object = None
self.canvas.delete(self.cursor_object2)
self.cursor_object2 = None
if at_step < len(self.points):
p = self.points[at_step]
self.cursor_object = self.canvas.create_oval(\
p[0]-self.point_size2-1, p[1]-self.point_size2-1,
p[0]+self.point_size2+1, p[1]+self.point_size2+1,
fill=self.cursor_color, outline="")
if len(p) > 2:
self.cursor_object2 = self.canvas.create_line(p[0], p[1],
p[0] + cos(p[2]) * 50,
p[1] - sin(p[2]) * 50,
fill = self.cursor_color)
class ScannerData(DrawableObject):
def __init__(self, list_of_scans, canvas, canvas_extents, scanner_range):
self.canvas = canvas
self.canvas_extents = canvas_extents
self.cursor_object = None
# Convert polar scanner measurements into xy form, in canvas coords.
# Store the result in self.scan_polygons.
self.scan_polygons = []
for s in list_of_scans:
poly = [ to_sensor_canvas((0,0), canvas_extents, scanner_range) ]
i = 0
for m in s:
angle = LegoLogfile.beam_index_to_angle(i)
x = m * cos(angle)
y = m * sin(angle)
poly.append(to_sensor_canvas((x,y), canvas_extents, scanner_range))
i += 1
poly.append(to_sensor_canvas((0,0), canvas_extents, scanner_range))
self.scan_polygons.append(poly)
def background_draw(self):
# Draw x axis.
self.canvas.create_line(
self.canvas_extents[0]/2, self.canvas_extents[1]/2,
self.canvas_extents[0]/2, 20,
fill="black")
self.canvas.create_text(
self.canvas_extents[0]/2 + 10, 20, text="x" )
# Draw y axis.
self.canvas.create_line(
self.canvas_extents[0]/2, self.canvas_extents[1]/2,
20, self.canvas_extents[1]/2,
fill="black")
self.canvas.create_text(
20, self.canvas_extents[1]/2 - 10, text="y" )
# Draw big disk in the scan center.
self.canvas.create_oval(
self.canvas_extents[0]/2-20, self.canvas_extents[1]/2-20,
self.canvas_extents[0]/2+20, self.canvas_extents[1]/2+20,
fill="gray", outline="")
def draw(self, at_step):
if self.cursor_object:
self.canvas.delete(self.cursor_object)
self.cursor_object = None
if at_step < len(self.scan_polygons):
self.cursor_object = self.canvas.create_polygon(self.scan_polygons[at_step], fill="blue")
class Landmarks(DrawableObject):
    # In contrast to other classes, Landmarks stores the original world coords and
# transforms them when drawing.
def __init__(self, landmarks, canvas, canvas_extents, world_extents, color = "gray"):
self.landmarks = landmarks
self.canvas = canvas
self.canvas_extents = canvas_extents
self.world_extents = world_extents
self.color = color
def background_draw(self):
for l in self.landmarks:
if l[0] =='C':
x, y = l[1:3]
ll = to_world_canvas((x - l[3], y - l[3]), self.canvas_extents, self.world_extents)
ur = to_world_canvas((x + l[3], y + l[3]), self.canvas_extents, self.world_extents)
self.canvas.create_oval(ll[0], ll[1], ur[0], ur[1], fill=self.color)
def draw(self, at_step):
# Landmarks are background only.
pass
class Points(DrawableObject):
def __init__(self, points, canvas, color = "red", radius = 5):
self.points = points
self.canvas = canvas
self.color = color
self.radius = radius
self.cursor_objects = []
def background_draw(self):
pass
def draw(self, at_step):
        if self.cursor_objects:
            # map() is lazy in Python 3 and would never call delete; iterate explicitly
            for obj in self.cursor_objects:
                self.canvas.delete(obj)
            self.cursor_objects = []
if at_step < len(self.points):
for c in self.points[at_step]:
self.cursor_objects.append(self.canvas.create_oval(
c[0]-self.radius, c[1]-self.radius,
c[0]+self.radius, c[1]+self.radius,
fill=self.color))
# World canvas is x right, y up, and scaling according to canvas/world extents.
def to_world_canvas(world_point, canvas_extents, world_extents):
"""Transforms a point from world coord system to world canvas coord system."""
x = int(world_point[0] / world_extents[0] * canvas_extents[0])
y = int(canvas_extents[1] - 1 - world_point[1] / world_extents[1] * canvas_extents[1])
return (x, y)
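# Illustrative: with a 600x600 canvas and 2000x2000 mm world, the world point
# (1000.0, 1000.0) maps to canvas pixel (300, 299) (the y axis is flipped and offset by 1).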
# Sensor canvas is "in driving direction", with x up, y left, (0,0) in the center
# and scaling according to canvas_extents and max_scanner_range.
def to_sensor_canvas(sensor_point, canvas_extents, scanner_range):
"""Transforms a point from sensor coordinates to sensor canvas coord system."""
scale = canvas_extents[0] / 2.0 / scanner_range
x = int(canvas_extents[0] / 2.0 - sensor_point[1] * scale)
y = int(canvas_extents[1] / 2.0 - 1 - sensor_point[0] * scale)
return (x, y)
def slider_moved(index):
"""Callback for moving the scale slider."""
i = int(index)
# Call all draw objects.
for d in draw_objects:
d.draw(i)
# Print info about current point.
info.config(text=logfile.info(i))
def add_file():
filename = tkFileDialog.askopenfilename(filetypes = [("all files", ".*"), ("txt files", ".txt")])
if filename and filename not in all_file_names:
all_file_names.append(filename)
load_data()
def load_data():
global canvas_extents, sensor_canvas_extents, world_extents, max_scanner_range
for filename in all_file_names:
logfile.read(filename)
global draw_objects
draw_objects = []
scale.configure(to=logfile.size()-1)
# Insert: landmarks.
draw_objects.append(Landmarks(logfile.landmarks, world_canvas, canvas_extents, world_extents))
# Insert: reference trajectory.
positions = [to_world_canvas(pos, canvas_extents, world_extents) for pos in logfile.reference_positions]
draw_objects.append(Trajectory(positions, world_canvas,
cursor_color="red", background_color="#FFB4B4"))
# Insert: filtered trajectory.
if logfile.filtered_positions:
if len(logfile.filtered_positions[0]) > 2:
positions = [tuple(list(to_world_canvas(pos, canvas_extents, world_extents)) + [pos[2]]) for pos in logfile.filtered_positions]
else:
positions = [to_world_canvas(pos, canvas_extents, world_extents) for pos in logfile.filtered_positions]
draw_objects.append(Trajectory(positions, world_canvas,
cursor_color="blue", background_color="lightblue"))
# Insert: scanner data.
draw_objects.append(ScannerData(logfile.scan_data, sensor_canvas,
sensor_canvas_extents, max_scanner_range))
# Insert: detected cylinders, in scanner coord system.
if logfile.detected_cylinders:
positions = [[to_sensor_canvas(pos, sensor_canvas_extents, max_scanner_range)
for pos in cylinders_one_scan ]
for cylinders_one_scan in logfile.detected_cylinders ]
draw_objects.append(Points(positions, sensor_canvas, "#88FF88"))
# Insert: detected cylinders, in world coord system.
if logfile.detected_cylinders and logfile.filtered_positions and \
len(logfile.filtered_positions[0]) > 2:
positions = []
for i in range(min(len(logfile.detected_cylinders), len(logfile.filtered_positions))):
this_pose_positions = []
pos = logfile.filtered_positions[i]
dx = cos(pos[2])
dy = sin(pos[2])
for pole in logfile.detected_cylinders[i]:
x = pole[0] * dx - pole[1] * dy + pos[0]
y = pole[0] * dy + pole[1] * dx + pos[1]
p = to_world_canvas((x,y), canvas_extents, world_extents)
this_pose_positions.append(p)
positions.append(this_pose_positions)
draw_objects.append(Points(positions, world_canvas, "#88FF88"))
# Start new canvas and do all background drawing.
world_canvas.delete(ALL)
sensor_canvas.delete(ALL)
for d in draw_objects:
d.background_draw()
# Main program.
if __name__ == '__main__':
# Construct logfile (will be read in load_data()).
logfile = LegoLogfile()
# Setup GUI stuff.
root = Tk()
frame1 = Frame(root)
frame1.pack()
world_canvas = Canvas(frame1,width=canvas_extents[0],height=canvas_extents[1],bg="white")
world_canvas.pack(side=LEFT)
sensor_canvas = Canvas(frame1,width=sensor_canvas_extents[0],height=sensor_canvas_extents[1],bg="white")
sensor_canvas.pack(side=RIGHT)
scale = Scale(root, orient=HORIZONTAL, command = slider_moved)
scale.pack(fill=X)
info = Label(root)
info.pack()
frame2 = Frame(root)
frame2.pack()
load = Button(frame2,text="Load (additional) logfile",command=add_file)
load.pack(side=LEFT)
reload_all = Button(frame2,text="Reload all",command=load_data)
reload_all.pack(side=RIGHT)
# The list of objects to draw.
draw_objects = []
# Ask for file.
all_file_names = []
add_file()
root.mainloop()
root.destroy()
|
[
"[email protected]"
] | |
95303882335933bf48710ea4c6a92ec77ab6fa8b
|
71748e7379548d75fcf6713f0e6d66d6db1c2bbd
|
/4AL16IS051_SHETTY _TANVI/Jaishma Ma'am/Challenge 1/p1.py
|
94f3b31275c157f72cf5617f380da3fcadaba83b
|
[] |
no_license
|
alvas-education-foundation/ISE_4th_Year_Coding_challenge
|
fcf78c755cc924bea7e905e67c1e30385cf5af0b
|
96cfc92d679576dab15ef7d1cb6773f2082abfb2
|
refs/heads/master
| 2022-11-17T09:19:23.851817 | 2020-07-21T09:59:40 | 2020-07-21T09:59:40 | 265,195,858 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 233 |
py
|
# 1)
x = input("")
n = int(x)
if n % 2 == 1:
print("Weird")
elif n % 2 == 0 and 2 <= n <= 5:
print("Not Weird")
elif n % 2 == 0 and 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
|
[
"[email protected]"
] | |
354df2411085e8381b5ff43d457d0bb490a9d2c0
|
cb92ded6ec9b8d82d3de6c8f5a330f09e7f0fb6d
|
/mySite/toDo/views.py
|
0432e245a412b0d5c2ba8db9a49f47c08d4e6380
|
[] |
no_license
|
davidpujol/toDo
|
617037aa31be99da038fce305020fa5b413216b1
|
f26bd1368df515374430208d921a25f2b998a161
|
refs/heads/master
| 2022-09-30T10:07:36.563275 | 2019-03-01T21:01:38 | 2019-03-01T21:01:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,742 |
py
|
from django.shortcuts import render, redirect
from .models import User, ToDoList, SingleTask
import hashlib
from random import randint
# Import smtplib for the actual sending function
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Create your views here.
def main(request):
return render(request, 'toDo/main.html', None)
def login(request):
return render(request, 'toDo/logIn.html', None)
def register(request, problem=0):
return render(request, 'toDo/register.html', {'problem':problem})
def registrationInfo(request):
if request.POST['password'] != request.POST["password2"]:
return render(request, 'toDo/register.html', {'problem': 2})
if User.objects.filter(email=request.POST['email'],confirmed=False).count() >=1:
User.objects.get(email=request.POST['email'], confirmed=False).delete()
if User.objects.filter(user = request.POST['username']).count() >= 1:
return render(request, 'toDo/register.html', {'problem': 1})
if User.objects.filter(email=request.POST['email']).count() >= 1:
return render(request, 'toDo/register.html', {'problem': 2})
else:
sendEmail(request.POST['email']) #we send mail for confirmation
User.objects.create(user=request.POST['username'], password=hashlib.sha256(request.POST['password'].encode('utf-8')).hexdigest(), email=request.POST['email'], name=request.POST["name"])
return render(request, 'toDo/confirmationSent.html', None)
def sendEmail(receiver_email):
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
sender_email = "[email protected]"
password = 'iavm2umr'
message = MIMEMultipart("alternative")
message["Subject"] = "multipart test"
message["From"] = sender_email
message["To"] = receiver_email
# Create the plain-text and HTML version of your message
html = """\
<html>
<body>
<p>
Hello dear new user,<br>
We would like to welcome you to our toDo platform that will bring order into your life by enabling you
to keep track of your pending tasks as well as the completed ones.<br><br>
We wanted to remind you that in case of having any doubt in any functionality, please do not hesitate
to contact us so we can help you as soon as possible.<br><br>
Before anything else, and for security purposes, we need you to confirm your account.<br>
</p>
<p>Please click on the following link <a href="localhost:8000/toDo/confirmationProcess">Click_here</a></p>
</body>
</html>
"""
# Turn these into plain/html MIMEText objects
part2 = MIMEText(html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part2)
# Create secure connection with server and send email
context = ssl._create_unverified_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(
sender_email, receiver_email, message.as_string()
)
def identifing(request):
try:
global usuari
global remember
email = request.POST['email']
password = hashlib.sha256(request.POST['password'].encode('utf-8')).hexdigest()
usuari = User.objects.get(email=email, password=password, confirmed=True)
if 'rememberMe' in request.POST and request.POST['rememberMe'] == 'on':
remember = True
else:
remember = False
return redirect(personalSite)
except:
return render(request, 'toDo/logIn.html', None)
def personalSite(request):
try:
global usuari
list = usuari.todolist_set.all()
empty = len(list) == 0
return render(request, 'toDo/personalSite.html', {'name':usuari, 'list':list, 'empty':empty})
except:
return render(request, 'toDo/logIn.html', None)
def specificList(request):
try:
#make sure that chose_list returns only one
if 'chosen_list' in request.POST:
global chosen
chosen = request.POST['chosen_list']
if 'Delete' in request.POST:
ToDoList.objects.get(title=chosen).delete()
return redirect('personalSite')
else:
try:
list = ToDoList.objects.get(title=chosen)
tasks = list.singletask_set.all()
except:
return redirect(personalSite)
return render(request, 'toDo/specificList.html', {'list':list, 'tasks':tasks})
except:
return render(request, 'toDo/logIn.html', None)
def processChanges(request):
try:
#those that are marked have to be marked as completed
list = ToDoList.objects.get(title=chosen)
tasks = list.singletask_set.all()
for e in tasks:
t = list.singletask_set.get(task=e.task)
            if e.task in request.POST and request.POST[e.task] == 'on': # checked tasks are marked completed
                t.completed = True
            else:
                t.completed = False
t.save()
return redirect('specificList')
except:
return render(request, 'toDo/logIn.html', None)
def addTask(request):
try:
l = ToDoList.objects.get(title=chosen)
if 'newTask' in request.POST and request.POST['newTask'] != '':
SingleTask.objects.create(task=request.POST['newTask'], completed=False, list = l)
return redirect('specificList')
except:
return render(request, 'toDo/logIn.html', None)
def addList(request):
try:
if 'newList' in request.POST and request.POST['newList'] != '':
ToDoList.objects.create(title=request.POST['newList'], owner = usuari)
return redirect('personalSite')
except:
return render(request, 'toDo/logIn.html', None)
def logout(request):
global usuari
usuari = None
return redirect('main')
def contact(request):
return render(request, 'toDo/contact.html', None)
def confirmation(request):
p = hashlib.sha256(request.POST['password'].encode('utf-8')).hexdigest()
    u = User.objects.filter(email=request.POST['email'], password=p).first()
    if u:  # .get() would raise DoesNotExist on a miss instead of falling through to the else branch
        u.confirmed = True
        u.save()
        return render(request, 'toDo/confirmation.html', None)
    else:
        return redirect(confirmationProcess)
def confirmationProcess(request):
return render(request, 'toDo/confirmationProcess.html', None)
|
[
"[email protected]"
] | |
a9625efecd45a7fb3e4a24d22d5c9bdcebcb29c7
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/homeassistant/components/homekit/type_triggers.py
|
b239d67877c7d22f4ee6b162d2d1ac3e503fea4d
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 |
Apache-2.0
| 2023-02-21T23:40:57 | 2019-10-31T04:33:09 |
Python
|
UTF-8
|
Python
| false | false | 4,485 |
py
|
"""Class to hold all sensor accessories."""
from __future__ import annotations
import logging
from typing import Any
from pyhap.const import CATEGORY_SENSOR
from homeassistant.core import CALLBACK_TYPE, Context
from homeassistant.helpers import entity_registry
from homeassistant.helpers.trigger import async_initialize_triggers
from .accessories import TYPES, HomeAccessory
from .aidmanager import get_system_unique_id
from .const import (
CHAR_NAME,
CHAR_PROGRAMMABLE_SWITCH_EVENT,
CHAR_SERVICE_LABEL_INDEX,
CHAR_SERVICE_LABEL_NAMESPACE,
SERV_SERVICE_LABEL,
SERV_STATELESS_PROGRAMMABLE_SWITCH,
)
from .util import cleanup_name_for_homekit
_LOGGER = logging.getLogger(__name__)
@TYPES.register("DeviceTriggerAccessory")
class DeviceTriggerAccessory(HomeAccessory):
"""Generate a Programmable switch."""
def __init__(
self,
*args: Any,
device_triggers: list[dict[str, Any]] | None = None,
device_id: str | None = None,
) -> None:
"""Initialize a Programmable switch accessory object."""
super().__init__(*args, category=CATEGORY_SENSOR, device_id=device_id)
assert device_triggers is not None
self._device_triggers = device_triggers
self._remove_triggers: CALLBACK_TYPE | None = None
self.triggers = []
ent_reg = entity_registry.async_get(self.hass)
for idx, trigger in enumerate(device_triggers):
type_: str = trigger["type"]
subtype: str | None = trigger.get("subtype")
unique_id = f'{type_}-{subtype or ""}'
if (entity_id := trigger.get("entity_id")) and (
entry := ent_reg.async_get(entity_id)
):
unique_id += f"-entity_unique_id:{get_system_unique_id(entry)}"
trigger_name_parts = []
if entity_id and (state := self.hass.states.get(entity_id)):
trigger_name_parts.append(state.name)
trigger_name_parts.append(type_.replace("_", " ").title())
if subtype:
trigger_name_parts.append(subtype.replace("_", " ").title())
trigger_name = cleanup_name_for_homekit(" ".join(trigger_name_parts))
serv_stateless_switch = self.add_preload_service(
SERV_STATELESS_PROGRAMMABLE_SWITCH,
[CHAR_NAME, CHAR_SERVICE_LABEL_INDEX],
unique_id=unique_id,
)
self.triggers.append(
serv_stateless_switch.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT,
value=0,
valid_values={"Trigger": 0},
)
)
serv_stateless_switch.configure_char(CHAR_NAME, value=trigger_name)
serv_stateless_switch.configure_char(
CHAR_SERVICE_LABEL_INDEX, value=idx + 1
)
serv_service_label = self.add_preload_service(
SERV_SERVICE_LABEL, unique_id=unique_id
)
serv_service_label.configure_char(CHAR_SERVICE_LABEL_NAMESPACE, value=1)
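            # value 1 selects the Arabic-numerals namespace in HomeKit's ServiceLabelNamespace characteristic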
serv_stateless_switch.add_linked_service(serv_service_label)
async def async_trigger(
self,
run_variables: dict[str, Any],
context: Context | None = None,
skip_condition: bool = False,
) -> None:
"""Trigger button press.
This method is a coroutine.
"""
reason = ""
if "trigger" in run_variables and "description" in run_variables["trigger"]:
reason = f' by {run_variables["trigger"]["description"]}'
_LOGGER.debug("Button triggered%s - %s", reason, run_variables)
idx = int(run_variables["trigger"]["idx"])
self.triggers[idx].set_value(0)
# Attach the trigger using the helper in async run
# and detach it in async stop
async def run(self) -> None:
"""Handle accessory driver started event."""
self._remove_triggers = await async_initialize_triggers(
self.hass,
self._device_triggers,
self.async_trigger,
"homekit",
self.display_name,
_LOGGER.log,
)
async def stop(self) -> None:
"""Handle accessory driver stop event."""
if self._remove_triggers:
self._remove_triggers()
@property
def available(self) -> bool:
"""Return available."""
return True
|
[
"[email protected]"
] | |
d509193feed47690856d7954eeb3a65b8d83f164
|
53342fdbd43fc45d099b18e0a742ba0cf483cd09
|
/bio.py
|
93325b8a80061a81ae7e96ef3c435a05bee77ced
|
[] |
no_license
|
nikhil-8/nikii
|
dfb70689d98630a3325ffb1c6a36b9c0d55a737e
|
a0a6bd2d57d3a2912c215a19460bfe76e972374e
|
refs/heads/master
| 2020-03-28T22:22:34.316423 | 2018-09-18T04:33:24 | 2018-09-18T04:33:24 | 149,227,793 | 0 | 0 | null | 2018-09-18T04:33:25 | 2018-09-18T04:13:31 |
Python
|
UTF-8
|
Python
| false | false | 487 |
py
|
def nck_recursive(n,k):
if k==0 or k==n:
return 1
else:
return nck_recursive(n-1,k)+nck_recursive(n-1,k-1)
def fact(n):
if n<=1:
return 1
else:
return n*fact(n-1)
def nck_factorial(n,k):
return fact(n)/(fact(k)*fact(n-k))
def nck_multiplicative(n,k):
result=1
for i in range(1,k+1):
result=result*(n-(k-i))/i
return result
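# Sanity check (illustrative): C(5, 2) = 10 by all three methods:
# nck_recursive(5, 2) -> 10, nck_factorial(5, 2) -> 10.0, nck_multiplicative(5, 2) -> 10.0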
n=5
k=2
print(nck_recursive(n,k))
print(nck_factorial(n,k))
print(nck_multiplicative(n,k))
|
[
"[email protected]"
] | |
d9dc1783a8a2c7d80a6996f4b51b4e630eeb659b
|
58c0c6cd1da0a0b70c14787fbbd5a5af5161ac15
|
/venv/Lib/site-packages/pycrypto-2.6.1-py3.8-win-amd64.egg/Crypto/Cipher/_CAST.py
|
85b77c1af5f2c55359fda906ba42f17a52eb918b
|
[
"MIT"
] |
permissive
|
RafaelHMachado/Cioffis_Automation
|
6454d33558a4f4b63412d1d068726ca73feddeea
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
refs/heads/main
| 2023-06-06T07:04:11.182106 | 2021-07-03T07:39:28 | 2021-07-03T07:39:28 | 382,553,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
def __bootstrap__():
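    # Egg loader stub: replaces this pure-Python module with the compiled
    # _CAST.pyd extension that sits next to it in the installed egg.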
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, importlib.util
__file__ = pkg_resources.resource_filename(__name__, '_CAST.pyd')
__loader__ = None; del __bootstrap__, __loader__
spec = importlib.util.spec_from_file_location(__name__,__file__)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
__bootstrap__()
|
[
"[email protected]"
] | |
ff50b8f30a0c9607cc3c9a654f610c8d63a7c3f4
|
d3108dae4e54d529c54376858c14997935c4d8c3
|
/blog/models.py
|
391c6ae3761eef7990f7cb2eb8b160db5357060c
|
[] |
no_license
|
KevinCaires/my-first-blog
|
50aed9bc59beb03f5053bebb662218ec4ec70e8c
|
24485bf34fb02b8f95a011eccb5425c06084c4ac
|
refs/heads/master
| 2020-06-25T12:08:44.053718 | 2019-07-28T22:41:56 | 2019-07-28T22:41:56 | 199,303,315 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 549 |
py
|
from django.db import models
from django.conf import settings
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length = 200)
text = models.TextField()
created_date = models.DateTimeField(default = timezone.now)
published_date = models.DateTimeField(blank=True, null = True)
def publish(self):
        self.published_date = timezone.now()  # call now(); assigning the bare function would store a callable, not a time
self.save()
def __str__(self):
return self.title
|
[
"[email protected]"
] | |
cd36102d0cb3116ba36287cc2022aac5a2878df7
|
71c78a4532b976efec978ecbbd66b1e919a2a2a5
|
/Face-Rekognition/local.py
|
60c8ea2ae43a8a433ffad0cb040c301b5fbd54b6
|
[] |
no_license
|
santoshmn26/AWS-Rekognition
|
949f7a2d9e9e643a66b6eb2d0083b549fa66f58a
|
f1af324eea26b46c192796a1cfc8b63d558bf59e
|
refs/heads/master
| 2020-04-11T00:06:35.799901 | 2019-06-18T18:53:48 | 2019-06-18T18:53:48 | 161,375,716 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
import boto3
if __name__ == "__main__":
imageFile="image.jpg" # image file to detect labels
client=boto3.client('rekognition')
with open(imageFile, 'rb') as image:
response = client.detect_labels(Image={'Bytes': image.read()})
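        # response['Labels'] is a list of dicts, each with at least 'Name' and 'Confidence' keys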
print('Detected labels in ' + imageFile)
for label in response['Labels']:
print (label['Name'] + ' : ' + str(label['Confidence']))
print('Done...')
|
[
"[email protected]"
] | |
4b3822b5c0d569f8b20c18b8207cce3c03cef02d
|
50aba82ba2ffee44aadf136e1b57bcdae881f14e
|
/passmark_scraper.py
|
1277a8fcb603608ddd74a6acfdd24f4675f41bd6
|
[] |
no_license
|
JuniorJPDJ/benchmarks-scraper
|
022e9198624a269178c4ef3904470a9326d82e3f
|
5e38fc690f964abce81b1df6d8d7241d7480156d
|
refs/heads/master
| 2020-12-14T05:48:50.832906 | 2020-12-13T11:27:06 | 2020-12-13T11:27:06 | 234,662,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,576 |
py
|
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
# TODO: additional parameters from child rows
def get_page(base_url, part_url):
r = requests.get(base_url)
bs = BeautifulSoup(r.text, "html.parser")
    table = bs.find(id='cputable') # the table id is 'cputable' on all three sites (CPU, GPU and HDD pages alike)
thead = ['ID', 'URL'] + [next(e.strings) for e in table.find('thead').find('tr').find_all('th')]
tbody = []
for tr in table.find('tbody').find_all('tr', class_=False):
tds = tr.find_all('td')
part_id = tr['id'][3:]
tr_a = [part_id, part_url+part_id]
tr_a.extend([e.text for e in tds])
tbody.append(tr_a)
return thead, tbody
if __name__ == '__main__':
import argparse
import csv
base_urls = {
"CPU": "https://www.cpubenchmark.net/CPU_mega_page.html",
"GPU": "https://www.videocardbenchmark.net/GPU_mega_page.html",
"HDD": "https://www.harddrivebenchmark.net/hdd-mega-page.html"
}
part_urls = {
'CPU': 'https://www.cpubenchmark.net/cpu.php?id=',
'GPU': 'https://www.videocardbenchmark.net/gpu.php?id=',
'HDD': 'https://www.harddrivebenchmark.net/hdd.php?id='
}
arg = argparse.ArgumentParser()
arg.add_argument("--type", default="CPU", choices=["CPU", "GPU", "HDD"], type=str.upper)
arg.add_argument('out', type=argparse.FileType('w', encoding='UTF-8'))
args = arg.parse_args()
print("Starting parsing Passmark", args.type, "category")
csv_writer = csv.writer(args.out)
thead, tbody = get_page(base_urls[args.type], part_urls[args.type])
csv_writer.writerow(thead)
csv_writer.writerows(tbody)
args.out.close()
|
[
"[email protected]"
] | |
4c6b1a6636984c7b952738d5986a607427a08d95
|
6418c2124c615c2af7768fea657ebe1ed9891830
|
/basic/migrations/0004_auto_20200728_1041.py
|
2b3e7a0bef2a80fdb9880f23e4284610790006ff
|
[] |
no_license
|
santhosh0208/CuriosityRadar
|
64029840cd83b787faef2211805146c32d7d4554
|
cde9182417ec2e076d28822eb2195a7cf94e4446
|
refs/heads/master
| 2023-07-30T19:40:04.548950 | 2020-07-29T07:00:22 | 2020-07-29T07:00:22 | 283,222,622 | 0 | 0 | null | 2021-06-10T19:58:24 | 2020-07-28T13:36:37 |
HTML
|
UTF-8
|
Python
| false | false | 385 |
py
|
# Generated by Django 3.0.1 on 2020-07-28 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basic', '0003_auto_20200722_1504'),
]
operations = [
migrations.AlterField(
model_name='article',
name='published',
field=models.DateField(blank=True),
),
]
|
[
"[email protected]"
] | |
9784c4b479a66fc78e1e4b9de3f345170ca0af04
|
dd1937591047258a846d48fc150c10bdf23cbf67
|
/confesAPI/students/urls.py
|
f7abf465169f3a84f9afdc82bbfbccdd1251d662
|
[] |
no_license
|
AnkitRusia/Confession
|
9b8fece2e7b1a1a9f5da07331731d02f2be7a170
|
ca799276dd3aee2fa051912e59e1f64c31530330
|
refs/heads/main
| 2023-06-17T10:26:09.346237 | 2021-07-10T08:39:29 | 2021-07-10T08:39:29 | 384,569,867 | 0 | 0 | null | 2021-07-10T08:39:30 | 2021-07-09T23:16:09 |
Python
|
UTF-8
|
Python
| false | false | 651 |
py
|
from django.conf.urls import url
from django.conf.urls.static import static
from django.conf import settings
from students import views
urlpatterns = [
url(r'^student/$', views.studentAPI),
url(r'^student/([0-9]+)$', views.studentAPI),
url(r'^post/$', views.postAPI),
url(r'^post/([0-9]+)$', views.postAPI),
url(r'^StudentImage/([0-9]+)$', views.SaveStudentImage),
url(r'^PostImage/([0-9]+)$', views.SavePostImage),
url(r'^postByName/$', views.viewPostByName),
url(r'^likePost/([0-9]+)$', views.likePost),
url(r'^addcomment/$', views.addComment),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
c4acc48e7f4000ebbf4268909ad39fdf1dab8ec8
|
ae11eda73ad0a61f8f7f894314bd9aa40798b50a
|
/MyAnalysis/IsolationTools/python/muonDirectionalPFIsolations_cff.py
|
8d48cf05750f63c872425cdbbf934b676a67f71b
|
[] |
no_license
|
hbakhshi/NTupleProducer
|
087a7286f7352e9f6c517d257d7f195280db058d
|
eec377339008d2139128059d7127f9a2184c080c
|
refs/heads/master
| 2021-01-22T14:32:44.891691 | 2014-06-10T12:48:12 | 2014-06-10T12:48:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,644 |
py
|
import FWCore.ParameterSet.Config as cms
import MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi
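# The blocks below all follow one pattern: for each cone size dR = 0.1 ... 0.7, clone the
# single-type PF isolation producer three times; pfTypes 1, 5 and 4 select charged hadrons,
# neutral hadrons and photons respectively (PFCandidate ParticleType codes).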
### DR=0.1 cone
# Charged Hadron isolation
muonDirPFIsoChHad01 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad01.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad01.deltaR = 0.1
muonDirPFIsoChHad01.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad01 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad01.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad01.deltaR = 0.1
muonDirPFIsoNHad01.directional = True
# Photon isolation
muonDirPFIsoPhoton01 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton01.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton01.deltaR = 0.1
muonDirPFIsoPhoton01.directional = True
### DR=0.2 cone
# Charged Hadron isolation
muonDirPFIsoChHad02 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad02.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad02.deltaR = 0.2
muonDirPFIsoChHad02.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad02 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad02.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad02.deltaR = 0.2
muonDirPFIsoNHad02.directional = True
# Photon isolation
muonDirPFIsoPhoton02 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton02.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton02.deltaR = 0.2
muonDirPFIsoPhoton02.directional = True
### DR=0.3 cone
# Charged Hadron isolation
muonDirPFIsoChHad03 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad03.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad03.deltaR = 0.3
muonDirPFIsoChHad03.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad03 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad03.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad03.deltaR = 0.3
muonDirPFIsoNHad03.directional = True
# Photon isolation
muonDirPFIsoPhoton03 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton03.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton03.deltaR = 0.3
muonDirPFIsoPhoton03.directional = True
### DR=0.4 cone
# Charged Hadron isolation
muonDirPFIsoChHad04 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad04.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad04.deltaR = 0.4
muonDirPFIsoChHad04.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad04 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad04.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad04.deltaR = 0.4
muonDirPFIsoNHad04.directional = True
# Photon isolation
muonDirPFIsoPhoton04 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton04.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton04.deltaR = 0.4
muonDirPFIsoPhoton04.directional = True
### DR=0.5 cone
# Charged Hadron isolation
muonDirPFIsoChHad05 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad05.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad05.deltaR = 0.5
muonDirPFIsoChHad05.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad05 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad05.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad05.deltaR = 0.5
muonDirPFIsoNHad05.directional = True
# Photon isolation
muonDirPFIsoPhoton05 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton05.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton05.deltaR = 0.5
muonDirPFIsoPhoton05.directional = True
### DR=0.6 cone
# Charged Hadron isolation
muonDirPFIsoChHad06 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad06.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad06.deltaR = 0.6
muonDirPFIsoChHad06.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad06 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad06.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad06.deltaR = 0.6
muonDirPFIsoNHad06.directional = True
# Photon isolation
muonDirPFIsoPhoton06 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton06.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton06.deltaR = 0.6
muonDirPFIsoPhoton06.directional = True
### DR=0.7 cone
# Charged Hadron isolation
muonDirPFIsoChHad07 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoChHad07.pfTypes = cms.untracked.vint32(1)
muonDirPFIsoChHad07.deltaR = 0.7
muonDirPFIsoChHad07.directional = True
# Neutral Hadron isolation
muonDirPFIsoNHad07 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoNHad07.pfTypes = cms.untracked.vint32(5)
muonDirPFIsoNHad07.deltaR = 0.7
muonDirPFIsoNHad07.directional = True
# Photon isolation
muonDirPFIsoPhoton07 = MyAnalysis.IsolationTools.muonPFIsoSingleType_cfi.muonPFIsoSingleTypeMapProd.clone()
muonDirPFIsoPhoton07.pfTypes = cms.untracked.vint32(4)
muonDirPFIsoPhoton07.deltaR = 0.7
muonDirPFIsoPhoton07.directional = True
|
[
"[email protected]"
] | |
c601fd8fe124c10697e778d4e7b133a307e9818e
|
be37b034e4b05543967e4534eaaf7326e68506ae
|
/examples/example1.py
|
9d03e9ccf8da9c3f55f98fc35465e35258086b33
|
[
"MIT"
] |
permissive
|
nvms/booker
|
50795290b643a1e83ec48a9fe84b47d5c8fec020
|
acc5b8c98f90b0f687285d2d2d9b9cf58d680a99
|
refs/heads/master
| 2020-04-14T22:10:59.766846 | 2019-08-16T16:44:23 | 2019-08-16T16:44:23 | 164,153,765 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,310 |
py
|
from __future__ import print_function
import booker
import time
import sys
import datetime
import threading
def hour(): return datetime.datetime.now().hour
def minute(): return datetime.datetime.now().minute
def second(): return datetime.datetime.now().second
def ms(): return datetime.datetime.now().microsecond
booker.verbose = True
# ---
# Using the decorator.
@booker.task('in 2 seconds')
def hello_world(thing='world'):
print('- Hello, {}!'.format(thing))
# ---
# Using the decorator.
@booker.task('in 5 seconds')
def in_five_seconds():
print('- It\'s been 5 seconds.')
@booker.task('in 10 seconds')
def in_ten_seconds():
print('- It\'s been 10 seconds.')
print(booker.elapsed_since_epoch())
# ---
# Using the decorator, with a label.
@booker.task('every 1 second', 'my-ping-task')
def ping():
print('- Ping')
# ---
# Using .do().
def pong():
print('- Pong')
booker.do(pong, 'every 1 second in 2 seconds', 'my-pong-task')
def print_task_status():
print('- Printing the status of all tasks:')
for task in booker.tasks():
print(task)
booker.do(print_task_status, 'in 3 seconds')
# ---
# Using .do().
def cancel_by_label(label):
print('- Cancelling tasks with label: \'{}\'...'.format(label))
booker.cancel(label)
booker.do(lambda: cancel_by_label('my-ping-task'), 'in 12 seconds')
# ---
# Using .do().
booker.do(lambda: print('- Cancelling all tasks in 1 second...'), 'in 19 seconds')
booker.do(booker.cancel_all, 'in 20 seconds')
# ---
# Using the decorator.
@booker.task('every 1 second')
def print_time():
sys.stdout.write('{:02d}:{:02d}:{:02d}:{:02d}\n'.format(hour(), minute(), second(), ms()))
# ---
# Using .do().
def never_run():
print('This should never be run and instead will produce a warning.')
schedule = booker.Schedule(interval=1, tts=5, ttl=3)
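# (Presumably the warning comes from ttl expiring before tts, i.e. the task's
# time-to-live ends before its scheduled start; booker's exact semantics are assumed here.)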
booker.do(never_run, schedule, 'task-that-never-runs')
# ---
# An alternative way to build a schedule.
mystr = '6 seconds'
schedule = booker.get_schedule('in ' + mystr)
schedule.tts = schedule.tts + 1 # Add 1 second to the start time.
booker.do(lambda: print('- It\'s been 7 seconds'), schedule)
# ---
# Keep alive.
def main():
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
74fc93f5b17de8fccb5bfec4f66dbd8208b5892b
|
f64e9de2a5f8cd677180172591256a651fe20cbc
|
/examples/btpyparse.py
|
1c95636adae45c597450aa2233f432a5957cb747
|
[
"MIT"
] |
permissive
|
schlichtanders/pyparsing-2.0.3-OrderedDict
|
c85455a65ea610a959a41d035175912ba3762e11
|
50bb1a10b63ac623ef58ffa3ee59bb08be172ff4
|
refs/heads/master
| 2021-01-10T04:07:10.044763 | 2015-10-23T11:03:14 | 2015-10-23T11:03:14 | 44,807,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,376 |
py
|
""" Pyparsing parser for BibTeX files
A standalone parser using pyparsing.
pyparsing has a simple and expressive syntax so the grammar is easy to read and
write.
Matthew Brett 2010
Simplified BSD license
"""
from pyparsingOD import (Regex, Suppress, ZeroOrMore, Group, Optional, Forward,
SkipTo, CaselessLiteral, Dict)
class Macro(object):
""" Class to encapsulate undefined macro references """
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Macro("%s")' % self.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
# Character literals
LCURLY,RCURLY,LPAREN,RPAREN,QUOTE,COMMA,AT,EQUALS,HASH = map(Suppress,'{}()",@=#')
def bracketed(expr):
""" Return matcher for `expr` between curly brackets or parentheses """
return (LPAREN + expr + RPAREN) | (LCURLY + expr + RCURLY)
# Define parser components for strings (the hard bit)
chars_no_curly = Regex(r"[^{}]+")
chars_no_curly.leaveWhitespace()
chars_no_quotecurly = Regex(r'[^"{}]+')
chars_no_quotecurly.leaveWhitespace()
# Curly string is some stuff without curlies, or nested curly sequences
curly_string = Forward()
curly_item = Group(curly_string) | chars_no_curly
curly_string << LCURLY + ZeroOrMore(curly_item) + RCURLY
# quoted string is either just stuff within quotes, or stuff within quotes, within
# which there is nested curliness
quoted_item = Group(curly_string) | chars_no_quotecurly
quoted_string = QUOTE + ZeroOrMore(quoted_item) + QUOTE
# Numbers can just be numbers. Only integers though.
number = Regex('[0-9]+')
# Basis characters (by exclusion) for variable / field names. The following
# list of characters is from the btparse documentation
any_name = Regex(r'[^\s"#%\'(),={}]+')
# btparse says, and the test bibs show by experiment, that macro and field names
# cannot start with a digit. In fact entry type names cannot start with a digit
# either (see tests/bibs). Cite keys can start with a digit
not_digname = Regex(r'[^\d\s"#%\'(),={}][^\s"#%\'(),={}]*')
# @comment comments out to the end of the line
comment = (AT + CaselessLiteral('comment') +
           Regex(r"[\s{(].*").leaveWhitespace())
# The name types with their digiteyness
not_dig_lower = not_digname.copy().setParseAction(lambda t: t[0].lower())
macro_def = not_dig_lower.copy()
macro_ref = not_dig_lower.copy().setParseAction(lambda t : Macro(t[0].lower()))
field_name = not_dig_lower.copy()
# Spaces in names mean they cannot clash with field names
entry_type = not_dig_lower('entry_type')
cite_key = any_name('cite_key')
# Number has to be before macro name
string = (number | macro_ref | quoted_string | curly_string)
# There can be hash concatenation
field_value = string + ZeroOrMore(HASH + string)
field_def = Group(field_name + EQUALS + field_value)
entry_contents = Dict(ZeroOrMore(field_def + COMMA) + Optional(field_def))
# Entry is surrounded either by parentheses or curlies
entry = (AT + entry_type + bracketed(cite_key + COMMA + entry_contents))
# Preamble is a macro-like thing with no name
preamble = AT + CaselessLiteral('preamble') + bracketed(field_value)
# Macros (aka strings)
macro_contents = macro_def + EQUALS + field_value
macro = AT + CaselessLiteral('string') + bracketed(macro_contents)
# Implicit comments
icomment = SkipTo('@').setParseAction(lambda t : t.insert(0, 'icomment'))
# entries are last in the list (other than the fallback) because they have
# arbitrary start patterns that would match comments, preamble or macro
definitions = Group(comment |
preamble |
macro |
entry |
icomment)
# Start symbol
bibfile = ZeroOrMore(definitions)
def parse_str(str):
return bibfile.parseString(str)
if __name__ == '__main__':
# Run basic test
txt = """
Some introductory text
(implicit comment)
@ARTICLE{Authors2011,
author = {First Author and Second Author and Third Author},
title = {An article about {S}omething},
journal = "Journal of Articles",
year = {2011},
volume = {16},
pages = {1140--1141},
number = {2}
}
"""
print('\n\n'.join(defn.dump() for defn in parse_str(txt)))
|
[
"[email protected]"
] | |
d56f9ff4d9977c5cae0f12e69328846bbc0dd91b
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/io_thps_scene/export_shared.py
|
f75d8e3457209c8ec5a53569e993701e92bf9c6a
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 63,550 |
py
|
#############################################
# SCENE EXPORT - SHARED COMPONENTS
#############################################
import bpy
import bmesh
import struct
import mathutils
import math
from . import helpers, collision, prefs, material, autosplit
from bpy.props import *
from . prefs import *
from . autosplit import *
from . helpers import *
from . collision import *
from . material import *
from . constants import *
from . qb import *
from . level_manifest import *
from . export_thug1 import export_scn_sectors
from . export_thug2 import export_scn_sectors_ug2
from . export_thps4 import *
class ExportError(Exception):
pass
# METHODS
#############################################
def pack_pre(root_dir, files, output_file):
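    # PRE/PRX container layout, as written below: a 12-byte header
    # [total size][version 0xABCD0003][file count], then per file:
    # [data size][compressed size (0 here, i.e. stored uncompressed)][name size]
    # [name CRC][NUL-padded name][data], each entry padded to 4-byte alignment.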
pack = struct.pack
with open(output_file, "wb") as outp:
outp.write(pack("I", 0))
outp.write(pack("I", 0xABCD0003)) # version
outp.write(pack("I", len(files))) # num files
for file in files:
adjusted_fn = bytes(os.path.relpath(file, root_dir), 'ascii') + b"\x00"
if len(adjusted_fn) % 4 != 0:
adjusted_fn = adjusted_fn + (b'\x00' * (4 - (len(adjusted_fn) % 4)))
with open(file, "rb") as inp:
data = inp.read()
outp.write(pack("I", len(data))) # data size
outp.write(pack("I", 0)) # compressed data size
outp.write(pack("I", len(adjusted_fn))) # file name size
outp.write(pack("I", crc_from_string(bytes(os.path.relpath(file, root_dir), 'ascii')))) # file name checksum
outp.write(adjusted_fn) # file name
outp.write(data) # data
offs = outp.tell()
if offs % 4 != 0:
outp.write(b'\x00' * (4 - (offs % 4)))
total_bytes = outp.tell()
outp.seek(0)
outp.write(pack("I", total_bytes))
#----------------------------------------------------------------------------------
def do_export(operator, context, target_game):
is_model = False
self = operator
import subprocess, shutil, datetime
addon_prefs = context.user_preferences.addons[ADDON_NAME].preferences
base_files_dir_error = prefs._get_base_files_dir_error(addon_prefs)
if base_files_dir_error:
self.report({"ERROR"}, "Base files directory error: {} Check the base files directory addon preference. Aborting export.".format(base_files_dir_error))
return {"CANCELLED"}
base_files_dir = addon_prefs.base_files_dir
if target_game == "THPS4":
DEFAULT_SKY_SCN = self.skybox_name + "scn.dat"
DEFAULT_SKY_TEX = self.skybox_name + "tex.dat"
elif target_game == "THUG1":
DEFAULT_SKY_SCN = self.skybox_name + ".scn.xbx"
DEFAULT_SKY_TEX = self.skybox_name + ".tex.xbx"
elif target_game == "THUG2":
DEFAULT_SKY_SCN = self.skybox_name + ".scn.xbx"
DEFAULT_SKY_TEX = self.skybox_name + ".tex.xbx"
else:
raise Exception("Unknown target game: {}".format(target_game))
start_time = datetime.datetime.now()
filename = self.filename
directory = self.directory
j = os.path.join
def md(dir):
if not os.path.exists(dir):
os.makedirs(dir)
ext_pre = (".prx" if target_game == "THUG2" else ".pre")
ext_col = (".col" if (target_game == "THUG1" and not self.pack_pre) else ".col.xbx" )
ext_scn = (".scn" if (target_game == "THUG1" and not self.pack_pre) else ".scn.xbx" )
ext_tex = (".tex" if (target_game == "THUG1" and not self.pack_pre) else ".tex.xbx" )
ext_qb = ".qb"
if target_game == "THPS4":
ext_col = "col.dat"
ext_scn = "scn.dat"
ext_tex = "tex.dat"
self.report({'OPERATOR'}, "")
self.report({'INFO'}, "-" * 20)
self.report({'INFO'}, "Starting export of {} at {}".format(filename, start_time.time()))
orig_objects, temporary_objects = [], []
import sys
logging_fh = logging.FileHandler(j(directory, filename + "_export.log"), mode='w')
logging_fh.setFormatter(logging.Formatter("{asctime} [{levelname}] {message}", style='{', datefmt="%H:%M:%S"))
logging_ch = logging.StreamHandler(sys.stdout)
logging_ch.setFormatter(logging.Formatter("{asctime} [{levelname}] {message}", style='{', datefmt="%H:%M:%S"))
set_export_scale(operator.export_scale)
try:
LOG.addHandler(logging_fh)
LOG.addHandler(logging_ch)
LOG.setLevel(logging.DEBUG)
# Create shadow caster objects (hacky!)
generate_shadowcasters()
if self.generate_col_file or self.generate_scn_file or self.generate_scripts_files:
orig_objects, temporary_objects = autosplit._prepare_autosplit_objects(operator, context,target_game)
path = j(directory, "Levels", filename)
md(path)
if self.generate_col_file:
export_col(filename + ext_col, path, target_game, self)
if self.generate_scn_file:
self.report({'OPERATOR'}, "Generating scene file... ")
export_scn(filename + ext_scn, path, target_game, self, is_model)
if self.generate_tex_file:
md(path)
self.report({'OPERATOR'}, "Generating tex file... ")
export_tex(filename + ext_tex, path, target_game, self)
# ********************************************************
# Export cubemap DDS textures
if True:
_lightmap_folder = bpy.path.basename(bpy.context.blend_data.filepath)[:-6] # = Name of blend file
_folder = bpy.path.abspath("//Tx_Cubemap/{}".format(_lightmap_folder))
for ob in bpy.data.objects:
if ob.type == 'EMPTY' and ob.thug_empty_props and ob.thug_empty_props.empty_type == 'CubemapProbe' \
and ob.thug_cubemap_props and ob.thug_cubemap_props.exported == True:
shutil.copy("{}/{}.dds".format(_folder, ob.name),
j(path, "{}.dds".format(ob.name)))
# ********************************************************
if self.generate_scn_file and self.generate_sky:
skypath = j(directory, "Levels", filename + "_sky")
md(skypath)
shutil.copy(
j(base_files_dir, 'default_sky', DEFAULT_SKY_SCN),
j(skypath, filename + "_sky" + ext_scn))
shutil.copy(
j(base_files_dir, 'default_sky', DEFAULT_SKY_TEX),
j(skypath, filename + "_sky" + ext_tex))
compilation_successful = None
if self.generate_scripts_files:
self.report({'OPERATOR'}, "Generating QB files... ")
export_qb(filename, path, target_game, self)
old_cwd = os.getcwd()
os.chdir(path)
compilation_successful = True
import platform
wine = [] if platform.system() == "Windows" else ["wine"]
# #########################
# Build NODEARRAY qb file
try:
print("Compiling {}.txt to QB...".format(filename))
roq_output = subprocess.run(wine + [
j(base_files_dir, "roq.exe"),
"-c",
filename + ".txt"
], stdout=subprocess.PIPE)
if os.path.exists(filename + ".qb"):
os.remove(filename + ".qb")
if os.path.exists(filename + ".txt.qb"):
os.rename(filename + ".txt.qb", filename + ".qb")
else:
self.report({"ERROR"}, "{}\n\nCompiler output:\nFailed to compile the QB file.".format(
'\n'.join(reversed(roq_output.stdout.decode().split("\r\n")))))
compilation_successful = False
finally:
os.chdir(old_cwd)
# /Build NODEARRAY qb file
# #########################
# #########################
# Build _SCRIPTS qb file
if os.path.exists(j(path, filename + "_scripts.txt")):
print("Compiling {}_scripts.txt to QB...".format(filename))
os.chdir(path)
try:
roq_output = subprocess.run(wine + [
j(base_files_dir, "roq.exe"),
"-c",
filename + "_scripts.txt"
], stdout=subprocess.PIPE)
if os.path.exists(filename + "_scripts.qb"):
os.remove(filename + "_scripts.qb")
if os.path.exists(filename + "_scripts.txt.qb"):
os.rename(filename + "_scripts.txt.qb", filename + "_scripts.qb")
else:
self.report({"ERROR"}, "{}\n\nCompiler output:\nFailed to compile the QB file.".format(
'\n'.join(reversed(roq_output.stdout.decode().split("\r\n")))))
compilation_successful = False
finally:
os.chdir(old_cwd)
# /Build _SCRIPTS qb file
# #########################
# #########################
# Build PRE files
if self.pack_pre and target_game != 'THPS4':
md(j(directory, "pre"))
# Export all level files to a single PRE container
if False:
pack_files = []
pack_files.append(j(path, filename + ext_scn))
pack_files.append(j(path, filename + ext_tex))
pack_files.append(j(path, filename + ext_col))
pack_files.append(j(path, filename + ext_qb))
pack_files.append(j(path, filename + "_scripts" + ext_qb))
if self.generate_sky:
pack_files.append(j(skypath, filename + "_sky" + ext_scn))
pack_files.append(j(skypath, filename + "_sky" + ext_tex))
pack_pre( directory, pack_files, j(directory, "pre", filename + ext_pre) )
self.report({'OPERATOR'}, "Exported " + j(directory, "pre", filename + ext_pre))
# Export all level files using the classic multi-PRE container setup
else:
if self.generate_scripts_files:
pack_files = []
pack_files.append(j(path, filename + ext_qb))
pack_files.append(j(path, filename + "_scripts" + ext_qb))
if target_game == "THUG2":
pack_files.append(j(path, filename + "_thugpro" + ext_qb))
pack_pre( directory, pack_files, j(directory, "pre", filename + "_scripts" + ext_pre) )
else:
pack_pre( directory, pack_files, j(directory, "pre", filename + ext_pre) )
self.report({'OPERATOR'}, "Exported " + j(directory, "pre", filename + ext_pre))
if self.generate_col_file:
pack_files = []
pack_files.append(j(path, filename + ext_col))
pack_pre( directory, pack_files, j(directory, "pre", filename + "col" + ext_pre) )
self.report({'OPERATOR'}, "Exported " + j(directory, "pre", filename + "col" + ext_pre))
if self.generate_scn_file:
pack_files = []
pack_files.append(j(path, filename + ext_scn))
pack_files.append(j(path, filename + ext_tex))
if self.generate_sky:
pack_files.append(j(skypath, filename + "_sky" + ext_scn))
pack_files.append(j(skypath, filename + "_sky" + ext_tex))
pack_pre( directory, pack_files, j(directory, "pre", filename + "scn" + ext_pre) )
self.report({'OPERATOR'}, "Exported " + j(directory, "pre", filename + "scn" + ext_pre))
# /Build PRE files
# #########################
# Remove shadow caster objects (so hacky!)
cleanup_shadowcasters()
# Make sure our generated grass materials/textures are removed after export
cleanup_grass_materials()
end_time = datetime.datetime.now()
if (compilation_successful is None) or compilation_successful:
print("EXPORT COMPLETE! Thank you for waiting :)")
self.report({'INFO'}, "Exported level {} at {} (time taken: {})".format(filename, end_time.time(), end_time - start_time))
else:
print("EXPORT FAILED! Uh oh :(")
self.report({'WARNING'}, "Failed exporting level {} at {} (time taken: {})".format(filename, end_time.time(), end_time - start_time))
# -------------------------------------------------
# Final step: generate level manifest .json file!
# -------------------------------------------------
export_level_manifest_json(filename, directory, self, context.scene.thug_level_props)
except ExportError as e:
self.report({'ERROR'}, "Export failed.\nExport error: {}".format(str(e)))
except Exception as e:
LOG.debug(e)
raise
finally:
LOG.removeHandler(logging_fh)
LOG.removeHandler(logging_ch)
autosplit._cleanup_autosplit_objects(operator, context, target_game, orig_objects, temporary_objects)
return {'FINISHED'}
#----------------------------------------------------------------------------------
def do_export_model(operator, context, target_game):
is_model = True
self = operator
import subprocess, shutil, datetime
addon_prefs = context.user_preferences.addons[ADDON_NAME].preferences
base_files_dir_error = prefs._get_base_files_dir_error(addon_prefs)
if base_files_dir_error:
self.report({"ERROR"}, "Base files directory error: {} Check the base files directory addon preference. Aborting export.".format(base_files_dir_error))
return {"CANCELLED"}
base_files_dir = addon_prefs.base_files_dir
if not target_game == "THUG1" and not target_game == "THUG2" and not target_game == "THPS4":
raise Exception("Unknown target game: {}".format(target_game))
start_time = datetime.datetime.now()
filename = self.filename
directory = self.directory
j = os.path.join
def md(dir):
if not os.path.exists(dir):
os.makedirs(dir)
ext_col = (".col" if target_game == "THUG1" else ".col.xbx" )
ext_scn = (".mdl" if target_game == "THUG1" else ".mdl.xbx" )
ext_tex = (".tex" if target_game == "THUG1" else ".tex.xbx" )
ext_qb = ".qb"
if self.model_type == "skin":
ext_scn = (".skin" if target_game == "THUG1" else ".skin.xbx" )
if target_game == "THPS4":
ext_col = "col.dat"
ext_scn = "skin.dat" if self.model_type == "skin" else "mdl.dat"
ext_tex = "tex.dat"
self.report({'OPERATOR'}, "")
self.report({'INFO'}, "-" * 20)
self.report({'INFO'}, "Starting export of {} at {}".format(filename, start_time.time()))
orig_objects, temporary_objects = [], []
import sys
logging_fh = logging.FileHandler(j(directory, filename + "_export.log"), mode='w')
logging_fh.setFormatter(logging.Formatter("{asctime} [{levelname}] {message}", style='{', datefmt="%H:%M:%S"))
logging_ch = logging.StreamHandler(sys.stdout)
logging_ch.setFormatter(logging.Formatter("{asctime} [{levelname}] {message}", style='{', datefmt="%H:%M:%S"))
set_export_scale(operator.export_scale)
try:
LOG.addHandler(logging_fh)
LOG.addHandler(logging_ch)
LOG.setLevel(logging.DEBUG)
orig_objects, temporary_objects = autosplit._prepare_autosplit_objects(operator, context,target_game)
path = j(directory, "Models/" + filename)
md(path)
# Generate COL file
self.report({'OPERATOR'}, "Generating collision file... ")
export_col(filename + ext_col, path, target_game, self)
# Generate SCN/MDL file
self.report({'OPERATOR'}, "Generating scene file... ")
export_scn(filename + ext_scn, path, target_game, self, is_model)
# Generate TEX file
self.report({'OPERATOR'}, "Generating tex file... ")
export_tex(filename + ext_tex, path, target_game, self)
# Maybe generate QB file
compilation_successful = None
if self.generate_scripts_files:
self.report({'OPERATOR'}, "Generating QB files... ")
export_model_qb(filename, path, target_game, self)
old_cwd = os.getcwd()
os.chdir(path)
compilation_successful = True
import platform
wine = [] if platform.system() == "Windows" else ["wine"]
try:
roq_output = subprocess.run(wine + [
j(base_files_dir, "roq.exe"),
"-c",
filename + ".txt"
], stdout=subprocess.PIPE)
if os.path.exists(filename + ".qb"):
os.remove(filename + ".qb")
if os.path.exists(filename + ".txt.qb"):
os.rename(filename + ".txt.qb", filename + ".qb")
else:
self.report({"ERROR"}, "{}\n\nCompiler output:\nFailed to compile the QB file.".format(
'\n'.join(reversed(roq_output.stdout.decode().split("\r\n")))))
compilation_successful = False
finally:
os.chdir(old_cwd)
end_time = datetime.datetime.now()
if (compilation_successful is None) or compilation_successful:
self.report({'INFO'}, "Exported model {} at {} (time taken: {})".format(filename, end_time.time(), end_time - start_time))
else:
self.report({'WARNING'}, "Failed exporting model {} at {} (time taken: {})".format(filename, end_time.time(), end_time - start_time))
except ExportError as e:
self.report({'ERROR'}, "Export failed.\nExport error: {}".format(str(e)))
except Exception as e:
LOG.debug(e)
raise
finally:
LOG.removeHandler(logging_fh)
LOG.removeHandler(logging_ch)
autosplit._cleanup_autosplit_objects(operator, context, target_game, orig_objects, temporary_objects)
return {'FINISHED'}
#----------------------------------------------------------------------------------
def generate_shadowcasters():
print("Creating shadow casters...")
out_objects = [o for o in bpy.data.objects
if (o.type == "MESH"
and getattr(o, 'thug_export_scene', True)
and not o.get("thug_autosplit_object_no_export_hack", False))]
scene = bpy.context.scene
sc_count = -1
sc_mat_count = -1
for ob in out_objects:
if not ob.thug_cast_shadow:
continue
ob_name = ob.name
if ob.name.endswith("_SCN"):
ob_name = ob.name[:-4]
print("Creating shadow caster object(s) for mesh: {}".format(ob_name))
sc_count += 1
new_ob = ob.copy()
new_ob.data = ob.data.copy()
# Create empty collision mesh, and an SCN mesh
new_col_mesh = bpy.data.meshes.new(name="GEN_ShadowCaster_" + str(sc_count) + "_MESH")
new_col_ob = bpy.data.objects.new(name="GEN_ShadowCaster_" + str(sc_count), object_data=new_col_mesh)
new_ob.name = "GEN_ShadowCaster_" + str(sc_count) + "_SCN"
new_col_ob.thug_object_class = "LevelObject"
new_ob.thug_object_class = "LevelGeometry"
new_ob.thug_export_scene = True
new_ob.thug_export_collision = False
#new_ob.scale[0] = 1.1
#new_ob.scale[1] = 1.1
#new_ob.scale[2] = 1.1
new_col_ob.thug_export_scene = False
new_col_ob.thug_export_collision = True
for mat_slot in new_ob.material_slots:
sc_mat_count += 1
orig_mat = mat_slot.material
mat_slot.material = mat_slot.material.copy()
mat_slot.material.thug_material_props.use_new_mats = False
mat_slot.material.thug_material_props.specular_power = -0.23
mat_slot.material.name = "GEN_Mat_ShadowCaster_" + str(sc_mat_count)
scene.objects.link(new_ob)
scene.objects.link(new_col_ob)
#helpers._flip_normals(new_ob)
def cleanup_shadowcasters():
print("Removing shadow casters...")
for ob in bpy.data.objects:
if ob.name.startswith("GEN_ShadowCaster_"):
bpy.data.objects.remove(ob)
for mat in bpy.data.materials:
if mat.name.startswith("GEN_Mat_ShadowCaster_"):
bpy.data.materials.remove(mat)
#----------------------------------------------------------------------------------
def export_scn(filename, directory, target_game, operator=None, is_model=False):
def w(fmt, *args):
outp.write(struct.pack(fmt, *args))
output_file = os.path.join(directory, filename)
with open(output_file, "wb") as outp:
w("3I", 1, 1, 1)
if target_game == "THPS4":
export_materials_th4(outp, target_game, operator, is_model)
else:
export_materials(outp, target_game, operator, is_model)
if target_game == "THUG2":
export_scn_sectors_ug2(outp, operator, is_model)
elif target_game == "THUG1":
export_scn_sectors(outp, operator, is_model)
elif target_game == "THPS4":
export_scn_sectors_th4(outp, operator, is_model)
else:
raise Exception("Unknown target game: {}".format(target_game))
w("i", 0) # number of hierarchy objects
#----------------------------------------------------------------------------------
def export_col(filename, directory, target_game, operator=None):
from io import BytesIO
p = Printer()
output_file = os.path.join(directory, filename)
bm = bmesh.new()
# Applies modifiers and triangulates mesh - unless the 'speed hack' export option is on
def triang(o):
if operator.speed_hack:
final_mesh = o.data
bm.clear()
bm.from_mesh(final_mesh)
else:
final_mesh = o.to_mesh(bpy.context.scene, True, 'PREVIEW')
if helpers._need_to_flip_normals(o):
temporary_object = helpers._make_temp_obj(final_mesh)
try:
bpy.context.scene.objects.link(temporary_object)
# temporary_object.matrix_world = o.matrix_world
helpers._flip_normals(temporary_object)
finally:
if bpy.context.mode != "OBJECT":
bpy.ops.object.mode_set(mode="OBJECT")
bpy.context.scene.objects.unlink(temporary_object)
bpy.data.objects.remove(temporary_object)
bm.clear()
bm.from_mesh(final_mesh)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.faces.ensure_lookup_table()
bm.faces.index_update()
bpy.data.meshes.remove(final_mesh)
return
out_objects = [o for o in bpy.data.objects
if (o.type == "MESH"
and getattr(o, 'thug_export_collision', True)
and not o.get("thug_autosplit_object_no_export_hack", False))]
total_verts = 0 # sum(len(bm.verts) for o in out_objects if [triang(o)])
total_faces = 0 # sum(len(bm.faces) for o in out_objects if [triang(o)])
with open(output_file, "wb") as outp:
def w(fmt, *args):
outp.write(struct.pack(fmt, *args))
verts_out = BytesIO()
intensities_out = BytesIO()
faces_out = BytesIO()
thug2_thing_out = BytesIO()
nodes_out = BytesIO()
col_version = 10
if target_game == 'THUG1':
col_version = 9
elif target_game == 'THPS4':
col_version = 8
w("i", col_version) # version
w("i", len(out_objects)) # num objects
total_verts_offset = outp.tell()
w("i", total_verts)
w("i", total_faces if target_game != 'THPS4' else 0) # large faces
w("i", 0) # small faces
w("i", total_verts) # large verts
w("i", 0) # small verts
w("i", 0) # padding
obj_face_offset = 0
obj_vert_offset = 0
obj_bsp_offset = 0
obj_intensity_offset = 0
bsp_nodes_size = 0
node_face_index_offset = 0
node_faces = []
DBG = lambda *args: LOG.debug(" ".join(str(arg) for arg in args))
for o in out_objects:
def w(fmt, *args):
outp.write(struct.pack(fmt, *args))
LOG.debug("Exporting object: {}".format(o.name))
triang(o)
total_verts += len(bm.verts)
total_faces += len(bm.faces)
if "thug_checksum" in o:
w("i", o["thug_checksum"])
else:
clean_name = get_clean_name(o)
if is_hex_string(clean_name):
w("I", int(clean_name, 0)) # checksum
else:
w("I", crc_from_string(bytes(clean_name, 'ascii'))) # checksum
w("H", o.thug_col_obj_flags)
if len(bm.verts) > 2**16:
raise ExportError("Too many vertices in an object: {} (has {}, max is {}). Consider using Autosplit.".format(o.name, len(bm.verts), 2**16))
w("H", len(bm.verts))
MAX_TRIS = 6000 # min(6000, 2**16)
#if (len(bm.faces) * (3 if target_game == "THUG2" else 1)) > MAX_TRIS:
if len(bm.faces) > MAX_TRIS:
raise ExportError("Too many tris in an object: {} (has {}, max is {}). Consider using Autosplit.".format(
o.name,
len(bm.faces),
MAX_TRIS))
# 2**16 // (3 if target_game == "THUG2" else 1)))
w("H", len(bm.faces))
w("?", False) # use face small
w("?", False) # use fixed verts
w("I", obj_face_offset)
if target_game == 'THPS4':
obj_face_offset += SIZEOF_LARGE_FACE_THPS4 * len(bm.faces)
else:
obj_face_offset += SIZEOF_LARGE_FACE * len(bm.faces)
obj_matrix = get_scale_matrix(o) if o.thug_object_class == "LevelObject" else o.matrix_world
#obj_matrix = o.matrix_world
if operator.is_park_editor:
# AFAIK we don't modify the bounding box for dictionary collision, only the scene.
# But if this changes I'll update it here!
bbox = get_bbox2(bm.verts, obj_matrix, operator.is_park_editor)
else:
bbox = get_bbox2(bm.verts, obj_matrix)
w("4f", *bbox[0])
w("4f", *bbox[1])
w("I", obj_vert_offset)
if target_game == 'THPS4':
obj_vert_offset += len(bm.verts)
else:
obj_vert_offset += SIZEOF_FLOAT_VERT * len(bm.verts)
w("I", obj_bsp_offset)
obj_bsp_tree = make_bsp_tree(o, bm.faces[:], obj_matrix)
obj_bsp_offset += len(list(iter_tree(obj_bsp_tree))) * SIZEOF_BSP_NODE
# THPS4: Intensity list does not exist, intensity is appended to each vert
if target_game == 'THPS4':
w("I", 0)
else:
w("I", obj_intensity_offset)
obj_intensity_offset += len(bm.verts)
w("I", 0) # padding
def w(fmt, *args):
verts_out.write(struct.pack(fmt, *args))
for v in bm.verts:
w("3f", *to_thug_coords(obj_matrix * v.co))
if target_game == 'THPS4':
w("B", 0xFF) # Intensity data(?)
w("B", 0xFF) # Intensity data(?)
w("B", 0xFF) # Intensity data(?)
w("B", 0xFF) # Intensity data(?)
if target_game != 'THPS4':
def w(fmt, *args):
intensities_out.write(struct.pack(fmt, *args))
intensity_layer = bm.loops.layers.color.get("intensity")
if not intensity_layer:
intensity_layer = bm.loops.layers.color.get("bake")
if not intensity_layer:
intensity_layer = bm.loops.layers.color.get("color")
if intensity_layer:
intensities_list = {}
for face in bm.faces:
for loop in face.loops:
tmp_intensity = int((( loop[intensity_layer].r + loop[intensity_layer].g + loop[intensity_layer].b ) / 3.0) * 255)
intensities_list[loop.vert] = tmp_intensity
for vert in bm.verts:
if vert in intensities_list:
w('B', intensities_list[vert])
else:
w('B', 128)
else:
intensities_out.write(b'\xff' * len(bm.verts))
def w(fmt, *args):
faces_out.write(struct.pack(fmt, *args))
cfl = bm.faces.layers.int.get("collision_flags")
ttl = bm.faces.layers.int.get("terrain_type")
# bm.verts.ensure_lookup_table()
# Face flags are output here!
for face in bm.faces:
if cfl and (face[cfl] & FACE_FLAGS["mFD_TRIGGER"]):
if o.thug_triggerscript_props.template_name_txt == "" or o.thug_triggerscript_props.template_name_txt == "None" or \
(o.thug_triggerscript_props.template_name_txt == "Custom" and o.thug_triggerscript_props.custom_name == ""):
# This object has a Trigger face, but no TriggerScript assigned
# Normally this would crash the game, so let's create and assign a blank script!
get_triggerscript("io_thps_scene_NullScript")
#o.thug_triggerscript_props.template_name = "Custom"
o.thug_triggerscript_props.template_name_txt = "Custom"
o.thug_triggerscript_props.custom_name = "io_thps_scene_NullScript"
LOG.debug("WARNING: Object {} has trigger faces but no TriggerScript. A blank script was assigned.".format(o.name))
#raise Exception("Collision object " + o.name + " has a trigger face with no TriggerScript attached to the object! This is for your own safety!")
w("H", face[cfl] if cfl else 0)
tt = collision._resolve_face_terrain_type(o, bm, face)
w("H", tt)
for vert in face.verts:
w("H", vert.index)
if target_game == 'THPS4':
w("H", 0) # Padding?
if target_game == "THUG2":
def w(fmt, *args):
thug2_thing_out.write(struct.pack(fmt, *args))
thug2_thing_out.write(b'\x00' * len(bm.faces))
#p("I am at: {}", outp.tell())
def w(fmt, *args):
nodes_out.write(struct.pack(fmt, *args))
bsp_nodes_start = bsp_nodes_size
node_list, node_indices = tree_to_list(obj_bsp_tree)
for idx, node in enumerate(node_list):
# assert idx == node_indices[id(node)]
# DBG(node_indices[id(node)])
bsp_nodes_size += SIZEOF_BSP_NODE
if isinstance(node, BSPLeaf):
w("B", 0xFF if target_game == 'THPS4' else 3) # the axis it is split on (0 = X, 1 = Y, 2 = Z, 3 = Leaf)
w("B", 0) # padding
w("H", len(node.faces) * (3 if False and target_game == "THUG2" else 1))
w("I", node_face_index_offset)
# exported |= set(node.faces)
for face in node.faces:
# assert bm.faces[face.index] == face
node_faces.append(face.index)
node_face_index_offset += len(node.faces) * (3 if False and target_game == "THUG2" else 1)
#if target_game == 'THPS4':
# # Padding?
# w("I", 0xFFFFFFFF)
# w("I", 0xFFFFFFFF)
else:
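                    # Pack the split axis into the low 2 bits and the split point as
                    # fixed-point (value * 16) into the bits above them.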
split_axis_and_point = (
(node.split_axis & 0x3) |
# 1 |
(int(node.split_point * 16.0) << 2)
)
w("i", split_axis_and_point)
w("I", (bsp_nodes_start + node_indices[id(node.left)] * SIZEOF_BSP_NODE))
def w(fmt, *args):
outp.write(struct.pack(fmt, *args))
tmp_offset = outp.tell()
outp.seek(total_verts_offset)
w("i", total_verts)
w("i", total_faces)
w("i", 0) # small faces
w("i", total_verts)
outp.seek(tmp_offset)
LOG.debug("offset obj list: {}".format(outp.tell()))
outp.write(b'\x00' * calc_alignment_diff(outp.tell(), 16))
LOG.debug("offset verts: {}".format(outp.tell()))
outp.write(verts_out.getbuffer())
if target_game != 'THPS4':
LOG.debug("offset intensities: {}".format(outp.tell()))
# intensity
outp.write(intensities_out.getbuffer())
alignment_diff = calc_alignment_diff(outp.tell(), 4)
if alignment_diff != 0:
LOG.debug("A: ".format(alignment_diff))
outp.write(b'\x00' * alignment_diff)
# outp.write(b'\x00' * calc_alignment_diff(SIZEOF_FLOAT_VERT * total_verts + total_verts), 4)
LOG.debug("offset faces: {}".format(outp.tell()))
outp.write(faces_out.getbuffer())
if target_game == "THUG2":
# alignment_diff = calc_alignment_diff(total_verts, 4)
alignment_diff = calc_alignment_diff(outp.tell(), 2)
if alignment_diff != 0:
LOG.debug("B: {}".format(alignment_diff))
outp.write(b'\x00' * alignment_diff)
else:
# LOG.debug("B TH1!")
if total_faces & 1:
outp.write(b'\x00' * 2)
if target_game == "THUG2":
LOG.debug("offset thug2 thing: {}".format(outp.tell()))
outp.write(thug2_thing_out.getbuffer())
alignment_diff = calc_alignment_diff(outp.tell(), 4)
if alignment_diff != 0:
LOG.debug("C: {}".format(alignment_diff))
outp.write(b'\x00' * alignment_diff)
LOG.debug("offset nodes: {}".format(outp.tell()))
w("I", bsp_nodes_size)
outp.write(nodes_out.getbuffer())
for face in node_faces:
w("H", face)
bm.free()
#----------------------------------------------------------------------------------
def calc_alignment_diff(offset, alignment):
assert offset >= 0 and alignment >= 0
if offset % alignment == 0:
return 0
return alignment - (offset % alignment)
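# Illustrative examples (not in the original file) of how the helper above behaves:
#   calc_alignment_diff(13, 16) == 3   # 13 + 3 reaches the next 16-byte boundary
#   calc_alignment_diff(32, 16) == 0   # already aligned, no padding needed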
#----------------------------------------------------------------------------------
#- Runs the 'Quick export', validating the settings first
#----------------------------------------------------------------------------------
def maybe_export_scene(operator, scene):
def scene_settings_are_valid(level_props):
return (level_props.scene_name != '' and level_props.export_props.target_game != '' and \
level_props.export_props.directory != '' and level_props.export_props.scene_type != '' )
if not hasattr(scene, 'thug_level_props') or not hasattr(scene.thug_level_props, 'export_props'):
operator.report({'ERROR'}, "Unable to run quick export - scene settings were not found!")
#raise Exception('Unable to run quick export - scene settings were not found!')
return False
if not scene_settings_are_valid(scene.thug_level_props):
operator.report({'ERROR'}, "Invalid scene settings. Enter a scene name and select the game/export dir/export type first!")
#raise Exception('Unable to run quick export - scene settings are not valid. Make sure you enter a scene name and select the game/export dir/export type first!')
return False
scene.thug_level_props.export_props.filename = scene.thug_level_props.scene_name
scene.thug_level_props.export_props.directory = bpy.path.abspath(scene.thug_level_props.export_props.directory)
if scene.thug_level_props.export_props.scene_type == 'Level':
do_export(scene.thug_level_props.export_props, bpy.context, scene.thug_level_props.export_props.target_game)
else:
do_export_model(scene.thug_level_props.export_props, bpy.context, scene.thug_level_props.export_props.target_game)
return True
# OPERATORS
#############################################
class SceneToTHPS4Files(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_th4_xbx"
bl_label = "Scene to THPS4 level files"
# bl_options = {'REGISTER', 'UNDO'}
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All",
description = "Applies the autosplit setting to all objects in the scene, with default settings.", default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
pack_pre = BoolProperty(name="Pack files into .prx", default=True)
is_park_editor = BoolProperty(name="Is Park Editor",
description="Use this option when exporting a park editor dictionary.", default=False)
generate_tex_file = BoolProperty(name="Generate a .tex file", default=True)
generate_scn_file = BoolProperty(name="Generate a .scn file", default=True)
generate_sky = BoolProperty(name="Generate skybox", default=True,description="Check to export a skybox with this scene.")
generate_col_file = BoolProperty(name="Generate a .col file", default=True)
generate_scripts_files = BoolProperty(name="Generate scripts", default=True)
# filepath = StringProperty()
skybox_name = StringProperty(name="Skybox name", default="THUG_Sky")
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(
name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export(self, context, "THPS4")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "generate_sky", toggle=True, icon='MAT_SPHERE_SKY')
if self.generate_sky:
box = self.layout.box().column(True)
box.row().prop(self, "skybox_name")
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "pack_pre", toggle=True, icon='PACKAGE')
self.layout.row().prop(self, "is_park_editor", toggle=True, icon='PACKAGE')
self.layout.row().prop(self, "generate_tex_file", toggle=True, icon='TEXTURE_DATA')
self.layout.row().prop(self, "generate_scn_file", toggle=True, icon='SCENE_DATA')
self.layout.row().prop(self, "generate_col_file", toggle=True, icon='OBJECT_DATA')
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
#----------------------------------------------------------------------------------
class SceneToTHPS4Model(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_th4_model"
bl_label = "Scene to THPS4 model"
# bl_options = {'REGISTER', 'UNDO'}
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
is_park_editor = BoolProperty(name="Is Park Editor", default=False, options={'HIDDEN'})
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All"
, description = "Applies the autosplit setting to all objects in the scene, with default settings."
, default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
model_type = EnumProperty(items = (
("skin", ".skin", "Character skin, used for playable characters and pedestrians."),
("mdl", ".mdl", "Model used for vehicles and other static mesh."),
), name="Model Type", default="skin")
generate_scripts_files = BoolProperty(
name="Generate scripts",
default=True)
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(
name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export_model(self, context, "THPS4")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "model_type", expand=True)
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
#----------------------------------------------------------------------------------
class SceneToTHUGFiles(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_thug_xbx"
bl_label = "Scene to THUG1 level files"
# bl_options = {'REGISTER', 'UNDO'}
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All",
description = "Applies the autosplit setting to all objects in the scene, with default settings.", default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
pack_pre = BoolProperty(name="Pack files into .prx", default=True)
is_park_editor = BoolProperty(name="Is Park Editor",
description="Use this option when exporting a park editor dictionary.", default=False)
generate_tex_file = BoolProperty(name="Generate a .tex file", default=True)
generate_scn_file = BoolProperty(name="Generate a .scn file", default=True)
generate_sky = BoolProperty(name="Generate skybox", default=True,description="Check to export a skybox with this scene.")
generate_col_file = BoolProperty(name="Generate a .col file", default=True)
generate_scripts_files = BoolProperty(name="Generate scripts", default=True)
# filepath = StringProperty()
skybox_name = StringProperty(name="Skybox name", default="THUG_Sky")
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(
name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export(self, context, "THUG1")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "generate_sky", toggle=True, icon='MAT_SPHERE_SKY')
if self.generate_sky:
box = self.layout.box().column(True)
box.row().prop(self, "skybox_name")
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "pack_pre", toggle=True, icon='PACKAGE')
self.layout.row().prop(self, "is_park_editor", toggle=True, icon='PACKAGE')
self.layout.row().prop(self, "generate_tex_file", toggle=True, icon='TEXTURE_DATA')
self.layout.row().prop(self, "generate_scn_file", toggle=True, icon='SCENE_DATA')
self.layout.row().prop(self, "generate_col_file", toggle=True, icon='OBJECT_DATA')
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
#----------------------------------------------------------------------------------
class SceneToTHUGModel(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_thug_model"
bl_label = "Scene to THUG1 model"
# bl_options = {'REGISTER', 'UNDO'}
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
is_park_editor = BoolProperty(name="Is Park Editor", default=False, options={'HIDDEN'})
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All"
, description = "Applies the autosplit setting to all objects in the scene, with default settings."
, default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
model_type = EnumProperty(items = (
("skin", ".skin", "Character skin, used for playable characters and pedestrians."),
("mdl", ".mdl", "Model used for vehicles and other static mesh."),
), name="Model Type", default="skin")
generate_scripts_files = BoolProperty(
name="Generate scripts",
default=True)
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(
name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export_model(self, context, "THUG1")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "model_type", expand=True)
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
# OPERATORS
#############################################
class SceneToTHUG2Files(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_thug2_xbx"
bl_label = "Scene to THUG2/PRO level files"
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All"
, description = "Applies the autosplit setting to all objects in the scene, with default settings."
, default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
is_park_editor = BoolProperty(name="Is Park Editor",
description="Use this option when exporting a park editor dictionary.", default=False)
pack_pre = BoolProperty(name="Pack files into .prx", default=True)
generate_tex_file = BoolProperty(
name="Generate a .tex file",
description="If you have already generated a .tex file, and didn't change/add any new images in meantime, you can uncheck this.", default=True)
generate_scn_file = BoolProperty(name="Generate a .scn file", default=True)
generate_col_file = BoolProperty(name="Generate a .col file", default=True)
generate_sky = BoolProperty(name="Generate skybox", default=True,description="Check to export a skybox with this scene.")
generate_scripts_files = BoolProperty(name="Generate scripts", default=True)
skybox_name = StringProperty(name="Skybox name", default="THUG2_Sky")
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export(self, context, "THUG2")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "generate_sky", toggle=True, icon='MAT_SPHERE_SKY')
if self.generate_sky:
box = self.layout.box().column(True)
box.row().prop(self, "skybox_name")
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "pack_pre", toggle=True, icon='PACKAGE')
self.layout.row().prop(self, "generate_tex_file", toggle=True, icon='TEXTURE_DATA')
self.layout.row().prop(self, "generate_scn_file", toggle=True, icon='SCENE_DATA')
self.layout.row().prop(self, "generate_col_file", toggle=True, icon='OBJECT_DATA')
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
#----------------------------------------------------------------------------------
class SceneToTHUG2Model(bpy.types.Operator): #, ExportHelper):
bl_idname = "export.scene_to_thug2_model"
bl_label = "Scene to THUG2 model"
# bl_options = {'REGISTER', 'UNDO'}
def report(self, category, message):
LOG.debug("OP: {}: {}".format(category, message))
super().report(category, message)
filename = StringProperty(name="File Name")
directory = StringProperty(name="Directory")
always_export_normals = BoolProperty(name="Export normals", default=False)
use_vc_hack = BoolProperty(name="Vertex color hack",
description = "Doubles intensity of vertex colours. Enable if working with an imported scene that appears too dark in game."
, default=False)
speed_hack = BoolProperty(name="No modifiers (speed hack)",
description = "Don't apply any modifiers to objects. Much faster with large scenes, but all mesh must be triangles prior to export.", default=False)
# AUTOSPLIT SETTINGS
autosplit_everything = BoolProperty(name="Autosplit All",
description = "Applies the autosplit setting to all objects in the scene, with default settings.", default=False)
    autosplit_faces_per_subobject = IntProperty(name="Faces Per Subobject",
        description="The max number of faces for every created subobject.",
        default=800, min=50, max=6000)
    autosplit_max_radius = FloatProperty(name="Max Radius",
        description="The max radius for every created subobject.",
        default=2000, min=100, max=5000)
# /AUTOSPLIT SETTINGS
is_park_editor = BoolProperty(name="Is Park Editor", default=False, options={'HIDDEN'})
model_type = EnumProperty(items = (
("skin", ".skin", "Character skin, used for playable characters and pedestrians."),
("mdl", ".mdl", "Model used for vehicles and other static mesh."),
), name="Model Type", default="skin")
generate_scripts_files = BoolProperty(name="Generate scripts", default=True)
export_scale = FloatProperty(name="Export scale", default=1)
mipmap_offset = IntProperty(name="Mipmap offset",
description="Offsets generation of mipmaps (default is 0). For example, setting this to 1 will make the base texture 1/4 the size. Use when working with very large textures.",
min=0, max=4, default=0)
only_offset_lightmap = BoolProperty(name="Only Lightmaps", default=False, description="Mipmap offset only applies to lightmap textures.")
def execute(self, context):
return do_export_model(self, context, "THUG2")
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(self, context):
self.layout.row().prop(self, "always_export_normals", toggle=True, icon='SNAP_NORMAL')
self.layout.row().prop(self, "use_vc_hack", toggle=True, icon='COLOR')
self.layout.row().prop(self, "speed_hack", toggle=True, icon='FF')
self.layout.row().prop(self, "autosplit_everything", toggle=True, icon='MOD_EDGESPLIT')
if self.autosplit_everything:
box = self.layout.box().column(True)
box.row().prop(self, "autosplit_faces_per_subobject")
box.row().prop(self, "autosplit_max_radius")
self.layout.row().prop(self, "model_type", expand=True)
self.layout.row().prop(self, "generate_scripts_files", toggle=True, icon='FILE_SCRIPT')
self.layout.row().prop(self, "export_scale")
box = self.layout.box().column(True)
box.row().prop(self, "mipmap_offset")
box.row().prop(self, "only_offset_lightmap")
class THUGQuickExport(bpy.types.Operator):
bl_idname = "export.thug_quick_export"
bl_label = "Quick Export"
def execute(self, context):
if maybe_export_scene(self, context.scene):
self.report({'INFO'}, "Quick export successfully completed!")
return {'FINISHED'}
# PANELS
#############################################
#----------------------------------------------------------------------------------
class THUGExportTools(bpy.types.Panel):
bl_label = "TH Export Tools"
bl_region_type = "TOOLS"
bl_space_type = "VIEW_3D"
bl_category = "THUG Tools"
@classmethod
def poll(cls, context):
return context.user_preferences.addons[ADDON_NAME].preferences.object_settings_tools
def draw(self, context):
if not context.scene: return
scene = context.scene
box = self.layout.box().column(True)
box.row().operator(THUGQuickExport.bl_idname, text=THUGQuickExport.bl_label, icon='PACKAGE')
|
[
"[email protected]"
] | |
84ec14edf9f60af5d9d9081ebb73bf5f9c70611f
|
3d9146f1ef0b2ccdd684096625cd03f3bb199c10
|
/convert_Slurm_to_SGE.py
|
de9e61a593fc9b16e9ef219f1431fddee549d51d
|
[] |
no_license
|
StanfordBioinformatics/GBSC_Billing
|
b674d3646ab0b7a5f1d9b08d692fd83a84b3c566
|
ebf4e52bef46bf3024486b6a902f03bf2c1c5cc6
|
refs/heads/master
| 2023-08-21T17:18:59.236964 | 2023-08-03T18:21:42 | 2023-08-03T18:21:42 | 13,337,415 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,266 |
py
|
#!/usr/bin/env python3
#===============================================================================
#
# convert_Slurm_to_SGE.py - Converts Slurm Accounting file to SGE Accounting file
#
# ARGS:
# 1st: the Slurm Accounting file to be input
# 2nd: the name of the file in which to put the SGE Accounting output
#
# SWITCHES:
# --billing_root: Location of BillingRoot directory (overrides BillingConfig.xlsx)
# [default if no BillingRoot in BillingConfig.xlsx or switch given: CWD]
#
# INPUT:
# Slurm Accounting snapshot file (from command sacct --format=ALL).
#
# OUTPUT:
# SGE Accounting file
# Various messages about current processing status to STDOUT.
#
# ASSUMPTIONS:
#
# AUTHOR:
# Keith Bettinger
#
#==============================================================================
#=====
#
# IMPORTS
#
#=====
import argparse
import calendar
import csv
import re
import time
import os.path
import sys
# For SGE and Slurm CSV dialect definitions
import job_accounting_file
# Simulate an "include billing_common.py".
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
exec(compile(open(os.path.join(SCRIPT_DIR, "billing_common.py"), "rb").read(), os.path.join(SCRIPT_DIR, "billing_common.py"), 'exec'))
#=====
#
# CONSTANTS
#
#=====
#=====
#
# GLOBALS
#
#=====
# In billing_common.py
global ACCOUNTING_FIELDS
#=====
#
# FUNCTIONS
#
#=====
def get_nodes_from_nodelist(nodelist_str):
#
    # I can split the node list by commas, but some nodes have suffix lists between square brackets, ALSO delimited by commas.
    #
    # Need to convert commas to semicolons in lists marked by [ ]'s
    match_bracket_lists = re.findall(r'(\[.*?\]+)', nodelist_str)
for substr in match_bracket_lists:
new_substr = substr.replace(',', ';')
nodelist_str = nodelist_str.replace(substr, new_substr)
    # Now, with the commas only separating the nodes, we can split the node list by commas (still need to unpack the square-bracket lists).
node_list_before_unpacking = nodelist_str.split(',')
node_list = []
for node in node_list_before_unpacking:
# Try to break node up into prefix-[suffixes].
        match_prefix_and_bracket_lists = re.search(r'^(?P<prefix>[^\[]+)\[(?P<suffixes>[^\]]+)\]$', node)
# If node doesn't match pattern above, add the whole node name.
if not match_prefix_and_bracket_lists:
node_list.append(node)
else:
match_dict = match_prefix_and_bracket_lists.groupdict()
prefix = match_dict['prefix']
suffixes = match_dict['suffixes'].split(';')
for suffix in suffixes:
node_list.append(prefix + suffix)
return node_list
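# Example (illustrative hostnames): one bracketed suffix group is expanded, so
#   get_nodes_from_nodelist("sh-101-01,sh-102-[01,02]")
# returns ['sh-101-01', 'sh-102-01', 'sh-102-02']. Dash ranges such as "[01-04]"
# are NOT expanded here, only comma-separated suffixes.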
def slurm_time_to_seconds(slurm_time_str):
# Convert string of form DD-HH:MM:SS to seconds
elapsed_days_split = slurm_time_str.split('-')
if len(elapsed_days_split) == 1:
elapsed_days = 0
elapsed_hms = elapsed_days_split[0]
elif len(elapsed_days_split) == 2:
elapsed_days = int(elapsed_days_split[0])
elapsed_hms = elapsed_days_split[1]
    else:
        print("Time string of", slurm_time_str, "is malformed.", file=sys.stderr)
        return 0  # return early; without this, elapsed_days/elapsed_hms below would be unbound
seconds = (elapsed_days * 86400) + sum(int(x) * 60 ** i for i, x in enumerate(reversed(elapsed_hms.split(":"))))
return seconds
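# Examples (illustrative):
#   slurm_time_to_seconds("1-02:03:04") == 93784   # 1*86400 + 2*3600 + 3*60 + 4
#   slurm_time_to_seconds("02:03:04")   == 7384    # no day component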
def convert_slurm_file_to_sge_file(slurm_fp, sge_fp):
# Read in the header line from the Slurm file to use for the DictReader
header = slurm_fp.readline()
fieldnames = header.split('|')
reader = csv.DictReader(slurm_fp,fieldnames=fieldnames,dialect="slurm")
writer = csv.DictWriter(sge_fp,fieldnames=ACCOUNTING_FIELDS,dialect="sge")
sge_row = {} # Create new dictionary for the output row.
line_num = 0
for slurm_row in reader:
sge_row.clear()
sge_row['qname'] = slurm_row['Partition']
sge_row['hostname'] = slurm_row['NodeList']
sge_row['group'] = slurm_row['Group']
sge_row['owner'] = slurm_row['User']
sge_row['job_name'] = slurm_row['JobName']
sge_row['job_number'] = slurm_row['JobIDRaw']
sge_row['account'] = slurm_row['Account']
sge_row['submission_time'] = calendar.timegm(time.strptime(slurm_row['Submit'],"%Y-%m-%dT%H:%M:%S"))
sge_row['start_time'] = calendar.timegm(time.strptime(slurm_row['Start'],"%Y-%m-%dT%H:%M:%S"))
sge_row['end_time'] = calendar.timegm(time.strptime(slurm_row['End'],"%Y-%m-%dT%H:%M:%S"))
sge_row['failed'] = 0 # TODO: convert Slurm states to SGE failed states
(return_value, signal) = slurm_row['ExitCode'].split(':')
        if int(signal) == 0:  # split() yields strings, so compare numerically
sge_row['exit_status'] = int(return_value)
else:
sge_row['exit_status'] = 128 + int(signal)
elapsed_seconds = slurm_time_to_seconds(slurm_row['Elapsed'])
elapsed_raw_seconds = int(slurm_row['ElapsedRaw'])
if elapsed_seconds != elapsed_raw_seconds:
print("Elapsed string of %s does not equal ElapsedRaw value of %d." % (slurm_row['Elapsed'], elapsed_raw_seconds), file=sys.stderr)
sge_row['ru_wallclock'] = elapsed_seconds
sge_row['project'] = slurm_row['WCKey']
sge_row['department'] = "NoDept"
sge_row['granted_pe'] = "NoPE"
sge_row['slots'] = slurm_row['NCPUS']
sge_row['cpu'] = slurm_row['TotalCPU']
        sge_row['io'] = 0  # start from zero so the '+=' below never hits a missing key
        if slurm_row['MaxDiskRead'] != '':
            sge_row['io'] = int(slurm_row['MaxDiskRead'])
        if slurm_row['MaxDiskWrite'] != '':
            sge_row['io'] += int(slurm_row['MaxDiskWrite'])
if slurm_row['ReqGRES'] == '':
sge_row['category'] = slurm_row['ReqTRES']
elif slurm_row['ReqTRES'] == '':
sge_row['category'] = slurm_row['ReqGRES']
else:
sge_row['category'] = "%s;%s" % (slurm_row['ReqTRES'], slurm_row['ReqGRES'])
sge_row['max_vmem'] = slurm_row['MaxVMSize']
# Output row to SGE file.
writer.writerow(sge_row)
line_num += 1
if line_num % 10000 == 0:
sys.stderr.write('.')
sys.stderr.flush()
print(file=sys.stderr)
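# Example invocation (illustrative; per the header comment the input comes from
# "sacct --format=ALL" -- add -P/--parsable so rows are '|'-delimited as this script expects):
#   sacct --format=ALL -P | ./convert_Slurm_to_SGE.py --sge_accounting_file accounting.sge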
#=====
#
# SCRIPT BODY
#
#=====
parser = argparse.ArgumentParser()
parser.add_argument("--slurm_accounting_file",
default=None,
help='The Slurm accounting file to read [default = stdin]')
parser.add_argument("--sge_accounting_file",
default=None,
help='The SGE accounting file to output to [default = stdout]')
parser.add_argument("-r", "--billing_root",
default=None,
help='The Billing Root directory [default = None]')
parser.add_argument("-v", "--verbose", action="store_true",
default=False,
help='Get real chatty [default = false]')
args = parser.parse_args()
#
# Process arguments.
#
# Override billing_root with switch args, if present.
if args.billing_root is not None:
billing_root = args.billing_root
else:
# Use the current directory.
billing_root = os.getcwd()
# Get absolute path for billing_root directory.
billing_root = os.path.abspath(billing_root)
# Use switch arg for accounting_file if present, else use file in BillingRoot.
if args.slurm_accounting_file is not None:
slurm_accounting_file = os.path.abspath(args.slurm_accounting_file)
else:
slurm_accounting_file = "STDIN"
if args.sge_accounting_file is not None:
sge_accounting_file = os.path.abspath(args.sge_accounting_file)
else:
sge_accounting_file = "STDOUT"
#
# Output the state of arguments.
#
print("Slurm --> SGE Conversion arguments:", file=sys.stderr)
print(" Slurm accounting file: %s" % slurm_accounting_file, file=sys.stderr)
print(" SGE accounting file: %s" % sge_accounting_file, file=sys.stderr)
#
# Open the two files
#
if slurm_accounting_file == "STDIN":
slurm_accounting_fp = sys.stdin
else:
slurm_accounting_fp = open(slurm_accounting_file, "r")
if sge_accounting_file == "STDOUT":
sge_accounting_fp = sys.stdout
else:
sge_accounting_fp = open(sge_accounting_file, "w")
convert_slurm_file_to_sge_file(slurm_accounting_fp, sge_accounting_fp)
print("Conversion complete!", file=sys.stderr)
|
[
"[email protected]"
] | |
8622b2d94f0c53415695d91e3a9d2910a617db2e
|
6b0b220ed3d5a2f0d71f5bd4a4eba459f6e0b54d
|
/CycleGan/CycleGAN/Unetmodel.py
|
b0ba2e592a75e4ce3e14ec66bc91cf4e2493f35f
|
[] |
no_license
|
menglaili/ECE285Project
|
969b19a9577a4bc46c2491fcfd02bc3016099295
|
0b4739516c6424125b40b5907339b0e013936e5c
|
refs/heads/master
| 2020-06-02T08:16:55.997425 | 2019-07-10T20:25:48 | 2019-07-10T20:25:48 | 191,095,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,984 |
py
|
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
# Initialize the MSELoss function
criterion1 = nn.MSELoss()
# Initialize the L1Loss function
criterion2 = nn.L1Loss()
real_label = Variable(torch.cuda.FloatTensor(1).fill_(1.0), requires_grad = False)
fake_label = Variable(torch.cuda.FloatTensor(1).fill_(0.0), requires_grad = False)
def cal_loss_Gan(D, real, fake):
'''
input:
D--Discriminator
real--X from X domain or Y from Y domain
fake--F(Y) generated by using Y from Y domain or G(X) generated by using X from X domain
'''
pred_real = D(real)
pred_fake = D(fake.detach())
loss_D_real = criterion1(pred_real, real_label)
loss_D_fake = criterion1(pred_fake, fake_label)
loss_D = 0.5 * (loss_D_fake + loss_D_real)
return loss_D
def cal_loss_Cycle(net, real, fake):
'''
input:
net:
    G--Generator which generates images from X domain to Y domain
    or F--Generator which generates images from Y domain to X domain
real--X from X domain or Y from Y domain
fake--F(Y) generated by using Y from Y domain or G(X) generated by using X from X domain
return: Cycle loss
'''
loss_Cycle = criterion2(real, net(fake))
return loss_Cycle
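# Hypothetical helper (not part of the original file): a sketch of how the two
# losses above are typically combined for one generator update in CycleGAN.
# D_Y, F, real_X, fake_Y and lambda_cyc are assumptions supplied by the caller.
def cal_loss_G_total(D_Y, F, real_X, fake_Y, lambda_cyc=10.0):
    pred_fake = D_Y(fake_Y)                       # no detach: gradients must flow back to G
    loss_gan = criterion1(pred_fake, real_label)  # LSGAN adversarial term
    loss_cyc = cal_loss_Cycle(F, real_X, fake_Y)  # forward cycle X -> G(X) -> F(G(X)) ~ X
    return loss_gan + lambda_cyc * loss_cyc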
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
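# Typical usage (illustrative): net = Discriminator(3); net.apply(weights_init)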
class Discriminator(nn.Module):
def __init__(self,input_nc):
super(Discriminator, self).__init__()
        self.conv1 = nn.Conv2d(input_nc, 16, kernel_size=3, stride=1, padding=1)  # use the input_nc argument instead of hard-coding 3 channels
self.conv2 = nn.Conv2d(16, 64, kernel_size=3, stride=1,padding=1)
self.conv3 = nn.Conv2d(64, 256, kernel_size=3, stride=1,padding=1)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1,padding=1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(256 * 56 * 56, 100)
self.fc2 = nn.Linear(100, 1)
self.features = nn.Sequential(self.conv1, self.relu, self.conv2,self.relu, self.maxpool,
self.conv3,self.relu, self.conv4,self.relu, self.maxpool )
self.determine = nn.Sequential(self.fc1,self.relu,self.fc2)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.determine(x)
return x
class Generator(nn.Module):
def __init__(self, D, C=64): # D has to be even
super(Generator, self).__init__()
        self.D = int((D - 2)/2)  # the repeated part: the layers paired with pooling/unpooling
# conv
self.conv = nn.ModuleList()
self.conv.append(nn.Conv2d(3, C, 3, padding = (1,1)))
for k in range(self.D):
dilation = 2**(k)
self.conv.append(nn.Conv2d(C, C, 3, padding = dilation, dilation = dilation))
        k += 1  # needed: without it the decoder dilations below turn fractional (2**-1), which Conv2d rejects
dilation = 2**(self.D + 1)
self.conv.append(nn.Conv2d(C, C, 3, padding = dilation, dilation = dilation))
self.conv.append(nn.Conv2d(C, C, 3, padding = dilation, dilation = dilation))
for l in range(self.D):
dilation = 2**(k - (l+1))
self.conv.append(nn.Conv2d(C, C, 3, padding = dilation, dilation = dilation))
self.conv.append(nn.Conv2d(C, 3, 3, padding = (1,1)))
# initialize of conv
        for i in range(D+1): # initialize every conv that feeds a ReLU; the final conv (no ReLU) keeps the default init
nn.init.kaiming_normal_(self.conv[i].weight.data, mode='fan_in', nonlinearity='relu')
# bn
self.bn = nn.ModuleList()
for k in range(D):
            self.bn.append(nn.BatchNorm2d(C))  # BatchNorm2d takes num_features once; passing C twice set eps=C by mistake
# initialize of bn
for k in range(D):
nn.init.constant_(self.bn[k].weight.data, 1.25 * np.sqrt(C))
self.relu = nn.ReLU(inplace = True)
def forward(self, x):
D = int(self.D * 2 + 2)
h = self.relu(self.conv[0](x))
# pooling
feature = []
feature.append(h)
torch.backends.cudnn.benchmark=True
for j in range(self.D):
h = self.relu(self.bn[j](self.conv[j+1](h)))
if j != self.D-1:
feature.append(h)
torch.backends.cudnn.benchmark=False
# reverse feature
feature.reverse()
torch.backends.cudnn.benchmark=True
h1 = self.relu(self.bn[j+1](self.conv[j+2](h)))
h2 = self.relu(self.bn[j+2](self.conv[j+3](h1)))
h = (h + h2)/(2**0.5)
# unpooling
for l in range(self.D):
h = self.relu(self.bn[l+j+3](self.conv[l+j+4](h)))
h = (h + feature[l])/(2**0.5)
torch.backends.cudnn.benchmark=False
y = self.conv[D+1](h) + x
return y
|
[
"[email protected]"
] | |
5f3f39608a38d86ff22999affdb2aa8d25fb22ae
|
e3eead40e93fdf5186269536edefab4f08e9a5a2
|
/LeetCode/75-sort_colors.py
|
f9126b00d4c74a0e97e76d064217b730e50cc3d7
|
[] |
no_license
|
davll/practical-algorithms
|
bbc930b42363cae00ce39e8a686854c19131d334
|
0e35e4cc87bd41144b8e34302aafe776fec1b356
|
refs/heads/master
| 2021-08-22T13:12:34.555074 | 2020-03-28T08:56:13 | 2020-03-28T08:56:13 | 147,224,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 883 |
py
|
# https://leetcode.com/problems/sort-colors/
def sort_colours_v1(nums):
count = [0] * 3
for x in nums:
count[x] += 1
print("count = " + str(count))
start = 0
for k in range(3):
for i in range(count[k]):
nums[i+start] = k
start += count[k]
def sort_colours_v2(nums):
n = len(nums)
if n < 2:
return
i, tail0, head2 = 0, 0, n-1
while i <= head2:
if nums[i] == 0:
nums[i], nums[tail0] = nums[tail0], 0
tail0 += 1
i += 1
elif nums[i] == 2:
nums[i], nums[head2] = nums[head2], 2
head2 -= 1
else:
i += 1
class Solution:
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
sort_colours_v2(nums)
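# Minimal usage sketch (added; not part of the original solution file):
# both variants sort a list of 0/1/2 values in place.
if __name__ == "__main__":
    colours = [2, 0, 2, 1, 1, 0]
    sort_colours_v2(colours)
    print(colours)  # expected: [0, 0, 1, 1, 2, 2]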
|
[
"[email protected]"
] | |
bd4114cc6c10a32f930cd922e6d54539cf293efd
|
3d8825018662bc69b37933cc1023d6e21419bb3e
|
/plotUSA_GDP_and_GNI_final.py
|
f148955ac4dfe7aaa5254dd86209fd4d903f4129
|
[
"MIT"
] |
permissive
|
cjekel/USA_GDP_per_capita_inflation_adjust
|
cd743971b2de0880f9c43c3131d047f10a41c673
|
3cd62dcfa05eb1fe4383f0b15d8511f042f92039
|
refs/heads/master
| 2023-09-03T03:44:02.933026 | 2016-06-01T09:43:07 | 2016-06-01T09:43:07 | 59,950,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,603 |
py
|
import numpy as np
import matplotlib.pyplot as plt
# close all figures
plt.close('all')
years = np.array([1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGDP = np.array([543300000000.,563300000000.,605100000000.,638600000000.,685800000000.,743700000000.,815000000000.,861700000000.,942500000000.,1019900000000.,1075884000000.,1167770000000.,1282449000000.,1428549000000.,1548825000000.,1688923000000.,1877587000000.,2085951000000.,2356571000000.,2632143000000.,2862505000000.,3210956000000.,3344991000000.,3638137000000.,4040693000000.,4346734000000.,4590155000000.,4870217000000.,5252629000000.,5657693000000.,5979589000000.,6174043000000.,6539299000000.,6878718000000.,7308755000000.,7664060000000.,8100201000000.,8608515000000.,9089168000000.,9660624000000.,10284779000000.,10621824000000.,10977514000000.,11510670000000.,12274928000000.,13093726000000.,13855888000000.,14477635000000.,14718582000000.,14418739000000.,14964372000000.,15517926000000.,16163158000000.,16768053000000.,17419000000000.])
# GDP data from the worldbank http://data.worldbank.org/indicator/NY.GDP.MKTP.CD/countries/US?display=graph
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI = np.array([29.6, 29.9, 30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years, usaGDP)
plt.xlabel('Year')
plt.ylabel('GDP in Current USD')
plt.grid(True)
plt.show()
# Adjust GDP for 1960 USD
usaGDP1960 = usaGDP / (usaCPI / usaCPI[0])
plt.figure()
plt.plot(years, usaGDP1960)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# Adjust GDP for 2014 USD
usaGDP2014 = usaGDP / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDP2014)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([180671000,183691000,186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGDPpercapita = usaGDP / usaPop
plt.figure()
plt.plot(years, usaGDPpercapita)
plt.xlabel('Year')
plt.ylabel('GDP per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 1960s numbers
usaGDPpercapita1960 = usaGDPpercapita / (usaCPI / usaCPI[0])
plt.figure()
plt.plot(years, usaGDPpercapita1960)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014 = usaGDPpercapita / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# define a function to adjust the CPI based on an over- or under-estimation
# of the inflation rate, where rate is the multiplicative yearly error:
# e.g. a 5% overestimate each year would be entered as 1.05
def adjustCPI(cpi, rate):
    # compound the assumed error: element i is scaled by rate**i; return a
    # numpy array so the element-wise divisions below work
    demo = []
    for i, j in enumerate(cpi):
        demo.append(j * (rate**i))
    return np.array(demo)
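# Worked example (added for clarity): with a hypothetical CPI series
# [100.0, 102.0] and rate=1.01, adjustCPI returns
# [100.0 * 1.01**0, 102.0 * 1.01**1] == [100.0, 103.02],
# i.e. the assumed estimation error compounds year over year.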
# what if we underestimated inflation?
cpiOverFive = adjustCPI(usaCPI, 1.005)
# what if we overestimated inflation?
cpiUnderFive = adjustCPI(usaCPI, 0.995)
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014OverFive = usaGDPpercapita / (cpiOverFive / cpiOverFive[-1])
usaGDPpercapita2014UnderFive = usaGDPpercapita / (cpiUnderFive / cpiUnderFive[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014, label='normal')
plt.plot(years, usaGDPpercapita2014OverFive, label='under')
plt.plot(years, usaGDPpercapita2014UnderFive, label='over')
plt.legend()
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
years2 = np.array([1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGNI = np.array([612178550047.646,646233886826.65,692328219512.945,753294530375.941,824183577234.192,868295290971.962,952033980993.251,1027990251284.03,1098553055567.61,1183038457083.86,1320921418184.74,1548458249174.67,1711839855738.22,1842214711486.27,1958767403397.59,2117456144199.84,2401109359261.26,2751769589536.9,3048093901726.34,3303883972259.98,3297652203866.24,3411202239818.87,3828479505092.12,4164905103485.73,4601500378186.56,5200354088055.45,5765196251790.1,5888830786924.1,6029529322891.06,6164277951121.71,6612706041742.15,6883086506452.91,7302781827892.38,7760854970064.45,8184808773787.28,8558708987900.82,8869581532268.98,9425292191447.05,10178500697503.7,10498594829042.2,10776200783181,11589035965657.3,12790914724399.8,13693955258225.3,14345564947204.5,14651211130474,15002428215985,14740580035992.9,15143137264678.1,15727290871234.6,16501015978642.4,17001290051112.6,17611490812741.3])
# GNI data atlas method from the worldbank http://databank.worldbank.org/data/reports.aspx?source=2&country=USA&series=&period=#
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI2 = np.array([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years2, usaGNI)
plt.xlabel('Year')
plt.ylabel('GNI in Current USD')
plt.grid(True)
plt.show()
# Adjust GNI for 1962 USD
usaGNI1962 = usaGNI / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNI1962)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# Adjust GNI for 2014 USD
usaGNI2014 = usaGNI / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNI2014)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGNIpercapita = usaGNI / usaPop
plt.figure()
plt.plot(years2, usaGNIpercapita)
plt.xlabel('Year')
plt.ylabel('GNI per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 1962s numbers
usaGNIpercapita1962 = usaGNIpercapita / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNIpercapita1962)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 2014s numbers
usaGNIpercapita2014 = usaGNIpercapita / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNIpercapita2014)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# close all figs
plt.close('all')
# save the final plots
# plot of the GDP and GNI in current USD
plt.figure()
plt.plot(years, usaGDP / 1.e12, '-k', label='GDP')
plt.plot(years2, usaGNI / 1.e12, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('Trillion USD')
plt.legend(loc=4)
plt.grid(True)
# save before show(): once the figure window is closed, savefig would write an empty image
plt.savefig('images/usaGDPandGNI.png')
plt.show()
# plot of GDP and GNI per capita in current USD
plt.figure()
plt.plot(years, usaGDPpercapita, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita')
plt.legend(loc=4)
plt.grid(True)
plt.savefig('images/usaGDPandGNI_perCapita.png')
plt.show()
# plot of GDP and GNI per capita in 2014 USD
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita2014, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita adjusted for inflation to 2014 levels')
plt.legend(loc=4)
plt.grid(True)
plt.savefig('images/usaGDPandGNI_perCapita_2014.png')
plt.show()
# plot of GDP at 0.5, 1, and 2 perecent estimations
# what if CPI has underestimated inflation?
cpiUnderHalf = adjustCPI(usaCPI, 1.005)
cpiUnderOne = adjustCPI(usaCPI, 1.01)
cpiUnderTwo = adjustCPI(usaCPI, 1.02)
# what if CPI has overestimated inflation?
cpiOverHalf = adjustCPI(usaCPI, 0.995)
cpiOverOne = adjustCPI(usaCPI, 0.99)
cpiOverTwo = adjustCPI(usaCPI, 0.98)
# recalculate GDP basedd on the CPI values
usaGDPpercapita2014UnderHalf = usaGDPpercapita / (cpiUnderHalf / cpiUnderHalf[-1])
usaGDPpercapita2014UnderOne = usaGDPpercapita / (cpiUnderOne / cpiUnderOne[-1])
usaGDPpercapita2014UnderTwo = usaGDPpercapita / (cpiUnderTwo / cpiUnderTwo[-1])
usaGDPpercapita2014OverHalf = usaGDPpercapita / (cpiOverHalf / cpiOverHalf[-1])
usaGDPpercapita2014OverOne = usaGDPpercapita / (cpiOverOne / cpiOverOne[-1])
usaGDPpercapita2014OverTwo = usaGDPpercapita / (cpiOverTwo / cpiOverTwo[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderHalf, '--k', label='CPI each year adjusted +0.5%')
plt.plot(years, usaGDPpercapita2014OverHalf, '-.k', label='CPI each year adjusted -0.5%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.savefig('images/usaGDPandGNI_perCapita_2014_half.png')
plt.show()
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderOne, '--k', label='CPI each year adjusted +1.0%')
plt.plot(years, usaGDPpercapita2014OverOne, '-.k', label='CPI each year adjusted -1.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.savefig('images/usaGDPandGNI_perCapita_2014_one.png')
plt.show()
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderTwo, '--k', label='CPI each year adjusted +2.0%')
plt.plot(years, usaGDPpercapita2014OverTwo, '-.k', label='CPI each year adjusted -2.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.savefig('images/usaGDPandGNI_perCapita_2014_two.png')
plt.show()
|
[
"[email protected]"
] | |
e2748938ae4258aea4ad451b34fe2be73dc984c4
|
2d130df7050b72b8f1279f6c58c926d448ff71d6
|
/sources/Settings.py
|
407bf7dc42dc8db4ca5606a5158f9711559783a5
|
[
"Apache-2.0"
] |
permissive
|
rsoultan/Pygame-shoot-them-up
|
32b7ae27bd1289aecbd3fc89feb0e840b2da892f
|
0ae41522253b7405e6d00a8c4094de7480846535
|
refs/heads/main
| 2023-06-19T22:09:21.685512 | 2021-07-18T16:56:25 | 2021-07-18T16:56:25 | 380,590,101 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 103 |
py
|
SETTINGS = {
"FPS": 60,
"WIDTH": 1280,
"HEIGHT": 720,
"SOUNDS_VOLUME": 1.0,
"MUSICS_VOLUME": 0.1
}
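# Minimal usage sketch (added; the import path assumes this repo's layout and
# a pygame screen/clock created elsewhere):
# from sources.Settings import SETTINGS
# screen = pygame.display.set_mode((SETTINGS["WIDTH"], SETTINGS["HEIGHT"]))
# clock.tick(SETTINGS["FPS"])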
|
[
"[email protected]"
] | |
a9b584a8ba68238e02f63cf1a91fe4c643084f2f
|
9eaf34eff324b39bf644bba527b6dced125f09e4
|
/model.py
|
6a8c53ffcf20e3fab0ffc6882944f7663941e6af
|
[] |
no_license
|
sirrobot01/bet9ja-pred
|
92d88eb76750a2244e563164e9d5e05a81006561
|
864a04e7da0e3af56d06c442031bf18b00db16bb
|
refs/heads/master
| 2023-04-09T15:01:29.462267 | 2023-03-16T12:45:03 | 2023-03-16T12:45:03 | 199,934,614 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,867 |
py
|
#IMPORTING LIBRARIES
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import classification_report
import pandas as pd
#############################################################################
#PREPARING DATA
data = np.loadtxt('data/data.csv', delimiter=',')
X, y = data[:,:-1], data[:, -1]
y = np.array(y)
X = np.array(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.25, random_state=0)
###########################################################################
#MODELS
models = {
'knn': KNeighborsClassifier(n_neighbors=10),
'naive': GaussianNB(),
'logre': LogisticRegression(C=1e5,solver='lbfgs', multi_class='auto'),
'linreg': LinearRegression(),
'svm': SVC(kernel='linear', gamma='auto', C=0.9),
'tree': DecisionTreeClassifier(max_leaf_nodes=3, random_state=0),
'random': RandomForestClassifier(n_estimators=100),
'mlp': MLPClassifier(),
'lda' : LinearDiscriminantAnalysis()
}
###########################################
#TRAINING SESSION
for md in models:
    model = models[md]
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)  # predict from the test features, not the labels
    if md == 'linreg':
        # LinearRegression yields continuous outputs; round to the nearest
        # class so classification_report accepts them
        y_pred = np.rint(y_pred)
    print(classification_report(y_test, y_pred))
'''print('[**] Using {} model'.format(md))
print('Train Accuracy: {:.2f}'.format(model.score(X_train, y_train) * 100.0))
print('Test Accuracy: {:.2f}'.format(model.score(X_test, y_test) * 100.0))
print('#############################')'''
|
[
"[email protected]"
] | |
cc5cc35dcac029fe60bef041be4dc9b131b9c2ef
|
cbdb6b9d3fda3fddfbb9a539dceb55fadf4b499a
|
/membership-finder.py
|
6641f7c54b0a930c3b7a9eae8625bffbf07dd52d
|
[] |
no_license
|
Roadmaster/trello-board-maker
|
548be267e3469d7b6b2a4c072a2d64afcf70825e
|
424b85d68a2ed7d43f3d3269dd6c930d065ea5f3
|
refs/heads/master
| 2020-11-26T18:26:24.873854 | 2015-06-04T17:02:02 | 2015-06-04T17:02:02 | 25,215,537 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,504 |
py
|
#!/usr/bin/python
# Copyright 2015 Canonical Ltd.
# Written by:
# Daniel Manrique <[email protected]>
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
# This utility tells you which members of a given organization in Trello
# do NOT have a membership in the Launchpad.net ~canonical group.
import argparse
from trello import *
import pprint
import sys
from launchpadlib.launchpad import Launchpad
# Needed:
# - trello API key and security token (see trello)
# - Name of the organization in trello
parser = argparse.ArgumentParser(
description="Identify members of the given organization which"
" are not in the given launchpad group",
epilog="To obtain the API key and security token, see notes.rst")
parser.add_argument("key", type=str, help='Trello API key')
parser.add_argument("token", type=str, help='Trello API security token')
parser.add_argument("--org", type=str,
help='Name of the organization as known by Trello.',
default='canonical')
parser.add_argument("--team", type=str,
help='Name of the team as known by launchpad.',
default='canonical')
args = parser.parse_args()
t = Trello(args.key, args.token)
lp = Launchpad.login_with("trello-team-checker", "production")
try:
org = t.get_organization(args.org)
except TrelloApiError as e:
print("Unable to read/write Trello data. Maybe the credentials"
" are expired? Error was: {}".format(e))
raise SystemExit
for member in org.members:
in_team = False
fullname = member._data['fullName']
lp_members = lp.people.findPerson(text=fullname)
if not lp_members:
print(u" not found in launchpad?".format(fullname))
else:
for lp_member in lp_members:
for membership in lp_member.memberships_details:
if membership.team.name == args.team:
in_team = True
print(u"{} in {}: {}".format(fullname, args.team, in_team))
|
[
"[email protected]"
] | |
6f0343847638fe6bb9b93a9dc9fb5e1ece86c95a
|
9d5514e50e5229abbe18e35779c68088e617cf94
|
/rinna_ja_gpt2/interact.py
|
d7139a2ffd01d7bac3559cf0875ab0b6798d67c8
|
[] |
no_license
|
ftnext/practice-dl-nlp
|
a1e40c32fed7754a1ea1208a1c6d87386b384f0e
|
2609097db5690c3e0b3fb3ea89ea4dbe8f52cc2c
|
refs/heads/master
| 2023-08-22T05:48:45.768945 | 2023-08-12T12:52:56 | 2023-08-12T12:52:56 | 231,755,178 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,795 |
py
|
import random
import numpy as np
import torch
from transformers import AutoModelForCausalLM, T5Tokenizer
def freeze_seed(seed=42):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
model_name = "rinna/japanese-gpt2-medium"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
length = 100
temperature = 1.0
k = 0
p = 0.9
repetition_penalty = 1.0
num_return_sequences = 3
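# Note (added for clarity): in transformers' generate(), top_k=0 disables
# top-k filtering, so sampling here is governed by nucleus (top-p) sampling
# with p=0.9; temperature=1.0 and repetition_penalty=1.0 leave the model's
# distribution unchanged.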
while True:
input_text = input("日本語テキストを入力してください(qで終了): ")
input_text = input_text.rstrip()
if not input_text:
continue
if input_text.lower() == "q":
break
freeze_seed()
input_ids = tokenizer.encode(input_text, return_tensors="pt")
output_sequences = model.generate(
input_ids=input_ids,
max_length=length + len(input_text),
temperature=temperature,
top_k=k,
top_p=p,
repetition_penalty=repetition_penalty,
do_sample=True,
num_return_sequences=num_return_sequences,
)
generated_sequences = []
for idx, sequence in enumerate(output_sequences):
print(f"=== GENERATED SEQUENCE {idx + 1} ===")
sequence = sequence.tolist() # tensor -> list
text = tokenizer.decode(sequence, clean_up_tokenization_spaces=True)
        # strip the leading "{input_text}</s>" that decode() echoes back at the start of text?
total_sequence = (
input_text
+ text[
len(
tokenizer.decode(
input_ids[0], clean_up_tokenization_spaces=True
)
) :
]
)
print(total_sequence)
generated_sequences.append(total_sequence)
|
[
"[email protected]"
] | |
b5dc97e2850c52dccd210ba2894bed142eb5c1b9
|
dea198896f679e577a3fd0923e3fa4470da4b9cc
|
/journal/pyfakefs_mutants/AOR_BinOp_mutant_1507054997.py
|
d29db9215f379ca17584b73b34ae8921cc205ee6
|
[] |
no_license
|
naustarg/cbmcmutate
|
f138ab2b04b4be70d735de90815ac670ae6042ce
|
a6ee6fd395338bb2dfd6bdffabb2dc484cb303f1
|
refs/heads/master
| 2020-04-04T08:10:15.913309 | 2018-05-21T18:23:58 | 2018-05-21T18:23:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219,331 |
py
|
# line: 91
'A fake filesystem implementation for unit testing.\n\n:Includes:\n * FakeFile: Provides the appearance of a real file.\n * FakeDirectory: Provides the appearance of a real directory.\n * FakeFilesystem: Provides the appearance of a real directory hierarchy.\n * FakeOsModule: Uses FakeFilesystem to provide a fake os module replacement.\n * FakePathModule: Faked os.path module replacement.\n * FakeFileOpen: Faked file() and open() function replacements.\n\n:Usage:\n\n>>> from pyfakefs import fake_filesystem\n>>> filesystem = fake_filesystem.FakeFilesystem()\n>>> os_module = fake_filesystem.FakeOsModule(filesystem)\n>>> pathname = \'/a/new/dir/new-file\'\n\nCreate a new file object, creating parent directory objects as needed:\n\n>>> os_module.path.exists(pathname)\nFalse\n>>> new_file = filesystem.CreateFile(pathname)\n\nFile objects can\'t be overwritten:\n\n>>> os_module.path.exists(pathname)\nTrue\n>>> try:\n... filesystem.CreateFile(pathname)\n... except IOError as e:\n... assert e.errno == errno.EEXIST, \'unexpected errno: %d\' % e.errno\n... assert e.strerror == \'File already exists in fake filesystem\'\n\nRemove a file object:\n\n>>> filesystem.RemoveObject(pathname)\n>>> os_module.path.exists(pathname)\nFalse\n\nCreate a new file object at the previous path:\n\n>>> beatles_file = filesystem.CreateFile(pathname,\n... contents=\'Dear Prudence\\nWon\\\'t you come out to play?\\n\')\n>>> os_module.path.exists(pathname)\nTrue\n\nUse the FakeFileOpen class to read fake file objects:\n\n>>> file_module = fake_filesystem.FakeFileOpen(filesystem)\n>>> for line in file_module(pathname):\n... print(line.rstrip())\n...\nDear Prudence\nWon\'t you come out to play?\n\nFile objects cannot be treated like directory objects:\n\n>>> os_module.listdir(pathname) #doctest: +NORMALIZE_WHITESPACE\nTraceback (most recent call last):\n File "fake_filesystem.py", line 291, in listdir\n raise OSError(errno.ENOTDIR,\nOSError: [Errno 20] Fake os module: not a directory: \'/a/new/dir/new-file\'\n\nThe FakeOsModule can list fake directory objects:\n\n>>> os_module.listdir(os_module.path.dirname(pathname))\n[\'new-file\']\n\nThe FakeOsModule also supports stat operations:\n\n>>> import stat\n>>> stat.S_ISREG(os_module.stat(pathname).st_mode)\nTrue\n>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)\nTrue\n'
# line: 92
import codecs
# line: 93
import errno
# line: 94
import heapq
# line: 95
import io
# line: 96
import locale
# line: 97
import platform
# line: 98
import os
# line: 99
import sys
# line: 100
import time
# line: 101
import warnings
# line: 103
from collections import namedtuple
# line: 105
import stat
# line: 106
from copy import copy
# line: 108
__pychecker__ = 'no-reimportself'
# line: 110
__version__ = '3.3'
# line: 112
PERM_READ = 256
# line: 113
PERM_WRITE = 128
# line: 114
PERM_EXE = 64
# line: 115
PERM_DEF = 511
# line: 116
PERM_DEF_FILE = 438
# line: 117
PERM_ALL = 4095
# line: 119
_OpenModes = namedtuple('open_modes', 'must_exist can_read can_write truncate append must_not_exist')
# line: 125
_OPEN_MODE_MAP = {'r': (True, True, False, False, False, False), 'w': (False, False, True, True, False, False), 'a': (False, False, True, False, True, False), 'r+': (True, True, True, False, False, False), 'w+': (False, True, True, True, False, False), 'a+': (False, True, True, False, True, False), }
# line: 136
if ((sys.version_info[0] < 3) and (sys.platform != 'win32')):
# line: 137
_OPEN_MODE_MAP['rw'] = (True, True, True, False, False, False)
# line: 139
if (sys.version_info >= (3, 3)):
# line: 140
_OPEN_MODE_MAP['x'] = (False, False, True, False, False, True)
# line: 141
_OPEN_MODE_MAP['x+'] = (False, True, True, False, False, True)
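# Illustrative note (added; not part of the generated mutant source): each
# _OPEN_MODE_MAP entry maps an open() mode string to the _OpenModes flags,
# e.g. _OPEN_MODE_MAP['r+'] == (True, True, True, False, False, False) means
# the file must already exist and is opened for both reading and writing.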
# line: 143
if sys.platform.startswith('linux'):
# line: 146
_MAX_LINK_DEPTH = 40
else:
# line: 149
_MAX_LINK_DEPTH = 32
# line: 151
FAKE_PATH_MODULE_DEPRECATION = 'Do not instantiate a FakePathModule directly; let FakeOsModule instantiate it. See the FakeOsModule docstring for details.'
# line: 155
if (sys.platform == 'win32'):
# line: 157
OSError = WindowsError
# line: 160
class FakeLargeFileIoException(Exception):
# line: 163
'Exception thrown on unsupported operations for fake large files.\n Fake large files have a size with no real content.\n '
# line: 165
def __init__(self, file_path):
# line: 166
super(FakeLargeFileIoException, self).__init__(('Read and write operations not supported for fake large file: %s' % file_path))
# line: 171
def CopyModule(old):
# line: 172
'Recompiles and creates new module object.'
# line: 173
saved = sys.modules.pop(old.__name__, None)
# line: 174
new = __import__(old.__name__)
# line: 175
sys.modules[old.__name__] = saved
# line: 176
return new
# line: 179
class _FakeStatResult(object):
# line: 183
'Mimics os.stat_result for use as return type of `stat()` and similar.\n This is needed as `os.stat_result` has no possibility to set\n nanosecond times directly.\n '
# line: 184
long_type = (long if (sys.version_info < (3,)) else int)
# line: 186
def __init__(self, initial_time=None):
# line: 187
self.use_float = FakeOsModule.stat_float_times
# line: 188
self.st_mode = None
# line: 189
self.st_ino = None
# line: 190
self.st_dev = None
# line: 191
self.st_nlink = 0
# line: 192
self.st_uid = None
# line: 193
self.st_gid = None
# line: 194
self.st_size = None
# line: 195
if (initial_time is not None):
# line: 196
self._st_atime_ns = self.long_type((initial_time * 1000000000.0))
else:
# line: 198
self._st_atime_ns = None
# line: 199
self._st_mtime_ns = self._st_atime_ns
# line: 200
self._st_ctime_ns = self._st_atime_ns
# line: 202
def __eq__(self, other):
# line: 203
return (isinstance(other, _FakeStatResult) and (self._st_atime_ns == other._st_atime_ns) and (self._st_ctime_ns == other._st_ctime_ns) and (self._st_mtime_ns == other._st_mtime_ns) and (self.st_size == other.st_size) and (self.st_gid == other.st_gid) and (self.st_uid == other.st_uid) and (self.st_nlink == other.st_nlink) and (self.st_dev == other.st_dev) and (self.st_ino == other.st_ino) and (self.st_mode == other.st_mode))
# line: 217
def __ne__(self, other):
# line: 218
return (not (self == other))
# line: 220
def copy(self):
# line: 223
'Return a copy where the float usage is hard-coded to mimic the behavior\n of the real os.stat_result.\n '
# line: 224
use_float = self.use_float()
# line: 225
stat_result = copy(self)
# line: 226
stat_result.use_float = (lambda : use_float)
# line: 227
return stat_result
# line: 229
def set_from_stat_result(self, stat_result):
# line: 233
'Set values from a real os.stat_result.\n Note: values that are controlled by the fake filesystem are not set.\n This includes st_ino, st_dev and st_nlink.\n '
# line: 234
self.st_mode = stat_result.st_mode
# line: 235
self.st_uid = stat_result.st_uid
# line: 236
self.st_gid = stat_result.st_gid
# line: 237
self.st_size = stat_result.st_size
# line: 238
if (sys.version_info < (3, 3)):
# line: 239
self._st_atime_ns = self.long_type((stat_result.st_atime * 1000000000.0))
# line: 240
self._st_mtime_ns = self.long_type((stat_result.st_mtime * 1000000000.0))
# line: 241
self._st_ctime_ns = self.long_type((stat_result.st_ctime * 1000000000.0))
else:
# line: 243
self._st_atime_ns = stat_result.st_atime_ns
# line: 244
self._st_mtime_ns = stat_result.st_mtime_ns
# line: 245
self._st_ctime_ns = stat_result.st_ctime_ns
# line: 247
@property
# line: 247
def st_ctime(self):
# line: 249
'Return the creation time in seconds.'
# line: 250
ctime = (self._st_ctime_ns / 1000000000.0)
# line: 251
return (ctime if self.use_float() else int(ctime))
# line: 253
@property
# line: 253
def st_atime(self):
# line: 255
'Return the access time in seconds.'
# line: 256
atime = (self._st_atime_ns / 1000000000.0)
# line: 257
return (atime if self.use_float() else int(atime))
# line: 259
@property
# line: 259
def st_mtime(self):
# line: 261
'Return the modification time in seconds.'
# line: 262
mtime = (self._st_mtime_ns / 1000000000.0)
# line: 263
return (mtime if self.use_float() else int(mtime))
# line: 265
@st_ctime.setter
# line: 265
def st_ctime(self, val):
# line: 267
'Set the creation time in seconds.'
# line: 268
self._st_ctime_ns = self.long_type((val * 1000000000.0))
# line: 270
@st_atime.setter
# line: 270
def st_atime(self, val):
# line: 272
'Set the access time in seconds.'
# line: 273
self._st_atime_ns = self.long_type((val * 1000000000.0))
# line: 275
@st_mtime.setter
# line: 275
def st_mtime(self, val):
# line: 277
'Set the modification time in seconds.'
# line: 278
self._st_mtime_ns = self.long_type((val * 1000000000.0))
# line: 280
def __getitem__(self, item):
# line: 281
'Implement item access to mimic `os.stat_result` behavior.'
# line: 282
if (item == stat.ST_MODE):
# line: 283
return self.st_mode
# line: 284
if (item == stat.ST_INO):
# line: 285
return self.st_ino
# line: 286
if (item == stat.ST_DEV):
# line: 287
return self.st_dev
# line: 288
if (item == stat.ST_NLINK):
# line: 289
return self.st_nlink
# line: 290
if (item == stat.ST_UID):
# line: 291
return self.st_uid
# line: 292
if (item == stat.ST_GID):
# line: 293
return self.st_gid
# line: 294
if (item == stat.ST_SIZE):
# line: 295
return self.st_size
# line: 296
if (item == stat.ST_ATIME):
# line: 298
return int(self.st_atime)
# line: 299
if (item == stat.ST_MTIME):
# line: 300
return int(self.st_mtime)
# line: 301
if (item == stat.ST_CTIME):
# line: 302
return int(self.st_ctime)
# line: 304
if (sys.version_info >= (3, 3)):
# line: 306
@property
# line: 306
def st_atime_ns(self):
# line: 308
'Return the access time in nanoseconds.'
# line: 309
return self._st_atime_ns
# line: 311
@property
# line: 311
def st_mtime_ns(self):
# line: 313
'Return the modification time in nanoseconds.'
# line: 314
return self._st_mtime_ns
# line: 316
@property
# line: 316
def st_ctime_ns(self):
# line: 318
'Return the creation time in nanoseconds.'
# line: 319
return self._st_ctime_ns
# line: 321
@st_atime_ns.setter
# line: 321
def st_atime_ns(self, val):
# line: 323
'Set the access time in nanoseconds.'
# line: 324
self._st_atime_ns = val
# line: 326
@st_mtime_ns.setter
# line: 326
def st_mtime_ns(self, val):
# line: 328
'Set the modification time of the fake file in nanoseconds.'
# line: 329
self._st_mtime_ns = val
# line: 331
@st_ctime_ns.setter
# line: 331
def st_ctime_ns(self, val):
# line: 333
'Set the creation time of the fake file in nanoseconds.'
# line: 334
self._st_ctime_ns = val
# line: 337
class FakeFile(object):
# line: 353
"Provides the appearance of a real file.\n\n Attributes currently faked out:\n st_mode: user-specified, otherwise S_IFREG\n st_ctime: the time.time() timestamp of the file change time (updated\n each time a file's attributes is modified).\n st_atime: the time.time() timestamp when the file was last accessed.\n st_mtime: the time.time() timestamp when the file was last modified.\n st_size: the size of the file\n st_nlink: the number of hard links to the file\n st_ino: the inode number - a unique number identifying the file\n st_dev: a unique number identifying the (fake) file system device the file belongs to\n\n Other attributes needed by os.stat are assigned default value of None\n these include: st_uid, st_gid\n "
# line: 355
def __init__(self, name, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents=None, filesystem=None, encoding=None, errors=None):
# line: 371
'init.\n\n Args:\n name: name of the file/directory, without parent path information\n st_mode: the stat.S_IF* constant representing the file type (i.e.\n stat.S_IFREG, stat.S_IFDIR)\n contents: the contents of the filesystem object; should be a string or byte object for\n regular files, and a list of other FakeFile or FakeDirectory objects\n for FakeDirectory objects\n filesystem: the fake filesystem where the file is created.\n New in pyfakefs 2.9.\n encoding: if contents is a unicode string, the encoding used for serialization\n errors: the error mode used for encoding/decoding errors\n New in pyfakefs 3.2.\n '
# line: 372
self.name = name
# line: 373
self.stat_result = _FakeStatResult(time.time())
# line: 374
self.stat_result.st_mode = st_mode
# line: 375
self.encoding = encoding
# line: 376
self.errors = (errors or 'strict')
# line: 377
self._byte_contents = self._encode_contents(contents)
# line: 378
self.stat_result.st_size = (len(self._byte_contents) if (self._byte_contents is not None) else 0)
# line: 381
if (filesystem is None):
# line: 382
raise ValueError('filesystem shall not be None')
# line: 383
self.filesystem = filesystem
# line: 384
self.epoch = 0
# line: 385
self.parent_dir = None
# line: 387
@property
# line: 387
def byte_contents(self):
# line: 389
return self._byte_contents
# line: 391
@property
# line: 391
def contents(self):
# line: 393
'Return the contents as string with the original encoding.'
# line: 394
if ((sys.version_info >= (3, 0)) and isinstance(self.byte_contents, bytes)):
# line: 395
return self.byte_contents.decode((self.encoding or locale.getpreferredencoding(False)), errors=self.errors)
# line: 398
return self.byte_contents
# line: 400
def SetLargeFileSize(self, st_size):
# line: 413
"Sets the self.st_size attribute and replaces self.content with None.\n\n Provided specifically to simulate very large files without regards\n to their content (which wouldn't fit in memory).\n Note that read/write operations with such a file raise FakeLargeFileIoException.\n\n Args:\n st_size: (int) The desired file size\n\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space\n "
# line: 414
self._check_positive_int(st_size)
# line: 415
if self.st_size:
# line: 416
self.SetSize(0)
# line: 417
self.filesystem.ChangeDiskUsage(st_size, self.name, self.st_dev)
# line: 418
self.st_size = st_size
# line: 419
self._byte_contents = None
# line: 421
def _check_positive_int(self, size):
# line: 423
int_types = ((int, long) if (sys.version_info < (3, 0)) else int)
# line: 424
if ((not isinstance(size, int_types)) or (size < 0)):
# line: 425
raise IOError(errno.ENOSPC, ('Fake file object: size must be a non-negative integer, but is %s' % size), self.name)
# line: 429
def IsLargeFile(self):
# line: 430
'Return True if this file was initialized with size but no contents.'
# line: 431
return (self._byte_contents is None)
# line: 433
def _encode_contents(self, contents):
# line: 435
if ((sys.version_info >= (3, 0)) and isinstance(contents, str)):
# line: 436
contents = bytes(contents, (self.encoding or locale.getpreferredencoding(False)), self.errors)
elif ((sys.version_info < (3, 0)) and isinstance(contents, unicode)):
# line: 438
contents = contents.encode((self.encoding or locale.getpreferredencoding(False)), self.errors)
# line: 439
return contents
# line: 441
def _set_initial_contents(self, contents):
# line: 450
'Sets the file contents and size.\n Called internally after initial file creation.\n\n Args:\n contents: string, new content of file.\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space\n '
# line: 451
contents = self._encode_contents(contents)
# line: 452
st_size = len(contents)
# line: 454
if self._byte_contents:
# line: 455
self.SetSize(0)
# line: 456
current_size = (self.st_size or 0)
# line: 457
self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
# line: 458
self._byte_contents = contents
# line: 459
self.st_size = st_size
# line: 460
self.epoch += 1
# line: 462
def SetContents(self, contents, encoding=None):
# line: 475
'Sets the file contents and size and increases the modification time.\n\n Args:\n contents: (str, bytes, unicode) new content of file.\n encoding: (str) the encoding to be used for writing the contents\n if they are a unicode string.\n If not given, the locale preferred encoding is used.\n New in pyfakefs 2.9.\n\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space.\n '
# line: 476
self.encoding = encoding
# line: 477
self._set_initial_contents(contents)
# line: 478
current_time = time.time()
# line: 479
self.st_ctime = current_time
# line: 480
self.st_mtime = current_time
# line: 482
def GetSize(self):
# line: 485
'Returns the size in bytes of the file contents.\n New in pyfakefs 2.9.\n '
# line: 486
return self.st_size
# line: 488
def GetPath(self):
# line: 489
'Return the full path of the current object.'
# line: 490
names = []
# line: 491
obj = self
# line: 492
while obj:
# line: 493
names.insert(0, obj.name)
# line: 494
obj = obj.parent_dir
# line: 495
sep = self.filesystem._path_separator(self.name)
# line: 496
return self.filesystem.NormalizePath(sep.join(names[1:]))
# line: 498
def SetSize(self, st_size):
# line: 507
'Resizes file content, padding with nulls if new size exceeds the old.\n\n Args:\n st_size: The desired size for the file.\n\n Raises:\n IOError: if the st_size arg is not a non-negative integer\n or if st_size exceeds the available file system space\n '
# line: 509
self._check_positive_int(st_size)
# line: 510
current_size = (self.st_size or 0)
# line: 511
self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
# line: 512
if self._byte_contents:
# line: 513
if (st_size < current_size):
# line: 514
self._byte_contents = self._byte_contents[:st_size]
elif (sys.version_info < (3, 0)):
# line: 517
self._byte_contents = ('%s%s' % (self._byte_contents, ('\x00' * (st_size - current_size))))
else:
# line: 520
self._byte_contents += ('\x00' * (st_size - current_size))
# line: 521
self.st_size = st_size
# line: 522
self.epoch += 1
# line: 524
def SetATime(self, st_atime):
# line: 529
'Set the self.st_atime attribute.\n\n Args:\n st_atime: The desired access time.\n '
# line: 530
self.st_atime = st_atime
# line: 532
def SetMTime(self, st_mtime):
# line: 537
'Set the self.st_mtime attribute.\n\n Args:\n st_mtime: The desired modification time.\n '
# line: 538
self.st_mtime = st_mtime
# line: 540
def SetCTime(self, st_ctime):
# line: 546
'Set the self.st_ctime attribute.\n New in pyfakefs 3.0.\n\n Args:\n st_ctime: The desired creation time.\n '
# line: 547
self.st_ctime = st_ctime
# line: 549
def __getattr__(self, item):
# line: 550
'Forward some properties to stat_result.'
# line: 551
return getattr(self.stat_result, item)
# line: 553
def __setattr__(self, key, value):
# line: 554
'Forward some properties to stat_result.'
# line: 555
if (key in ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime', 'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns')):
# line: 558
return setattr(self.stat_result, key, value)
# line: 559
return super(FakeFile, self).__setattr__(key, value)
# line: 561
def __str__(self):
# line: 562
return ('%s(%o)' % (self.name, self.st_mode))
# line: 564
def SetIno(self, st_ino):
# line: 571
'Set the self.st_ino attribute.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should used with caution.\n\n Args:\n st_ino: (int) The desired inode.\n '
# line: 572
self.st_ino = st_ino
# line: 575
class FakeFileFromRealFile(FakeFile):
# line: 580
'Represents a fake file copied from the real file system.\n \n The contents of the file are read on demand only.\n New in pyfakefs 3.2.\n '
# line: 582
def __init__(self, file_path, filesystem, read_only=True):
# line: 593
'init.\n\n Args:\n file_path: path to the existing file.\n filesystem: the fake filesystem where the file is created.\n read_only: if set, the file is treated as read-only, e.g. a write access raises an exception;\n otherwise, writing to the file changes the fake file only as usually.\n\n Raises:\n OSError: if the file does not exist in the real file system.\n '
# line: 594
real_stat = os.stat(file_path)
# line: 596
super(FakeFileFromRealFile, self).__init__(name=os.path.basename(file_path), filesystem=filesystem)
# line: 598
self.stat_result.set_from_stat_result(real_stat)
# line: 599
if read_only:
# line: 600
self.st_mode &= 261924
# line: 601
self.file_path = file_path
# line: 602
self.contents_read = False
# line: 604
@property
# line: 604
def byte_contents(self):
# line: 606
if (not self.contents_read):
# line: 607
self.contents_read = True
# line: 608
with io.open(self.file_path, 'rb') as f:
# line: 609
self._byte_contents = f.read()
# line: 611
self.st_atime = os.stat(self.file_path).st_atime
# line: 612
return self._byte_contents
# line: 614
def IsLargeFile(self):
# line: 615
'The contents are never faked.'
# line: 616
return False
# line: 619
class FakeDirectory(FakeFile):
# line: 620
'Provides the appearance of a real directory.'
# line: 622
def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
# line: 629
'init.\n\n Args:\n name: name of the file/directory, without parent path information\n perm_bits: permission bits. defaults to 0o777.\n filesystem: if set, the fake filesystem where the directory is created\n '
# line: 630
FakeFile.__init__(self, name, (stat.S_IFDIR | perm_bits), {}, filesystem=filesystem)
# line: 632
self.st_nlink += 1
# line: 634
def SetContents(self, contents, encoding=None):
# line: 635
error_class = (OSError if self.filesystem.is_windows_fs else IOError)
# line: 636
raise error_class(errno.EISDIR, 'Trying to write to directory')
# line: 638
@property
# line: 638
def contents(self):
# line: 640
'Return the list of contained directory entries.'
# line: 641
return self.byte_contents
# line: 643
@property
# line: 643
def ordered_dirs(self):
# line: 645
'Return the list of contained directory entry names ordered by creation order.'
# line: 646
return [item[0] for item in sorted(self.byte_contents.items(), key=(lambda entry: entry[1].st_ino))]
# line: 649
def AddEntry(self, path_object):
# line: 658
'Adds a child FakeFile to this directory.\n\n Args:\n path_object: FakeFile instance to add as a child of this directory.\n\n Raises:\n OSError: if the directory has no write permission (Posix only)\n OSError: if the file or directory to be added already exists\n '
# line: 659
if ((not (self.st_mode & PERM_WRITE)) and (not self.filesystem.is_windows_fs)):
# line: 660
raise OSError(errno.EACCES, 'Permission Denied', self.GetPath())
# line: 662
if (path_object.name in self.contents):
# line: 663
raise OSError(errno.EEXIST, 'Object already exists in fake filesystem', self.GetPath())
# line: 667
self.contents[path_object.name] = path_object
# line: 668
path_object.parent_dir = self
# line: 669
self.st_nlink += 1
# line: 670
path_object.st_nlink += 1
# line: 671
path_object.st_dev = self.st_dev
# line: 672
if (path_object.st_nlink == 1):
# line: 673
self.filesystem.ChangeDiskUsage(path_object.GetSize(), path_object.name, self.st_dev)
# line: 675
def GetEntry(self, pathname_name):
# line: 686
'Retrieves the specified child file or directory entry.\n\n Args:\n pathname_name: basename of the child object to retrieve.\n\n Returns:\n fake file or directory object.\n\n Raises:\n KeyError: if no child exists by the specified name.\n '
# line: 687
return self.contents[pathname_name]
# line: 689
def RemoveEntry(self, pathname_name, recursive=True):
# line: 701
'Removes the specified child file or directory.\n\n Args:\n pathname_name: basename of the child object to remove.\n recursive: if True (default), the entries in contained directories are deleted first.\n Needed to propagate removal errors (e.g. permission problems) from contained entries.\n New in pyfakefs 2.9.\n\n Raises:\n KeyError: if no child exists by the specified name.\n OSError: if user lacks permission to delete the file, or (Windows only) the file is open.\n '
# line: 702
entry = self.contents[pathname_name]
# line: 703
if ((entry.st_mode & PERM_WRITE) == 0):
# line: 704
raise OSError(errno.EACCES, 'Trying to remove object without write permission', pathname_name)
# line: 706
if (self.filesystem.is_windows_fs and self.filesystem.HasOpenFile(entry)):
# line: 707
raise OSError(errno.EACCES, 'Trying to remove an open file', pathname_name)
# line: 708
if (recursive and isinstance(entry, FakeDirectory)):
# line: 709
while entry.contents:
# line: 710
entry.RemoveEntry(list(entry.contents)[0])
elif (entry.st_nlink == 1):
# line: 712
self.filesystem.ChangeDiskUsage((- entry.GetSize()), pathname_name, entry.st_dev)
# line: 714
self.st_nlink -= 1
# line: 715
entry.st_nlink -= 1
# line: 716
assert (entry.st_nlink >= 0)
# line: 718
del self.contents[pathname_name]
# line: 720
def GetSize(self):
# line: 723
'Return the total size of all files contained in this directory tree.\n New in pyfakefs 2.9.\n '
# line: 724
return sum([item[1].GetSize() for item in self.contents.items()])
# line: 726
def HasParentObject(self, dir_object):
# line: 728
'Return `True` if dir_object is a direct or indirect parent directory,\n or if both are the same object.'
# line: 729
obj = self
# line: 730
while obj:
# line: 731
if (obj == dir_object):
# line: 732
return True
# line: 733
obj = obj.parent_dir
# line: 734
return False
# line: 736
def __str__(self):
# line: 737
description = (super(FakeDirectory, self).__str__() + ':\n')
# line: 738
for item in self.contents:
# line: 739
item_desc = self.contents[item].__str__()
# line: 740
for line in item_desc.split('\n'):
# line: 741
if line:
# line: 742
description = (((description + ' ') + line) + '\n')
# line: 743
return description
# line: 746
class FakeDirectoryFromRealDirectory(FakeDirectory):
# line: 751
'Represents a fake directory copied from the real file system.\n \n The contents of the directory are read on demand only.\n New in pyfakefs 3.2.\n '
# line: 753
def __init__(self, dir_path, filesystem, read_only):
# line: 765
'init.\n\n Args:\n dir_path: full directory path\n filesystem: the fake filesystem where the directory is created\n read_only: if set, all files under the directory are treated as read-only,\n e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only as usually.\n \n Raises:\n OSError if the directory does not exist in the real file system\n '
# line: 766
real_stat = os.stat(dir_path)
# line: 767
super(FakeDirectoryFromRealDirectory, self).__init__(name=os.path.split(dir_path)[1], perm_bits=real_stat.st_mode, filesystem=filesystem)
# line: 772
self.st_ctime = real_stat.st_ctime
# line: 773
self.st_atime = real_stat.st_atime
# line: 774
self.st_mtime = real_stat.st_mtime
# line: 775
self.st_gid = real_stat.st_gid
# line: 776
self.st_uid = real_stat.st_uid
# line: 777
self.dir_path = dir_path
# line: 778
self.read_only = read_only
# line: 779
self.contents_read = False
# line: 781
@property
# line: 781
def contents(self):
# line: 783
'Return the list of contained directory entries, loading them if not already loaded.'
# line: 784
if (not self.contents_read):
# line: 785
self.contents_read = True
# line: 786
self.filesystem.add_real_paths([os.path.join(self.dir_path, entry) for entry in os.listdir(self.dir_path)], read_only=self.read_only)
# line: 789
return self.byte_contents
# line: 791
def GetSize(self):
# line: 793
if (not self.contents_read):
# line: 794
return 0
# line: 795
return super(FakeDirectoryFromRealDirectory, self).GetSize()
# line: 798
class FakeFilesystem(object):
# line: 809
'Provides the appearance of a real directory tree for unit testing.\n\n Attributes:\n path_separator: The path separator, corresponds to `os.path.sep`.\n alternative_path_separator: Corresponds to `os.path.altsep`.\n is_windows_fs: `True` in a Windows file system, `False` otherwise.\n is_case_sensitive: `True` if a case-sensitive file system is assumed.\n root: The root `FakeDirectory` entry of the file system.\n cwd: The current working directory path.\n umask: The umask used for newly created files, see `os.umask`.\n '
# line: 811
def __init__(self, path_separator=os.path.sep, total_size=None):
# line: 823
"init.\n\n Args:\n path_separator: optional substitute for os.path.sep\n total_size: if not None, the total size in bytes of the root filesystem.\n New in pyfakefs 2.9.\n\n Example usage to emulate real file systems:\n filesystem = FakeFilesystem(\n alt_path_separator='/' if _is_windows else None)\n\n "
# line: 824
self.path_separator = path_separator
# line: 825
self.alternative_path_separator = os.path.altsep
# line: 826
if (path_separator != os.sep):
# line: 827
self.alternative_path_separator = None
# line: 832
self.is_windows_fs = (sys.platform == 'win32')
# line: 836
self.is_case_sensitive = (sys.platform not in ['win32', 'cygwin', 'darwin'])
# line: 838
self.root = FakeDirectory(self.path_separator, filesystem=self)
# line: 839
self.cwd = self.root.name
# line: 841
self.umask = os.umask(18)
# line: 842
os.umask(self.umask)
# line: 845
self.open_files = []
# line: 847
self._free_fd_heap = []
# line: 849
self._last_ino = 0
# line: 850
self._last_dev = 0
# line: 851
self.mount_points = {}
# line: 852
self.AddMountPoint(self.root.name, total_size)
# line: 854
@staticmethod
# line: 854
def _matching_string(matched, string):
# line: 858
'Return the string as byte or unicode depending \n on the type of matched, assuming string is an ASCII string.\n '
# line: 859
if (string is None):
# line: 860
return string
# line: 861
if (sys.version_info < (3,)):
# line: 862
if isinstance(matched, unicode):
# line: 863
return unicode(string)
else:
# line: 865
return string
elif isinstance(matched, bytes):
# line: 868
return bytes(string, 'ascii')
else:
# line: 870
return string
# line: 872
def _path_separator(self, path):
# line: 873
'Return the path separator as the same type as path'
# line: 874
return self._matching_string(path, self.path_separator)
# line: 876
def _alternative_path_separator(self, path):
# line: 877
'Return the alternative path separator as the same type as path'
# line: 878
return self._matching_string(path, self.alternative_path_separator)
# line: 880
def _IsLinkSupported(self):
# line: 882
return ((not self.is_windows_fs) or (sys.version_info >= (3, 2)))
# line: 884
def AddMountPoint(self, path, total_size=None):
# line: 900
'Add a new mount point for a filesystem device.\n The mount point gets a new unique device number.\n New in pyfakefs 2.9.\n\n Args:\n path: The root path for the new mount path.\n\n total_size: The new total size of the added filesystem device\n in bytes. Defaults to infinite size.\n\n Returns:\n The newly created mount point dict.\n\n Raises:\n OSError: if trying to mount an existing mount point again.\n '
# line: 901
path = self.NormalizePath(path)
# line: 902
if (path in self.mount_points):
# line: 903
raise OSError(errno.EEXIST, 'Mount point cannot be added twice', path)
# line: 904
self._last_dev += 1
# line: 905
self.mount_points[path] = {'idev': self._last_dev, 'total_size': total_size, 'used_size': 0, }
# line: 909
        root_dir = (self.root if (path == self.root.name) else self.CreateDirectory(path))
        root_dir.st_dev = self._last_dev
        return self.mount_points[path]

    def _AutoMountDriveIfNeeded(self, path, force=False):
        if (self.is_windows_fs and (force or (not self._MountPointForPath(path)))):
            drive = self.SplitDrive(path)[0]
            if drive:
                return self.AddMountPoint(path=drive)

    def _MountPointForPath(self, path):

        def to_str(string):
            'Convert the str, unicode or byte object to a str using the default encoding.'
            if ((string is None) or isinstance(string, str)):
                return string
            if (sys.version_info < (3, 0)):
                return string.encode(locale.getpreferredencoding(False))
            else:
                return string.decode(locale.getpreferredencoding(False))

        path = self.NormalizePath(self.NormalizeCase(path))
        if (path in self.mount_points):
            return self.mount_points[path]
        mount_path = self._matching_string(path, '')
        drive = self.SplitDrive(path)[:1]
        for root_path in self.mount_points:
            root_path = self._matching_string(path, root_path)
            if (drive and (not root_path.startswith(drive))):
                continue
            if (path.startswith(root_path) and (len(root_path) > len(mount_path))):
                mount_path = root_path
        if mount_path:
            return self.mount_points[to_str(mount_path)]
        mount_point = self._AutoMountDriveIfNeeded(path, force=True)
        assert mount_point
        return mount_point

    def _MountPointForDevice(self, idev):
        for mount_point in self.mount_points.values():
            if (mount_point['idev'] == idev):
                return mount_point

    def GetDiskUsage(self, path=None):
        "Return the total, used and free disk space in bytes as named tuple,\n or placeholder values simulating unlimited space if not set.\n Note: This matches the return value of shutil.disk_usage().\n New in pyfakefs 2.9.\n\n Args:\n path: The disk space is returned for the file system device where\n path resides.\n Defaults to the root path (e.g. '/' on Unix systems).\n "
        DiskUsage = namedtuple('usage', 'total, used, free')
        if (path is None):
            mount_point = self.mount_points[self.root.name]
        else:
            mount_point = self._MountPointForPath(path)
        if (mount_point and (mount_point['total_size'] is not None)):
            return DiskUsage(mount_point['total_size'], mount_point['used_size'], (mount_point['total_size'] - mount_point['used_size']))
        return DiskUsage((((1024 * 1024) * 1024) * 1024), 0, (((1024 * 1024) * 1024) * 1024))

    def SetDiskUsage(self, total_size, path=None):
        "Changes the total size of the file system, preserving the used space.\n Example usage: set the size of an auto-mounted Windows drive.\n New in pyfakefs 2.9.\n\n Args:\n total_size: The new total size of the filesystem in bytes.\n\n path: The disk space is changed for the file system device where\n path resides.\n Defaults to the root path (e.g. '/' on Unix systems).\n\n Raises:\n IOError: if the new space is smaller than the used size.\n "
        if (path is None):
            path = self.root.name
        mount_point = self._MountPointForPath(path)
        if ((mount_point['total_size'] is not None) and (mount_point['used_size'] > total_size)):
            raise IOError(errno.ENOSPC, ('Fake file system: cannot change size to %r bytes - used space is larger' % total_size), path)
        mount_point['total_size'] = total_size
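
    # Editor's usage sketch (not part of the original module): assuming a
    # FakeFilesystem instance `fs`, disk accounting works roughly like this:
    #
    #     fs.SetDiskUsage(100)                    # cap the root device at 100 bytes
    #     fs.CreateFile('/foo/bar', st_size=50)   # charges 50 bytes to the device
    #     total, used, free = fs.GetDiskUsage()   # -> (100, 50, 50)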
# line: 996

    def ChangeDiskUsage(self, usage_change, file_path, st_dev):
        'Change the used disk space by the given amount.\n New in pyfakefs 2.9.\n\n Args:\n usage_change: Number of bytes added to the used space.\n If negative, the used space will be decreased.\n\n file_path: The path of the object needing the disk space.\n\n st_dev: The device ID for the respective file system.\n\n Raises:\n IOError: if usage_change exceeds the free file system space\n '
        mount_point = self._MountPointForDevice(st_dev)
        if mount_point:
            if (mount_point['total_size'] is not None):
                if ((mount_point['total_size'] - mount_point['used_size']) < usage_change):
                    raise IOError(errno.ENOSPC, ('Fake file system: disk is full, failed to add %r bytes' % usage_change), file_path)
            mount_point['used_size'] += usage_change

    def GetStat(self, entry_path, follow_symlinks=True):
        "Return the os.stat-like tuple for the FakeFile object of entry_path.\n New in pyfakefs 3.0.\n\n Args:\n entry_path: path to filesystem object to retrieve.\n follow_symlinks: if False and entry_path points to a symlink, the link itself is inspected\n instead of the linked object.\n\n Returns:\n the FakeStatResult object corresponding to entry_path.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
        try:
            file_object = self.ResolveObject(entry_path, follow_symlinks, allow_fd=True)
            return file_object.stat_result.copy()
        except IOError as io_error:
            raise OSError(io_error.errno, io_error.strerror, entry_path)

    def ChangeMode(self, path, mode, follow_symlinks=True):
        'Change the permissions of a file as encoded in integer mode.\n New in pyfakefs 3.0.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions.\n follow_symlinks: if False and entry_path points to a symlink, the link itself is affected\n instead of the linked object.\n '
        try:
            file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
        except IOError as io_error:
            if (io_error.errno == errno.ENOENT):
                raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
            raise
        file_object.st_mode = ((file_object.st_mode & (~ PERM_ALL)) | (mode & PERM_ALL))
        file_object.st_ctime = time.time()

    def UpdateTime(self, path, times=None, ns=None, follow_symlinks=True):
        'Change the access and modified times of a file.\n New in pyfakefs 3.0.\n\n Args:\n path: (str) Path to the file.\n times: 2-tuple of int or float numbers, of the form (atime, mtime) \n which is used to set the access and modified times in seconds. \n If None, both times are set to the current time.\n ns: 2-tuple of int numbers, of the form (atime, mtime) which is \n used to set the access and modified times in nanoseconds. \n If None, both times are set to the current time.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: If `False` and entry_path points to a symlink, \n the link itself is queried instead of the linked object. \n New in Python 3.3. New in pyfakefs 3.0.\n \n Raises:\n TypeError: If anything other than the expected types is \n specified in the passed `times` or `ns` tuple, \n or if the tuple length is not equal to 2.\n ValueError: If both times and ns are specified.\n '
        if ((times is not None) and (ns is not None)):
            raise ValueError("utime: you may specify either 'times' or 'ns' but not both")
        if ((times is not None) and (len(times) != 2)):
            raise TypeError("utime: 'times' must be either a tuple of two ints or None")
        if ((ns is not None) and (len(ns) != 2)):
            raise TypeError("utime: 'ns' must be a tuple of two ints")
        try:
            file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
        except IOError as io_error:
            if (io_error.errno == errno.ENOENT):
                raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
            raise
        if (times is not None):
            for file_time in times:
                if (not isinstance(file_time, (int, float))):
                    raise TypeError('atime and mtime must be numbers')
            file_object.st_atime = times[0]
            file_object.st_mtime = times[1]
        elif (ns is not None):
            for file_time in ns:
                if (not isinstance(file_time, int)):
                    raise TypeError('atime and mtime must be ints')
            file_object.st_atime_ns = ns[0]
            file_object.st_mtime_ns = ns[1]
        else:
            current_time = time.time()
            file_object.st_atime = current_time
            file_object.st_mtime = current_time
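
    # Editor's usage sketch (not part of the original module): UpdateTime backs
    # the fake os.utime(); `times` is (atime, mtime) in seconds:
    #
    #     fs.CreateFile('/foo/bar')
    #     fs.UpdateTime('/foo/bar', times=(1, 2))
    #     fs.GetStat('/foo/bar').st_mtime   # -> 2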
# line: 1121

    def SetIno(self, path, st_ino):
        "Set the self.st_ino attribute of file at 'path'.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should be used with caution.\n\n Args:\n path: Path to file.\n st_ino: The desired inode.\n "
        self.GetObject(path).SetIno(st_ino)

    def AddOpenFile(self, file_obj):
        'Add file_obj to the list of open files on the filesystem.\n\n The position in the self.open_files array is the file descriptor number.\n\n Args:\n file_obj: file object to be added to open files list.\n\n Returns:\n File descriptor number for the file object.\n '
        if self._free_fd_heap:
            open_fd = heapq.heappop(self._free_fd_heap)
            self.open_files[open_fd] = file_obj
            return open_fd
        self.open_files.append(file_obj)
        return (len(self.open_files) - 1)

    def CloseOpenFile(self, file_des):
        'Remove file object with given descriptor from the list of open files.\n\n Sets the entry in open_files to None.\n\n Args:\n file_des: descriptor of file object to be removed from open files list.\n '
        self.open_files[file_des] = None
        heapq.heappush(self._free_fd_heap, file_des)

    def GetOpenFile(self, file_des):
        'Return an open file.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: an invalid file descriptor.\n TypeError: filedes is not an integer.\n\n Returns:\n Open file object.\n '
        if (not isinstance(file_des, int)):
            raise TypeError('an integer is required')
        if ((file_des >= len(self.open_files)) or (self.open_files[file_des] is None)):
            raise OSError(errno.EBADF, 'Bad file descriptor', file_des)
        return self.open_files[file_des]

    def HasOpenFile(self, file_object):
        'Return True if the given file object is in the list of open files.\n New in pyfakefs 2.9.\n\n Args:\n file_object: The FakeFile object to be checked.\n\n Returns:\n True if the file is open.\n '
        return (file_object in [wrapper.GetObject() for wrapper in self.open_files if wrapper])

    def NormalizePathSeparator(self, path):
        'Replace all appearances of alternative path separator with path separator.\n Do nothing if no alternative separator is set.\n New in pyfakefs 2.9.\n\n Args:\n path: the path to be normalized.\n\n Returns:\n The normalized path that will be used internally.\n '
        if (sys.version_info >= (3, 6)):
            path = os.fspath(path)
        if ((self.alternative_path_separator is None) or (not path)):
            return path
        return path.replace(self._alternative_path_separator(path), self._path_separator(path))

    def CollapsePath(self, path):
        "Mimic os.path.normpath using the specified path_separator.\n\n Mimics os.path.normpath using the path_separator that was specified\n for this FakeFilesystem. Normalizes the path, but unlike the method\n NormalizePath, does not make it absolute. Eliminates dot components\n (. and ..) and combines repeated path separators (//). Initial ..\n components are left in place for relative paths. If the result is an empty\n path, '.' is returned instead.\n\n This also replaces alternative path separator with path separator. That is,\n it behaves like the real os.path.normpath on Windows if initialized with\n '\\' as path separator and '/' as alternative separator.\n\n Args:\n path: (str) The path to normalize.\n\n Returns:\n (str) A copy of path with empty components and dot components removed.\n "
        path = self.NormalizePathSeparator(path)
        (drive, path) = self.SplitDrive(path)
        sep = self._path_separator(path)
        is_absolute_path = path.startswith(sep)
        path_components = path.split(sep)
        collapsed_path_components = []
        dot = self._matching_string(path, '.')
        dotdot = self._matching_string(path, '..')
        for component in path_components:
            if ((not component) or (component == dot)):
                continue
            if (component == dotdot):
                if (collapsed_path_components and (collapsed_path_components[(-1)] != dotdot)):
                    collapsed_path_components.pop()
                    continue
                elif is_absolute_path:
                    continue
            collapsed_path_components.append(component)
        collapsed_path = sep.join(collapsed_path_components)
        if is_absolute_path:
            collapsed_path = (sep + collapsed_path)
        return ((drive + collapsed_path) or dot)
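
    # Editor's examples (not part of the original module), for a Posix-flavored
    # filesystem with path_separator '/':
    #
    #     fs.CollapsePath('a//b/./c/..')   # -> 'a/b'
    #     fs.CollapsePath('/../a')         # -> '/a' (leading '..' dropped on absolute paths)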
# line: 1257

    def NormalizeCase(self, path):
        'Return a normalized case version of the given path for case-insensitive\n file systems. For case-sensitive file systems, return path unchanged.\n New in pyfakefs 2.9.\n\n Args:\n path: the file path to be transformed\n\n Returns:\n A version of path matching the case of existing path elements.\n '

        def components_to_path():
            if (len(path_components) > len(normalized_components)):
                normalized_components.extend(path_components[len(normalized_components):])
            sep = self._path_separator(path)
            normalized_path = sep.join(normalized_components)
            if (path.startswith(sep) and (not normalized_path.startswith(sep))):
                normalized_path = (sep + normalized_path)
            return normalized_path

        if (self.is_case_sensitive or (not path)):
            return path
        path_components = self.GetPathComponents(path)
        normalized_components = []
        current_dir = self.root
        for component in path_components:
            if (not isinstance(current_dir, FakeDirectory)):
                return components_to_path()
            (dir_name, current_dir) = self._DirectoryContent(current_dir, component)
            if ((current_dir is None) or (isinstance(current_dir, FakeDirectory) and (current_dir._byte_contents is None) and (current_dir.st_size == 0))):
                return components_to_path()
            normalized_components.append(dir_name)
        return components_to_path()

    def NormalizePath(self, path):
        'Absolutize and minimalize the given path.\n\n Forces all relative paths to be absolute, and normalizes the path to\n eliminate dot and empty components.\n\n Args:\n path: path to normalize\n\n Returns:\n The normalized path relative to the current working directory, or the root\n directory if path is empty.\n '
        path = self.NormalizePathSeparator(path)
        if (not path):
            path = self.path_separator
        elif (not self._StartsWithRootPath(path)):
            root_name = self._matching_string(path, self.root.name)
            empty = self._matching_string(path, '')
            path = self._path_separator(path).join(((((self.cwd != root_name) and self.cwd) or empty), path))
        if (path == self._matching_string(path, '.')):
            path = self.cwd
        return self.CollapsePath(path)
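
    # Editor's example (not part of the original module): relative paths are
    # resolved against the fake current working directory:
    #
    #     fs.cwd = '/home/user'
    #     fs.NormalizePath('../tmp/x')   # -> '/home/tmp/x'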
# line: 1320

    def SplitPath(self, path):
        'Mimic os.path.split using the specified path_separator.\n\n Mimics os.path.split using the path_separator that was specified\n for this FakeFilesystem.\n\n Args:\n path: (str) The path to split.\n\n Returns:\n (str) A duple (pathname, basename) for which pathname does not\n end with a slash, and basename does not contain a slash.\n '
        (drive, path) = self.SplitDrive(path)
        path = self.NormalizePathSeparator(path)
        sep = self._path_separator(path)
        path_components = path.split(sep)
        if (not path_components):
            return ('', '')
        basename = path_components.pop()
        if (not path_components):
            return ('', basename)
        for component in path_components:
            if component:
                # The path contains a non-separator component: strip trailing separators.
                while (not path_components[(-1)]):
                    path_components.pop()
                return ((drive + sep.join(path_components)), basename)
        return ((drive or sep), basename)

    def SplitDrive(self, path):
        'Splits the path into the drive part and the rest of the path.\n New in pyfakefs 2.9.\n\n Taken from Windows specific implementation in Python 3.5 and slightly adapted.\n\n Args:\n path: the full path to be split.\n\n Returns: a tuple of the drive part and the rest of the path, or of an empty string\n and the full path if drive letters are not supported or no drive is present.\n '
        if (sys.version_info >= (3, 6)):
            path = os.fspath(path)
        if self.is_windows_fs:
            if (len(path) >= 2):
                path = self.NormalizePathSeparator(path)
                sep = self._path_separator(path)
                if (sys.version_info >= (2, 7, 8)):
                    if ((path[0:2] == (sep * 2)) and (path[2:3] != sep)):
                        # UNC path handling: split off the mount point instead of the drive.
                        sep_index = path.find(sep, 2)
                        if (sep_index == (-1)):
                            return (path[:0], path)
                        sep_index2 = path.find(sep, (sep_index + 1))
                        if (sep_index2 == (sep_index + 1)):
                            return (path[:0], path)
                        if (sep_index2 == (-1)):
                            sep_index2 = len(path)
                        return (path[:sep_index2], path[sep_index2:])
                if (path[1:2] == self._matching_string(path, ':')):
                    return (path[:2], path[2:])
        return (path[:0], path)
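
    # Editor's examples (not part of the original module), assuming a
    # Windows-flavored filesystem configured with path_separator '/':
    #
    #     fs.SplitDrive('c:/foo/bar')       # -> ('c:', '/foo/bar')
    #     fs.SplitDrive('//host/share/x')   # -> ('//host/share', '/x')  (UNC mount point)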
# line: 1388

    def _JoinPathsWithDriveSupport(self, *all_paths):
        'Taken from Python 3.5 os.path.join() code in ntpath.py and slightly adapted'
        base_path = all_paths[0]
        paths_to_add = all_paths[1:]
        sep = self._path_separator(base_path)
        seps = [sep, self._alternative_path_separator(base_path)]
        (result_drive, result_path) = self.SplitDrive(base_path)
        for path in paths_to_add:
            (drive_part, path_part) = self.SplitDrive(path)
            if (path_part and (path_part[:1] in seps)):
                # Second path is absolute.
                if (drive_part or (not result_drive)):
                    result_drive = drive_part
                result_path = path_part
                continue
            elif (drive_part and (drive_part != result_drive)):
                if (self.is_case_sensitive or (drive_part.lower() != result_drive.lower())):
                    # Different drives => ignore the first path entirely.
                    result_drive = drive_part
                    result_path = path_part
                    continue
                # Same drive in different case.
                result_drive = drive_part
            # Second path is relative to the first.
            if (result_path and (result_path[(-1):] not in seps)):
                result_path = (result_path + sep)
            result_path = (result_path + path_part)
        colon = self._matching_string(base_path, ':')
        if (result_path and (result_path[:1] not in seps) and result_drive and (result_drive[(-1):] != colon)):
            # Add a separator between the UNC root and the relative path
            # (the original had `result_drive - sep`, which is a TypeError on strings).
            return ((result_drive + sep) + result_path)
        return (result_drive + result_path)
# line: 1422

    def JoinPaths(self, *paths):
        'Mimic os.path.join using the specified path_separator.\n\n Args:\n *paths: (str) Zero or more paths to join.\n\n Returns:\n (str) The paths joined by the path separator, starting with the last\n absolute path in paths.\n '
        if (sys.version_info >= (3, 6)):
            paths = [os.fspath(path) for path in paths]
        if (len(paths) == 1):
            return paths[0]
        if self.is_windows_fs:
            return self._JoinPathsWithDriveSupport(*paths)
        joined_path_segments = []
        sep = self._path_separator(paths[0])
        for path_segment in paths:
            if self._StartsWithRootPath(path_segment):
                joined_path_segments = [path_segment]
            else:
                if (joined_path_segments and (not joined_path_segments[(-1)].endswith(sep))):
                    joined_path_segments.append(sep)
                if path_segment:
                    joined_path_segments.append(path_segment)
        return self._matching_string(paths[0], '').join(joined_path_segments)

    def GetPathComponents(self, path):
        'Breaks the path into a list of component names.\n\n Does not include the root directory as a component, as all paths\n are considered relative to the root directory for the FakeFilesystem.\n Callers should basically follow this pattern:\n\n >>> file_path = self.NormalizePath(file_path)\n >>> path_components = self.GetPathComponents(file_path)\n >>> current_dir = self.root\n >>> for component in path_components:\n >>> if component not in current_dir.contents:\n >>> raise IOError\n >>> DoStuffWithComponent(current_dir, component)\n >>> current_dir = current_dir.GetEntry(component)\n\n Args:\n path: path to tokenize\n\n Returns:\n The list of names split from path\n '
        if ((not path) or (path == self._path_separator(path))):
            return []
        (drive, path) = self.SplitDrive(path)
        path_components = path.split(self._path_separator(path))
        assert (drive or path_components)
        if (not path_components[0]):
            # This is an absolute path.
            path_components = path_components[1:]
        if drive:
            path_components.insert(0, drive)
        return path_components
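
    # Editor's examples (not part of the original module), Posix flavor:
    #
    #     fs.JoinPaths('a', 'b', '/c', 'd')   # -> '/c/d' (last absolute path wins)
    #     fs.GetPathComponents('/a/b/c')      # -> ['a', 'b', 'c']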
# line: 1486

    def StartsWithDriveLetter(self, file_path):
        'Return True if file_path starts with a drive letter.\n New in pyfakefs 2.9.\n\n Args:\n file_path: the full path to be examined.\n\n Returns:\n True if drive letter support is enabled in the filesystem and\n the path starts with a drive letter.\n '
        colon = self._matching_string(file_path, ':')
        # Note: the original was missing the call parentheses on isalpha(),
        # making the check always truthy.
        return (self.is_windows_fs and (len(file_path) >= 2) and file_path[:1].isalpha() and (file_path[1:2] == colon))

    def _StartsWithRootPath(self, file_path):
        root_name = self._matching_string(file_path, self.root.name)
        return (file_path.startswith(root_name) or ((not self.is_case_sensitive) and file_path.lower().startswith(root_name.lower())) or self.StartsWithDriveLetter(file_path))

    def _IsRootPath(self, file_path):
        root_name = self._matching_string(file_path, self.root.name)
        return ((file_path == root_name) or ((not self.is_case_sensitive) and (file_path.lower() == root_name.lower())) or ((len(file_path) == 2) and self.StartsWithDriveLetter(file_path)))

    def _EndsWithPathSeparator(self, file_path):
        return (file_path and (file_path.endswith(self._path_separator(file_path)) or ((self.alternative_path_separator is not None) and file_path.endswith(self._alternative_path_separator(file_path)))))

    def _DirectoryContent(self, directory, component):
        if (not isinstance(directory, FakeDirectory)):
            return (None, None)
        if (component in directory.contents):
            return (component, directory.contents[component])
        if (not self.is_case_sensitive):
            matching_content = [(subdir, directory.contents[subdir]) for subdir in directory.contents if (subdir.lower() == component.lower())]
            if matching_content:
                return matching_content[0]
        return (None, None)
# line: 1533

    def Exists(self, file_path):
        'Return true if a path points to an existing file system object.\n\n Args:\n file_path: path to examine.\n\n Returns:\n (bool) True if the corresponding object exists.\n\n Raises:\n TypeError: if file_path is None.\n '
        if (sys.version_info >= (3, 6)):
            file_path = os.fspath(file_path)
        if (file_path is None):
            raise TypeError
        if (not file_path):
            return False
        try:
            file_path = self.ResolvePath(file_path)
        except (IOError, OSError):
            return False
        if (file_path == self.root.name):
            return True
        path_components = self.GetPathComponents(file_path)
        current_dir = self.root
        for component in path_components:
            current_dir = self._DirectoryContent(current_dir, component)[1]
            if (not current_dir):
                return False
        return True
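
    # Editor's example (not part of the original module):
    #
    #     fs.CreateFile('/a/b')
    #     fs.Exists('/a/b')   # -> True
    #     fs.Exists('/a/c')   # -> False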
# line: 1565

    def ResolvePath(self, file_path, allow_fd=False, raw_io=True):
        "Follow a path, resolving symlinks.\n\n ResolvePath traverses the filesystem along the specified file path,\n resolving file names and symbolic links until all elements of the path are\n exhausted, or we reach a file which does not exist. If all the elements\n are not consumed, they just get appended to the path resolved so far.\n This gives us the path which is as resolved as it can be, even if the file\n does not exist.\n\n This behavior mimics Unix semantics, and is best shown by example. Given a\n file system that looks like this:\n\n /a/b/\n /a/b/c -> /a/b2 c is a symlink to /a/b2\n /a/b2/x\n /a/c -> ../d\n /a/x -> y\n\n Then:\n /a/b/x => /a/b/x\n /a/c => /a/d\n /a/x => /a/y\n /a/b/c/d/e => /a/b2/d/e\n\n Args:\n file_path: path to examine.\n allow_fd: If `True`, `file_path` may be open file descriptor\n raw_io: `True` if called from low-level I/O functions\n\n Returns:\n resolved_path (string) or None.\n\n Raises:\n TypeError: if file_path is None.\n IOError: if file_path is '' or a part of the path doesn't exist.\n "

        def _ComponentsToPath(component_folders):
            sep = (self._path_separator(component_folders[0]) if component_folders else self.path_separator)
            path = sep.join(component_folders)
            if (not self._StartsWithRootPath(path)):
                path = (sep + path)
            return path

        def _ValidRelativePath(file_path):
            slash_dotdot = self._matching_string(file_path, '/..')
            while (file_path and (slash_dotdot in file_path)):
                file_path = file_path[:file_path.rfind(slash_dotdot)]
                if (not self.Exists(self.NormalizePath(file_path))):
                    return False
            return True

        def _FollowLink(link_path_components, link):
            'Follow a link w.r.t. a path resolved so far.\n\n The component is either a real file, which is a no-op, or a symlink.\n In the case of a symlink, we have to modify the path as built up so far\n /a/b => ../c should yield /a/../c (which will normalize to /a/c)\n /a/b => x should yield /a/x\n /a/b => /x/y/z should yield /x/y/z\n The modified path may land us in a new spot which is itself a\n link, so we may repeat the process.\n\n Args:\n link_path_components: The resolved path built up to the link so far.\n link: The link object itself.\n\n Returns:\n (string) the updated path resolved after following the link.\n\n Raises:\n IOError: if there are too many levels of symbolic link\n '
            link_path = link.contents
            sep = self._path_separator(link_path)
            alt_sep = self._alternative_path_separator(link_path)
            if ((not link_path.startswith(sep)) and ((alt_sep is None) or (not link_path.startswith(alt_sep)))):
                # Relative link target: replace the link component with the target.
                components = link_path_components[:(-1)]
                components.append(link_path)
                link_path = sep.join(components)
            return self.CollapsePath(link_path)

        if (allow_fd and (sys.version_info >= (3, 3)) and isinstance(file_path, int)):
            return self.GetOpenFile(file_path).GetObject().GetPath()
        if (sys.version_info >= (3, 6)):
            file_path = os.fspath(file_path)
        if (file_path is None):
            raise TypeError('Expected file system path string, received None')
        if ((not file_path) or (not _ValidRelativePath(file_path))):
            raise IOError(errno.ENOENT, ("No such file or directory: '%s'" % file_path))
        file_path = self.NormalizePath(self.NormalizeCase(file_path))
        if self._IsRootPath(file_path):
            return file_path
        current_dir = self.root
        path_components = self.GetPathComponents(file_path)
        resolved_components = []
        link_depth = 0
        while path_components:
            component = path_components.pop(0)
            resolved_components.append(component)
            current_dir = self._DirectoryContent(current_dir, component)[1]
            if (current_dir is None):
                # The component does not exist: append the rest unresolved.
                resolved_components.extend(path_components)
                break
            if stat.S_ISLNK(current_dir.st_mode):
                if (link_depth > _MAX_LINK_DEPTH):
                    error_class = (OSError if raw_io else IOError)
                    raise error_class(errno.ELOOP, ("Too many levels of symbolic links: '%s'" % _ComponentsToPath(resolved_components)))
                link_path = _FollowLink(resolved_components, current_dir)
                # Restart the traversal from the link target plus the remainder.
                target_components = self.GetPathComponents(link_path)
                path_components = (target_components + path_components)
                resolved_components = []
                current_dir = self.root
                link_depth += 1
        return _ComponentsToPath(resolved_components)
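
    # Editor's example (not part of the original module), mirroring the
    # docstring's /a/b/c -> /a/b2 case:
    #
    #     fs.CreateDirectory('/a/b2')
    #     fs.CreateLink('/a/b/c', '/a/b2')    # creates /a/b plus the symlink
    #     fs.ResolvePath('/a/b/c/d/e')        # -> '/a/b2/d/e'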
# line: 1715

    def GetObjectFromNormalizedPath(self, file_path):
        'Search for the specified filesystem object within the fake filesystem.\n\n Args:\n file_path: specifies target FakeFile object to retrieve, with a\n path that has already been normalized/resolved.\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
        if (sys.version_info >= (3, 6)):
            file_path = os.fspath(file_path)
        if (file_path == self.root.name):
            return self.root
        path_components = self.GetPathComponents(file_path)
        target_object = self.root
        try:
            for component in path_components:
                if stat.S_ISLNK(target_object.st_mode):
                    target_object = self.ResolveObject(target_object.contents)
                if (not stat.S_ISDIR(target_object.st_mode)):
                    if (not self.is_windows_fs):
                        raise IOError(errno.ENOTDIR, 'Not a directory in fake filesystem', file_path)
                    raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
                target_object = target_object.GetEntry(component)
        except KeyError:
            raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
        return target_object

    def GetObject(self, file_path):
        'Search for the specified filesystem object within the fake filesystem.\n\n Args:\n file_path: specifies target FakeFile object to retrieve.\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
        if (sys.version_info >= (3, 6)):
            file_path = os.fspath(file_path)
        file_path = self.NormalizePath(self.NormalizeCase(file_path))
        return self.GetObjectFromNormalizedPath(file_path)

    def ResolveObject(self, file_path, follow_symlinks=True, allow_fd=False):
        'Search for the specified filesystem object, resolving all links.\n\n Args:\n file_path: Specifies target FakeFile object to retrieve.\n follow_symlinks: If `False`, the link itself is resolved,\n otherwise the object linked to.\n allow_fd: If `True`, `file_path` may be open file descriptor\n\n Returns:\n the FakeFile object corresponding to file_path.\n\n Raises:\n IOError: if the object is not found.\n '
        if (allow_fd and (sys.version_info >= (3, 3)) and isinstance(file_path, int)):
            return self.GetOpenFile(file_path).GetObject()
        if follow_symlinks:
            if (sys.version_info >= (3, 6)):
                file_path = os.fspath(file_path)
            return self.GetObjectFromNormalizedPath(self.ResolvePath(file_path))
        return self.LResolveObject(file_path)

    def LResolveObject(self, path):
        'Search for the specified object, resolving only parent links.\n\n This is analogous to the stat/lstat difference. This resolves links *to*\n the object but not of the final object itself.\n\n Args:\n path: specifies target FakeFile object to retrieve.\n\n Returns:\n the FakeFile object corresponding to path.\n\n Raises:\n IOError: if the object is not found.\n '
        if (sys.version_info >= (3, 6)):
            path = os.fspath(path)
        if (path == self.root.name):
            return self.root
        sep = self._path_separator(path)
        alt_sep = self._alternative_path_separator(path)
        if (path.endswith(sep) or (alt_sep and path.endswith(alt_sep))):
            path = path[:(-1)]
        (parent_directory, child_name) = self.SplitPath(path)
        if (not parent_directory):
            parent_directory = self.cwd
        try:
            parent_obj = self.ResolveObject(parent_directory)
            assert parent_obj
            if (not isinstance(parent_obj, FakeDirectory)):
                if ((not self.is_windows_fs) and isinstance(parent_obj, FakeFile)):
                    raise IOError(errno.ENOTDIR, 'The parent object is not a directory', path)
                raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
            return parent_obj.GetEntry(child_name)
        except KeyError:
            raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', path)
# line: 1840

    def AddObject(self, file_path, file_object, error_class=OSError):
        'Add a fake file or directory into the filesystem at file_path.\n\n Args:\n file_path: the path to the file to be added relative to self.\n file_object: file or directory to add.\n error_class: the error class to be thrown if file_path does\n not correspond to a directory (used internally)\n\n Raises:\n IOError or OSError: if file_path does not correspond to a directory.\n '
        if (not file_path):
            target_directory = self.root
        else:
            target_directory = self.ResolveObject(file_path)
            if (not stat.S_ISDIR(target_directory.st_mode)):
                raise error_class(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)
        target_directory.AddEntry(file_object)

    def RenameObject(self, old_file_path, new_file_path, force_replace=False):
        'Renames a FakeFile object at old_file_path to new_file_path, preserving all properties.\n\n Args:\n old_file_path: Path to filesystem object to rename.\n new_file_path: Path to where the filesystem object will live after this call.\n force_replace: If set and destination is an existing file, it will be replaced\n even under Windows if the user has permissions, otherwise replacement\n happens under Unix only.\n\n Raises:\n OSError: if old_file_path does not exist.\n OSError: if new_file_path is an existing directory\n (Windows, or Posix if old_file_path points to a regular file)\n OSError: if old_file_path is a directory and new_file_path a file\n OSError: if new_file_path is an existing file and force_replace not set\n (Windows only).\n OSError: if new_file_path is an existing file and could not be removed\n (Posix, or Windows with force_replace set).\n OSError: if dirname(new_file_path) does not exist.\n OSError: if the file would be moved to another filesystem (e.g. mount point).\n '
        old_file_path = self.NormalizePath(old_file_path)
        new_file_path = self.NormalizePath(new_file_path)
        if ((not self.Exists(old_file_path)) and (not self.IsLink(old_file_path))):
            raise OSError(errno.ENOENT, 'Fake filesystem object: can not rename nonexistent file', old_file_path)
        old_object = self.LResolveObject(old_file_path)
        if (not self.is_windows_fs):
            if (self.IsDir(old_file_path, follow_symlinks=False) and self.IsLink(new_file_path)):
                raise OSError(errno.ENOTDIR, 'Cannot rename directory to symlink', new_file_path)
            if (self.IsDir(new_file_path, follow_symlinks=False) and self.IsLink(old_file_path)):
                raise OSError(errno.EISDIR, 'Cannot rename symlink to directory', new_file_path)
        if (self.Exists(new_file_path) or self.IsLink(new_file_path)):
            if (old_file_path == new_file_path):
                return
            new_object = self.GetObject(new_file_path)
            if (old_object == new_object):
                if (old_file_path.lower() == new_file_path.lower()):
                    # Only case is changed in a case-insensitive file system: do the rename.
                    pass
                else:
                    # Links to the same file: nothing to do.
                    return
            elif (stat.S_ISDIR(new_object.st_mode) or stat.S_ISLNK(new_object.st_mode)):
                if self.is_windows_fs:
                    if force_replace:
                        raise OSError(errno.EACCES, 'Fake filesystem object: can not replace existing directory', new_file_path)
                    else:
                        raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing directory', new_file_path)
                if (not stat.S_ISLNK(new_object.st_mode)):
                    if new_object.contents:
                        raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to non-empty directory', new_file_path)
                    if stat.S_ISREG(old_object.st_mode):
                        raise OSError(errno.EISDIR, 'Fake filesystem object: cannot rename file to directory', new_file_path)
            elif stat.S_ISDIR(old_object.st_mode):
                raise OSError(errno.ENOTDIR, 'Fake filesystem object: cannot rename directory to file', new_file_path)
            elif (self.is_windows_fs and (not force_replace)):
                raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing file', new_file_path)
            else:
                try:
                    self.RemoveObject(new_file_path)
                except IOError as exc:
                    raise OSError(exc.errno, exc.strerror, exc.filename)
        (old_dir, old_name) = self.SplitPath(old_file_path)
        (new_dir, new_name) = self.SplitPath(new_file_path)
        if (not self.Exists(new_dir)):
            raise OSError(errno.ENOENT, 'No such fake directory', new_dir)
        old_dir_object = self.ResolveObject(old_dir)
        new_dir_object = self.ResolveObject(new_dir)
        if (old_dir_object.st_dev != new_dir_object.st_dev):
            raise OSError(errno.EXDEV, 'Fake filesystem object: cannot rename across file systems', old_file_path)
        if (not stat.S_ISDIR(new_dir_object.st_mode)):
            raise OSError((errno.EACCES if self.is_windows_fs else errno.ENOTDIR), 'Fake filesystem object: target parent is not a directory', new_file_path)
        if new_dir_object.HasParentObject(old_object):
            raise OSError(errno.EINVAL, 'Fake filesystem object: invalid target for rename', new_file_path)
        object_to_rename = old_dir_object.GetEntry(old_name)
        old_dir_object.RemoveEntry(old_name, recursive=False)
        object_to_rename.name = new_name
        if (new_name in new_dir_object.contents):
            # Stale ending point: remove it and replace it with the renamed object.
            new_dir_object.RemoveEntry(new_name)
        new_dir_object.AddEntry(object_to_rename)
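
    # Editor's example (not part of the original module): a rename moves the
    # entry between parent directories while preserving the file object:
    #
    #     fs.CreateFile('/dir1/f', contents='x')
    #     fs.CreateDirectory('/dir2')
    #     fs.RenameObject('/dir1/f', '/dir2/g')
    #     fs.Exists('/dir1/f'), fs.Exists('/dir2/g')   # -> (False, True)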
# line: 1977

    def RemoveObject(self, file_path):
        "Remove an existing file or directory.\n\n Args:\n file_path: the path to the file relative to self.\n\n Raises:\n IOError: if file_path does not correspond to an existing file, or if part\n of the path refers to something other than a directory.\n OSError: if the directory is in use (eg, if it is '/').\n "
        file_path = self.NormalizePath(self.NormalizeCase(file_path))
        if self._IsRootPath(file_path):
            raise OSError(errno.EBUSY, 'Fake device or resource busy', file_path)
        try:
            (dirname, basename) = self.SplitPath(file_path)
            target_directory = self.ResolveObject(dirname)
            target_directory.RemoveEntry(basename)
        except KeyError:
            raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', file_path)
        except AttributeError:
            raise IOError(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)

    def CreateDirectory(self, directory_path, perm_bits=PERM_DEF):
        'Create directory_path, and all the parent directories.\n\n Helper method to set up your test faster.\n\n Args:\n directory_path: The full directory path to create.\n perm_bits: The permission bits as set by `chmod`.\n\n Returns:\n the newly created FakeDirectory object.\n\n Raises:\n OSError: if the directory already exists.\n '
        directory_path = self.NormalizePath(directory_path)
        self._AutoMountDriveIfNeeded(directory_path)
        if self.Exists(directory_path):
            raise OSError(errno.EEXIST, 'Directory exists in fake filesystem', directory_path)
        path_components = self.GetPathComponents(directory_path)
        current_dir = self.root
        new_dirs = []
        for component in path_components:
            directory = self._DirectoryContent(current_dir, component)[1]
            if (not directory):
                new_dir = FakeDirectory(component, filesystem=self)
                new_dirs.append(new_dir)
                current_dir.AddEntry(new_dir)
                current_dir = new_dir
            else:
                if stat.S_ISLNK(directory.st_mode):
                    directory = self.ResolveObject(directory.contents)
                current_dir = directory
                if ((directory.st_mode & stat.S_IFDIR) != stat.S_IFDIR):
                    raise OSError(errno.ENOTDIR, 'Not a directory', current_dir.GetPath())
        # Set the permission after creating the directories
        # to allow directory creation inside a read-only directory.
        for new_dir in new_dirs:
            new_dir.st_mode = (stat.S_IFDIR | perm_bits)
        self._last_ino += 1
        current_dir.SetIno(self._last_ino)
        return current_dir
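
    # Editor's example (not part of the original module): all missing parents
    # are created along the way:
    #
    #     fs.CreateDirectory('/x/y/z')
    #     fs.Exists('/x/y')   # -> True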
# line: 2053

    def CreateFile(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None):
        'Create file_path, including all the parent directories along the way.\n\n This helper method can be used to set up tests more easily.\n\n Args:\n file_path: The path to the file to create.\n st_mode: The stat constant representing the file type.\n contents: The contents of the file.\n st_size: The file size; only valid if contents not given.\n create_missing_dirs: If `True`, auto create missing directories.\n apply_umask: `True` if the current umask must be applied on st_mode.\n encoding: If contents is a unicode string, the encoding used\n for serialization.\n New in pyfakefs 2.9.\n errors: The error mode used for encoding/decoding errors.\n New in pyfakefs 3.2.\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n IOError: if the file already exists.\n IOError: if the containing directory is required and missing.\n '
        return self.CreateFileInternally(file_path, st_mode, contents, st_size, create_missing_dirs, apply_umask, encoding, errors)

    def add_real_file(self, file_path, read_only=True):
        "Create file_path, including all the parent directories along the way, for an existing\n real file. The contents of the real file are read only on demand.\n New in pyfakefs 3.2.\n\n Args:\n file_path: Path to an existing file in the real file system\n read_only: If `True` (the default), writing to the fake file\n raises an exception. Otherwise, writing to the file changes\n the fake file only.\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n OSError: if the file does not exist in the real file system.\n IOError: if the file already exists in the fake file system.\n\n .. note:: On MacOS and BSD, accessing the fake file's contents will update both the real and fake files' `atime.` (access time). In this particular case, `add_real_file()` violates the rule that `pyfakefs` must not modify the real file system. Further, Windows offers the option to enable atime, and older versions of Linux may also modify atime.\n "
        return self.CreateFileInternally(file_path, read_from_real_fs=True, read_only=read_only)

    def add_real_directory(self, dir_path, read_only=True, lazy_read=True):
        'Create a fake directory corresponding to the real directory at the specified\n path. Add entries in the fake directory corresponding to the entries in the\n real directory.\n New in pyfakefs 3.2.\n\n Args:\n dir_path: The path to the existing directory.\n read_only: If set, all files under the directory are treated as\n read-only, e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only\n as usually.\n lazy_read: If set (default), directory contents are only read when\n accessed, and only until the needed subdirectory level.\n *Note:* this means that the file system size is only updated\n at the time the directory contents are read; set this to\n `False` only if you are dependent on accurate file system\n size in your test\n\n Returns:\n the newly created FakeDirectory object.\n\n Raises:\n OSError: if the directory does not exist in the real file system.\n IOError: if the directory already exists in the fake file system.\n '
        if (not os.path.exists(dir_path)):
            raise IOError(errno.ENOENT, 'No such directory', dir_path)
        if lazy_read:
            parent_path = os.path.split(dir_path)[0]
            if self.Exists(parent_path):
                parent_dir = self.GetObject(parent_path)
            else:
                parent_dir = self.CreateDirectory(parent_path)
            new_dir = FakeDirectoryFromRealDirectory(dir_path, filesystem=self, read_only=read_only)
            parent_dir.AddEntry(new_dir)
            self._last_ino += 1
            new_dir.SetIno(self._last_ino)
        else:
            new_dir = self.CreateDirectory(dir_path)
            for (base, _, files) in os.walk(dir_path):
                for fileEntry in files:
                    self.add_real_file(os.path.join(base, fileEntry), read_only)
        return new_dir

    def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
        'This convenience method adds multiple files and/or directories from the\n real file system to the fake file system. See `add_real_file()` and\n `add_real_directory()`.\n New in pyfakefs 3.2.\n\n Args:\n path_list: List of file and directory paths in the real file system.\n read_only: If set, all files and files under the directories are treated as read-only,\n e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only as usually.\n lazy_dir_read: Uses lazy reading of directory contents if set\n (see `add_real_directory`)\n\n Raises:\n OSError: if any of the files and directories in the list does not exist in the real file system.\n OSError: if any of the files and directories in the list already exists in the fake file system.\n '
        for path in path_list:
            if os.path.isdir(path):
                self.add_real_directory(path, read_only, lazy_dir_read)
            else:
                self.add_real_file(path, read_only)
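
    # Editor's usage sketch (not part of the original module; '/etc/hosts' and
    # '/etc' merely stand in for any paths existing on the real file system):
    #
    #     fs.add_real_file('/etc/hosts')              # contents read lazily, read-only
    #     fs.add_real_directory('/var/log', lazy_read=True)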
# line: 2183

    def CreateFileInternally(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None, read_from_real_fs=False, read_only=True, raw_io=False):
        'Internal fake file creator that supports both normal fake files and fake\n files based on real files.\n\n Args:\n file_path: path to the file to create.\n st_mode: the stat.S_IF constant representing the file type.\n contents: the contents of the file.\n st_size: file size; only valid if contents not given.\n create_missing_dirs: if True, auto create missing directories.\n apply_umask: whether or not the current umask must be applied on st_mode.\n encoding: if contents is a unicode string, the encoding used for serialization.\n errors: the error mode used for encoding/decoding errors\n read_from_real_fs: if True, the contents are read from the real file system on demand.\n read_only: if set, the file is treated as read-only, e.g. a write access raises an exception;\n otherwise, writing to the file changes the fake file only as usually.\n raw_io: `True` if called from low-level API (`os.open`)\n '
        error_class = (OSError if raw_io else IOError)
        file_path = self.NormalizePath(file_path)
        # If the file already exists, the default behavior is to raise an exception.
        if (self.Exists(file_path) or self.IsLink(file_path)):
            raise OSError(errno.EEXIST, 'File already exists in fake filesystem', file_path)
        (parent_directory, new_file) = self.SplitPath(file_path)
        if (not parent_directory):
            parent_directory = self.cwd
        self._AutoMountDriveIfNeeded(parent_directory)
        if (not self.Exists(parent_directory)):
            if (not create_missing_dirs):
                raise error_class(errno.ENOENT, 'No such fake directory', parent_directory)
            self.CreateDirectory(parent_directory)
        else:
            parent_directory = self.NormalizeCase(parent_directory)
        if apply_umask:
            st_mode &= (~ self.umask)
        if read_from_real_fs:
            file_object = FakeFileFromRealFile(file_path, filesystem=self, read_only=read_only)
        else:
            file_object = FakeFile(new_file, st_mode, filesystem=self, encoding=encoding, errors=errors)
        self._last_ino += 1
        file_object.SetIno(self._last_ino)
        self.AddObject(parent_directory, file_object, error_class)
        if ((not read_from_real_fs) and ((contents is not None) or (st_size is not None))):
            try:
                if (st_size is not None):
                    file_object.SetLargeFileSize(st_size)
                else:
                    file_object._set_initial_contents(contents)
            except IOError:
                self.RemoveObject(file_path)
                raise
        return file_object
# line: 2246

    def CreateLink(self, file_path, link_target, create_missing_dirs=True):
        'Create the specified symlink, pointed at the specified link target.\n\n Args:\n file_path: path to the symlink to create\n link_target: the target of the symlink\n create_missing_dirs: If `True`, any missing parent directories of\n file_path will be created\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n OSError: if the symlink could not be created (see `CreateFile`).\n OSError: if on Windows before Python 3.2.\n '
        if (not self._IsLinkSupported()):
            raise OSError('Symbolic links are not supported on Windows before Python 3.2')
        if (not self.IsLink(file_path)):
            file_path = self.ResolvePath(file_path)
        if (sys.version_info >= (3, 6)):
            link_target = os.fspath(link_target)
        return self.CreateFileInternally(file_path, st_mode=(stat.S_IFLNK | PERM_DEF), contents=link_target, create_missing_dirs=create_missing_dirs, raw_io=True)

    def CreateHardLink(self, old_path, new_path):
        "Create a hard link at new_path, pointing at old_path.\n New in pyfakefs 2.9.\n\n Args:\n old_path: an existing link to the target file.\n new_path: the destination path to create a new link at.\n\n Returns:\n the FakeFile object referred to by old_path.\n\n Raises:\n OSError: if something already exists at new_path.\n OSError: if old_path is a directory.\n OSError: if the parent directory doesn't exist.\n OSError: if on Windows before Python 3.2.\n "
        if (not self._IsLinkSupported()):
            raise OSError('Links are not supported on Windows before Python 3.2')
        new_path_normalized = self.NormalizePath(new_path)
        if self.Exists(new_path_normalized):
            raise OSError(errno.EEXIST, 'File already exists in fake filesystem', new_path)
        (new_parent_directory, new_basename) = self.SplitPath(new_path_normalized)
        if (not new_parent_directory):
            new_parent_directory = self.cwd
        if (not self.Exists(new_parent_directory)):
            raise OSError(errno.ENOENT, 'No such fake directory', new_parent_directory)
        try:
            old_file = self.ResolveObject(old_path)
        except IOError:
            # ResolveObject raises IOError for a missing source
            # (narrowed from a bare `except:` in the original).
            raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', old_path)
        if (old_file.st_mode & stat.S_IFDIR):
            raise OSError((errno.EACCES if self.is_windows_fs else errno.EPERM), 'Cannot create hard link to directory', old_path)
        # Retain the original file contents; both paths refer to the same object.
        old_file.name = new_basename
        self.AddObject(new_parent_directory, old_file)
        return old_file

    def ReadLink(self, path):
        'Read the target of a symlink.\n New in pyfakefs 3.0.\n\n Args:\n path: symlink to read the target of.\n\n Returns:\n the string representing the path to which the symbolic link points.\n\n Raises:\n TypeError: if path is None\n OSError: (with errno=ENOENT) if path is not a valid path, or\n (with errno=EINVAL) if path is valid, but is not a symlink.\n '
        if (path is None):
            raise TypeError
        try:
            link_obj = self.LResolveObject(path)
        except IOError as exc:
            raise OSError(exc.errno, 'Fake path does not exist', path)
        if (stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK):
            raise OSError(errno.EINVAL, 'Fake filesystem: not a symlink', path)
        return link_obj.contents
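
    # Editor's example (not part of the original module):
    #
    #     fs.CreateFile('/target')
    #     fs.CreateLink('/link', '/target')
    #     fs.ReadLink('/link')   # -> '/target'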
# line: 2349

    def MakeDirectory(self, dir_name, mode=PERM_DEF):
        "Create a leaf Fake directory.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create. Relative paths are assumed\n to be relative to '/'.\n mode: (int) Mode to create directory with. This argument defaults to\n 0o777. The umask is applied to this mode.\n\n Raises:\n OSError: if the directory name is invalid or parent directory is read only\n or as per `FakeFilesystem.AddObject()`.\n "
        if (sys.version_info >= (3, 6)):
            dir_name = os.fspath(dir_name)
        if self._EndsWithPathSeparator(dir_name):
            dir_name = dir_name[:(-1)]
        if (not dir_name):
            raise OSError(errno.ENOENT, 'Empty directory name')
        (parent_dir, _) = self.SplitPath(dir_name)
        if parent_dir:
            base_dir = self.CollapsePath(parent_dir)
            ellipsis = self._matching_string(parent_dir, (self.path_separator + '..'))
            if parent_dir.endswith(ellipsis):
                (base_dir, dummy_dotdot, _) = parent_dir.partition(ellipsis)
            if (not self.Exists(base_dir)):
                raise OSError(errno.ENOENT, 'No such fake directory', base_dir)
        dir_name = self.NormalizePath(dir_name)
        if self.Exists(dir_name):
            raise OSError(errno.EEXIST, 'Fake object already exists', dir_name)
        (head, tail) = self.SplitPath(dir_name)
        self.AddObject(head, FakeDirectory(tail, (mode & (~ self.umask)), filesystem=self))

    def MakeDirectories(self, dir_name, mode=PERM_DEF, exist_ok=False):
        'Create a leaf Fake directory and create any non-existent parent dirs.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create.\n mode: (int) Mode to create directory (and any necessary parent\n directories) with. This argument defaults to 0o777. The umask is\n applied to this mode.\n exist_ok: (boolean) If exist_ok is False (the default), an OSError is\n raised if the target directory already exists. New in Python 3.2.\n\n Raises:\n OSError: if the directory already exists and exist_ok=False, or as per\n `FakeFilesystem.CreateDirectory()`.\n '
        dir_name = self.NormalizePath(dir_name)
        path_components = self.GetPathComponents(dir_name)
        # Walk the existing prefix of the path before creating anything.
        current_dir = self.root
        for component in path_components:
            if ((component not in current_dir.contents) or (not isinstance(current_dir.contents, dict))):
                break
            else:
                current_dir = current_dir.contents[component]
        try:
            self.CreateDirectory(dir_name, (mode & (~ self.umask)))
        except (IOError, OSError) as e:
            if ((not exist_ok) or (not isinstance(self.ResolveObject(dir_name), FakeDirectory))):
                if isinstance(e, OSError):
                    raise
                raise OSError(e.errno, e.strerror, e.filename)
# line: 2424

    def _IsType(self, path, st_flag, follow_symlinks=True):
        "Helper function to implement isdir(), islink(), etc.\n\n See the stat(2) man page for valid stat.S_I* flag values\n\n Args:\n path: path to file to stat and test\n st_flag: the stat.S_I* flag checked for the file's st_mode\n\n Returns:\n boolean (the st_flag is set in path's st_mode)\n\n Raises:\n TypeError: if path is None\n "
        if (sys.version_info >= (3, 6)):
            path = os.fspath(path)
        if (path is None):
            raise TypeError
        try:
            obj = self.ResolveObject(path, follow_symlinks)
            if obj:
                return (stat.S_IFMT(obj.st_mode) == st_flag)
        except (IOError, OSError):
            return False
        return False

    def IsDir(self, path, follow_symlinks=True):
        'Determine if path identifies a directory.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a directory (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
        return self._IsType(path, stat.S_IFDIR, follow_symlinks)

    def IsFile(self, path, follow_symlinks=True):
        'Determine if path identifies a regular file.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a regular file (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
        return self._IsType(path, stat.S_IFREG, follow_symlinks)

    def IsLink(self, path):
        'Determine if path identifies a symbolic link.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a symlink (S_IFLNK set in st_mode)\n\n Raises:\n TypeError: if path is None.\n '
        return self._IsType(path, stat.S_IFLNK, follow_symlinks=False)

    def ConfirmDir(self, target_directory):
        'Test that the target is actually a directory, raising OSError if not.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: path to the target directory within the fake filesystem.\n\n Returns:\n the FakeDirectory object corresponding to target_directory.\n\n Raises:\n OSError: if the target is not a directory.\n '
        try:
            directory = self.ResolveObject(target_directory)
        except IOError as exc:
            raise OSError(exc.errno, exc.strerror, target_directory)
        if (not (directory.st_mode & stat.S_IFDIR)):
            raise OSError(errno.ENOTDIR, 'Fake os module: not a directory', target_directory)
        return directory
# line: 2519
    def RemoveFile(self, path):
        """Remove the FakeFile object at the specified file path.
        New in pyfakefs 3.0.

        Args:
            path: path to file to be removed.

        Raises:
            OSError: if path points to a directory.
            OSError: if path does not exist.
            OSError: if removal failed.
        """
        path = self.NormalizePath(path)
        if self.Exists(path):
            obj = self.ResolveObject(path)
            if stat.S_IFMT(obj.st_mode) == stat.S_IFDIR:
                link_obj = self.LResolveObject(path)
                if stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK:
                    raise OSError(errno.EISDIR, "Is a directory: '%s'" % path)

        try:
            self.RemoveObject(path)
        except IOError as exc:
            raise OSError(exc.errno, exc.strerror, exc.filename)

    def RemoveDirectory(self, target_directory, allow_symlink=False):
        """Remove a leaf Fake directory.
        New in pyfakefs 3.0.

        Args:
            target_directory: (str) Name of directory to remove.
            allow_symlink: (bool) if `target_directory` is a symlink,
                the function just returns, otherwise it raises (Posix only)

        Raises:
            OSError: if target_directory does not exist.
            OSError: if target_directory does not point to a directory.
            OSError: if removal failed per FakeFilesystem.RemoveObject.
                Cannot remove '.'.
        """
        if target_directory in ('.', u'.'):
            raise OSError(errno.EINVAL, "Invalid argument: '.'")
        target_directory = self.NormalizePath(target_directory)
        if self.ConfirmDir(target_directory):
            if not self.is_windows_fs and self.IsLink(target_directory):
                if allow_symlink:
                    return
                raise OSError(errno.ENOTDIR, 'Cannot remove symlink',
                              target_directory)

            dir_object = self.ResolveObject(target_directory)
            if dir_object.contents:
                raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
                              target_directory)

            try:
                self.RemoveObject(target_directory)
            except IOError as exc:
                raise OSError(exc.errno, exc.strerror, exc.filename)

    def ListDir(self, target_directory):
        """Return a list of file names in target_directory.
        New in pyfakefs 3.0.

        Args:
            target_directory: path to the target directory within the fake
                filesystem.

        Returns:
            a list of file names within the target directory in arbitrary
            order.

        Raises:
            OSError: if the target is not a directory.
        """
        target_directory = self.ResolvePath(target_directory, allow_fd=True)
        directory = self.ConfirmDir(target_directory)
        directory_contents = directory.contents
        return list(directory_contents.keys())

    if sys.version_info >= (3, 5):
        class DirEntry:
            """Emulates os.DirEntry. Note that we did not enforce keyword
            only arguments."""

            def __init__(self, filesystem):
                """Initialize the dir entry with unset values.

                Args:
                    filesystem: the fake filesystem used for implementation.
                """
                self._filesystem = filesystem
                self.name = ''
                self.path = ''
                self._inode = None
                self._islink = False
                self._isdir = False
                self._statresult = None
                self._statresult_symlink = None

            def inode(self):
                """Return the inode number of the entry."""
                if self._inode is None:
                    self.stat(follow_symlinks=False)
                return self._inode

            def is_dir(self, follow_symlinks=True):
                """Return True if this entry is a directory entry.

                Args:
                    follow_symlinks: If True, also return True if this entry
                        is a symlink pointing to a directory.

                Returns:
                    True if this entry is an existing directory entry, or if
                    follow_symlinks is set, and this entry points to an
                    existing directory entry.
                """
                return self._isdir and (follow_symlinks or not self._islink)

            def is_file(self, follow_symlinks=True):
                """Return True if this entry is a regular file entry.

                Args:
                    follow_symlinks: If True, also return True if this entry
                        is a symlink pointing to a regular file.

                Returns:
                    True if this entry is an existing file entry, or if
                    follow_symlinks is set, and this entry points to an
                    existing file entry.
                """
                return not self._isdir and (follow_symlinks or not self._islink)

            def is_symlink(self):
                """Return True if this entry is a symbolic link (even if broken)."""
                return self._islink

            def stat(self, follow_symlinks=True):
                """Return a stat_result object for this entry.

                Args:
                    follow_symlinks: If False and the entry is a symlink,
                        return the result for the symlink, otherwise for the
                        object it points to.
                """
                if follow_symlinks:
                    if self._statresult_symlink is None:
                        file_object = self._filesystem.ResolveObject(self.path)
                        if self._filesystem.is_windows_fs:
                            file_object.st_ino = 0
                            file_object.st_dev = 0
                            file_object.st_nlink = 0
                        self._statresult_symlink = file_object.stat_result.copy()
                    return self._statresult_symlink

                if self._statresult is None:
                    file_object = self._filesystem.LResolveObject(self.path)
                    self._inode = file_object.st_ino
                    if self._filesystem.is_windows_fs:
                        file_object.st_ino = 0
                        file_object.st_dev = 0
                        file_object.st_nlink = 0
                    self._statresult = file_object.stat_result.copy()
                return self._statresult

        class ScanDirIter:
            """Iterator for DirEntry objects returned from `scandir()`
            function.
            New in pyfakefs 3.0.
            """

            def __init__(self, filesystem, path):
                self.filesystem = filesystem
                self.path = self.filesystem.ResolvePath(path)
                contents = {}
                try:
                    contents = self.filesystem.ConfirmDir(path).contents
                except OSError:
                    pass
                self.contents_iter = iter(contents)

            def __iter__(self):
                return self

            def __next__(self):
                entry = self.contents_iter.__next__()
                dir_entry = self.filesystem.DirEntry(self.filesystem)
                dir_entry.name = entry
                dir_entry.path = self.filesystem.JoinPaths(self.path,
                                                           dir_entry.name)
                dir_entry._isdir = self.filesystem.IsDir(dir_entry.path)
                dir_entry._islink = self.filesystem.IsLink(dir_entry.path)
                return dir_entry

            if sys.version_info >= (3, 6):
                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self.close()

                def close(self):
                    pass

        def ScanDir(self, path=''):
            """Return an iterator of DirEntry objects corresponding to the
            entries in the directory given by path.
            New in pyfakefs 3.0.

            Args:
                path: path to the target directory within the fake filesystem.

            Returns:
                an iterator to an unsorted list of os.DirEntry objects for
                each entry in path.

            Raises:
                OSError: if the target is not a directory.
            """
            return self.ScanDirIter(self, path)

    def __str__(self):
        return str(self.root)
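

# Illustrative usage sketch, not part of the original module: exercising the
# directory APIs defined above on a standalone FakeFilesystem. The '/data'
# path is hypothetical; the function is only ever called explicitly, so
# defining it here has no import-time side effects.
def _example_fake_filesystem_directories():  # pragma: no cover
    fs = FakeFilesystem()
    fs.CreateDirectory('/data/logs')
    assert fs.IsDir('/data/logs')
    assert fs.ListDir('/data') == ['logs']
    fs.RemoveDirectory('/data/logs')
    assert not fs.Exists('/data/logs')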


class FakePathModule(object):
    """Faked os.path module replacement.

    FakePathModule should *only* be instantiated by FakeOsModule.  See the
    FakeOsModule docstring for details.
    """
    _OS_PATH_COPY = CopyModule(os.path)

    def __init__(self, filesystem, os_module=None):
        """Init.

        Args:
            filesystem: FakeFilesystem used to provide file system information
            os_module: (deprecated) FakeOsModule to assign to self.os
        """
        self.filesystem = filesystem
        self._os_path = self._OS_PATH_COPY
        if os_module is None:
            warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
                          stacklevel=2)
        self._os_path.os = self.os = os_module
        self.sep = self.filesystem.path_separator
        self.altsep = self.filesystem.alternative_path_separator

    def exists(self, path):
        """Determine whether the file object exists within the fake filesystem.

        Args:
            path: path to the file object.

        Returns:
            bool (if file exists).
        """
        return self.filesystem.Exists(path)

    def lexists(self, path):
        """Test whether a path exists.  Returns True for broken symbolic links.

        Args:
            path: path to the symlink object.

        Returns:
            bool (if file exists).
        """
        return self.exists(path) or self.islink(path)

    def getsize(self, path):
        """Return the file object size in bytes.

        Args:
            path: path to the file object.

        Returns:
            file size in bytes.
        """
        try:
            file_obj = self.filesystem.ResolveObject(path)
            return file_obj.st_size
        except IOError as exc:
            raise os.error(exc.errno, exc.strerror)

    def isabs(self, path):
        """Return True if path is an absolute pathname."""
        if self.filesystem.is_windows_fs:
            path = self.splitdrive(path)[1]
        if sys.version_info >= (3, 6):
            path = os.fspath(path)
        sep = self.filesystem._path_separator(path)
        altsep = self.filesystem._alternative_path_separator(path)
        if self.filesystem.is_windows_fs:
            return len(path) > 0 and path[:1] in (sep, altsep)
        else:
            return (path.startswith(sep) or
                    altsep is not None and path.startswith(altsep))

    def isdir(self, path):
        """Determine if path identifies a directory."""
        return self.filesystem.IsDir(path)

    def isfile(self, path):
        """Determine if path identifies a regular file."""
        return self.filesystem.IsFile(path)

    def islink(self, path):
        """Determine if path identifies a symbolic link.

        Args:
            path: path to filesystem object.

        Returns:
            True if path points to a symbolic link.

        Raises:
            TypeError: if path is None.
        """
        return self.filesystem.IsLink(path)

    def getmtime(self, path):
        """Returns the modification time of the fake file.

        Args:
            path: the path to fake file.

        Returns:
            (int, float) the modification time of the fake file
            in number of seconds since the epoch.

        Raises:
            OSError: if the file does not exist.
        """
        try:
            file_obj = self.filesystem.ResolveObject(path)
        except IOError as exc:
            raise OSError(errno.ENOENT, str(exc))
        return file_obj.st_mtime

    def getatime(self, path):
        """Returns the last access time of the fake file.

        Note: Access time is not set automatically in fake filesystem
        on access.

        Args:
            path: the path to fake file.

        Returns:
            (int, float) the access time of the fake file in number of
            seconds since the epoch.

        Raises:
            OSError: if the file does not exist.
        """
        try:
            file_obj = self.filesystem.ResolveObject(path)
        except IOError as exc:
            raise OSError(errno.ENOENT, str(exc))
        return file_obj.st_atime

    def getctime(self, path):
        """Returns the creation time of the fake file.

        Args:
            path: the path to fake file.

        Returns:
            (int, float) the creation time of the fake file in number of
            seconds since the epoch.

        Raises:
            OSError: if the file does not exist.
        """
        try:
            file_obj = self.filesystem.ResolveObject(path)
        except IOError as exc:
            raise OSError(errno.ENOENT, str(exc))
        return file_obj.st_ctime

    def abspath(self, path):
        """Return the absolute version of a path."""

        def getcwd():
            """Return the current working directory."""
            if sys.version_info < (3,) and isinstance(path, unicode):
                return self.os.getcwdu()
            elif sys.version_info >= (3,) and isinstance(path, bytes):
                return self.os.getcwdb()
            else:
                return self.os.getcwd()

        if sys.version_info >= (3, 6):
            path = os.fspath(path)

        sep = self.filesystem._path_separator(path)
        altsep = self.filesystem._alternative_path_separator(path)
        if not self.isabs(path):
            path = self.join(getcwd(), path)
        elif (self.filesystem.is_windows_fs and path.startswith(sep) or
              altsep is not None and path.startswith(altsep)):
            cwd = getcwd()
            if self.filesystem.StartsWithDriveLetter(cwd):
                path = self.join(cwd[:2], path)
        return self.normpath(path)

    def join(self, *p):
        """Return the completed path with a separator of the parts."""
        return self.filesystem.JoinPaths(*p)

    def split(self, path):
        """Split the path into the directory and the filename of the path.
        New in pyfakefs 3.0.
        """
        return self.filesystem.SplitPath(path)

    def splitdrive(self, path):
        """Split the path into the drive part and the rest of the path,
        if supported.
        New in pyfakefs 2.9.
        """
        return self.filesystem.SplitDrive(path)

    def normpath(self, path):
        """Normalize path, eliminating double slashes, etc."""
        return self.filesystem.CollapsePath(path)

    def normcase(self, path):
        """Convert to lower case under windows, replaces additional path
        separator.
        New in pyfakefs 2.9.
        """
        path = self.filesystem.NormalizePathSeparator(path)
        if self.filesystem.is_windows_fs:
            path = path.lower()
        return path

    def relpath(self, path, start=None):
        """We mostly rely on the native implementation and adapt the
        path separator."""
        if not path:
            raise ValueError('no path specified')
        if sys.version_info >= (3, 6):
            path = os.fspath(path)
            if start is not None:
                start = os.fspath(start)
        if start is None:
            start = self.filesystem.cwd
        if self.filesystem.alternative_path_separator is not None:
            path = path.replace(self.filesystem.alternative_path_separator,
                                self._os_path.sep)
            start = start.replace(self.filesystem.alternative_path_separator,
                                  self._os_path.sep)
        path = path.replace(self.filesystem.path_separator, self._os_path.sep)
        start = start.replace(self.filesystem.path_separator, self._os_path.sep)
        path = self._os_path.relpath(path, start)
        return path.replace(self._os_path.sep, self.filesystem.path_separator)
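
    # Illustrative sketch, not part of the original module: because relpath()
    # delegates to the native os.path implementation, fake path separators
    # are swapped to native ones before the call and swapped back afterwards,
    # e.g. (paths hypothetical):
    #
    #   fake_os.path.relpath('/a/b/c', '/a')   # -> 'b/c' with the fake sep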

    def realpath(self, filename):
        """Return the canonical path of the specified filename, eliminating
        any symbolic links encountered in the path.
        New in pyfakefs 3.0.
        """
        if self.filesystem.is_windows_fs:
            return self.abspath(filename)
        if sys.version_info >= (3, 6):
            filename = os.fspath(filename)
        path, ok = self._joinrealpath(filename[:0], filename, {})
        return self.abspath(path)

    if sys.platform != 'win32' or sys.version_info >= (3, 2):
        def samefile(self, path1, path2):
            """Return whether path1 and path2 point to the same file.
            Windows support new in Python 3.2.
            New in pyfakefs 3.3.

            Args:
                path1: first file path or path object (Python >=3.6)
                path2: second file path or path object (Python >=3.6)

            Raises:
                OSError: if one of the paths does not point to an existing
                    file system object.
            """
            stat1 = self.filesystem.GetStat(path1)
            stat2 = self.filesystem.GetStat(path2)
            return (stat1.st_ino == stat2.st_ino and
                    stat1.st_dev == stat2.st_dev)

    def _joinrealpath(self, path, rest, seen):
        """Join two paths, normalizing and eliminating any symbolic links
        encountered in the second path.
        Taken from Python source and adapted.
        """
        curdir = self.filesystem._matching_string(path, '.')
        pardir = self.filesystem._matching_string(path, '..')

        sep = self.filesystem._path_separator(path)
        if self.isabs(rest):
            rest = rest[1:]
            path = sep

        while rest:
            name, _, rest = rest.partition(sep)
            if not name or name == curdir:
                # current dir
                continue
            if name == pardir:
                # parent dir
                if path:
                    path, name = self.filesystem.SplitPath(path)
                    if name == pardir:
                        path = self.filesystem.JoinPaths(path, pardir, pardir)
                else:
                    path = pardir
                continue
            newpath = self.filesystem.JoinPaths(path, name)
            if not self.filesystem.IsLink(newpath):
                path = newpath
                continue
            # resolve the symbolic link
            if newpath in seen:
                # already seen this path
                path = seen[newpath]
                if path is not None:
                    # use cached value
                    continue
                # the symlink is not resolved, so we must have a symlink loop;
                # return already resolved part plus rest of the path unchanged
                return self.filesystem.JoinPaths(newpath, rest), False
            seen[newpath] = None  # not resolved symlink
            path, ok = self._joinrealpath(
                path, self.filesystem.ReadLink(newpath), seen)
            if not ok:
                return self.filesystem.JoinPaths(path, rest), False
            seen[newpath] = path  # resolved symlink
        return path, True

    def dirname(self, path):
        """Returns the first part of the result of `split()`.
        New in pyfakefs 3.0.
        """
        return self.split(path)[0]

    def expanduser(self, path):
        """Return the argument with an initial component of ~ or ~user
        replaced by that user's home directory.
        """
        return self._os_path.expanduser(path).replace(self._os_path.sep,
                                                      self.sep)

    def ismount(self, path):
        """Return true if the given path is a mount point.
        New in pyfakefs 2.9.

        Args:
            path: path to filesystem object to be checked

        Returns:
            True if path is a mount point added to the fake file system.
            Under Windows also returns True for drive and UNC roots
            (independent of their existence).
        """
        if sys.version_info >= (3, 6):
            path = os.fspath(path)
        if not path:
            return False
        normed_path = self.filesystem.NormalizePath(path)
        sep = self.filesystem._path_separator(path)
        if self.filesystem.is_windows_fs:
            if self.filesystem.alternative_path_separator is not None:
                path_seps = (
                    sep, self.filesystem._alternative_path_separator(path)
                )
            else:
                path_seps = (sep,)
            drive, rest = self.filesystem.SplitDrive(normed_path)
            if drive and drive[:1] in path_seps:
                return (not rest) or (rest in path_seps)
            if rest in path_seps:
                return True
        for mount_point in self.filesystem.mount_points:
            if normed_path.rstrip(sep) == mount_point.rstrip(sep):
                return True
        return False

    if sys.version_info < (3, 0):
        def walk(self, top, func, arg):
            """Directory tree walk with callback function.
            New in pyfakefs 3.0.

            Args:
                top: root path to traverse. The root itself is not included
                    in the called elements.
                func: function to be called for each visited path node.
                arg: first argument to be called with func (apart from
                    dirname and filenames).
            """
            try:
                names = self.filesystem.ListDir(top)
            except os.error:
                return
            func(arg, top, names)
            for name in names:
                name = self.filesystem.JoinPaths(top, name)
                if self.filesystem.is_windows_fs:
                    if self.filesystem.IsDir(name):
                        self.walk(name, func, arg)
                else:
                    try:
                        st = self.filesystem.GetStat(name, follow_symlinks=False)
                    except os.error:
                        continue
                    if stat.S_ISDIR(st.st_mode):
                        self.walk(name, func, arg)

    def __getattr__(self, name):
        """Forwards any non-faked calls to the real os.path."""
        return getattr(self._os_path, name)
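

# Illustrative usage sketch, not part of the original module: client code
# reaches FakePathModule through FakeOsModule.path, mirroring os.path. The
# file name is hypothetical; FakeOsModule is defined below, so this function
# must only be called after the module has been fully loaded.
def _example_fake_path_usage():  # pragma: no cover
    fs = FakeFilesystem()
    fs.CreateFile('/tmp/data.txt', contents='abc')
    fake_os = FakeOsModule(fs)
    assert fake_os.path.exists('/tmp/data.txt')
    assert fake_os.path.getsize('/tmp/data.txt') == 3
    assert fake_os.path.join('/tmp', 'data.txt') == '/tmp/data.txt'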


class FakeOsModule(object):
    """Uses FakeFilesystem to provide a fake os module replacement.

    Do not create os.path separately from os, as there is a necessary circular
    dependency between os and os.path to replicate the behavior of the
    standard Python modules.  What you want to do is to just let FakeOsModule
    take care of os.path setup itself.

    # You always want to do this.
    filesystem = fake_filesystem.FakeFilesystem()
    my_os_module = fake_filesystem.FakeOsModule(filesystem)
    """

    _stat_float_times = sys.version_info >= (2, 5)

    def __init__(self, filesystem, os_path_module=None):
        """Also exposes self.path (to fake os.path).

        Args:
            filesystem: FakeFilesystem used to provide file system information
            os_path_module: (deprecated) optional FakePathModule instance
        """
        self.filesystem = filesystem
        self.sep = filesystem.path_separator
        self.altsep = filesystem.alternative_path_separator
        self._os_module = os
        if os_path_module is None:
            self.path = FakePathModule(self.filesystem, self)
        else:
            warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
                          stacklevel=2)
            self.path = os_path_module
        if sys.version_info < (3, 0):
            self.fdopen = self._fdopen_ver2
        else:
            self.fdopen = self._fdopen

    def _fdopen(self, *args, **kwargs):
        """Redirector to open() builtin function.

        Args:
            *args: pass through args
            **kwargs: pass through kwargs

        Returns:
            File object corresponding to file_des.

        Raises:
            TypeError: if file descriptor is not an integer.
        """
        if not isinstance(args[0], int):
            raise TypeError('an integer is required')
        return FakeFileOpen(self.filesystem)(*args, **kwargs)

    def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
        """Returns an open file object connected to the file descriptor
        file_des.

        Args:
            file_des: An integer file descriptor for the file object requested.
            mode: additional file flags. Currently checks to see if the mode
                matches the mode of the requested file object.
            bufsize: ignored. (Used for signature compliance with
                __builtin__.fdopen)

        Returns:
            File object corresponding to file_des.

        Raises:
            OSError: if bad file descriptor or incompatible mode is given.
            TypeError: if file descriptor is not an integer.
        """
        if not isinstance(file_des, int):
            raise TypeError('an integer is required')

        try:
            return FakeFileOpen(self.filesystem).Call(file_des, mode=mode)
        except IOError as exc:
            raise OSError(exc)

    def _umask(self):
        """Return the current umask."""
        if self.filesystem.is_windows_fs:
            # windows has no real notion of umask
            return 0
        if sys.platform == 'win32':
            # if we are testing Unix behavior under Windows,
            # assume a default mask
            return 0o002
        else:
            # there is no pure getter for umask, so we have to first
            # set a mode to get the previous one and then re-set that
            mask = os.umask(0)
            os.umask(mask)
            return mask

    def open(self, file_path, flags, mode=None, dir_fd=None):
        """Return the file descriptor for a FakeFile.

        Args:
            file_path: the path to the file
            flags: low-level bits to indicate io operation
            mode: bits to define default permissions
                Note: only basic modes are supported, OS-specific modes are
                ignored
            dir_fd: If not `None`, the file descriptor of a directory,
                with `file_path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Returns:
            A file descriptor.

        Raises:
            IOError: if the path cannot be found
            ValueError: if invalid mode is given
            NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`
        """
        file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)
        if mode is None:
            if self.filesystem.is_windows_fs:
                mode = 0o666
            else:
                mode = 0o777 & ~self._umask()

        open_modes = _OpenModes(
            must_exist=not flags & os.O_CREAT,
            can_read=not flags & os.O_WRONLY,
            can_write=flags & (os.O_RDWR | os.O_WRONLY),
            truncate=flags & os.O_TRUNC,
            append=flags & os.O_APPEND,
            must_not_exist=flags & os.O_EXCL
        )
        if open_modes.must_not_exist and open_modes.must_exist:
            raise NotImplementedError(
                'O_EXCL without O_CREAT mode is not supported')

        if (not self.filesystem.is_windows_fs and
                not open_modes.can_write and
                self.filesystem.Exists(file_path)):
            # opening a directory is only allowed under Posix
            # with read-only access
            obj = self.filesystem.ResolveObject(file_path)
            if isinstance(obj, FakeDirectory):
                dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)
                file_des = self.filesystem.AddOpenFile(dir_wrapper)
                dir_wrapper.filedes = file_des
                return file_des

        # low-level open is always binary
        str_flags = 'b'
        delete_on_close = False
        if hasattr(os, 'O_TEMPORARY'):
            delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
        fake_file = FakeFileOpen(
            self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
                file_path, str_flags, open_modes=open_modes)
        self.chmod(file_path, mode)
        return fake_file.fileno()
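
    # Illustrative sketch, not part of the original module: the low-level
    # descriptor API above mirrors the real os module (paths hypothetical):
    #
    #   fd = fake_os.open('/note.txt', os.O_CREAT | os.O_RDWR)
    #   fake_os.write(fd, b'hello')
    #   fake_os.close(fd)
    #
    # O_EXCL without O_CREAT raises NotImplementedError, as documented above.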

    def close(self, file_des):
        """Close a file descriptor.

        Args:
            file_des: An integer file descriptor for the file object requested.

        Raises:
            OSError: bad file descriptor.
            TypeError: if file descriptor is not an integer.
        """
        file_handle = self.filesystem.GetOpenFile(file_des)
        file_handle.close()

    def read(self, file_des, num_bytes):
        """Read number of bytes from a file descriptor, returns bytes read.

        Args:
            file_des: An integer file descriptor for the file object requested.
            num_bytes: Number of bytes to read from file.

        Returns:
            Bytes read from file.

        Raises:
            OSError: bad file descriptor.
            TypeError: if file descriptor is not an integer.
        """
        file_handle = self.filesystem.GetOpenFile(file_des)
        file_handle.raw_io = True
        return file_handle.read(num_bytes)

    def write(self, file_des, contents):
        """Write string to file descriptor, returns number of bytes written.

        Args:
            file_des: An integer file descriptor for the file object requested.
            contents: String of bytes to write to file.

        Returns:
            Number of bytes written.

        Raises:
            OSError: bad file descriptor.
            TypeError: if file descriptor is not an integer.
        """
        file_handle = self.filesystem.GetOpenFile(file_des)
        file_handle.raw_io = True
        file_handle._sync_io()
        file_handle.write(contents)
        file_handle.flush()
        return len(contents)

    @classmethod
    def stat_float_times(cls, newvalue=None):
        """Determine whether a file's time stamps are reported as floats
        or ints.
        New in pyfakefs 2.9.

        Calling without arguments returns the current value.  The value is
        shared by all instances of FakeOsModule.

        Args:
            newvalue: if True, mtime, ctime, atime are reported as floats.
                Else, as ints (rounding down).
        """
        if newvalue is not None:
            cls._stat_float_times = bool(newvalue)
        return cls._stat_float_times
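
    # Illustrative sketch, not part of the original module: the flag is
    # stored on the class, so toggling it affects every FakeOsModule
    # instance at once:
    #
    #   FakeOsModule.stat_float_times(False)          # report int timestamps
    #   assert FakeOsModule.stat_float_times() is False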

    def fstat(self, file_des):
        """Return the os.stat-like tuple for the FakeFile object of file_des.

        Args:
            file_des: file descriptor of filesystem object to retrieve.

        Returns:
            the FakeStatResult object corresponding to entry_path.

        Raises:
            OSError: if the filesystem object doesn't exist.
        """
        file_object = self.filesystem.GetOpenFile(file_des).GetObject()
        return file_object.stat_result.copy()

    def umask(self, new_mask):
        """Change the current umask.

        Args:
            new_mask: An integer.

        Returns:
            The old mask.

        Raises:
            TypeError: new_mask is of an invalid type.
        """
        if not isinstance(new_mask, int):
            raise TypeError('an integer is required')
        old_umask = self.filesystem.umask
        self.filesystem.umask = new_mask
        return old_umask

    def chdir(self, target_directory):
        """Change current working directory to target directory.

        Args:
            target_directory: path to new current working directory.

        Raises:
            OSError: if user lacks permission to enter the argument directory
                or if the target is not a directory
        """
        target_directory = self.filesystem.ResolvePath(target_directory,
                                                       allow_fd=True)
        self.filesystem.ConfirmDir(target_directory)
        directory = self.filesystem.ResolveObject(target_directory)
        # the exec bit on the directory gates entering it
        # (originally written as `st_mode | PERM_EXE`, which never fires)
        if not directory.st_mode & PERM_EXE:
            raise OSError(errno.EACCES, 'Fake os module: permission denied',
                          directory)
        self.filesystem.cwd = target_directory

    def getcwd(self):
        """Return current working directory."""
        return self.filesystem.cwd

    if sys.version_info < (3,):
        def getcwdu(self):
            """Return current working directory as unicode. Python 2 only."""
            return unicode(self.filesystem.cwd)
    else:
        def getcwdb(self):
            """Return current working directory as bytes. Python 3 only."""
            return bytes(self.filesystem.cwd, locale.getpreferredencoding(False))

    def listdir(self, target_directory):
        """Return a list of file names in target_directory.

        Args:
            target_directory: path to the target directory within the fake
                filesystem.

        Returns:
            a list of file names within the target directory in arbitrary
            order.

        Raises:
            OSError: if the target is not a directory.
        """
        return self.filesystem.ListDir(target_directory)

    if sys.platform.startswith('linux') and sys.version_info >= (3, 3):
        def listxattr(self, path=None, follow_symlinks=True):
            """Dummy implementation that returns an empty list - used by
            shutil."""
            return []

    if sys.version_info >= (3, 5):
        def scandir(self, path=''):
            """Return an iterator of DirEntry objects corresponding to the
            entries in the directory given by path.

            Args:
                path: path to the target directory within the fake filesystem.

            Returns:
                an iterator to an unsorted list of os.DirEntry objects for
                each entry in path.

            Raises:
                OSError: if the target is not a directory.
            """
            return self.filesystem.ScanDir(path)

    def _ClassifyDirectoryContents(self, root):
        """Classify contents of a directory as files/directories.

        Args:
            root: (str) Directory to examine.

        Returns:
            (tuple) A tuple consisting of three values: the directory
            examined, a list containing all of the directory entries, and a
            list containing all of the non-directory entries.  (This is the
            same format as returned by the os.walk generator.)

        Raises:
            Nothing on its own, but be ready to catch exceptions generated by
            underlying mechanisms like os.listdir.
        """
        dirs = []
        files = []
        for entry in self.listdir(root):
            if self.path.isdir(self.path.join(root, entry)):
                dirs.append(entry)
            else:
                files.append(entry)
        return (root, dirs, files)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Perform an os.walk operation over the fake filesystem.

        Args:
            top: root directory from which to begin walk.
            topdown: determines whether to return the tuples with the root as
                the first entry (True) or as the last, after all the child
                directory tuples (False).
            onerror: if not None, function which will be called to handle the
                os.error instance provided when os.listdir() fails.
            followlinks: if True, symbolic links are followed.
                New in pyfakefs 2.9.

        Yields:
            (path, directories, nondirectories) for top and each of its
            subdirectories.  See the documentation for the builtin os module
            for further details.
        """
        def do_walk(top, topMost=False):
            top = self.path.normpath(top)
            if not topMost and not followlinks and self.path.islink(top):
                return
            try:
                top_contents = self._ClassifyDirectoryContents(top)
            except OSError as exc:
                top_contents = None
                if onerror is not None:
                    onerror(exc)

            if top_contents is not None:
                if topdown:
                    yield top_contents

                for directory in top_contents[1]:
                    if not followlinks and self.path.islink(directory):
                        continue
                    for contents in do_walk(self.path.join(top, directory)):
                        yield contents

                if not topdown:
                    yield top_contents

        return do_walk(top, topMost=True)
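
    # Illustrative sketch, not part of the original module: walking a fake
    # tree yields the same (root, dirs, files) triples as the real os.walk
    # (paths hypothetical):
    #
    #   fs.CreateFile('/src/pkg/mod.py')
    #   for root, dirs, files in fake_os.walk('/src'):
    #       ...   # ('/src', ['pkg'], []) comes first when topdown is True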

    def readlink(self, path, dir_fd=None):
        """Read the target of a symlink.

        Args:
            path: Symlink to read the target of.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Returns:
            the string representing the path to which the symbolic link points.

        Raises:
            TypeError: if `path` is None
            OSError: (with errno=ENOENT) if path is not a valid path, or
                (with errno=EINVAL) if path is valid, but is not a symlink.
        """
        path = self._path_with_dir_fd(path, self.readlink, dir_fd)
        return self.filesystem.ReadLink(path)

    def stat(self, entry_path, dir_fd=None, follow_symlinks=None):
        """Return the os.stat-like tuple for the FakeFile object of entry_path.

        Args:
            entry_path: path to filesystem object to retrieve.
            dir_fd: (int) If not `None`, the file descriptor of a directory,
                with `entry_path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.
            follow_symlinks: (bool) If `False` and `entry_path` points to a
                symlink, the link itself is changed instead of the linked
                object.
                New in Python 3.3. New in pyfakefs 3.0.

        Returns:
            the FakeStatResult object corresponding to entry_path.

        Raises:
            OSError: if the filesystem object doesn't exist.
        """
        if follow_symlinks is None:
            follow_symlinks = True
        elif sys.version_info < (3, 3):
            raise TypeError(
                "stat() got an unexpected keyword argument 'follow_symlinks'")
        entry_path = self._path_with_dir_fd(entry_path, self.stat, dir_fd)
        return self.filesystem.GetStat(entry_path, follow_symlinks)

    def lstat(self, entry_path, dir_fd=None):
        """Return the os.stat-like tuple for entry_path, not following
        symlinks.

        Args:
            entry_path: path to filesystem object to retrieve.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `entry_path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Returns:
            the FakeStatResult object corresponding to `entry_path`.

        Raises:
            OSError: if the filesystem object doesn't exist.
        """
        entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
        return self.filesystem.GetStat(entry_path, follow_symlinks=False)

    def remove(self, path, dir_fd=None):
        """Remove the FakeFile object at the specified file path.

        Args:
            path: Path to file to be removed.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if path points to a directory.
            OSError: if path does not exist.
            OSError: if removal failed.
        """
        path = self._path_with_dir_fd(path, self.remove, dir_fd)
        self.filesystem.RemoveFile(path)

    def unlink(self, path, dir_fd=None):
        """Remove the FakeFile object at the specified file path.

        Args:
            path: Path to file to be removed.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if path points to a directory.
            OSError: if path does not exist.
            OSError: if removal failed.
        """
        path = self._path_with_dir_fd(path, self.unlink, dir_fd)
        self.filesystem.RemoveFile(path)

    def rename(self, old_file_path, new_file_path, dir_fd=None):
        """Rename a FakeFile object at old_file_path to new_file_path,
        preserving all properties.
        Also replaces existing new_file_path object, if one existed
        (Unix only).

        Args:
            old_file_path: Path to filesystem object to rename.
            new_file_path: Path to where the filesystem object will live
                after this call.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `old_file_path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if old_file_path does not exist.
            OSError: if new_file_path is an existing directory.
            OSError: if new_file_path is an existing file (Windows only)
            OSError: if new_file_path is an existing file and could not
                be removed (Unix)
            OSError: if `dirname(new_file)` does not exist
            OSError: if the file would be moved to another filesystem
                (e.g. mount point)
        """
        old_file_path = self._path_with_dir_fd(old_file_path, self.rename,
                                               dir_fd)
        self.filesystem.RenameObject(old_file_path, new_file_path)

    if sys.version_info >= (3, 3):
        def replace(self, old_file_path, new_file_path):
            """Renames a FakeFile object at old_file_path to new_file_path,
            preserving all properties.
            Also replaces existing new_file_path object, if one existed.
            New in pyfakefs 3.0.

            Args:
                old_file_path: path to filesystem object to rename
                new_file_path: path to where the filesystem object will live
                    after this call

            Raises:
                OSError: if old_file_path does not exist.
                OSError: if new_file_path is an existing directory.
                OSError: if new_file_path is an existing file and could not
                    be removed
                OSError: if `dirname(new_file)` does not exist
                OSError: if the file would be moved to another filesystem
                    (e.g. mount point)
            """
            self.filesystem.RenameObject(old_file_path, new_file_path,
                                         force_replace=True)

    def rmdir(self, target_directory, dir_fd=None):
        """Remove a leaf Fake directory.

        Args:
            target_directory: (str) Name of directory to remove.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `target_directory` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if target_directory does not exist or is not a directory,
                or as per FakeFilesystem.RemoveObject. Cannot remove '.'.
        """
        target_directory = self._path_with_dir_fd(target_directory,
                                                  self.rmdir, dir_fd)
        self.filesystem.RemoveDirectory(target_directory)

    def removedirs(self, target_directory):
        """Remove a leaf fake directory and all empty intermediate ones.

        Args:
            target_directory: the directory to be removed.

        Raises:
            OSError: if target_directory does not exist or is not a directory.
            OSError: if target_directory is not empty.
        """
        target_directory = self.filesystem.NormalizePath(target_directory)
        directory = self.filesystem.ConfirmDir(target_directory)
        if directory.contents:
            raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty',
                          self.path.basename(target_directory))
        else:
            self.rmdir(target_directory)
        head, tail = self.path.split(target_directory)
        if not tail:
            head, tail = self.path.split(head)
        while head and tail:
            head_dir = self.filesystem.ConfirmDir(head)
            if head_dir.contents:
                break
            self.filesystem.RemoveDirectory(head, allow_symlink=True)
            head, tail = self.path.split(head)

    def mkdir(self, dir_name, mode=PERM_DEF, dir_fd=None):
        """Create a leaf Fake directory.

        Args:
            dir_name: (str) Name of directory to create.
                Relative paths are assumed to be relative to '/'.
            mode: (int) Mode to create directory with.  This argument defaults
                to 0o777. The umask is applied to this mode.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `dir_name` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if the directory name is invalid or parent directory is
                read only or as per FakeFilesystem.AddObject.
        """
        dir_name = self._path_with_dir_fd(dir_name, self.mkdir, dir_fd)
        self.filesystem.MakeDirectory(dir_name, mode)

    def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=None):
        """Create a leaf Fake directory + create any non-existent parent dirs.

        Args:
            dir_name: (str) Name of directory to create.
            mode: (int) Mode to create directory (and any necessary parent
                directories) with. This argument defaults to 0o777.  The umask
                is applied to this mode.
            exist_ok: (boolean) If exist_ok is False (the default), an OSError
                is raised if the target directory already exists.
                New in Python 3.2. New in pyfakefs 2.9.

        Raises:
            OSError: if the directory already exists and exist_ok=False,
                or as per `FakeFilesystem.CreateDirectory()`.
        """
        if exist_ok is None:
            exist_ok = False
        elif sys.version_info < (3, 2):
            raise TypeError(
                "makedir() got an unexpected keyword argument 'exist_ok'")
        self.filesystem.MakeDirectories(dir_name, mode, exist_ok)
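
    # Illustrative sketch, not part of the original module (paths
    # hypothetical):
    #
    #   fake_os.makedirs('/a/b/c')                 # creates /a, /a/b, /a/b/c
    #   fake_os.makedirs('/a/b/c', exist_ok=True)  # no error on Python >= 3.2
    #   fake_os.removedirs('/a/b/c')               # prunes now-empty parents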

    def _path_with_dir_fd(self, path, fct, dir_fd):
        """Return the path considering dir_fd. Raise on invalid parameters."""
        if dir_fd is not None:
            if sys.version_info < (3, 3):
                raise TypeError(
                    "%s() got an unexpected keyword argument 'dir_fd'"
                    % fct.__name__)
            # check if fd is supported for the built-in real function
            real_fct = getattr(os, fct.__name__)
            if real_fct not in self.supports_dir_fd:
                raise NotImplementedError('dir_fd unavailable on this platform')
            if isinstance(path, int):
                raise ValueError(
                    "%s: Can't specify dir_fd without matching path"
                    % fct.__name__)
            if not self.path.isabs(path):
                return self.path.join(
                    self.filesystem.GetOpenFile(dir_fd).GetObject().GetPath(),
                    path)
        return path

    def access(self, path, mode, dir_fd=None, follow_symlinks=None):
        """Check if a file exists and has the specified permissions.

        Args:
            path: (str) Path to the file.
            mode: (int) Permissions represented as a bitwise-OR combination of
                os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.
            follow_symlinks: (bool) If `False` and `path` points to a symlink,
                the link itself is queried instead of the linked object.
                New in Python 3.3. New in pyfakefs 3.0.

        Returns:
            bool, `True` if file is accessible, `False` otherwise.
        """
        if follow_symlinks is not None and sys.version_info < (3, 3):
            raise TypeError(
                "access() got an unexpected keyword argument 'follow_symlinks'")
        path = self._path_with_dir_fd(path, self.access, dir_fd)
        try:
            stat_result = self.stat(path, follow_symlinks=follow_symlinks)
        except OSError as os_error:
            if os_error.errno == errno.ENOENT:
                return False
            raise
        return (mode & ((stat_result.st_mode >> 6) & 7)) == mode

    def chmod(self, path, mode, dir_fd=None, follow_symlinks=None):
        """Change the permissions of a file as encoded in integer mode.

        Args:
            path: (str) Path to the file.
            mode: (int) Permissions.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.
            follow_symlinks: (bool) If `False` and `path` points to a symlink,
                the link itself is queried instead of the linked object.
                New in Python 3.3. New in pyfakefs 3.0.
        """
        if follow_symlinks is None:
            follow_symlinks = True
        elif sys.version_info < (3, 3):
            raise TypeError(
                "chmod() got an unexpected keyword argument 'follow_symlinks'")
        path = self._path_with_dir_fd(path, self.chmod, dir_fd)
        self.filesystem.ChangeMode(path, mode, follow_symlinks)

    def lchmod(self, path, mode):
        """Change the permissions of a file as encoded in integer mode.
        If the file is a link, the permissions of the link are changed.

        Args:
            path: (str) Path to the file.
            mode: (int) Permissions.
        """
        if self.filesystem.is_windows_fs:
            raise NameError("name 'lchmod' is not defined")
        self.filesystem.ChangeMode(path, mode, follow_symlinks=False)

    def utime(self, path, times=None, ns=None, dir_fd=None,
              follow_symlinks=None):
        """Change the access and modified times of a file.

        Args:
            path: (str) Path to the file.
            times: 2-tuple of int or float numbers, of the form (atime, mtime)
                which is used to set the access and modified times in seconds.
                If None, both times are set to the current time.
            ns: 2-tuple of int numbers, of the form (atime, mtime) which is
                used to set the access and modified times in nanoseconds.
                If None, both times are set to the current time.
                New in Python 3.3. New in pyfakefs 3.3.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.
            follow_symlinks: (bool) If `False` and `path` points to a symlink,
                the link itself is queried instead of the linked object.
                New in Python 3.3. New in pyfakefs 3.0.

        Raises:
            TypeError: If anything other than the expected types is specified
                in the passed `times` or `ns` tuple, or if the tuple length is
                not equal to 2.
            ValueError: If both times and ns are specified.
        """
        if follow_symlinks is None:
            follow_symlinks = True
        elif sys.version_info < (3, 3):
            raise TypeError(
                "utime() got an unexpected keyword argument 'follow_symlinks'")
        path = self._path_with_dir_fd(path, self.utime, dir_fd)
        if ns is not None and sys.version_info < (3, 3):
            raise TypeError("utime() got an unexpected keyword argument 'ns'")

        self.filesystem.UpdateTime(path, times, ns, follow_symlinks)

    def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None):
        """Set ownership of a faked file.

        Args:
            path: (str) Path to the file or directory.
            uid: (int) Numeric uid to set the file or directory to.
            gid: (int) Numeric gid to set the file or directory to.
            dir_fd: (int) If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.
            follow_symlinks: (bool) If `False` and path points to a symlink,
                the link itself is changed instead of the linked object.
                New in Python 3.3. New in pyfakefs 3.0.

        Raises:
            OSError: if path does not exist.

        `None` is also allowed for `uid` and `gid`.  This permits `os.rename`
        to use `os.chown` even when the source file `uid` and `gid` are
        `None` (unset).
        """
        if follow_symlinks is None:
            follow_symlinks = True
        elif sys.version_info < (3, 3):
            raise TypeError(
                "chown() got an unexpected keyword argument 'follow_symlinks'")
        path = self._path_with_dir_fd(path, self.chown, dir_fd)
        try:
            file_object = self.filesystem.ResolveObject(path, follow_symlinks,
                                                        allow_fd=True)
        except IOError as io_error:
            if io_error.errno == errno.ENOENT:
                raise OSError(errno.ENOENT,
                              'No such file or directory in fake filesystem',
                              path)
            raise
        if not ((isinstance(uid, int) or uid is None) and
                (isinstance(gid, int) or gid is None)):
            raise TypeError('An integer is required')
        if uid != -1:
            file_object.st_uid = uid
        if gid != -1:
            file_object.st_gid = gid

    def mknod(self, filename, mode=None, device=None, dir_fd=None):
        """Create a filesystem node named 'filename'.

        Does not support device special files or named pipes as the real os
        module does.

        Args:
            filename: (str) Name of the file to create
            mode: (int) Permissions to use and type of file to be created.
                Default permissions are 0o666.  Only the stat.S_IFREG file
                type is supported by the fake implementation.  The umask is
                applied to this mode.
            device: not supported in fake implementation
            dir_fd: If not `None`, the file descriptor of a directory,
                with `filename` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if called with unsupported options or the file can not
                be created.
        """
        if self.filesystem.is_windows_fs:
            raise AttributeError("module 'os' has no attribute 'mknod'")
        if mode is None:
            mode = stat.S_IFREG | PERM_DEF_FILE
        if device or not mode & stat.S_IFREG:
            raise OSError(errno.ENOENT,
                          'Fake os mknod implementation only supports '
                          'regular files.')

        filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)
        head, tail = self.path.split(filename)
        if not tail:
            if self.filesystem.Exists(head):
                raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
                    os.strerror(errno.EEXIST), filename))
            raise OSError(errno.ENOENT, 'Fake filesystem: %s: %s' % (
                os.strerror(errno.ENOENT), filename))
        if tail in ('.', u'.', '..', u'..'):
            raise OSError(errno.ENOENT, 'Fake filesystem: %s: %s' % (
                os.strerror(errno.ENOENT), filename))
        if self.filesystem.Exists(filename):
            raise OSError(errno.EEXIST, 'Fake filesystem: %s: %s' % (
                os.strerror(errno.EEXIST), filename))
        try:
            self.filesystem.AddObject(head, FakeFile(
                tail, mode & ~self.filesystem.umask,
                filesystem=self.filesystem))
        except IOError as e:
            raise OSError(e.errno, 'Fake filesystem: %s: %s' % (
                os.strerror(e.errno), filename))

    def symlink(self, link_target, path, dir_fd=None):
        """Creates the specified symlink, pointed at the specified link target.

        Args:
            link_target: The target of the symlink.
            path: Path to the symlink to create.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `link_target` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Raises:
            OSError: if the file already exists.
        """
        link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)
        self.filesystem.CreateLink(path, link_target, create_missing_dirs=False)

    def link(self, oldpath, newpath, dir_fd=None):
        """Create a hard link at newpath, pointing at oldpath.
        New in pyfakefs 2.9.

        Args:
            oldpath: An existing link to the target file.
            newpath: The destination path to create a new link at.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `oldpath` being relative to this directory.
                New in Python 3.3. New in pyfakefs 3.3.

        Returns:
            the FakeFile object referred to by `oldpath`.

        Raises:
            OSError: if something already exists at newpath.
            OSError: if the parent directory doesn't exist.
            OSError: if on Windows before Python 3.2.
        """
        oldpath = self._path_with_dir_fd(oldpath, self.link, dir_fd)
        self.filesystem.CreateHardLink(oldpath, newpath)

    def fsync(self, file_des):
        """Perform fsync for a fake file (in other words, do nothing).
        New in pyfakefs 2.9.

        Args:
            file_des: file descriptor of the open file.

        Raises:
            OSError: file_des is an invalid file descriptor.
            TypeError: file_des is not an integer.
        """
        # raise on invalid file descriptor
        self.filesystem.GetOpenFile(file_des)

    def fdatasync(self, file_des):
        """Perform fdatasync for a fake file (in other words, do nothing).
        New in pyfakefs 2.9.

        Args:
            file_des: file descriptor of the open file.

        Raises:
            OSError: file_des is an invalid file descriptor.
            TypeError: file_des is not an integer.
        """
        # raise on invalid file descriptor
        self.filesystem.GetOpenFile(file_des)

    def __getattr__(self, name):
        """Forwards any unfaked calls to the standard os module."""
        return getattr(self._os_module, name)


class FakeIoModule(object):
    """Uses FakeFilesystem to provide a fake io module replacement.
    New in pyfakefs 2.9.

    Currently only used to wrap `io.open()` which is an alias to `open()`.

    You need a fake_filesystem to use this:
    filesystem = fake_filesystem.FakeFilesystem()
    my_io_module = fake_filesystem.FakeIoModule(filesystem)
    """

    def __init__(self, filesystem):
        """
        Args:
            filesystem: FakeFilesystem used to provide file system information
        """
        self.filesystem = filesystem
        self._io_module = io

    def open(self, file_path, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None, closefd=True, opener=None):
        """Redirect the call to FakeFileOpen.
        See FakeFileOpen.Call() for description.
        """
        if opener is not None and sys.version_info < (3, 3):
            raise TypeError(
                "open() got an unexpected keyword argument 'opener'")
        fake_open = FakeFileOpen(self.filesystem, use_io=True)
        return fake_open(file_path, mode, buffering, encoding, errors,
                         newline, closefd, opener)

    def __getattr__(self, name):
        """Forwards any unfaked calls to the standard io module."""
        return getattr(self._io_module, name)
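

# Illustrative usage sketch, not part of the original module: FakeIoModule
# mirrors io.open() on top of a fake filesystem. The path is hypothetical and
# the function is only ever called explicitly.
def _example_fake_io_usage():  # pragma: no cover
    fs = FakeFilesystem()
    fake_io = FakeIoModule(fs)
    with fake_io.open('/greeting.txt', 'w') as f:
        f.write('hello')
    with fake_io.open('/greeting.txt') as f:
        assert f.read() == 'hello'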


class FakeFileWrapper(object):
    """Wrapper for a stream object for use by a FakeFile object.

    If the wrapper has any data written to it, it will propagate to
    the FakeFile object on close() or flush().
    """

    def __init__(self, file_object, file_path, update=False, read=False,
                 append=False, delete_on_close=False, filesystem=None,
                 newline=None, binary=True, closefd=True, encoding=None,
                 errors=None, raw_io=False, is_stream=False, use_io=True):
        self._file_object = file_object
        self._file_path = file_path
        self._append = append
        self._read = read
        self.allow_update = update
        self._closefd = closefd
        self._file_epoch = file_object.epoch
        self.raw_io = raw_io
        self._binary = binary
        self.is_stream = is_stream
        contents = file_object.byte_contents
        self._encoding = encoding
        errors = errors or 'strict'
        if encoding:
            file_wrapper = FakeFileWrapper(
                file_object, file_path, update, read, append,
                delete_on_close=False, filesystem=filesystem,
                newline=None, binary=True, closefd=closefd, is_stream=True)
            codec_info = codecs.lookup(encoding)
            self._io = codecs.StreamReaderWriter(
                file_wrapper, codec_info.streamreader,
                codec_info.streamwriter, errors)
        else:
            if not binary and sys.version_info >= (3, 0):
                io_class = io.StringIO
            else:
                io_class = io.BytesIO
            io_args = {} if binary else {'newline': newline}
            if contents and not binary:
                contents = contents.decode(
                    encoding or locale.getpreferredencoding(False),
                    errors=errors)
            if contents and not update:
                self._io = io_class(contents, **io_args)
            else:
                self._io = io_class(**io_args)

        if contents:
            if update:
                if not encoding:
                    self._io.write(contents)
                if not append:
                    self._io.seek(0)
                else:
                    self._read_whence = 0
                    if read and not use_io:
                        self._read_seek = 0
                    else:
                        self._read_seek = self._io.tell()
        else:
            self._read_whence = 0
            self._read_seek = 0

        if delete_on_close:
            assert filesystem, 'delete_on_close=True requires filesystem'
        self._filesystem = filesystem
        self.delete_on_close = delete_on_close

        self.name = file_object.opened_as
        self.filedes = None

# line: 4121
def __enter__(self):
# line: 4122
"To support usage of this fake file with the 'with' statement."
# line: 4123
return self
# line: 4125
def __exit__(self, type, value, traceback):
# line: 4126
"To support usage of this fake file with the 'with' statement."
# line: 4127
self.close()
# line: 4129
def _raise(self, message):
# line: 4130
if self.raw_io:
# line: 4131
raise OSError(errno.EBADF, message)
# line: 4132
if (sys.version_info < (3, 0)):
# line: 4133
raise IOError(message)
# line: 4134
raise io.UnsupportedOperation(message)
# line: 4136
def GetObject(self):
# line: 4137
'Return the FakeFile object that is wrapped by the current instance.'
# line: 4138
return self._file_object
# line: 4140
def fileno(self):
# line: 4141
'Return the file descriptor of the file object.'
# line: 4142
return self.filedes
# line: 4144
def close(self):
# line: 4145
'Close the file.'
# line: 4147
if (self not in self._filesystem.open_files):
# line: 4148
return
# line: 4150
if (self.allow_update and (not self.raw_io)):
# line: 4151
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4152
if self._closefd:
# line: 4153
self._filesystem.CloseOpenFile(self.filedes)
# line: 4154
if self.delete_on_close:
# line: 4155
self._filesystem.RemoveObject(self.GetObject().GetPath())
# line: 4157
def flush(self):
# line: 4158
"Flush file contents to 'disk'."
# line: 4159
self._check_open_file()
# line: 4160
if self.allow_update:
# line: 4161
self._io.flush()
# line: 4162
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4163
self._file_epoch = self._file_object.epoch
# line: 4165
def seek(self, offset, whence=0):
# line: 4166
"Move read/write pointer in 'file'."
# line: 4167
self._check_open_file()
# line: 4168
if (not self._append):
# line: 4169
self._io.seek(offset, whence)
else:
# line: 4171
self._read_seek = offset
# line: 4172
self._read_whence = whence
# line: 4173
if (not self.is_stream):
# line: 4174
self.flush()
# line: 4176
def tell(self):
# line: 4181
"Return the file's current position.\n\n Returns:\n int, file's current position in bytes.\n "
# line: 4182
self._check_open_file()
# line: 4183
self._flush_for_read()
# line: 4184
if (not self._append):
# line: 4185
return self._io.tell()
# line: 4186
if self._read_whence:
# line: 4187
write_seek = self._io.tell()
# line: 4188
self._io.seek(self._read_seek, self._read_whence)
# line: 4189
self._read_seek = self._io.tell()
# line: 4190
self._read_whence = 0
# line: 4191
self._io.seek(write_seek)
# line: 4192
return self._read_seek
# line: 4194
def _flush_for_read(self):
# line: 4196
if self._flushes_after_read():
# line: 4197
self.flush()
# line: 4199
def _flushes_after_read(self):
# line: 4200
return ((not self.is_stream) and ((not self._filesystem.is_windows_fs) or (sys.version_info[0] > 2)))
# line: 4204
def _sync_io(self):
# line: 4205
'Update the stream with changes to the file object contents.'
# line: 4206
if (self._file_epoch == self._file_object.epoch):
# line: 4207
return
# line: 4209
if isinstance(self._io, io.BytesIO):
# line: 4210
contents = self._file_object.byte_contents
else:
# line: 4212
contents = self._file_object.contents
# line: 4214
is_stream_reader_writer = isinstance(self._io, codecs.StreamReaderWriter)
# line: 4215
if is_stream_reader_writer:
# line: 4216
self._io.stream.allow_update = True
# line: 4217
whence = self._io.tell()
# line: 4218
self._io.seek(0)
# line: 4219
self._io.truncate()
# line: 4220
self._io.write(contents)
# line: 4221
if self._append:
# line: 4222
self._io.seek(0, os.SEEK_END)
else:
# line: 4224
self._io.seek(whence)
# line: 4226
if is_stream_reader_writer:
# line: 4227
self._io.stream.allow_update = False
# line: 4228
self._file_epoch = self._file_object.epoch
# line: 4230
def _ReadWrapper(self, name):
# line: 4241
'Wrap a stream attribute in a read wrapper.\n\n Returns a read_wrapper which tracks our own read pointer since the\n stream object has no concept of a different read and write pointer.\n\n Args:\n name: the name of the attribute to wrap. Should be a read call.\n\n Returns:\n either a read_error or read_wrapper function.\n '
# line: 4242
io_attr = getattr(self._io, name)
# line: 4244
def read_wrapper(*args, **kwargs):
# line: 4256
"Wrap all read calls to the stream object.\n\n We do this to track the read pointer separate from the write\n pointer. Anything that wants to read from the stream object\n while we're in append mode goes through this.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n "
# line: 4257
self._io.seek(self._read_seek, self._read_whence)
# line: 4258
ret_value = io_attr(*args, **kwargs)
# line: 4259
self._read_seek = self._io.tell()
# line: 4260
self._read_whence = 0
# line: 4261
self._io.seek(0, 2)
# line: 4262
return ret_value
# line: 4264
return read_wrapper
# line: 4266
def _OtherWrapper(self, name, writing):
# line: 4274
'Wrap a stream attribute in an other_wrapper.\n\n Args:\n name: the name of the stream attribute to wrap.\n\n Returns:\n other_wrapper which is described below.\n '
# line: 4275
io_attr = getattr(self._io, name)
# line: 4277
def other_wrapper(*args, **kwargs):
# line: 4289
'Wrap all other calls to the stream Object.\n\n We do this to track changes to the write pointer. Anything that\n moves the write pointer in a file open for appending should move\n the read pointer as well.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n '
# line: 4290
write_seek = self._io.tell()
# line: 4291
ret_value = io_attr(*args, **kwargs)
# line: 4292
if (write_seek != self._io.tell()):
# line: 4293
self._read_seek = self._io.tell()
# line: 4294
self._read_whence = 0
# line: 4295
if ((not writing) or (sys.version_info >= (3,))):
# line: 4296
return ret_value
# line: 4298
return other_wrapper
# line: 4300
def _TruncateWrapper(self):
# line: 4305
'Wrap truncate() to allow flush after truncate.\n\n Returns:\n wrapper which is described below.\n '
# line: 4306
io_attr = getattr(self._io, 'truncate')
# line: 4308
def truncate_wrapper(*args, **kwargs):
# line: 4309
'Wrap truncate call to call flush after truncate.'
# line: 4310
if self._append:
# line: 4311
self._io.seek(self._read_seek, self._read_whence)
# line: 4312
size = io_attr(*args, **kwargs)
# line: 4313
self.flush()
# line: 4314
if (not self.is_stream):
# line: 4315
self._file_object.SetSize(size)
# line: 4316
buffer_size = len(self._io.getvalue())
# line: 4317
if (buffer_size < size):
# line: 4318
self._io.seek(buffer_size)
# line: 4319
self._io.write(('\x00' * (size - buffer_size)))
# line: 4320
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4321
if (sys.version_info >= (3,)):
# line: 4322
return size
# line: 4324
return truncate_wrapper
# line: 4326
def _WriteWrapper(self, name):
# line: 4331
'Wrap write() to adapt return value for Python 2.\n\n Returns:\n wrapper which is described below.\n '
# line: 4332
io_attr = getattr(self._io, name)
# line: 4334
def write_wrapper(*args, **kwargs):
# line: 4335
'Wrap trunctae call to call flush after truncate.'
# line: 4336
ret_value = io_attr(*args, **kwargs)
# line: 4337
if (sys.version_info >= (3,)):
# line: 4338
return ret_value
# line: 4340
return write_wrapper
# line: 4342
def Size(self):
# line: 4343
'Return the content size in bytes of the wrapped file.'
# line: 4344
return self._file_object.st_size
# line: 4346
def __getattr__(self, name):
# line: 4347
if self._file_object.IsLargeFile():
# line: 4348
raise FakeLargeFileIoException(self._file_path)
# line: 4350
reading = (name.startswith('read') or (name == 'next'))
# line: 4351
truncate = (name == 'truncate')
# line: 4352
writing = (name.startswith('write') or truncate)
# line: 4353
if (reading or writing):
# line: 4354
self._check_open_file()
# line: 4355
if ((not self._read) and reading):
# line: 4356
def read_error(*args, **kwargs):
# line: 4357
'Throw an error unless the argument is zero.'
# line: 4358
if (args and (args[0] == 0)):
# line: 4359
if (self._filesystem.is_windows_fs and self.raw_io):
# line: 4360
return ('' if self._binary else u'')
# line: 4361
self._raise('File is not open for reading.')
# line: 4363
return read_error
# line: 4365
if ((not self.allow_update) and writing):
# line: 4366
def write_error(*args, **kwargs):
# line: 4367
'Throw an error.'
# line: 4368
if self.raw_io:
# line: 4369
if (self._filesystem.is_windows_fs and args and (len(args[0]) == 0)):
# line: 4370
return 0
# line: 4371
self._raise('File is not open for writing.')
# line: 4373
return write_error
# line: 4375
if reading:
# line: 4376
self._sync_io()
# line: 4377
self._flush_for_read()
# line: 4378
if truncate:
# line: 4379
return self._TruncateWrapper()
# line: 4380
if self._append:
# line: 4381
if reading:
# line: 4382
return self._ReadWrapper(name)
else:
# line: 4384
return self._OtherWrapper(name, writing)
# line: 4385
if writing:
# line: 4386
return self._WriteWrapper(name)
# line: 4388
return getattr(self._io, name)
# line: 4390
def _check_open_file(self):
# line: 4391
if ((not self.is_stream) and (not (self in self._filesystem.open_files))):
# line: 4392
raise ValueError('I/O operation on closed file')
# line: 4394
def __iter__(self):
# line: 4395
if (not self._read):
# line: 4396
self._raise('File is not open for reading')
# line: 4397
return self._io.__iter__()
# line: 4400
class FakeDirWrapper(object):
    """Wrapper for a FakeDirectory object to be used in open files list."""

    def __init__(self, file_object, file_path, filesystem):
        self._file_object = file_object
        self._file_path = file_path
        self._filesystem = filesystem
        self.filedes = None

    def GetObject(self):
        """Return the FakeFile object that is wrapped by the current instance."""
        return self._file_object

    def fileno(self):
        """Return the file descriptor of the file object."""
        return self.filedes

    def close(self):
        """Close the directory."""
        self._filesystem.CloseOpenFile(self.filedes)

class FakeFileOpen(object):
    """Faked `file()` and `open()` function replacements.

    Returns FakeFile objects in a FakeFilesystem in place of the `file()`
    or `open()` function.
    """
    __name__ = 'FakeFileOpen'

    def __init__(self, filesystem, delete_on_close=False, use_io=False,
                 raw_io=False):
        """init.

        Args:
          filesystem: FakeFilesystem used to provide file system information
          delete_on_close: optional boolean, deletes file on close()
          use_io: if True, the io.open() version is used (ignored for Python 3,
                  where io.open() is an alias to open() )
        """
        self.filesystem = filesystem
        self._delete_on_close = delete_on_close
        self._use_io = (use_io or sys.version_info >= (3, 0) or
                        platform.python_implementation() == 'PyPy')
        self.raw_io = raw_io

    def __call__(self, *args, **kwargs):
        """Redirects calls to file() or open() to appropriate method."""
        if self._use_io:
            return self.Call(*args, **kwargs)
        else:
            return self._call_ver2(*args, **kwargs)

    def _call_ver2(self, file_path, mode='r', buffering=-1, flags=None,
                   open_modes=None):
        """Limits args of open() or file() for Python 2.x versions."""
        # Backwards compatibility, mode arg used to be named flags
        mode = flags or mode
        return self.Call(file_path, mode, buffering, open_modes=open_modes)

    def Call(self, file_, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None, closefd=True, opener=None,
             open_modes=None):
        """Return a file-like object with the contents of the target file object.

        Args:
          file_: path to target file or a file descriptor.
          mode: additional file modes. All r/w/a/x r+/w+/a+ modes are supported.
            't', and 'U' are ignored, e.g., 'wU' is treated as 'w'. 'b' sets
            binary mode, no end of line translations in StringIO.
          buffering: ignored. (Used for signature compliance with __builtin__.open)
          encoding: the encoding used to encode unicode strings / decode bytes.
            New in pyfakefs 2.9.
          errors: ignored, this relates to encoding.
          newline: controls universal newlines, passed to stream object.
          closefd: if a file descriptor rather than file name is passed, and set
            to false, then the file descriptor is kept open when file is closed.
          opener: not supported.
          open_modes: Modes for opening files if called from low-level API

        Returns:
          a file-like object containing the contents of the target file.

        Raises:
          IOError: if the target object is a directory, the path is invalid or
            permission is denied.
        """
        orig_modes = mode  # Save original modes for error messages.
        # Binary mode for non 3.x or set by mode
        binary = sys.version_info < (3, 0) or 'b' in mode
        # Normalize modes. Handle 't' and 'U'.
        mode = mode.replace('t', '').replace('b', '')
        mode = mode.replace('rU', 'r').replace('U', 'r')

        if not self.raw_io:
            if mode not in _OPEN_MODE_MAP:
                raise ValueError('Invalid mode: %r' % orig_modes)
            open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])

        file_object = None
        filedes = None
        # opening a file descriptor
        if isinstance(file_, int):
            filedes = file_
            wrapper = self.filesystem.GetOpenFile(filedes)
            self._delete_on_close = wrapper.delete_on_close
            file_object = self.filesystem.GetOpenFile(filedes).GetObject()
            file_path = file_object.name
        else:
            file_path = file_
            real_path = self.filesystem.ResolvePath(file_path, raw_io=self.raw_io)
            if self.filesystem.Exists(file_path):
                file_object = self.filesystem.GetObjectFromNormalizedPath(real_path)
            closefd = True

        error_class = OSError if self.raw_io else IOError
        if open_modes.must_not_exist and (file_object or self.filesystem.IsLink(file_path)):
            raise error_class(errno.EEXIST, 'File exists', file_path)
        if file_object:
            if (open_modes.can_read and not (file_object.st_mode & PERM_READ)
                    or open_modes.can_write and not (file_object.st_mode & PERM_WRITE)):
                raise error_class(errno.EACCES, 'Permission denied', file_path)
            if open_modes.can_write:
                if open_modes.truncate:
                    file_object.SetContents('')
        else:
            if open_modes.must_exist:
                raise error_class(errno.ENOENT, 'No such file or directory', file_path)
            file_object = self.filesystem.CreateFileInternally(
                real_path, create_missing_dirs=False, apply_umask=True,
                raw_io=self.raw_io)

        if stat.S_ISDIR(file_object.st_mode):
            if self.filesystem.is_windows_fs:
                raise OSError(errno.EPERM, 'Fake file object: is a directory', file_path)
            else:
                raise error_class(errno.EISDIR, 'Fake file object: is a directory', file_path)

        # If you print obj.name, the argument to open() must be printed.
        # Not the abspath, not the filename, but the actual argument.
        file_object.opened_as = file_path

        fakefile = FakeFileWrapper(file_object,
                                   file_path,
                                   update=open_modes.can_write,
                                   read=open_modes.can_read,
                                   append=open_modes.append,
                                   delete_on_close=self._delete_on_close,
                                   filesystem=self.filesystem,
                                   newline=newline,
                                   binary=binary,
                                   closefd=closefd,
                                   encoding=encoding,
                                   errors=errors,
                                   raw_io=self.raw_io,
                                   use_io=self._use_io)
        if filedes is not None:
            fakefile.filedes = filedes
            # replace the file wrapper
            self.filesystem.open_files[filedes] = fakefile
        else:
            fakefile.filedes = self.filesystem.AddOpenFile(fakefile)
        return fakefile

def _RunDoctest():
    import doctest
    from pyfakefs import fake_filesystem
    return doctest.testmod(fake_filesystem)


if __name__ == '__main__':
    _RunDoctest()
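# A minimal usage sketch (commented out so the doctest run above is
# unaffected; FakeFilesystem is defined earlier in this module, the path
# and contents below are illustrative):
#   filesystem = FakeFilesystem()
#   fake_open = FakeFileOpen(filesystem)
#   with fake_open('/foo/bar.txt', 'w') as f:
#       f.write('hello')
#   assert fake_open('/foo/bar.txt').read() == 'hello'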
|
[
"[email protected]"
] | |
181dec8410b08991baad94234ae2a3e44e9c6690
|
29739c6adeebc19649265228c14e52061e4fabc3
|
/dist/SDLasn1.py
|
3675fb42fc08c7720a68d289f8fe8350d5bdb23a
|
[] |
no_license
|
Hansel-Dsilva/pyAsnProject
|
6670ed78fa1f39b642fb75c99f03942cddd50e46
|
15253ecc4ddc433c812cc31015198ab35ad8b47b
|
refs/heads/master
| 2020-06-18T21:45:18.726967 | 2019-07-19T13:41:18 | 2019-07-19T13:41:18 | 196,461,654 | 3 | 1 | null | 2019-07-13T13:50:39 | 2019-07-11T20:34:29 |
Python
|
UTF-8
|
Python
| false | false | 627 |
py
|
print("Press 0 to display question list")
ls1 = []
for i in range(66):
ls1.append(str(i))
while 1:
Qno = input("Enter question no. : ")
if Qno not in ls1:
print("INVALID INPUT! PLEASE REFER THE QUESTION LIST BY ENTERING '0'")
continue
elif not int(Qno):
f = open('q.txt', 'r')
print(f.read())
f.close()
else:
try:
exec(open(Qno + ".py").read())
except FileNotFoundError:
print("Oops! Looks like " + Qno + " hasn't been solved yet.\nContribute by uploading " + Qno + ".py to https://github.com/Hansel-Dsilva/pyAsnProject/dist")
|
[
"[email protected]"
] | |
92901aec9b80ab3e8ae140686ef6e842b467ae45
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2956/60622/299864.py
|
f11b43e5b8b9f78034e02d9ce7ba11099f2418fb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 89 |
py
|
a=input()
b=input()
if a=="2" and b=="ab":
print(675)
else:
print(a)
print(b)
|
[
"[email protected]"
] | |
345e5d4b2a041c357a5472fc547e6c265a127ba6
|
18779aa8487d8367c2e3d124cd95ffcc07a42b1d
|
/assignment_1/131.py
|
1f9e09f6d51f0a5f351a20e46435a9b3aa23106a
|
[] |
no_license
|
Yiren-Shen/Machine_Learning
|
f8d590a265a2839babd912a6b01cc84d0d3c6894
|
d7a499bab14d0944da90292b174d769772ce19c6
|
refs/heads/master
| 2021-07-04T04:51:37.198064 | 2017-09-25T19:47:07 | 2017-09-25T19:47:07 | 104,794,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,435 |
py
|
import numpy as np
import tensorflow as tf
np.random.seed(521)
Data = np.linspace(1.0, 10.0, num=100)[:, np. newaxis]
Target = np.sin(Data) + 0.1 * np.power(Data, 2) + 0.5 * np.random.randn(100, 1)
randIdx = np.arange(100)
np.random.shuffle(randIdx)
trainData, trainTarget = Data[randIdx[:80]], Target[randIdx[:80]]
validData, validTarget = Data[randIdx[80:90]], Target[randIdx[80:90]]
testData, testTarget = Data[randIdx[90:100]], Target[randIdx[90:100]]
def GetDist(v1, v2):
v1 = tf.constant(v1)
v2 = tf.constant(v2)
expanded_v1 = tf.expand_dims(v1, 0) # shape: (B,N) -> (1, B, N)
expanded_v2 = tf.expand_dims(v2, 1) # shape: (C,N) -> (C, 1, N)
    diff = tf.sub(expanded_v1, expanded_v2)  # subtract with broadcasting (renamed tf.subtract in TF >= 1.0)
sqr = tf.square(diff)
dist = tf.reduce_sum(sqr, 2) # sum over N
return dist
def GetIdx(dist, k):
indices = tf.nn.top_k(-dist, k).indices
return indices
# set the value of k
k = 5
# get the distance matrix
dist = GetDist(trainData, testData)
# get the indices of the k nearest training points to the test points
indices = GetIdx(dist, k)
with tf.Session() as sess:
indices = sess.run(indices)
# initialize the responsibilities of all the training points
r = np.zeros([testData.size, trainData.size])
# set the responsibilities of the k nearest training points to be 1/k
# r[indices] = 1. / k
for i in range(0, testData.size):
r[i, indices[i]] = 1. / k
print(r)
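# A possible completion (not in the original script): with the
# responsibility matrix r, the k-NN regression prediction for the test
# points would be the responsibility-weighted average of the training
# targets, e.g.:
#   prediction = np.dot(r, trainTarget)  # shape: (num_test, 1)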
|
[
"[email protected]"
] | |
6b17dc10db7ef000a03c12afdf0d7cd7b9821e29
|
4904acd900496b4883c2f5b4aa6b45d1ef6654c0
|
/graphgallery/datasets/tu_dataset.py
|
a2979a79643bdb0f96d5d8b81ba2af4af7188b33
|
[
"MIT"
] |
permissive
|
blindSpoter01/GraphGallery
|
aee039edd759be9272d123463b0ad73a57e561c7
|
e41caeb32a07da95364f15b85cad527a67763255
|
refs/heads/master
| 2023-06-17T11:42:27.169751 | 2021-07-15T03:07:39 | 2021-07-15T03:07:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,887 |
py
|
import os
import glob
import requests
import os.path as osp
import numpy as np
import pickle as pkl
import pandas as pd
from urllib.error import URLError
from typing import Optional, List
from .in_memory_dataset import InMemoryDataset
from ..data.edge_graph import EdgeGraph
from ..data.io import makedirs, extractall, remove
_DATASET_URL = 'https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets'
_DATASET_CLEAN_URL = 'https://raw.githubusercontent.com/nd7141/graph_datasets/master/datasets'
class TUDataset(InMemoryDataset):
r"""A variety of graph kernel benchmark datasets, *.e.g.* "IMDB-BINARY",
"REDDIT-BINARY" or "PROTEINS", collected from the `TU Dortmund University
<https://chrsmrrs.github.io/datasets>`_.
In addition, this dataset wrapper provides `cleaned dataset versions
<https://github.com/nd7141/graph_datasets>`_ as motivated by the
`"Understanding Isomorphism Bias in Graph Data Sets"
<https://arxiv.org/abs/1910.12091>`_ paper, containing only non-isomorphic
graphs.
"""
def __init__(self,
name,
root=None,
*,
transform=None,
verbose=True,
url=None,
remove_download=True):
if name.endswith('_clean'):
name = name[:-6]
self._url = _DATASET_CLEAN_URL
else:
self._url = _DATASET_URL
super().__init__(name=name, root=root,
transform=transform,
verbose=verbose, url=url,
remove_download=remove_download)
@staticmethod
def available_datasets():
try:
return [
d[:-4] for d in pd.read_html(_DATASET_URL)
[0].Name[2:-1].values.tolist()
]
except URLError:
# No internet, don't panic
print('No connection. See {}'.format(_DATASET_URL))
return []
def _download(self):
req = requests.get(self.url)
if req.status_code == 404:
raise ValueError(
f"Unknown dataset {self.name}. See '{self.__class__.__name__}.available_datasets()'"
" for a list of available datasets.")
makedirs(self.download_dir)
with open(self.download_paths[0], 'wb') as f:
f.write(req.content)
extractall(self.download_paths, osp.split(self.download_dir)[0])
if self.remove_download:
remove(self.download_paths)
def _process(self):
folder = self.download_dir
prefix = self.name
files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))
names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]
edge_index = genfromtxt(osp.join(folder, prefix + '_A.txt'),
dtype=np.int64).T - 1
node_graph_label = genfromtxt(osp.join(folder, prefix + '_graph_indicator.txt'),
dtype=np.int64) - 1
edge_graph_label = node_graph_label[edge_index[0]]
node_attr = node_label = None
if 'node_attributes' in names:
node_attr = genfromtxt(osp.join(folder,
prefix + '_node_attributes.txt'),
dtype=np.float32)
if 'node_labels' in names:
node_label = genfromtxt(osp.join(folder,
prefix + '_node_labels.txt'),
dtype=np.int64)
node_label = node_label - node_label.min(0)
edge_attr = edge_label = None
if 'edge_attributes' in names:
edge_attr = genfromtxt(osp.join(folder,
prefix + '_edge_attributes.txt'),
dtype=np.float32)
if 'edge_labels' in names:
edge_label = genfromtxt(osp.join(folder,
prefix + '_edge_labels.txt'),
dtype=np.int64)
edge_label = edge_label - edge_label.min(0)
graph_attr = graph_label = None
if 'graph_attributes' in names: # Regression problem.
graph_attr = np.genfromtxt(osp.join(
folder, prefix + '_graph_attributes.txt'),
dtype=np.float32)
if 'graph_labels' in names: # Classification problem.
graph_label = np.genfromtxt(osp.join(folder,
prefix + '_graph_labels.txt'),
dtype=np.int64)
_, graph_label = np.unique(graph_label, return_inverse=True)
graph = EdgeGraph(edge_index,
edge_attr=edge_attr,
edge_label=edge_label,
edge_graph_label=edge_graph_label,
node_attr=node_attr,
node_label=node_label,
node_graph_label=node_graph_label,
graph_attr=graph_attr,
graph_label=graph_label)
cache = {'graph': graph}
with open(self.process_path, 'wb') as f:
pkl.dump(cache, f)
return cache
@property
def download_dir(self):
return osp.join(self.root, "TU", self.name)
def split_graphs(self,
train_size=None,
val_size=None,
test_size=None,
split_by=None,
random_state: Optional[int] = None):
raise NotImplementedError
@property
def url(self) -> str:
return '{}/{}.zip'.format(self._url, self.name)
@property
def process_filename(self):
return f'{self.name}.pkl'
@property
def raw_filenames(self) -> List[str]:
names = ['A', 'graph_indicator'] # and more
return ['{}_{}.txt'.format(self.name, name) for name in names]
@property
def download_paths(self):
return [osp.join(self.download_dir, self.name + '.zip')]
@property
def raw_paths(self) -> List[str]:
return [
osp.join(self.download_dir, raw_filename)
for raw_filename in self.raw_filenames
]
def genfromtxt(path, sep=',', start=0, end=None, dtype=None, device=None):
# with open(path, 'r') as f:
# src = f.read().split('\n')[:-1]
# src = [[float(x) for x in line.split(sep)[start:end]] for line in src]
# src = np.asarray(src, dtype=dtype).squeeze()
# # return src
return np.loadtxt(path, delimiter=sep).astype(dtype).squeeze()
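# A minimal usage sketch (the dataset name and root are illustrative
# assumptions; only available_datasets() and _process() are defined above):
#   print(TUDataset.available_datasets())     # scrape the dataset index
#   dataset = TUDataset('MUTAG', root='~/graphgallery_data')
#   graph = dataset._process()['graph']       # EdgeGraph built above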
|
[
"[email protected]"
] | |
d98451899f2aa64eecaccc6ec525092be4d44113
|
6d06d62bf4234b6664b5da9739bda32daf8f49c6
|
/src/transformers/models/convnext/modeling_convnext.py
|
af4ec4e7f81b9b55ff295bd3973b80b4a0b38d46
|
[
"Apache-2.0"
] |
permissive
|
miyoungko/pytorch-pretrained-BERT
|
dc4feb91b2807ef800be6486c0427cca2b8cd2ec
|
dd6fb1319b79d9cc8db5838abfec6be5f2bc28a2
|
refs/heads/main
| 2022-12-13T20:02:04.038727 | 2022-11-30T18:49:34 | 2022-11-30T18:49:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,046 |
py
|
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ConvNext model."""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convnext import ConvNextConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ConvNextConfig"
_FEAT_EXTRACTOR_FOR_DOC = "ConvNextImageProcessor"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/convnext-tiny-224",
# See all ConvNext models at https://huggingface.co/models?filter=convnext
]
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
class ConvNextDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, x: torch.Tensor) -> torch.Tensor:
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return "p={}".format(self.drop_prob)
class ConvNextLayerNorm(nn.Module):
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError(f"Unsupported data format: {self.data_format}")
self.normalized_shape = (normalized_shape,)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.data_format == "channels_last":
x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
input_dtype = x.dtype
x = x.float()
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = x.to(dtype=input_dtype)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class ConvNextEmbeddings(nn.Module):
"""This class is comparable to (and inspired by) the SwinEmbeddings class
found in src/transformers/models/swin/modeling_swin.py.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = nn.Conv2d(
config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
)
self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
self.num_channels = config.num_channels
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.patch_embeddings(pixel_values)
embeddings = self.layernorm(embeddings)
return embeddings
class ConvNextLayer(nn.Module):
"""This corresponds to the `Block` class in the original implementation.
    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C,
    H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
The authors used (2) as they find it slightly faster in PyTorch.
Args:
config ([`ConvNextConfig`]): Model configuration class.
dim (`int`): Number of input channels.
drop_path (`float`): Stochastic depth rate. Default: 0.0.
"""
def __init__(self, config, dim, drop_path=0):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = ACT2FN[config.hidden_act]
self.pwconv2 = nn.Linear(4 * dim, dim)
self.layer_scale_parameter = (
nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
if config.layer_scale_init_value > 0
else None
)
self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
input = hidden_states
x = self.dwconv(hidden_states)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.layernorm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.layer_scale_parameter is not None:
x = self.layer_scale_parameter * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
class ConvNextStage(nn.Module):
"""ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.
Args:
config ([`ConvNextConfig`]): Model configuration class.
in_channels (`int`): Number of input channels.
out_channels (`int`): Number of output channels.
depth (`int`): Number of residual blocks.
drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
"""
def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
super().__init__()
if in_channels != out_channels or stride > 1:
self.downsampling_layer = nn.Sequential(
ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
)
else:
self.downsampling_layer = nn.Identity()
drop_path_rates = drop_path_rates or [0.0] * depth
self.layers = nn.Sequential(
*[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.downsampling_layer(hidden_states)
hidden_states = self.layers(hidden_states)
return hidden_states
class ConvNextEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.stages = nn.ModuleList()
drop_path_rates = [
x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
]
prev_chs = config.hidden_sizes[0]
for i in range(config.num_stages):
out_chs = config.hidden_sizes[i]
stage = ConvNextStage(
config,
in_channels=prev_chs,
out_channels=out_chs,
stride=2 if i > 0 else 1,
depth=config.depths[i],
drop_path_rates=drop_path_rates[i],
)
self.stages.append(stage)
prev_chs = out_chs
def forward(
self,
hidden_states: torch.FloatTensor,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.stages):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
)
class ConvNextPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvNextConfig
base_model_prefix = "convnext"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, ConvNextModel):
module.gradient_checkpointing = value
CONVNEXT_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVNEXT_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`AutoImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ConvNext model outputting raw features without any specific head on top.",
CONVNEXT_START_DOCSTRING,
)
class ConvNextModel(ConvNextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = ConvNextEmbeddings(config)
self.encoder = ConvNextEncoder(config)
# final layernorm layer
self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndNoAttention,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values: torch.FloatTensor = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
# global average pooling, (N, C, H, W) -> (N, C)
pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@add_start_docstrings(
"""
ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""",
CONVNEXT_START_DOCSTRING,
)
class ConvNextForImageClassification(ConvNextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.convnext = ConvNextModel(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=ImageClassifierOutputWithNoAttention,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def forward(
self,
pixel_values: torch.FloatTensor = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
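# A minimal inference sketch (standard transformers usage; `image` is an
# assumed PIL.Image, and the checkpoint follows the docstrings above):
#   from transformers import AutoImageProcessor, ConvNextForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class])  # e.g. "tabby, tabby cat"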
|
[
"[email protected]"
] | |
0a3c1f01082e38e2bd3dfee7887ba9f946a2dc29
|
76be22521c08d1086778b58454e1020ec4066639
|
/server/routes/signin.py
|
a043fe5fcf0b5ed065c0e9a536a2b83daf73056f
|
[
"MIT"
] |
permissive
|
nondanee/dropbox
|
700bb4399f626791bb4e60faa2837fe26f2b1e6f
|
7933a4dd2b17f0882c6f310a5d51e3e4bafdcb77
|
refs/heads/master
| 2020-03-14T21:44:37.607416 | 2019-06-11T16:19:07 | 2019-06-11T16:19:07 | 131,804,122 | 1 | 1 |
MIT
| 2019-06-11T16:00:44 | 2018-05-02T05:49:54 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,152 |
py
|
import asyncio
import hashlib
from . import toolbox
from aiohttp_session import get_session
@asyncio.coroutine
def route(request):
session = yield from get_session(request)
if request.content_type != "application/x-www-form-urlencoded":
return toolbox.javaify(400,"wrong content type")
data = yield from request.post()
email = data['email'] if 'email' in data else ''
password = data['password'] if 'password' in data else ''
if not email:
return toolbox.javaify(400,"miss parameter")
hash_password = hashlib.sha1(password.encode("utf-8")).hexdigest()
with (yield from request.app['pool']) as connect:
cursor = yield from connect.cursor()
yield from cursor.execute('''SELECT id FROM user WHERE email = %s AND password = %s AND status = 1''',(email,hash_password))
check = yield from cursor.fetchone()
yield from cursor.close()
connect.close()
if check:
session["uid"] = check[0]
return toolbox.javaify(200,"ok")
else:
session.clear()
return toolbox.javaify(403,"forbidden")
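# Wiring sketch (assumptions: the app setup lives elsewhere in this repo;
# the pool looks like an aiomysql pool given the cursor API used above):
#   app.router.add_post('/signin', route)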
|
[
"[email protected]"
] | |
977fe8b11fca72311f216a456309f6c63a27759c
|
d02755043a1ecc14aff743c81c991ac9d99095c6
|
/app/Task/TaskByIdREST.py
|
1d852625739f9d43ab7258d3de7968f71e1aaebb
|
[] |
no_license
|
surendranaidu/flask-todo-app
|
7f1edda06337367290d93c3378a02d63892734e0
|
f2272a280461ffb7b16f4f238e082e7301616652
|
refs/heads/master
| 2021-06-19T18:26:07.807137 | 2017-07-25T20:02:25 | 2017-07-25T20:02:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
from flask import request
from flask_restful import Resource
# defining the class with http verb based methods
class TaskByIdREST(Resource):
# defining method for get task by id
def get(self,taskId):
print(taskId)
return {"message":"taskFound"}
# defining method to delete task by id
def delete(self,taskId):
print(taskId)
return {"message":"taskDeleted"}
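# Registration sketch (assumption -- the Api/app objects live elsewhere
# in this repo; the route string is illustrative):
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(TaskByIdREST, '/tasks/<taskId>')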
|
[
"[email protected]"
] | |
cf5dcd1d4c5ee1315e9015b5bfae7574040d4231
|
95e91c750836150508454952dc0a3972f6a0c0bf
|
/call_get_category.py
|
3a96f7f5daf2a5c153381d82b8a1a3f2980f05c6
|
[] |
no_license
|
wendysyn89/CompUX-LSA
|
e6e627219bc687b868020708804b992ded5ccf5d
|
ec80e3638c78d5cef3ab899c4fd10062b67056ea
|
refs/heads/master
| 2021-01-10T01:39:16.949238 | 2015-11-09T09:35:10 | 2015-11-09T09:35:10 | 45,722,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,593 |
py
|
from get_category import SemanticSpace
from training_vector import SubSpace
import re
"""get semantic training data and initiate class"""
c=SemanticSpace('semantic_training_data.txt')
"""BUILD SEMANTIC SPACE"""
#c.build_model()
"""BUILD ITEM SUBSPACE"""
"""get item file and initiate class"""
# d=SubSpace("item_ann.txt")
# d.load_model()
# d.build_tfidf_corpus()
"""LOAD MODEL"""
c.load_model()
print "############################################################################"
"""get query in review sentence"""
doc='this phone is useful'
print "Review sentence:::",doc
"""get item list (construct, item, similarity score)"""
#print '##item similarity##'
result=c.get_category(doc)
for item in result:
print item
"""predict best ux"""
best_ux= c.get_best_ux(result)
print best_ux
print "####################################################################################"
"""get query in review text"""
doc2="This phone is useful. I love it."
print "Review Text:::",doc2
"""Split the text into review sentence"""
m = re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', doc2)
for sent in m:
print "Review sentence:::",sent
"""get item list (construct, item, similarity score)"""
result=c.get_category(sent)
for item in result:
print "Similar measurement items",item
"""predict best ux"""
best_ux= c.get_best_ux(result)
print "Best predicted user experience:::",best_ux
###add on text
"""find similar item"""
"""
print '##item similarity##'
result=c.doc_similarity(doc)
for item in result:
print item[1],'\t',item[0],'\t',item[2]
"""
|
[
"[email protected]"
] | |
1cb69c55e4d7fdf7b4a102d75cb0ed2e13eaf37c
|
d6e08c3285e0c909cc6d5aed2af42fc7945315a1
|
/setup.py
|
f6c27087392be100f082a7d8d6477f8d41e3f8c9
|
[
"MIT"
] |
permissive
|
frumiousbandersnatch/cobe
|
2c46ebae3d019776d84824d0d019100f01f2d460
|
73c8df450f655354e5c4369074b04f55ec9c0605
|
refs/heads/master
| 2020-04-05T19:09:21.407684 | 2017-02-04T20:09:03 | 2017-02-04T20:09:03 | 9,268,583 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,570 |
py
|
#!/usr/bin/env python
# Require setuptools. See http://pypi.python.org/pypi/setuptools for
# installation instructions, or run the ez_setup script found at
# http://peak.telecommunity.com/dist/ez_setup.py
from setuptools import setup
setup(
name="cobe",
version="2.0.4",
author="Peter Teichman",
author_email="[email protected]",
url="http://wiki.github.com/pteichman/cobe/",
description="Markov chain based text generator library and chatbot",
packages=["cobe"],
# setup_require nosetests, so that the nosetests command is
# available immediately in a fresh checkout
setup_requires=[
"nose==1.1.2"
],
test_suite="unittest2.collector",
# mock and unittest2 are required by the tests; coverage is
# required by the [nosetests] section of setup.cfg.
tests_require=[
"coverage==3.5.2",
"mock==1.0b1",
"unittest2==0.5.1"
],
install_requires=[
"PyStemmer==1.2.0",
"argparse==1.2.1",
"irc==3.0",
"park==1.0"
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
entry_points={
"console_scripts": [
"cobe = cobe.control:main"
]
}
)
|
[
"[email protected]"
] | |
a624ed2ba72f77d3c88e103f5fa152e7e3670c2a
|
c39c2d43416cfae73fc95d7a928689c53d0a387f
|
/level_2.0/config/architecture_config.py
|
88963f3c9769743872e781db0233a617fe9f9656
|
[] |
no_license
|
Ontheway361/stanford_cs231n
|
229f3034bcf3974159bfee596af74d20cbed3220
|
a28567260cd5e380865e81c6fcc68f420a381797
|
refs/heads/master
| 2020-04-22T22:18:12.640432 | 2019-11-20T12:19:20 | 2019-11-20T12:19:20 | 170,703,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,758 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 2019/03/13
author: lujie
"""
import numpy as np
from IPython import embed
class Architecture(object):
def __init__(self, method = 'cnn'):
self.art_cfg = None
self.method = method
self.solver = self._solver_config()
        if method == 'cnn':
self.art_cfg = self._cnn_config()
else:
self.art_cfg = self._fcn_config()
def _fcn_config(self):
''' config of fcn-architecture '''
fcn_config = {
'input_dim' : 3 * 32 * 32,
'hidden_dims' : [1024, 100, 10], # TODO
'num_classes' : 10,
'dropout' : 0.1,
'use_batchnorm' : True,
'weights_scale' : 2.5e-2, # 5e-2
'reg' : 1e-2,
'dtype' : np.float64,
'seed' : None
}
return fcn_config
def _cnn_config(self):
''' config of cnn-architecture '''
architecture = {
'input_dim' : (3, 32, 32),
'conv_layers' : {
'sandwich1' : {
'num_filters' : 32,
'filter_size' : 7,
'padding' : 'same',
'stride' : 1,
'pool_height' : 2,
'pool_width' : 2,
'pool_stride' : 2
},
# 'sandwich2' : {
# 'num_filters' : 1,
# 'filter_size' : 1,
# 'padding' : 0,
# 'stride' : 1,
# 'pool_height' : 1,
# 'pool_width' : 1,
# 'pool_stride' : 1
# },
#
# 'sandwich3' : {
# 'num_filters' : 32,
# 'filter_size' : 3,
# 'padding' : 'same',
# 'stride' : 1,
# 'pool_height' : 1,
# 'pool_width' : 1,
# 'pool_stride' : 1
# },
#
# 'sandwich4' : {
# 'num_filters' : 32,
# 'filter_size' : 3,
# 'padding' : 'same',
# 'stride' : 1,
# 'pool_height' : 2,
# 'pool_width' : 2,
# 'pool_stride' : 2
# },
#
# 'sandwich5' : {
# 'num_filters' : 1,
# 'filter_size' : 1,
# 'padding' : 0,
# 'stride' : 1,
# 'pool_height' : 1,
# 'pool_width' : 1,
# 'pool_stride' : 1
# },
},
'fcn_layers' : [500, 100],
'num_classes' : 10,
'use_batchnorm' : True,
'weight_scale' : 2.5e-3, # 2.5e-3
'reg' : 5e-3,
'dtype' : np.float32
}
return architecture
def _solver_config(self):
''' config of solver '''
solver_config = {
'num_train' : None,
'argmented' : [], # ['flip', 'color', 'noise', 'trans', 'crop']
'update_rule' : 'adam',
'learning_rate' : 5e-4, # TODO 5e-4
'lr_decay' : 0.95,
'num_epochs' : 15, # TODO
'batch_size' : 64, # TODO
'verbose' : True
}
return solver_config
def get_configs(self, verbose = True):
''' get the info of config '''
if verbose:
            if self.method == 'cnn':
print('%s conv-arch %s' % ('-'*66, '-'*66))
for key, items in self.art_cfg.items():
                    if key == 'conv_layers':
for conv, arch in items.items():
print(conv, arch)
else:
print(key, items)
else:
print('%s fcns-arch %s' % ('-'*66, '-'*66))
print(self.art_cfg)
print('%s solver_config %s' % ('-'*64, '-'*64))
print(self.solver)
print('%s' % ('-'*143))
res = {}
res['arch'] = self.art_cfg
res['solver'] = self.solver
return res
if __name__ == '__main__':
arch = Architecture()
configs = arch.get_configs(verbose = True)
|
[
"[email protected]"
] | |
ff590a585f867980a353cc96195a4a9848ffb709
|
e90f9eaadcd099f1d4b30504702df59ef0ff63db
|
/build/system/serial/catkin_generated/pkg.installspace.context.pc.py
|
88917374ea9c7bfae4d0801860f829a73d487e2d
|
[] |
no_license
|
SiChiTong/f110_ws
|
ce1e7b8408af645a9d09d8298933253e9810745f
|
a44b77f58527fabd4b2b2905132c6651e102134f
|
refs/heads/master
| 2020-04-02T12:00:55.998831 | 2018-07-24T18:41:31 | 2018-07-24T18:41:31 | 154,416,188 | 1 | 0 | null | 2018-10-24T00:49:29 | 2018-10-24T00:49:29 | null |
UTF-8
|
Python
| false | false | 510 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/nvidia/Desktop/f110_ws/install/include".split(';') if "/home/nvidia/Desktop/f110_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lserial;-lrt;-lpthread".split(';') if "-lserial;-lrt;-lpthread" != "" else []
PROJECT_NAME = "serial"
PROJECT_SPACE_DIR = "/home/nvidia/Desktop/f110_ws/install"
PROJECT_VERSION = "1.2.1"
|
[
"[email protected]"
] | |
7751407c653bfce3866a1d44035327f7b7bba18c
|
553626aa79d7cba83d7c3b6f05b5b63d54ceb98f
|
/IoT-Scripts/run.py
|
23e955f711cd585c5fa7f25c5584bb683da211f0
|
[] |
no_license
|
MNabegh/EMC_Mentorship_program_project
|
04dc4585741f2148d261936b74c153bc4eaa86e9
|
b41cb65e1046550334848f678a94fea6f28467d1
|
refs/heads/master
| 2020-03-27T11:10:59.546778 | 2018-09-23T11:22:05 | 2018-09-23T11:22:05 | 146,470,509 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,219 |
py
|
import subprocess
import multiprocessing
mongoDB = subprocess.call("sudo service mongod start &", stdout= subprocess.PIPE, shell= True)
zookeeper = subprocess.call(
"sudo /home/nabegh/kafka/kafka_2.11-2.0.0//bin/zookeeper-server-start.sh /home/nabegh/kafka/kafka_2.11-2.0.0/config/zookeeper.properties &",
stdout= subprocess.PIPE, shell= True)
kafka = subprocess.call(
"sudo /home/nabegh/kafka/kafka_2.11-2.0.0//bin/kafka-server-start.sh /home/nabegh/kafka/kafka_2.11-2.0.0/config/server.properties &",
stdout= subprocess.PIPE, shell= True)
#gemfire = subprocess.call(
# "java -jar Learn/EMC_Mentorship_program_project/IoT-GemFire/target/IoT-GemFire-0.0.1-SNAPSHOT.jar &",
# stdout= subprocess.PIPE, shell= True)
#transformer = subprocess.call(
# "java -jar Learn/EMC_Mentorship_program_project/IoT-EnrichmentTransformer/target/IoT-EnrichmentTransformer-0.0.1-SNAPSHOT.jar &",
# stdout= subprocess.PIPE, shell= True)
#Set up the parallel task pool to use all available processors
#simulator = subprocess.call(
# "java -jar Learn/EMC_Mentorship_program_project/IoT-CarSimulator/IoT-CarSimulator/target/IoT-CarSimulator-0.0.1-SNAPSHOT.jar &",
# stdout= subprocess.PIPE, shell= True)
#print('The End')
|
[
"[email protected]"
] | |
7b8aa0b7e8576ed4aec9e83b7aa05793b2d8ed3f
|
259b62bdab364a7df554d503ec4e6ef37070564a
|
/article/figure-4b/lineplot-4b.py
|
87d166f330228335c801d6ceb2950d7468f29237
|
[
"MIT"
] |
permissive
|
guilherme-araujo/gsop-dist
|
aab466504e89063933c55611bbc4a82cb501bc7f
|
15da82ffa8add74cc61b95d3544ec3aaa0e71a32
|
refs/heads/master
| 2023-06-28T08:40:11.978053 | 2021-08-01T01:43:28 | 2021-08-01T01:43:28 | 262,335,249 | 0 | 0 |
MIT
| 2020-09-21T16:24:59 | 2020-05-08T13:48:01 |
Python
|
UTF-8
|
Python
| false | false | 1,749 |
py
|
import seaborn as sns
import sys
import csv
from statistics import stdev
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import gc
files = [
{'file': 'a1g9', 'bonus': '0.01'},
{'file': 'a3g7', 'bonus': '0.03'},
{'file': 'a5g5', 'bonus': '0.05'},
{'file': 'a7g3', 'bonus': '0.07'},
{'file': 'a9g1', 'bonus': '0.09'}
]
al = []
for f in files:
with open(f['file']+'/'+f['file']+'.txt') as csv_file_r:
print(f['file'])
csv_reader = csv.reader(csv_file_r, delimiter=';')
e00 = []
for row in csv_reader:
if(row[0]!='partial'):
qta = int(row[0])
qtb = int(row[1])
result = 'Undef.'
if qta == 500:
result = 'A'
elif qta == 0:
result = 'B'
e00.append([qta,qtb,result,f['bonus']])
al += e00
all = pd.DataFrame(al, columns=['qta', 'qtb', 'type', 'bonus'])
print(all)
resumo = all.groupby(["bonus", "type"])["qta"].count().unstack(fill_value=0).stack().reset_index(name="sum")
fig_dims = (6, 4)
fig, ax = plt.subplots(figsize=fig_dims)
print(resumo)
fig = sns.lineplot(data=resumo, x="bonus", y="sum", hue="type")
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
ax.set(xlabel="alpha A", ylabel="Fixation %" )
ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=5000000))
#ax.xaxis.set_major_formatter(mtick.ScalarFormatter())
#ax.set_xticks(resumo['bonus'].unique())
#plt.setp(ax.get_xticklabels(), rotation=90, horizontalalignment='center')
plt.ylim(1200000,2000000)
plt.tight_layout()
plt.show()
plt.savefig("lineplot-4b.svg")
plt.savefig("lineplot-4b.png", dpi=200)
|
[
"[email protected]"
] | |
7cf17d72d043afc388eab71f3d33dc9660709fc1
|
9a2c4394bfd9485eb58e186900b68ad40a4fd650
|
/15_1/test5 15_1.py
|
b07bdf6c4b3365f1295bf8689fd6d298de426177
|
[] |
no_license
|
Jirada01/Python
|
954dbfbb2d79b815e54b65e78b2df7a01ad39e11
|
bee62fd0734614d9dc3fb5fb61a179dcd2ea23a8
|
refs/heads/master
| 2023-03-11T22:48:21.689796 | 2021-02-15T14:03:01 | 2021-02-15T14:03:01 | 336,189,994 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 586 |
py
|
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++")
print(" ร้านตัวจี๊ด ยินดีต้อนรับนะฮะ ")
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++")
shop=[]
for i in range (5) :
a = input ("หยิบสินค้าชิ้นที่ " + str(i+1) +" :")
shop.append(a)
print("สินค้าที่หยิบใส่ตะกร้ามีดังนี้")
print("1."+shop[0])
print("2."+shop[1])
print("3."+shop[2])
print("4."+shop[3])
print("5."+shop[4])
|
[
"[email protected]"
] | |
3dd32be86e408952a3c78184fcfb3ceb55eae1d0
|
a062e6c3d57d4c256abff4493e4007d5613fbae8
|
/setup.py
|
1d6d26e0f0d4b0821ae575aa4f9e1941a840a28b
|
[
"MIT"
] |
permissive
|
anumaurya114/mydlib
|
59678e11a4660e4e9f549a96dd6cf7d19fd6325b
|
cdce5033b5d256eba86d5d12277304d1865fa2c2
|
refs/heads/master
| 2022-02-16T20:39:49.476982 | 2019-09-25T10:14:01 | 2019-09-25T10:14:01 | 210,230,533 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
import setuptools
with open("README.md",'r') as fh:
long_description = fh.read()
setuptools.setup(
name='mydlib',
version='0.4',
author="Anurag Maurya",
author_email="[email protected]",
description="A library (books) creator and simple libgen book downloader.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/anuragmaurya/mydlib",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
)
|
[
"[email protected]"
] | |
b07c0fbafeed6667c5bed529b5b338c08d284439
|
8ee64f13d2b837113ba86d42d0a308d550ee8297
|
/rss.py
|
c58636d4da43f80849960aafb77ed4544eb89c48
|
[] |
no_license
|
wang598842739/GenerateRSSFeedForWebPages
|
9eb9c126e5f7d308c752133f11cc12942d9fa741
|
aaadda52b4a2d7c56f5aaf0b9fab8b955cb432f5
|
refs/heads/master
| 2021-01-12T08:37:44.568388 | 2016-12-12T13:12:05 | 2016-12-12T13:12:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,901 |
py
|
#!/sw/bin/python
import urllib, re, os
import sys
titlere = re.compile("<TITLE>(.*?)</TITLE>", re.I|re.S)
def gettitle(c):
x = titlere.findall(c)
if x: return x[0]
return None
def htmlencode(text):
return text.replace('&', '&').replace('<', '<').replace('>', '>')
def rssify(title, link, items, encoding='utf8'):
if os.environ.has_key("QUERY_STRING"):
print "Content-Type: application/xml; charset="+encoding
print
if encoding != 'utf8':
print '<?xml version="1.0" encoding="'+encoding+'"?>'
print '<rss version="2.0" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/"><channel>\n <title>'+htmlencode(title)+'</title>\n <link>' + htmlencode(link) + '</link>\n <description>'+"Scraped feed provided by aaronsw.com"+'</description>'
for item in items:
print " <item>"
if type(item) is not type({}): item = {'description': item}
x = item.keys(); x.sort()
for k in x:
print " <"+k+">" + htmlencode(item[k]) + "</"+k+">"
print " </item>"
print "</channel></rss>"
def fromurl(url, title=None):
content = urllib.urlopen(url).read()
if title:
x = gettitle(content)
if x: content = {'title': x, 'description': content, 'link':url}
else: title = gettitle(content)
return rssify(title, url, [content])
def fromlist(title, link, itemlist):
items = []
for l in itemlist:
content = urllib.urlopen(l).read()
ntitle = gettitle(content)
if ntitle: items.append({'title': ntitle, 'description': content, 'link':l})
else: items.append(content)
return rssify(title, link, items)
# fromurl("http://vitanuova.loyalty.org/latest.html", "Vitanuova")
# Rys, Zooko: custom regexp
# Lawmeme, Politech: fromlist
# Mena, Mark's Projects: email owners
# http://www.sfgate.com/examiner/bondage/ http://dir.salon.com/topics/tom_tomorrow/ (use jwz's feed?)
# check on true porn clerk stories, mixerman
|
[
"[email protected]"
] | |
55a6fc1d4b69ad1cc5a5348d539e378e27d7bcc9
|
642ab3a9e0f20ad42b4afe0668704349bf0ce7b4
|
/hw2/hw2-1/utils.py
|
7d21b0c08441dc2b3a9d6bb7cc23eb94dd58150e
|
[] |
no_license
|
j40903272/mlds_2018spring
|
f12e5e8ba8cb7ebf8b077e0ef08c8eaafc3a13bc
|
26a745638dcf3e457c29d98b6599c46a3cd873f2
|
refs/heads/master
| 2021-04-03T01:26:36.667556 | 2018-07-30T11:47:56 | 2018-07-30T11:47:56 | 124,762,899 | 11 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,194 |
py
|
# coding: utf-8
# In[ ]:
import numpy as np
import pickle
import json
import sys
import os
# In[ ]:
import re
from collections import Counter
from keras.utils import to_categorical
from keras.callbacks import Callback
from config import *
from seq2seq_model import seq2seq_model
# In[ ]:
model, encoder_model, decoder_model, inf_model = seq2seq_model()
train_id_list = open('hw2_1_data/training_data/id.txt').read().split()
train_data = {i:np.load('hw2_1_data/training_data/feat/'+ i + '.npy') for i in train_id_list}
train_label = json.loads(open('hw2_1_data/training_label.json', 'r').read())
test_id_list = open('hw2_1_data/testing_data/id.txt').read().split()
test_data = {i:np.load('hw2_1_data/testing_data/feat/'+ i + '.npy') for i in test_id_list}
test_label = json.loads(open('hw2_1_data/testing_label.json', 'r').read())
# In[ ]:
def load_data():
    train_id_list = open('hw2_1_data/training_data/id.txt').read().split()
    train_data = {i: np.load('hw2_1_data/training_data/feat/' + i + '.npy') for i in train_id_list}
    train_label = json.loads(open('hw2_1_data/training_label.json', 'r').read())
    test_id_list = open('hw2_1_data/testing_data/id.txt').read().split()
    test_data = {i: np.load('hw2_1_data/testing_data/feat/' + i + '.npy') for i in test_id_list}
    test_label = json.loads(open('hw2_1_data/testing_label.json', 'r').read())
    # return the loaded objects so callers can actually use them
    return train_id_list, train_data, train_label, test_id_list, test_data, test_label
# In[ ]:
with open('vocab2idx', 'rb') as f:
vocab2idx = pickle.load(f)
with open('idx2vocab', 'rb') as f:
idx2vocab = pickle.load(f)
with open('correct_words', 'rb') as f:
correct_words = pickle.load(f)
# In[ ]:
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter(words(open('all_captions').read()))
def P(word):
    # word frequency: higher means more common, so max(..., key=P)
    # picks the most frequent candidate
    return WORDS.get(word, 0)
def correction(word):
return max(candidates(word), key=P)
def candidates(word):
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
return set(w for w in words if w in WORDS)
def edits1(word):
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
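# Quick check of the corrector (added sketch; kept commented because this
# module loads data and models at import time):
#   correction('runnning')  # -> a frequent in-vocabulary word, e.g. 'running'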
# In[ ]:
def decode_sequence_reduce(input_seq):
states_value = encoder_model.predict(input_seq)
target_seq = np.zeros((1, 1, vocab_size+1))
target_seq[0, 0, vocab_size] = 1.
stop_condition = False
decoded_sentence = []
last_word = ""
last_last_word = ""
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
sampled_index = np.argmax(output_tokens[0, -1, :])
sampled_word = idx2vocab[sampled_index]
if sampled_word == last_word or sampled_word == last_last_word:
output_tokens[0, -1, sampled_index] = 0
sampled_index = np.argmax(output_tokens[0, -1, :])
sampled_word = idx2vocab[sampled_index]
if sampled_word == last_word or sampled_word == last_last_word:
output_tokens[0, -1, sampled_index] = 0
sampled_index = np.argmax(output_tokens[0, -1, :])
sampled_word = idx2vocab[sampled_index]
last_last_word = last_word
last_word = sampled_word
decoded_sentence.append(sampled_word)
if (len(decoded_sentence) >= out_length):# or sampled_word == "<pad>":
stop_condition = True
target_seq = np.zeros((1, 1, vocab_size+1))
target_seq[0, 0, sampled_index] = 1.
states_value = [h, c]
return decoded_sentence
# In[ ]:
def make_node(state, prob, last, word, idx):
seq = np.zeros((1, 1, vocab_size+1))
seq[0, 0, idx] = 1.
l = 0 if last == None else last['len']+1
prob = 0 if last == None else prob+last['prob']
node = {'state':state, 'seq':seq, 'prob':prob, 'last':last, 'len':l, 'word':word, 'idx':idx, 'next':[]}
return node
# In[ ]:
def decode_sequence_beam(input_seq):
states_value = encoder_model.predict(input_seq)
init_node = make_node(states_value, 0, None, "<S>", vocab_size)
queue = [init_node]
leaf_nodes = []
stop_condition = False
decoded_sentence = []
while len(queue) != 0:
node = queue[0]
if node['len'] >= out_length or node['word'] == '<pad>':
leaf_nodes.append(node)
queue = [] if len(queue) == 1 else queue[1:]
break
target_seq = node['seq']
states_value = node['state']
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
for j in range(2):
sampled_index = np.argmax(output_tokens[0, -1, :])
sampled_word = idx2vocab[sampled_index]
if sampled_word != node['word']:
new_node = make_node([h, c], output_tokens[0, -1, sampled_index], node, sampled_word, sampled_index)
node['next'].append(new_node)
queue.append(new_node)
output_tokens[0, -1, sampled_index] = 0
queue = queue[1:]
# start search
max_prob = 0
for node in leaf_nodes:
tmp = node['prob']/node['len']
if tmp > max_prob:
max_prob = tmp
target_node = node
while target_node['last'] != None:
decoded_sentence.append(target_node['word'])
target_node = target_node['last']
return decoded_sentence[::-1]
# In[ ]:
def gen_output(data, id_list, decode, output_fn='output.txt'):
with open(output_fn, 'w', encoding='utf-8') as f:
for i in id_list:
input_seq = np.array([data[i]])
            decoded_sentence = decode(input_seq)  # use the decoder passed in (greedy or beam)
out = []
last = ""
for j in decoded_sentence:
if j == "<S>" or j == last:
continue
elif j == '<pad>':
break
last = j
out.append(j)
out = i + ',' + " ".join(out) + '\n'
f.write(out)
# In[ ]:
def cal_bleu(label):
from hw2_1_data.bleu_eval import BLEU
output = "output.txt"
result = {}
with open(output, 'r', encoding='utf-8') as f:
for line in f:
line = line.rstrip()
comma = line.index(',')
test_id = line[:comma]
caption = line[comma+1:]
result[test_id] = caption
#count by the method described in the paper https://aclanthology.info/pdf/P/P02/P02-1040.pdf
bleu=[]
for item in label:
score_per_video = []
captions = [x.rstrip('.') for x in item['caption']]
score_per_video.append(BLEU(result[item['id']],captions,True))
bleu.append(score_per_video[0])
average = sum(bleu) / len(bleu)
#print("Average bleu score is " + str(average))
return average
# In[ ]:
def idx2data(idx, x, y, data, label):
# x[idx] is id, y[idx][0] is label index, y[idx][1] is seq index
encoder_input = data[x[idx]]
decoder_input = label[y[idx][0]]['seq'][y[idx][1]]
decoder_target = np.concatenate((decoder_input[1:], np.array([0], dtype='int32')))
return encoder_input, decoder_input, decoder_target
# In[ ]:
def data_generator(data, targets):
    global train_data, train_label, batch_size, vocab_size
idx = np.arange(len(data))
while True:
np.random.shuffle(idx)
batches = [idx[range(batch_size*i, min(len(data), batch_size*(i+1)))] for i in range(len(data)//batch_size+1)]
for i in batches:
encoder_inputs, decoder_inputs, decoder_targets = [], [], []
for j in i:
x, y, z = idx2data(j, data, targets, train_data, train_label)
encoder_inputs.append(x)
decoder_inputs.append(y)
decoder_targets.append(z)
encoder_inputs = np.array(encoder_inputs)
decoder_inputs = to_categorical(decoder_inputs, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
decoder_targets = to_categorical(decoder_targets, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
yield ([encoder_inputs, decoder_inputs], decoder_targets)
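# Shape note (added; illustrative values): with vocab_size = 4 and out_length = 3,
# to_categorical([[1, 0, 2]], num_classes=5).reshape(-1, 3, 5) has shape
# (1, 3, 5) -- one one-hot vector per decoder timestep, as the model expects.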
# In[ ]:
def validation_generator(data, targets):
    global test_data, test_label, batch_size, vocab_size
idx = np.arange(len(data))
while True:
np.random.shuffle(idx)
batches = [idx[range(batch_size*i, min(len(data), batch_size*(i+1)))] for i in range(len(data)//batch_size+1)]
for i in batches:
encoder_inputs, decoder_inputs, decoder_targets = [], [], []
for j in i:
x, y, z = idx2data(j, data, targets, test_data, test_label)
encoder_inputs.append(x)
decoder_inputs.append(y)
decoder_targets.append(z)
encoder_inputs = np.array(encoder_inputs)
decoder_inputs = to_categorical(decoder_inputs, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
decoder_targets = to_categorical(decoder_targets, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
yield ([encoder_inputs, decoder_inputs], decoder_targets)
# In[ ]:
def idx2inf_data(idx, x, y, data, label):
# x[idx] is id, y[idx][0] is label index, y[idx][1] is seq index
encoder_input = data[x[idx]]
decoder_input = label[y[idx][0]]['seq'][y[idx][1]]
decoder_target = np.concatenate((decoder_input[1:], np.array([0], dtype='int32')))
return encoder_input, decoder_input[0], decoder_target
# In[ ]:
def inf_data_generator(data, targets):
    global train_data, train_label, batch_size, vocab_size
idx = np.arange(len(data))
while True:
np.random.shuffle(idx)
batches = [idx[range(batch_size*i, min(len(data), batch_size*(i+1)))] for i in range(len(data)//batch_size+1)]
for i in batches:
encoder_inputs, decoder_inputs, decoder_targets = [], [], []
for j in i:
x, y, z = idx2inf_data(j, data, targets, train_data, train_label)
encoder_inputs.append(x)
decoder_inputs.append(y)
decoder_targets.append(z)
encoder_inputs = np.array(encoder_inputs)
decoder_inputs = to_categorical(decoder_inputs, num_classes=vocab_size+1).reshape(-1, 1, vocab_size+1)
decoder_targets = to_categorical(decoder_targets, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
yield ([encoder_inputs, decoder_inputs], decoder_targets)
# In[ ]:
def inf_validation_generator(data, targets):
    global test_data, test_label, batch_size, vocab_size
idx = np.arange(len(data))
while True:
np.random.shuffle(idx)
batches = [idx[range(batch_size*i, min(len(data), batch_size*(i+1)))] for i in range(len(data)//batch_size+1)]
for i in batches:
encoder_inputs, decoder_inputs, decoder_targets = [], [], []
for j in i:
x, y, z = idx2inf_data(j, data, targets, test_data, test_label)
encoder_inputs.append(x)
decoder_inputs.append(y)
decoder_targets.append(z)
encoder_inputs = np.array(encoder_inputs)
decoder_inputs = to_categorical(decoder_inputs, num_classes=vocab_size+1).reshape(-1, 1, vocab_size+1)
decoder_targets = to_categorical(decoder_targets, num_classes=vocab_size+1).reshape(-1, out_length, vocab_size+1)
yield ([encoder_inputs, decoder_inputs], decoder_targets)
# In[ ]:
def preprocess(test_label, train_label):
import string
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
# count words
t = Tokenizer()
# fit_on_texts(texts)
# texts: can be a list of strings, generator of strings, or a list of list of strings.
for i in train_label:
t.fit_on_texts(i['caption'])
for i in test_label:
t.fit_on_texts(i['caption'])
# spelling correction
for i in train_label:
new = []
for j in i['caption']:
tmp = text_to_word_sequence(j)
correct_list = []
for k in range(len(tmp)):
ignore_this_word = False
for l in tmp[k]:
if l not in string.ascii_letters and l not in [" ", "'"]:
ignore_this_word = True
break
if ignore_this_word:
continue
#corrected = spell(tmp[k])
corrected = correction(tmp[k])
if corrected != tmp[k] and corrected in t.word_counts and t.word_counts[corrected] > t.word_counts[tmp[k]]*5 and t.word_counts[tmp[k]] < 10 and tmp[k][-2:] != "'s":
#print (tmp[k], t.word_counts[tmp[k]], corrected, t.word_counts[corrected], tmp)
correct_words[tmp[k]] = corrected
correct_list.append(corrected)
else:
correct_list.append(tmp[k])
new.append(" ".join(correct_list))
i['caption'] = new
t = Tokenizer()
for i in train_label:
t.fit_on_texts(i['caption'])
for i in test_label:
t.fit_on_texts(i['caption'])
vocab_size = len(t.word_counts) + 1
vocab2idx = dict((i, t.word_index[i]) for i in t.word_index)
idx2vocab = dict((t.word_index[i], i) for i in t.word_index)
idx2vocab[0] = "<pad>"
idx2vocab[vocab_size] = "<S>"
from keras.preprocessing.sequence import pad_sequences
for i in train_label:
seqs = t.texts_to_sequences(i['caption']) # input a list of strings
seqs = [[vocab_size]+j for j in seqs] # put start symbol <S> at begining
pad_seqs = pad_sequences(seqs, maxlen=out_length, dtype='int32', padding='post', truncating='post', value=0.0)
i['seq'] = pad_seqs
for i in test_label:
seqs = t.texts_to_sequences(i['caption']) # input a list of strings
seqs = [[vocab_size]+j for j in seqs] # put start symbol <S> at begining
pad_seqs = pad_sequences(seqs, maxlen=out_length, dtype='int32', padding='post', truncating='post', value=0.0)
i['seq'] = pad_seqs
X = []
Y = []
for i, ii in enumerate(train_label):
for j, jj in enumerate(ii['seq']):
X.append(ii['id'])
Y.append([i, j])
X = np.array(X)
Y = np.array(Y)
X_test = []
Y_test = []
for i, ii in enumerate(test_label):
for j, jj in enumerate(ii['seq']):
X_test.append(ii['id'])
Y_test.append([i, j])
X_test = np.array(X_test)
Y_test = np.array(Y_test)
return X, Y, X_test, Y_test
# In[ ]:
def plot_model():
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
plot_model(encoder_model, to_file='encoder_model.png', show_shapes=True)
plot_model(decoder_model, to_file='decoder_model.png', show_shapes=True)
# In[ ]:
class MyCallback(Callback):
    def __init__(self):
        self.best_score = 0
        self.bleu_history = {'train':[], 'test':[]}
        self.history = {}  # per-metric logs collected in on_epoch_end
        self.saved_model = ""
def on_epoch_end(self, epoch, logs={}):
for i in self.params['metrics']:
if i not in self.history:
self.history[i] = []
self.history[i].append(logs[i])
gen_output(train_data, train_label, decode_sequence_reduce)
show_outputs_and_score(train_label, 10)
try:
score = cal_bleu(train_label)
score = round(score, 3)
self.bleu_history['train'].append(score)
except ZeroDivisionError:
return
print('\nTrain Bleu score: {}'.format(score))
print ()
gen_output(test_data, test_label, decode_sequence_reduce)
show_outputs_and_score(test_label, 10)
try:
score = cal_bleu(test_label)
score = round(score, 3)
self.bleu_history['test'].append(score)
except ZeroDivisionError:
return
print('Test Bleu score: {}\n'.format(score))
if score > self.best_score:
model.save_weights('model_{}.hdf5'.format(score))
self.saved_model = 'model_{}.hdf5'.format(score)
if self.best_score != 0:
try:
os.remove('model_{}.hdf5'.format(self.best_score))
except Exception as e:
print (str(e))
print ('model_{}.hdf5'.format(self.best_score), 'not found')
self.best_score = score
|
[
"[email protected]"
] | |
8c0d013801941cbf9aebb29396fe710b656e993a
|
ddeb9fd39120b64b74f408f9d0aba8a86907cf12
|
/Homework1/Problemhw1.py
|
8792eaedceaa0962ddcb2be8b9c2a3726815c512
|
[] |
no_license
|
gustavo-sanchez1/CIS2348
|
7158c4dcafbd15fff922f24567e66e0de4492ea5
|
0a7f40e40c5601cba7b2c429d8d179b8d1db19d5
|
refs/heads/main
| 2023-04-10T11:00:07.380765 | 2021-05-12T04:49:13 | 2021-05-12T04:49:13 | 332,627,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
print('Birthday Calculator') # output the current day
print('Current day')
print('Month: ')
current_month = int(input())
print("Day: ")
current_day = int(input())
print('Year: ')
current_year = int(input())
print('Birthday') # print user's birthday
print('Month: ')
birthday_month = int(input())
print('Day: ')
birthday_day = int(input())
print('Year: ')
birthday_year = int(input())
age = current_year - birthday_year  # calculate age
if (current_month, current_day) < (birthday_month, birthday_day):
    age -= 1  # this year's birthday has not happened yet
print("You are", age, "years old.")
if current_month == birthday_month and current_day == birthday_day:
print('\nHappy birthday!')
|
[
"[email protected]"
] | |
4e67f7e411501ead79afa8fdc99e0a34422a06de
|
f28a5beba86a2cdf286848cfece3c4cd8eab06c7
|
/Web01/Homework/Exe1/app.py
|
d8b2f7ccef6c92d48a2800244cb934d580d189b1
|
[] |
no_license
|
luongnhuyen/luongnhuyen-webmodule-C4E17
|
aec4c1e023d1ecf78f3b57ddf4496efe849e7945
|
142df2567595a5b500b0a5a8e54ea29582307362
|
refs/heads/master
| 2020-03-16T19:54:57.823660 | 2018-05-28T17:55:07 | 2018-05-28T17:55:07 | 132,937,641 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 573 |
py
|
from flask import Flask, render_template, redirect
app = Flask(__name__)
@app.route('/bmi/<int:weight>/<int:height>')
def bmi(weight,height):
kq=weight/(height/100*height/100)
    # Method 1: build the result string directly
if kq < 16:
ketqua = "Severely underweight"
elif kq < 18.5:
ketqua = "Underweight"
elif kq < 25:
ketqua = "Normal"
elif kq < 30:
ketqua = "Overweight"
else:
ketqua = "Obese"
return "{0} {1}".format(kq,ketqua)
    # Method 2: render a template instead
    # return render_template("index.html", kq=kq)
if __name__ == '__main__':
app.run(debug=True)
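# Example (added sketch): with the dev server running, GET /bmi/70/175
# computes 70 / 1.75**2 ~ 22.86 and returns "22.857142857142858 Normal".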
|
[
"[email protected]"
] | |
84325c4920da7b156d19c629504f091e334b54b4
|
fea6004f59a4fb8547205939b40aec8949b2b34d
|
/AlphaBeta.py
|
335b8e4502abeb8885554864e36a93a16e817088
|
[] |
no_license
|
kpwelsh/TensorflowTraining
|
ffaa9fced95eb030f1236dbc372b5955ed184865
|
f878e1416aa24d0b54e00bcaf75875c0ebad0c3c
|
refs/heads/master
| 2021-06-19T19:10:06.181653 | 2017-07-01T21:22:20 | 2017-07-01T21:22:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 08:40:51 2017
@author: kevin
"""
class AlphaBeta:
def __init__(self, maxDepth):
self.MaxDepth = maxDepth
|
[
"[email protected]"
] | |
a54ac9f0d8a08ee8192c56e79654ab0dba3cb790
|
575072e8cca42a6a8e33aa85206407248fa68c26
|
/addons/x11542_order_type/models/x11542_order_type.py
|
483b7d4032514c2249afdfd1316c9847e0452afd
|
[] |
no_license
|
alisaifbil/OdooPayrollPakistan
|
509055aea2a00c5518eab2ad1fdced07250e160b
|
cdf5ed8f75bf84a37fae39739eb00fcb36843576
|
refs/heads/master
| 2020-05-29T12:35:21.206995 | 2019-05-29T02:38:19 | 2019-05-29T02:38:19 | 189,131,873 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 175 |
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class x11542_order_type(models.Model):
_name = 'x11542.order.type'
name = fields.Char("Enter Sales Order Type")
|
[
"[email protected]"
] | |
ea2bd5552706203f0050d2b29d213146ff7bdca6
|
6f2a17428e7de6dc74ffd6c933f6b9a33121307d
|
/hello.py
|
bc87049eedb1d8002adefecdca544cb3a40618ea
|
[] |
no_license
|
harshaghub/PYTHON
|
d89eba802870137fe7febb2c8731f39569555ba9
|
a93effe9520889b9612743b33b8b18c63c278d8d
|
refs/heads/master
| 2020-12-02T06:24:40.518232 | 2018-06-26T06:20:50 | 2018-06-26T06:20:50 | 96,829,236 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 32 |
py
|
print('hello')
print('hello')
|
[
"[email protected]"
] | |
e617b920f9e2568d05f0b9b81923724255ed6437
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Platform/darwin.py
|
f997a7d9e6f3b45fabc0a4a6ede8551a69bcf4bd
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328 | 2023-08-27T09:16:45 | 2023-08-27T09:16:45 | 9,626,741 | 8,573 | 599 |
Apache-2.0
| 2023-09-13T02:49:41 | 2013-04-23T15:40:33 |
Python
|
UTF-8
|
Python
| false | false | 2,630 |
py
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
from . import posix
import os
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
env['HOST_OS'] = 'darwin'
# put macports paths at front to override Apple's versions, fink path is after
# For now let people who want Macports or Fink tools specify it!
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin'
# Store extra system paths in env['ENV']['PATHOSX']
filelist = ['/etc/paths',]
# make sure this works on Macs with Tiger or earlier
try:
dirlist = os.listdir('/etc/paths.d')
except FileNotFoundError:
dirlist = []
for file in dirlist:
filelist.append('/etc/paths.d/'+file)
for file in filelist:
if os.path.isfile(file):
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
if line:
env.AppendENVPath('PATHOSX', line.strip('\n'))
# Not sure why this wasn't the case all along?
if env['ENV'].get('PATHOSX', False) and os.environ.get('SCONS_USE_MAC_PATHS', False):
env.AppendENVPath('PATH',env['ENV']['PATHOSX'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"[email protected]"
] | |
d2cdad9f1e5e7b3f7ab43d08e4c1be9a8774df79
|
42ba84510d87cde32d147ee40a76cb5bc93c7ea3
|
/manage.py
|
bebad4b33f6c1a8bc9ca78ff34cfa607aad9c55a
|
[] |
no_license
|
kingdragon46/debate-django
|
5b030e2c5efea9f990ca37aaa06fd6cc8c513145
|
c59b5f38c33615e93d266801dcd8c965a5fdfdc8
|
refs/heads/master
| 2023-07-08T10:32:33.267526 | 2021-08-16T14:03:14 | 2021-08-16T14:03:14 | 396,804,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 665 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DebateWeb.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
cd10575325c6158eb2b1ef34ce4cc5bf26ab6553
|
47eb127f940d262bdd2a84fa61122fa30eae9fd0
|
/Library.py
|
2dd1acfd36acdf920620f1636e101f6d3cd62879
|
[] |
no_license
|
rpatelk/SimpleLibrary
|
df4e8133b3ebcc9adbc4b79fbef7de0988337c82
|
53c056677119a82a08c950a405c6ad9039677dc6
|
refs/heads/main
| 2023-03-30T02:25:11.124086 | 2021-04-03T22:49:30 | 2021-04-03T22:49:30 | 352,394,531 | 0 | 0 | null | 2021-04-03T22:49:01 | 2021-03-28T17:33:27 | null |
UTF-8
|
Python
| false | false | 913 |
py
|
from Book import Book
# Class that contains the Object for Library
# @author Raj Patel
class Library:
    # Important library information, including the book list.
__libraryName = ""
__address = ""
__listOfBooks = []
# Library class constructor
def __init__(self, libraryName, address, listOfBooks):
self.setLibraryName(libraryName)
self.setAddress(address)
self.setListOfBooks(listOfBooks)
# Getter and Setter methods for each variable.
def getLibraryName(self):
return self.__libraryName
def setLibraryName(self, libraryName):
self.__libraryName = libraryName
def getAddress(self):
return self.__address
def setAddress(self, address):
self.__address = address
def getListOfBooks(self):
return self.__listOfBooks
def setListOfBooks(self, listOfBooks):
self.__listOfBooks = listOfBooks
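# Example usage (added sketch; the list is left empty so no assumption is made
# about the Book constructor's signature):
#   central = Library("Central Library", "123 Main St", [])
#   print(central.getLibraryName())  # -> Central Library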
|
[
"[email protected]"
] | |
b1c1575eaf323d75f88f59a0fb76dab032680993
|
73deea0d56190d48faba16eef36e0adadb92101f
|
/blog/apps.py
|
7932512c6eff8b8dcf09e2e620f4b9e6d571ea82
|
[] |
no_license
|
lvicencio/blog_django
|
7ee59256482cb577172aed91ab29b939ed623f12
|
641be1cb30d59ec032c99ccb67d8d70274febea7
|
refs/heads/main
| 2023-04-02T11:17:23.613682 | 2021-04-07T03:28:41 | 2021-04-07T03:28:41 | 355,400,363 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 119 |
py
|
from django.apps import AppConfig
class BlogConfig(AppConfig):
name = 'blog'
verbose_name="Gestion del Blog"
|
[
"[email protected]"
] | |
b399c45fda01596366d609a87fd0320da5d59894
|
f7d1f11fd4fa5e71f3951b7fd7b4b20fef2a21b1
|
/venv/Scripts/pip3-script.py
|
10837b59bbe74b22be04f86ffc0e12c90479edd6
|
[] |
no_license
|
ferry-luo/ferry_pycharm_projects
|
130ea7ccd5d605b0965cd1bbc9b5511daa333afb
|
13a1a4a50f1c9fddf76ff0f56bf11f5d9a940467
|
refs/heads/master
| 2022-12-30T10:17:07.921729 | 2020-10-18T01:58:23 | 2020-10-18T01:58:23 | 268,465,037 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
#!F:\AAA-ferry\FerryProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | |
9ff16482c5666b73f7da76d002e50d1659d0b8e7
|
5a7737e401a1c4a0df1cd78b66c4c827320f1d14
|
/strings.py
|
c5880d45901068c72f038a17b7431f3b318ff7f4
|
[] |
no_license
|
venkatajagadeesh123/python_snippets
|
41ada001730bda070d0984b6931b5a94995ac1d9
|
c2c7394b80a86f1bc4ac3c051d5bc655414f8fbc
|
refs/heads/master
| 2021-01-02T09:18:58.512713 | 2017-08-02T06:26:28 | 2017-08-02T06:26:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,005 |
py
|
# name = "Srini"
# age = 23
print ("Hello world")
print("My name is " + name + "my age " + str(age))
print("My name is %s and my age %d" % (name,age))
print("My name is {name} and my age {age}".format(age=age,name=name))
# this syntax works only in Python 3.6+
print(f'My name is {name} my age next year {age+1}')
# writing a function to generate a story
# this syntax works in Python 3.x
def story(name,age,email='[email protected]'):
return ("My name is {name} and my age {age} and my email is {email}" .format(age=age,name=name,email=email))
def make_upper_and_give_first_twoval(mystr):
upcasestr = mystr.upper()
return upcasestr[:2]
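# e.g. make_upper_and_give_first_twoval('python') returns 'PY'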
# name = "srini"
# age = 23
# email = "[email protected]"
# story(name,age,email)
# print(story(age=23,name='srini',email='[email protected]'))
# full_story= story(age=23,name='srini',email='[email protected]')
# print(full_story)
print(story(age=23,name='srini'))
person = {'name': 'Jenn', 'age': 23}
# sentence = 'My name is ' + person['name'] + ' and I am ' + str(person['age']) + ' years old.'
# print(sentence)
# sentence = 'My name is {} and I am {} years old.'.format(person['name'], person['age'])
# print(sentence)
# sentence = 'My name is {0} and I am {1} years old.'.format(person['name'], person['age'])
# print(sentence)
# tag = 'h1'
# text = 'This is a headline'
# sentence = '<{0}>{1}</{0}>'.format(tag, text)
# print(sentence)
sentence = 'My name is {0} and I am {1} years old.'.format(person['name'], person['age'])
print(sentence)
# pi = 3.14159265
# sentence = 'Pi is equal to {}'.format(pi)
# print(sentence)
sentence = '1 MB is equal to {} bytes'.format(1000**2)
print(sentence)
import datetime
my_date = datetime.datetime(2016, 9, 24, 12, 30, 45)
# print(my_date)
# March 01, 2016
sentence = '{:%B %d, %Y}'.format(my_date)
print(sentence)
# March 01, 2016 fell on a Tuesday and was the 061 day of the year.
sentence = '{0:%B %d, %Y} fell on a {0:%A} and was the {0:%j} day of the year'.format(my_date)
print(sentence)
|
[
"[email protected]"
] | |
a3821a679ba8b90116f6d1d74cac7ff0d176923c
|
32d3801c9e5a121586b873d0c3a57cd6c1cf8d0d
|
/examples/classes.py
|
8fb55d1b0b18918c785341828a96ac1ce5a26f7e
|
[] |
no_license
|
dencorg/introduction-to-python-greek
|
808727cab781ccdd2111343ecab0d0d24b74015c
|
8c2b61e067c9d293e03f03caee094214613ba920
|
refs/heads/master
| 2023-04-14T20:58:20.235636 | 2023-04-06T09:24:32 | 2023-04-06T09:24:32 | 107,974,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 850 |
py
|
# creating a class
class Vehicle:
def __init__(self, number_of_wheels, fuel_type, maximum_velocity):
self.number_of_wheels = number_of_wheels
self.type_of_tank = fuel_type
self.maximum_velocity = maximum_velocity
def make_noise(self):
print('VRUUUUUUUM')
tesla = Vehicle(4, 'electric', 250)
print(tesla.number_of_wheels)
class Vehicle:
def __init__(self, number_of_wheels, fuel_type, maximum_velocity):
self.number_of_wheels = number_of_wheels
self.type_of_tank = fuel_type
self.maximum_velocity = maximum_velocity
def get_number_of_wheels(self):
return self.number_of_wheels
def set_number_of_wheels(self, number):
self.number_of_wheels = number
bmw = Vehicle(4, 'petrol', 200)
bmw.set_number_of_wheels(2)
print(bmw.number_of_wheels)
|
[
"[email protected]"
] | |
ed1dd33e5dda0a47ad967c56fda9598a01474e52
|
e2ed90a3023b99348597dd3a5c3cc5b084802b7e
|
/startup.py
|
3f0c17ac222d4f797ecc9b45d7bf050c5f9d7097
|
[] |
no_license
|
jasoncordis/spotifycouplesapp
|
dfbf60140c22487770586a7666e11050339f4cc5
|
bf73324e5f180811b228ef4c226b7c45ab763aab
|
refs/heads/main
| 2023-09-04T10:27:34.938381 | 2021-11-11T22:06:27 | 2021-11-11T22:06:27 | 416,884,630 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,912 |
py
|
import requests
import json
from flask_spotify_auth import getAuth, refreshAuth, getToken
import random
import os
import time
#Add your client ID
CLIENT_ID = os.getenv('CLIENTID', 'Optional default value')
# Add your client secret from Spotify
CLIENT_SECRET = os.getenv('CLIENTSECRET', 'Optional default value')
# Port and callback URL can be changed or left at localhost:5000
PORT = "5000"
CALLBACK_URL = "http://localhost"
#Add needed scope from spotify user
SCOPE = "playlist-read-private,playlist-read-collaborative, playlist-modify-public, playlist-modify-private"
#token_data will hold authentication header with access code, the allowed scopes, and the refresh countdown
TOKEN_DATA = []
def getUser():
return getAuth(CLIENT_ID, "http://spotifycouplesapp.herokuapp.com/callback/", SCOPE)
def getUserToken(code):
global TOKEN_DATA
TOKEN_DATA = getToken(code, CLIENT_ID, CLIENT_SECRET, "http://spotifycouplesapp.herokuapp.com/callback/")
def refreshToken(delay):
    # sleep `delay` seconds, then refresh the module-level token data
    global TOKEN_DATA
    time.sleep(delay)
    TOKEN_DATA = refreshAuth()
def getAccessToken():
return TOKEN_DATA
def getUserJSON(access_token):
user = []
header = {"Authorization": f"Bearer {access_token}"}
base_url = f"https://api.spotify.com/v1/me"
res_json = requests.get(base_url, headers=header).json()
user.append(res_json['id'])
user.append(res_json['display_name'])
user.append(res_json['images'][0]['url'])
return user
def getFriendJSON(access_token, friendID):
user = []
header = {"Authorization": f"Bearer {access_token}"}
base_url = f"https://api.spotify.com/v1/users/{friendID}"
res_json = requests.get(base_url, headers=header).json()
print(res_json)
if("error" in res_json):
return("none")
user.append(res_json['id'])
user.append(res_json['display_name'])
user.append(res_json['images'][0]['url'])
return user
def getUserPlaylists(access_token, username):
header = {"Authorization": f"Bearer {access_token}"}
base_url = f"https://api.spotify.com/v1/users/{username}/playlists?limit=50"
res_json = requests.get(base_url, headers=header).json()
return res_json
def getPlaylistItems(access_token, playid):
header = {"Authorization": f"Bearer {access_token}"}
base_url = f"https://api.spotify.com/v1/playlists/{playid}/tracks"
res_json = requests.get(base_url, headers=header).json()
return res_json
def getUserOwnedPlaylists(playlists, userid):
userPlaylists = []
PlaylistIDs = []
PlaylistNames = []
for i in range(len(playlists["items"])):
if(playlists["items"][i]["owner"]["id"] == userid and playlists["items"][i]["tracks"]["total"] > 0):
PlaylistIDs.append(playlists["items"][i]["id"])
PlaylistNames.append(playlists["items"][i]["name"])
if(playlists["items"][i]["tracks"]["total"] == 0):
print(playlists["items"][i]["name"])
userPlaylists.append(PlaylistIDs)
userPlaylists.append(PlaylistNames)
return userPlaylists
def getRandomTrack(randomPlaylist, access_token):
randomTrack = []
randomTrackName = []
randomTrackArtist = []
randomTrackID = []
for i in range(len(randomPlaylist[0])):
tracks = getPlaylistItems(access_token, randomPlaylist[0][i])
limit = len(tracks["items"])
random_track = random.randint(0, limit-1)
trackname = tracks["items"][random_track]["track"]["name"]
artistname = tracks["items"][random_track]["track"]["album"]["artists"][0]["name"]
trackid = tracks["items"][random_track]["track"]["id"]
randomTrackName.append(trackname)
randomTrackArtist.append(artistname)
randomTrackID.append(trackid)
randomTrack.append(randomTrackName)
randomTrack.append(randomTrackArtist)
randomTrack.append(randomTrackID)
return randomTrack
def getRandomPlaylists(userPlaylists, randNums, userimage):
playlistInfo = []
PlaylistIDs = []
PlaylistNames = []
for i in range(len(randNums)):
PlaylistIDs.append(userPlaylists[0][randNums[i]])
PlaylistNames.append(userPlaylists[1][randNums[i]])
playlistInfo.append(PlaylistIDs)
playlistInfo.append(PlaylistNames)
playlistInfo.append(userimage)
return playlistInfo
def getRandomNumberList(n, length):
randNums = []
for i in range(n):
randNums.append(random.randint(0, length-1))
return randNums
def generateCombinedList(playlist1, playlist2, randomPlaylist, randomFriendPlaylist, user, friend):
combinedList = []
for i in range(len(playlist1[0])):
track1 = []
track1.append(playlist1[0][i])
track1.append(playlist1[1][i])
track1.append(playlist1[2][i])
track1.append(randomPlaylist[1][i])
track1.append(user)
track1.append(randomPlaylist[2])
track2 = []
track2.append(playlist2[0][i])
track2.append(playlist2[1][i])
track2.append(playlist2[2][i])
track2.append(randomFriendPlaylist[1][i])
track2.append(friend)
track2.append(randomFriendPlaylist[2])
combinedList.append(track1)
combinedList.append(track2)
return combinedList
def getIDarray(tracks):
idArray = []
for i in range(len(tracks)):
idArray.append(tracks[i][2])
return idArray
def createPlaylist(access_token, userid, username, friendname):
header = {"Authorization": f"Bearer {access_token}"}
base_url = f"https://api.spotify.com/v1/users/{userid}/playlists"
name = f"{username} and {friendname}\'s Playlist"
description = f"{username} and {friendname}\'s playlist created on http://spotifycouplesapp.herokuapp.com"
public = "true"
jsonData = {}
jsonData["name"] = name
jsonData["description"] = description
jsonData["public"] = public
playlistJson = json.dumps(jsonData)
res_json = requests.post(base_url, data = playlistJson, headers=header).json()
return res_json
def addPlaylistItems(access_token, playlist_id, idArray):
ids = idArray.split(", ")
header = {"Authorization": f"Bearer {access_token}"}
for i in range(len(ids)):
trackid = ids[i]
trackid = trackid.replace("'",'')
trackid = trackid.replace("[",'')
trackid = trackid.replace("]",'')
base_url = f"https://api.spotify.com/v1/playlists/{playlist_id}/tracks?uris=spotify%3Atrack%3A{trackid}"
res_json = requests.post(base_url, headers=header).json()
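# End-to-end sketch (added; kept commented -- the exact token structure and the
# track-id string format are assumptions based on the helpers above):
#   token = getAccessToken()  # access token string used by the helpers above
#   user = getUserJSON(token)                       # [id, display_name, image]
#   playlist = createPlaylist(token, user[0], user[1], 'Friend')
#   addPlaylistItems(token, playlist['id'], "['<track_id_1>', '<track_id_2>']")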
|
[
"[email protected]"
] | |
813f811cc4c034ef5333500a17d4e40972d394ad
|
b2926df6685098607a16051810041a64a8427211
|
/PythonSciCom/birthday.py
|
00f2f899ca7b6412293ec490a7f875ea6cf5213c
|
[] |
no_license
|
wyh0106/python_homework
|
478d7cfa5b9177b5301f4b3fab09eca98a354905
|
d17d4eca8eb012746c39197823b8f4cc5a67b948
|
refs/heads/master
| 2023-04-10T19:27:35.395986 | 2021-04-13T15:05:01 | 2021-04-13T15:05:01 | 266,530,300 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 276 |
py
|
from random import randint
def birth():
birthday = []
for _ in range(23):
birthday.append(randint(1,366))
set1 = set(birthday)
if len(birthday)!=len(set1):
return 1
else:
return 0
hits = 0  # do not shadow the built-in sum()
for _ in range(10**5):
    hits += birth()
print("{:.2f}%".format(hits / 10**3))  # hits / 10**5 trials * 100 -> percent
|
[
"[email protected]"
] | |
8a7818455d82620f37170b9ac842f62c8df49b7a
|
a69d82e6e74d72deb03ef7f4b9682b74f0eba993
|
/gold/scoreboard/modules/available/bitcoin/render
|
7ef432ac087372d8c770685e163de1edd8b69a30
|
[
"WTFPL"
] |
permissive
|
krebs/painload
|
f903e07d71c65b49af009a0b1e7b8f5f68b4a91f
|
2b09702300843947d40c8671776ea4227d1ad6d6
|
refs/heads/master
| 2023-08-31T04:11:51.835401 | 2023-08-20T22:20:40 | 2023-08-20T22:20:40 | 1,703,153 | 17 | 3 |
WTFPL
| 2023-07-06T23:14:49 | 2011-05-04T20:21:18 |
Shell
|
UTF-8
|
Python
| false | false | 296 |
#!/usr/bin/python
import json,urllib,datetime,sys
today = datetime.datetime.now()
result = json.load(sys.stdin)
print today.strftime("%Y-%m-%dT%H:%M:%S"),
print '%s' % result["blocks"],
print '%s' % result["difficulty"],
print '%s' % result["keypoololdest"],
#print '%s' % result["keypoolsize"]
|
[
"[email protected]"
] | ||
15520656810218c0c951a1874c2a869e7c97bd93
|
b5ea257e4accff19a105c2fc6a99578e70d2b0a7
|
/tempCodeRunnerFile.py
|
0b2ab081b03c1a35c3761e3bbd54fd012a059de9
|
[] |
no_license
|
oiji-official/PongGame
|
c361bdc58b5a93c6bb454dd9068b9e1b8c2be17f
|
f19ff28c1a2e2cfa1d1c5e36bbc4e870753ab787
|
refs/heads/master
| 2023-03-24T20:08:06.094134 | 2021-03-23T17:06:44 | 2021-03-23T17:06:44 | 350,793,178 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 38 |
py
|
paddle.listen()
# paddle.onkey(move, )
|
[
"[email protected]"
] | |
3ad6da7e651af530778e2774b28f603e6972c393
|
1ee0d5eaa3b7f325c1c00dce3b7ba27d08861c82
|
/euler3.py
|
8af5ebcc22e83ae62ada2462aded508dcce7047f
|
[] |
no_license
|
thwhisler/eulerproblems
|
0a3b3689e706acddafc79d8adcf03d9d76f53266
|
78a6c9f87d4059a3e72a503b1142112dc071c7a2
|
refs/heads/master
| 2021-01-20T08:43:31.394514 | 2017-05-03T19:33:33 | 2017-05-03T19:33:33 | 90,184,986 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
def euler3(x):
ogvalue = x
factors = []
counter = 3
while (x != 1):
#2 only even prime, pull out first
while (x % 2 == 0):
factors.append(2)
x = int(x / 2)
#check every odd number 3 onwards
if (x % counter == 0):
factors.append(counter)
x = int(x / counter)
else:
counter += 2
print("The prime factors of ", ogvalue, " are ", factors)
|
[
"[email protected]"
] | |
7854736b0d20f9d28c5c1c17c0df13ff424ef40c
|
7c81419bd1735fe8e04e8669c67888b666e7d3d2
|
/store/admin.py
|
4f1979e947bd216f5a0f0b487840d11c6e91a16a
|
[] |
no_license
|
cforcross/Ecom_with_test
|
55fd9377e1d2a04573bca541a9480d881749996a
|
229efb0f9d319026c87e8a1de53769dc98eec06c
|
refs/heads/main
| 2023-03-16T02:46:50.369551 | 2021-03-09T00:07:47 | 2021-03-09T00:07:47 | 345,823,958 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
from django.contrib import admin
from .models import Product,Category
# Register your models here.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display=['name','slug']
prepopulated_fields = {"slug": ("name",)}
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display=['title','author','slug','price','in_stock',
'created','updated']
list_filter=['in_stock','is_active']
list_editable=['price','in_stock']
prepopulated_fields = {"slug": ("title",)}
|
[
"[email protected]"
] | |
594245fe959ba44f61448c1aba7f88b2cb53a315
|
8af334f8ece73d6f50cbea97169d10ce59aca3b9
|
/djangoTest/settings.py
|
bb902034d27ede296d7b6ee71d6b74e71d8232a1
|
[] |
no_license
|
ayhamo/DjangoTest
|
6bb6a1aa4791b9fb2c228f36e85b493597494a83
|
56fe8f6090d60e8dd11b998197d71ec00bdfeffb
|
refs/heads/master
| 2023-08-11T05:06:04.215471 | 2021-09-20T16:21:17 | 2021-09-20T16:21:17 | 403,290,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,815 |
py
|
"""
Django settings for djangoTest project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-rvmzw_b#+)p(nc8id0#gdqqi(pike_cfxttqf4$zcy-2fhzs-p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
AUTH_USER_MODEL = 'CourseApp.User'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'example_api',
'CourseApp',
'django_filters',
'bootstrapform',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoTest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djnagotest',
'HOST': 'localhost',
'USER': 'root',
'PASSWORD': '1234',
'PORT': '3306',
'OPTIONS': { # Recommended by django-mysql
'init_command': 'SET innodb_strict_mode=1;SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci',
'charset': 'utf8mb4',
},
},
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
]
}
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"[email protected]"
] | |
776147fd6c13a8a141d5e67bc8bbf075b45e5aea
|
7d474522d84c4cf15c842aff8137c19313014d42
|
/pytwis/pytwis_clt_constants.py
|
887e7de6b0392715eb7d488eddbfc6a26bf6fe23
|
[
"Apache-2.0"
] |
permissive
|
renweizhukov/pytwis
|
782fa8662912ae3e089aeeaac54e16a61d025ea1
|
1bc45b038d7e5343824c520f89f644bbd6faab0a
|
refs/heads/master
| 2021-04-28T03:53:54.506686 | 2018-06-10T07:33:10 | 2018-06-10T07:33:10 | 122,150,388 | 5 | 2 |
Apache-2.0
| 2018-04-09T02:54:19 | 2018-02-20T03:21:04 |
Python
|
UTF-8
|
Python
| false | false | 737 |
py
|
# -*- coding: utf-8 -*-
"""This module defines all the constants used by pytwis_clt.py."""
CMD_REGISTER = 'register'
CMD_LOGIN = 'login'
CMD_LOGOUT = 'logout'
CMD_CHANGE_PASSWORD = 'changepwd'
CMD_GET_USER_PROFILE = 'userprofile'
CMD_POST = 'post'
CMD_FOLLOW = 'follow'
CMD_UNFOLLOW = 'unfollow'
CMD_GET_FOLLOWERS = 'followers'
CMD_GET_FOLLOWINGS = 'followings'
CMD_TIMELINE = 'timeline'
CMD_GET_USER_TWEETS = 'tweetsby'
CMD_EXIT = 'exit'
CMD_QUIT = 'quit'
ARG_COMMAND = 'command'
ARG_USERNAME = 'username'
ARG_PASSWORD = 'password'
ARG_OLD_PASSWORD = 'old_password'
ARG_NEW_PASSWORD = 'new_password'
ARG_CONFIRMED_NEW_PASSWORD = 'confirmed_new_password'
ARG_FOLLOWEE = 'followee'
ARG_MAX_TWEETS = 'max_cnt_tweets'
ARG_TWEET = 'tweet'
|
[
"[email protected]"
] | |
d60d647cb22e7a66f7fc8c92225082303a842d23
|
8de371587c9a6263314df9ccad3e5b911e0439cb
|
/web/django_survey/vote/models.py
|
770903da11ac5f1a781517ac0e523df1cf518c96
|
[] |
no_license
|
desenvolvendoweb/survey
|
307fc615012ba0ea04446c5fe90cb8563422bc76
|
8d1138bfb4c3df9d8ffb5da01d1c34fc87a26d72
|
refs/heads/master
| 2021-01-20T02:37:02.310888 | 2017-04-26T16:40:16 | 2017-04-26T16:40:16 | 89,426,537 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,390 |
py
|
from django.db import models
from django.db.models import Q
# Create your models here.
class Survey(models.Model):
title = models.CharField(max_length=255, verbose_name='Título')
status = models.BooleanField(default=True, verbose_name='Habilitado')
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
def alternatives(self):
return Alternative.objects.filter(Q(survey=self) & Q(status=True))
class Meta:
verbose_name = 'Enquete'
class Alternative(models.Model):
survey = models.ForeignKey(Survey, verbose_name='Enquete')
title = models.CharField(max_length=255, verbose_name='Título')
votes = models.IntegerField(verbose_name='Votos', default=0, editable=False)
status = models.BooleanField(default=True, verbose_name='Habilitado')
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
verbose_name = 'Alternativa'
class Log(models.Model):
user_session = models.CharField(max_length=255, verbose_name='Usuário')
survey = models.ForeignKey(Survey, verbose_name='Enquete')
alternative = models.ForeignKey(Alternative, verbose_name='Alternativa')
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.survey.title
class Meta:
verbose_name = 'Log'
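# Example query (added sketch): list active surveys with their enabled alternatives
#   for survey in Survey.objects.filter(status=True):
#       print(survey.title, [a.title for a in survey.alternatives()])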
|
[
"[email protected]"
] | |
f8d303cc34b76e161aa112517f3e69fb5b750dff
|
77fc7bc3986e6cb43dbe0b782b52b0c724f5f872
|
/parseHW2_1.py
|
acde984243c88e7b618421761303d386e90a5567
|
[] |
no_license
|
abhchennagiri/CN_HW1
|
7adceb37af6eea0aca633660d2748d52c19e3307
|
8c75e8bd406bb89a91607bf4d9bd39480cdf78a3
|
refs/heads/master
| 2021-01-12T11:23:32.028697 | 2016-11-05T04:16:29 | 2016-11-05T04:16:29 | 72,904,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,390 |
py
|
# A Python script to parse the average bandwidth and the average packet loss rate
mb = 1000000
def calBW(start_time,end_time):
tcpflow1_count = 0
#tcpflow2_count = 0
cbrflow_count = 0
#tcpack1_count = 0
#tcpack2_count = 0
t = end_time - start_time
with open('out2_1.tr') as trace_file:
for line in trace_file:
event,time,from_node,to_node,pkt_type,pkt_size,flags,fid,src_addr,dst_addr,seq_num,pkt_id = line.split()
#print time
if(float(time) >= start_time and float(time) <= end_time):
if(event == 'r' and to_node == '3' and src_addr == '1.0' and dst_addr == '4.0'):
tcpflow1_count = tcpflow1_count + 1
#if (event == 'r' and to_node == '2' and src_addr == '4.0' and dst_addr == '1.0'):
# tcpack1_count += 40
#if(event == 'r' and to_node == '3' and src_addr == '5.0' and dst_addr == '6.0'):
# tcpflow2_count = tcpflow2_count + 1
#if((event == 'r' and to_node == '2' and src_addr == '6.0' and dst_addr == '5.0')):
# tcpack2_count += 40
if(event == 'r' and to_node == '3' and src_addr == '5.0' and dst_addr == '6.0'):
cbrflow_count = cbrflow_count + 1
numBytes_tcp1 = (tcpflow1_count) * 1000
#numBytes_tcp2 = (tcpflow2_count) * 1040 + tcpack2_count
numBytes_cbr = (cbrflow_count) * 500
# print numBytes_tcp1,numBytes_tcp2, numBytes_cbr
bw_tcp1 = (numBytes_tcp1 * 8/float(t))/mb
#bw_tcp2 = (numBytes_tcp2 * 8/float(t))
bw_cbr = (numBytes_cbr * 8/float(t))/mb
print bw_tcp1,bw_cbr
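    # (added note) this is application-layer goodput in Mbit/s:
    # received payload bytes * 8 bits/byte / interval seconds / 1e6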
def calAvgPktLoss(start_time, end_time, gap):
totalPktXmit_tcp1 = 0
#totalPktXmit_tcp2 = 0
totalPktXmit_cbr = 0
totalPktDrop_tcp1 = 0
#totalPktDrop_tcp2 = 0
totalPktDrop_cbr = 0
t = end_time - start_time
with open('out2_1.tr') as trace_file:
for line in trace_file:
event,time,from_node,to_node,pkt_type,pkt_size,flags,flow_id,src_addr,dst_addr,seq_num,pkt_id = line.split()
#print time
if(float(time) >= start_time and float(time) <= (end_time + gap)):
if(event == '-' and from_node == '1' ):
totalPktXmit_tcp1 += 1
#if(event == '-' and from_node == '5' ):
# totalPktXmit_tcp2 += 1
if(event == '-' and from_node == '5' ):
totalPktXmit_cbr += 1
if(event == 'd' and flow_id == '1'):
totalPktDrop_tcp1 += 1
#if(event == 'd' and flow_id == '2'):
# totalPktDrop_tcp2 += 1
if(event == 'd' and flow_id == '2'):
totalPktDrop_cbr += 1
pktLossRate_tcp1 = ((totalPktDrop_tcp1)/float(totalPktXmit_tcp1))
#pktLossRate_tcp2 = ((totalPktDrop_tcp2)/float(totalPktXmit_tcp2))
pktLossRate_cbr = (( totalPktDrop_cbr)/float(totalPktXmit_cbr))
#print totalPktXmit_tcp1,totalPktXmit_cbr
#print totalPktDrop_tcp1,totalPktDrop_cbr
print pktLossRate_tcp1, pktLossRate_cbr
def main():
calBW(5,20)
#calBW(5,9)
#calAvgPktLoss(0,4,1)
#calAvgPktLoss(5,9,1)
#calAvgPktLoss(30,34,1)
calAvgPktLoss(5,20,1)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
2a52ea8eda42300a5583eaa59b28a3a97c7f726a
|
ee0bcf752928b8310ed497c1ff365afe927e31de
|
/run.py
|
9222f33837f5182e5329dddaba95be439393cc0e
|
[] |
no_license
|
kduxin/mpi-median-filter-colored
|
0b2c38a20c26ec697f3218608866b1f5477a507c
|
5fe722faa0ff8e4647052c29871032fa81248c10
|
refs/heads/master
| 2020-06-19T14:17:09.750414 | 2019-07-19T06:51:55 | 2019-07-19T06:51:55 | 196,739,539 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,097 |
py
|
import ctypes
from PIL import Image
import numpy as np
import cv2
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# lib_cpu = ctypes.cdll.LoadLibrary("./lib_cpu.so")
lib_mpi = ctypes.cdll.LoadLibrary("./lib_mpi.so")
img = np.array(Image.open("./image/aimer.jpg"), dtype=np.uint8)
# img = img.transpose([2,0,1])
shape = img.shape
def SaltNoise(img, prob=0.01):
shape = img.shape
noise = np.random.rand(*shape)
img_sn = img.copy()
img_sn[noise < prob] = 1
return img_sn
def MedianBlurCpu(img, window_size):
c_uint8_p = ctypes.POINTER(ctypes.c_uint8)
shape = img.shape
    img_p = img.reshape([-1]).copy().ctypes.data_as(c_uint8_p)  # use the argument, not the global img_sn
    # NOTE: requires the lib_cpu load (commented out above) to be enabled
    lib_cpu.medianBlurColored(img_p, shape[0], shape[1], shape[2], window_size)
print("44444")
img_f = np.array(img_p[:shape[0]*shape[1]*shape[2]], dtype=np.uint8).reshape(*shape)
print("55555")
return img_f
def MedianBlurMPI(img, window_size):
c_uint8_p = ctypes.POINTER(ctypes.c_uint8)
shape = img.shape
    img_p = img.reshape([-1]).copy().ctypes.data_as(c_uint8_p)  # use the argument, not the global img_sn
lib_mpi.medianBlurColored_MPI(img_p, shape[0], shape[1], shape[2], window_size)
print("44444")
img_f = np.array(img_p[:shape[0]*shape[1]*shape[2]], dtype=np.uint8).reshape(*shape)
print("55555")
return img_f
def MedianBlurCuda(img, window_size):
c_uint8_p = ctypes.POINTER(ctypes.c_uint8)
shape = img.shape
    img_p = img.reshape([-1]).copy().ctypes.data_as(c_uint8_p)  # use the argument, not the global img_sn
    # NOTE: a CUDA build must be loaded first, e.g.
    # lib = ctypes.cdll.LoadLibrary("./lib_cuda.so")  (library name assumed)
    lib.medianBlurColoredCuda(img_p, shape[0], shape[1], shape[2], window_size)
img_f = np.array(img_p[:shape[0]*shape[1]*shape[2]], dtype=np.uint8).reshape(shape)
return img_f
img_sn = SaltNoise(img, prob=0.01)
# img_medblur = cv2.medianBlur(img_sn, 5)
# img_bilblur = cv2.bilateralFilter(img_sn, 5, 75, 75)
# img_nlmblur = cv2.fastNlMeansDenoisingColored(img_sn, h=9, hColor=10)
print("Start medblurring with mpi.")
# img_medblur_cpu = MedianBlurCpu(img, 5)
img_medblur_mpi = MedianBlurMPI(img_sn, 5)
# img_medblur_cuda = MedianBlurCuda(img, 5)
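# Persist the filtered result for inspection (added sketch; the output path is an assumption).
Image.fromarray(img_medblur_mpi).save("./image/aimer_medblur_mpi.jpg")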
# """ draw figure """
# fig = plt.figure(figsize=(15,10))
# ax11 = fig.add_subplot('221')
# ax12 = fig.add_subplot('222')
# ax21 = fig.add_subplot('223')
# ax22 = fig.add_subplot('224')
# ax11.imshow(img, aspect='auto')
# ax12.imshow(img_sn, aspect='auto')
# ax21.imshow(img_medblur_cpu, aspect='auto')
# ax22.imshow(img_medblur_mpi, aspect='auto')
# ax11.set_title('Raw Image')
# ax12.set_title('Salt Noise with prob. 0.1')
# ax21.set_title('Denoised Image with Median Blurring (single cpu)')
# ax22.set_title('Denoised Image with Median Blurring (mpi)')
# for ax in [ax11, ax12, ax21, ax22]:
# ax.axis("off")
# fig.subplots_adjust(hspace=0.2, wspace=0.2)
# fig.save_fig("./result.jpg")
# plt.imshow(np.array(img, dtype=np.uint8))
# plt.show()
# plt.imshow(np.array(img_medblur_cpu, dtype=np.uint8))
# plt.show()
# lib.printList(img_p, 6)
# img_f = np.frombuffer(img_p, dtype=np.float32, count=3)
# img_f = np.fromiter(img_p, dtype=np.float32, count=6)
# img_f
# plt.imshow(img_f.reshape([400,-1]))
# plt.show()
# plt.imshow(img)
|
[
"[email protected]"
] | |
1f65a3b9b769c30d6f53d3c5f460f7fbe37996a3
|
ad6f3be8c2746eec5778f8b505a6718500697b83
|
/laboratory_work_2-master/task_3/info.py
|
7ef476e79106d3cb04a7a4989c10ed1804549876
|
[] |
no_license
|
Dementr/labs
|
74344765a4defdd1a2e246360d56f7d8cc513942
|
f3831c978e94deda7e0a0b43558c3975b041a8b2
|
refs/heads/master
| 2020-07-25T04:20:13.206343 | 2019-09-12T23:19:19 | 2019-09-12T23:19:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 919 |
py
|
info = 'ФИО;Возраст;Категория;_Иванов Иван Иванович;23 года;Студент 3 курса;_Петров Семен Игоревич;22 года;Студент 2 ' \
'курса;_Иванов Семен Игоревич;22 года;Студент 2 курса;_Акибов Ярослав Наумович;23' \
' года;Студент 3 курса;_Борков Станислав Максимович;21 год;Студент 1 курса;_Петров Семен Семенович;21 год;' \
'Студент 1 курса;_Романов Станислав Андреевич;23 года;Студент 3 курса;_Петров Всеволод Борисович;21 год;Студент ' \
'2 курса;'
info = info.split(';_')
# NB: the data string above is kept verbatim (in Russian) because the search
# below matches the literal substring '21 год' ("21 years old").
for line in info:
    if line.find('21 год') != -1:
        print(line)
|
[
"[email protected]"
] | |
545c240dc43ec38cffd97004bd6125bf765692d6
|
5e49afd9c6ca73d7074c7ae220d5186fe4f44c08
|
/setup.py
|
100a3637c77fb07f8f43449aadc017a221620a02
|
[
"MIT"
] |
permissive
|
pylover/sharedlists
|
c2842618e7f6f9fea9dfefd710b9f94f36c19e7c
|
b020be26d3a64a3cdb9417a066a454b5b92006c5
|
refs/heads/master
| 2020-06-22T02:55:47.892070 | 2019-08-04T20:37:20 | 2019-08-04T20:37:20 | 197,615,918 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
import re
from os.path import join, dirname
from setuptools import setup, find_packages
# reading package version (same way the sqlalchemy does)
with open(join(dirname(__file__), 'sharedlists', '__init__.py')) as v_file:
package_version = re.compile('.*__version__ = \'(.*?)\'', re.S).\
match(v_file.read()).group(1)
dependencies = [
'restfulpy >= 3.4, < 4',
'easycli >= 1.4, < 2',
# Deployment
'gunicorn',
]
setup(
name='sharedlists',
version=package_version,
packages=find_packages(exclude=['tests']),
install_requires=dependencies,
include_package_data=True,
license='MIT',
entry_points={
'console_scripts': [
'sharedlists = sharedlists:server_main',
'bee = sharedlists:client_main'
]
}
)
|
[
"[email protected]"
] | |
f29c5f5a8fd629051b2a7cb2e2d5a0557af1f558
|
2d9e5a30ac44bf15296a058a198b97fa1e6574c0
|
/galaxy_ml/binarize_target/_binarize_estimators.py
|
0ba49fca7485267cc0fb472bafdd5ef680a5af56
|
[
"MIT"
] |
permissive
|
kxk302/Galaxy-ML
|
8397e533b1529354fc1d5e7b147cd808b129efd8
|
d42bea8591f691c44fd0523d567c1dfa8a87bd01
|
refs/heads/master
| 2023-04-13T21:44:18.478631 | 2021-03-23T17:54:46 | 2021-03-23T17:54:46 | 346,071,096 | 0 | 0 |
MIT
| 2021-03-12T23:08:31 | 2021-03-09T16:28:41 | null |
UTF-8
|
Python
| false | false | 10,288 |
py
|
import numpy as np
from sklearn.base import (BaseEstimator, clone, RegressorMixin,
TransformerMixin)
from sklearn.utils.validation import (check_array, check_is_fitted,
column_or_1d)
class BinarizeTargetClassifier(BaseEstimator, RegressorMixin):
"""
Convert continuous target to binary labels (True and False)
and apply a classification estimator.
Parameters
----------
classifier : object
Estimator object such as derived from sklearn `ClassifierMixin`.
z_score : float, default=-1.0
Threshold value based on z_score. Will be ignored when
        value is set
value : float, default=None
Threshold value
less_is_positive : boolean, default=True
When target is less the threshold value, it will be converted
to True, False otherwise.
verbose : int, default=0
If greater than 0, print discretizing info.
Attributes
----------
classifier_ : object
Fitted classifier
discretize_value : float
The threshold value used to discretize True and False targets
"""
def __init__(self, classifier, z_score=-1, value=None,
less_is_positive=True, verbose=0):
self.classifier = classifier
self.z_score = z_score
self.value = value
self.less_is_positive = less_is_positive
self.verbose = verbose
def fit(self, X, y, sample_weight=None, **fit_params):
"""
Convert y to True and False labels and then fit the classifier
with X and new y
Returns
------
self: object
"""
y = check_array(y, accept_sparse=False, force_all_finite=True,
ensure_2d=False, dtype='numeric')
y = column_or_1d(y)
if self.value is None:
discretize_value = y.mean() + y.std() * self.z_score
else:
            discretize_value = self.value
self.discretize_value = discretize_value
if self.less_is_positive:
y_trans = y < discretize_value
else:
y_trans = y > discretize_value
n_positives = np.sum(y_trans)
# for older version compatibility
if self.verbose and self.verbose > 0:
print("{0} out of total {1} samples are discretized into "
"positive.".format(n_positives, X.shape[0]))
self.classifier_ = clone(self.classifier)
keys = list(fit_params.keys())
for key in keys:
if not key.startswith('classifier__'):
raise ValueError("fit_params for BinarizeTargetClassifier "
"must start with `classifier__`")
fit_params[key[12:]] = fit_params.pop(key)
if sample_weight is not None:
self.classifier_.fit(X, y_trans,
sample_weight=sample_weight,
**fit_params)
else:
self.classifier_.fit(X, y_trans, **fit_params)
# Used in RFE or SelectFromModel
if hasattr(self.classifier_, 'feature_importances_'):
self.feature_importances_ = self.classifier_.feature_importances_
if hasattr(self.classifier_, 'coef_'):
self.coef_ = self.classifier_.coef_
if hasattr(self.classifier_, 'n_outputs_'):
self.n_outputs_ = self.classifier_.n_outputs_
if hasattr(self.classifier_, 'n_features_'):
self.n_features_ = self.classifier_.n_features_
return self
def predict(self, X):
"""Predict using a fitted estimator
"""
return self.classifier_.predict(X)
def decision_function(self, X):
"""Predict using a fitted estimator
"""
return self.classifier_.decision_function(X)
def predict_proba(self, X):
"""Predict using a fitted estimator
"""
return self.classifier_.predict_proba(X)
class BinarizeTargetRegressor(BaseEstimator, RegressorMixin):
"""
Extend regression estimator to have discretize_value
Parameters
----------
regressor : object
Estimator object such as derived from sklearn `RegressionMixin`.
z_score : float, default=-1.0
Threshold value based on z_score. Will be ignored when
value is set
value : float, default=None
Threshold value
less_is_positive : boolean, default=True
When target is less the threshold value, it will be converted
to True, False otherwise.
verbose : int, default=0
If greater than 0, print discretizing info.
Attributes
----------
regressor_ : object
Fitted regressor
discretize_value : float
The threshold value used to discretize True and False targets
"""
def __init__(self, regressor, z_score=-1, value=None,
less_is_positive=True, verbose=0):
self.regressor = regressor
self.z_score = z_score
self.value = value
self.less_is_positive = less_is_positive
self.verbose = verbose
def fit(self, X, y, sample_weight=None, **fit_params):
"""
        Calculate the discretize_value and fit the regressor with training data
Returns
------
self: object
"""
y = check_array(y, accept_sparse=False, force_all_finite=True,
ensure_2d=False, dtype='numeric')
y = column_or_1d(y)
if not np.all((y >= 0) & (y <= 1)):
raise ValueError("The target value of BinarizeTargetRegressor "
"must be in the range [0, 1]")
if self.value is None:
discretize_value = y.mean() + y.std() * self.z_score
else:
            discretize_value = self.value
self.discretize_value = discretize_value
if self.less_is_positive:
n_positives = np.sum(y < discretize_value)
else:
n_positives = np.sum(y > discretize_value)
# for older version compatibility
if self.verbose and self.verbose > 0:
print("{0} out of total {1} samples are discretized into "
"positive.".format(n_positives, X.shape[0]))
self.regressor_ = clone(self.regressor)
keys = list(fit_params.keys())
for key in keys:
if not key.startswith('regressor__'):
raise ValueError("fit_params for BinarizeTargetClassifier "
"must start with `regressor__`")
fit_params[key[11:]] = fit_params.pop(key)
if sample_weight is not None:
self.regressor_.fit(X, y,
sample_weight=sample_weight,
**fit_params)
else:
self.regressor_.fit(X, y, **fit_params)
# attach classifier attributes
if hasattr(self.regressor_, 'feature_importances_'):
self.feature_importances_ = self.regressor_.feature_importances_
if hasattr(self.regressor_, 'coef_'):
self.coef_ = self.regressor_.coef_
if hasattr(self.regressor_, 'n_outputs_'):
self.n_outputs_ = self.regressor_.n_outputs_
if hasattr(self.regressor_, 'n_features_'):
self.n_features_ = self.regressor_.n_features_
return self
def predict(self, X):
"""Predict target value of X
"""
check_is_fitted(self, 'regressor_')
return self.regressor_.predict(X)
def decision_function(self, X):
"""
Output the proba for True label
For use in the binarize target scorers.
"""
pred = self.predict(X)
if self.less_is_positive:
pred = 1 - pred
return pred
def predict_label(self, X, cutoff):
""" output a label based on cutoff value
Parameters
----------
cutoff : float
"""
scores = self.decision_function(X)
return scores > cutoff
class BinarizeTargetTransformer(BaseEstimator, TransformerMixin):
"""
    Extend transformer to work for binarized target.
Parameters
----------
transformer : object
Estimator object such as derived from sklearn `TransformerMixin`,
including feature_selector and preprocessor
z_score : float, default=-1.0
Threshold value based on z_score. Will be ignored when
        value is set
value : float, default=None
Threshold value
less_is_positive : boolean, default=True
When target is less the threshold value, it will be converted
to True, False otherwise.
Attributes
----------
transformer_ : object
Fitted regressor
discretize_value : float
The threshold value used to discretize True and False targets
"""
def __init__(self, transformer, z_score=-1, value=None,
less_is_positive=True):
self.transformer = transformer
self.z_score = z_score
self.value = value
self.less_is_positive = less_is_positive
def fit(self, X, y):
"""
Convert y to True and False labels and then fit the transformer
with X and new y
Returns
------
self: object
"""
y = check_array(y, accept_sparse=False, force_all_finite=True,
ensure_2d=False, dtype='numeric')
y = column_or_1d(y)
if self.value is None:
discretize_value = y.mean() + y.std() * self.z_score
else:
            discretize_value = self.value
self.discretize_value = discretize_value
if self.less_is_positive:
y_trans = y < discretize_value
else:
y_trans = y > discretize_value
self.transformer_ = clone(self.transformer)
self.transformer_.fit(X, y_trans)
return self
def transform(self, X):
"""Transform X
Parameters
----------
X : array of shape [n_samples, n_features]
Returns
-------
X_r : array
"""
check_is_fitted(self, 'transformer_')
X = check_array(X, dtype=None, accept_sparse='csr')
return self.transformer_.transform(X)
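# Hedged usage sketch (estimator and variable names are assumptions, not from
# this module): fit_params are routed to the wrapped estimator by stripping the
# 'classifier__' prefix before delegation.
#
#   from sklearn.linear_model import LogisticRegression
#   clf = BinarizeTargetClassifier(LogisticRegression(), z_score=-1.0)
#   clf.fit(X, y, **{'classifier__sample_weight': w})
#   # 'classifier__sample_weight' becomes 'sample_weight' for the inner fit.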
|
[
"[email protected]"
] | |
f06d43fb2d6f3095a730d2a824cce59219131f2c
|
63ff60a5fde00c79fc4cd72ebd88595d14925921
|
/picker/migrations/0001_initial.py
|
06d0748e23c3c998488055b09f20e15d79015dab
|
[
"MIT"
] |
permissive
|
dakrauth/picker
|
e797e99cb7859b2f33998aca2e84e432c0c19d8d
|
084bc22cf50b200333a6c76d9577463eda6a0948
|
refs/heads/main
| 2022-09-15T03:43:28.400665 | 2022-09-03T05:57:56 | 2022-09-03T05:57:56 | 42,045,430 | 2 | 2 |
MIT
| 2022-01-07T17:21:16 | 2015-09-07T09:58:39 |
Python
|
UTF-8
|
Python
| false | false | 10,602 |
py
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Alias',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=50)),
],
),
migrations.CreateModel(
name='Conference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('abbr', models.CharField(max_length=8)),
],
),
migrations.CreateModel(
name='Division',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('conference', models.ForeignKey(on_delete=models.CASCADE, to='picker.Conference')),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('kickoff', models.DateTimeField()),
('tv', models.CharField(max_length=8, verbose_name=b'TV', blank=True)),
('notes', models.TextField(blank=True)),
('category', models.CharField(default='REG', max_length=4, choices=[('REG', b'Regular Season'), ('POST', b'Post Season')])),
('status', models.CharField(default='U', max_length=1, choices=[('U', b'Unplayed'), ('T', b'Tie'), ('H', b'Home Win'), ('A', b'Away Win')])),
('location', models.CharField(max_length=50, blank=True)),
],
options={
'ordering': ('kickoff', 'away'),
},
),
migrations.CreateModel(
name='GamePick',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('game', models.ForeignKey(on_delete=models.CASCADE, related_name='gamepicks', to='picker.Game')),
],
options={
'ordering': ('game__kickoff', 'game__away'),
},
),
migrations.CreateModel(
name='GameSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('season', models.PositiveSmallIntegerField()),
('week', models.PositiveSmallIntegerField()),
('points', models.PositiveSmallIntegerField(default=0)),
('opens', models.DateTimeField()),
('closes', models.DateTimeField()),
],
options={
'ordering': ('season', 'week'),
},
),
migrations.CreateModel(
name='League',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=50)),
('abbr', models.CharField(max_length=8)),
('logo', models.ImageField(null=True, upload_to=b'picker/logos', blank=True)),
('is_pickable', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='PickSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('points', models.PositiveSmallIntegerField(default=0)),
('correct', models.PositiveSmallIntegerField(default=0)),
('wrong', models.PositiveSmallIntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('strategy', models.CharField(default='USER', max_length=4, choices=[('USER', b'User'), ('RAND', b'Random'), ('HOME', b'Home Team'), ('BEST', b'Best Record')])),
('user', models.ForeignKey(on_delete=models.CASCADE, related_name='picksets', to=settings.AUTH_USER_MODEL)),
('week', models.ForeignKey(on_delete=models.CASCADE, related_name='picksets', to='picker.GameSet')),
],
),
migrations.CreateModel(
name='Playoff',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('season', models.PositiveSmallIntegerField()),
('kickoff', models.DateTimeField()),
('league', models.ForeignKey(on_delete=models.CASCADE, to='picker.League')),
],
),
migrations.CreateModel(
name='PlayoffPicks',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('picks', models.TextField(blank=True)),
('playoff', models.ForeignKey(on_delete=models.CASCADE, to='picker.Playoff')),
('user', models.ForeignKey(on_delete=models.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='PlayoffTeam',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('seed', models.PositiveSmallIntegerField()),
('playoff', models.ForeignKey(on_delete=models.CASCADE, to='picker.Playoff')),
],
options={
'ordering': ('seed',),
},
),
migrations.CreateModel(
name='Preference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='ACTV', max_length=4, choices=[('ACTV', b'Active'), ('IDLE', b'Inactive'), ('SUSP', b'Suspended')])),
('autopick', models.CharField(default='RAND', max_length=4, choices=[('NONE', b'None'), ('RAND', b'Random')])),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('abbr', models.CharField(max_length=8, blank=True)),
('nickname', models.CharField(max_length=50)),
('location', models.CharField(max_length=100, blank=True)),
('image', models.CharField(max_length=50, blank=True)),
('colors', models.CharField(max_length=40, blank=True)),
('logo', models.ImageField(null=True, upload_to=b'picker/logos', blank=True)),
('conference', models.ForeignKey(on_delete=models.CASCADE, to='picker.Conference')),
('division', models.ForeignKey(on_delete=models.SET_NULL, blank=True, to='picker.Division', null=True)),
('league', models.ForeignKey(on_delete=models.CASCADE, to='picker.League')),
],
options={
'ordering': ('name',),
},
),
migrations.AddField(
model_name='preference',
name='favorite_team',
field=models.ForeignKey(on_delete=models.SET_NULL, blank=True, to='picker.Team', null=True),
),
migrations.AddField(
model_name='preference',
name='league',
field=models.ForeignKey(on_delete=models.CASCADE, to='picker.League'),
),
migrations.AddField(
model_name='preference',
name='user',
field=models.OneToOneField(on_delete=models.CASCADE, related_name='picker_preferences', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='playoffteam',
name='team',
field=models.ForeignKey(on_delete=models.CASCADE, to='picker.Team'),
),
migrations.AddField(
model_name='gameset',
name='byes',
field=models.ManyToManyField(related_name='bye_set', verbose_name=b'Bye Teams', to='picker.Team'),
),
migrations.AddField(
model_name='gameset',
name='league',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='game_set', to='picker.League'),
),
migrations.AddField(
model_name='gamepick',
name='pick',
field=models.ForeignKey(on_delete=models.CASCADE, to='picker.PickSet'),
),
migrations.AddField(
model_name='gamepick',
name='winner',
field=models.ForeignKey(on_delete=models.SET_NULL, blank=True, to='picker.Team', null=True),
),
migrations.AddField(
model_name='game',
name='away',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='away_games', to='picker.Team'),
),
migrations.AddField(
model_name='game',
name='home',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='home_games', to='picker.Team'),
),
migrations.AddField(
model_name='game',
name='week',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='games', to='picker.GameSet'),
),
migrations.AddField(
model_name='conference',
name='league',
field=models.ForeignKey(on_delete=models.CASCADE, to='picker.League'),
),
migrations.AddField(
model_name='alias',
name='team',
field=models.ForeignKey(on_delete=models.CASCADE, to='picker.Team'),
),
migrations.AlterUniqueTogether(
name='pickset',
unique_together=set([('user', 'week')]),
),
]
|
[
"[email protected]"
] | |
d97b425f44e492b8a223677528b958ac0a7585f5
|
9f4a1f8546e95da92d7e1b924b651f0ea5eb16ba
|
/sockets/aes/aes_gcm.py
|
8fe968c20a25ac087bc23438966c934a26053813
|
[] |
no_license
|
pengyejun/python_demo
|
928e557fb890c2881284d9e9100c5ac9aa2292c6
|
80732c440c97070149c1d5bb4d08972d9cd2869a
|
refs/heads/master
| 2022-12-21T13:07:24.688683 | 2020-09-15T15:22:55 | 2020-09-15T15:22:55 | 265,818,736 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,626 |
py
|
# need python 3.6 or above & Linux >=4.9
import contextlib
import socket
import os
@contextlib.contextmanager
def create_alg(typ, name):
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
s.bind((typ, name))
yield s
finally:
s.close()
def encrypt(key, iv, assoc, taglen, plaintext):
""" doing aes-gcm encrypt
:param key: the aes symmetric key
:param iv: initial vector
:param assoc: associated data (integrity protection)
:param taglen: authenticator tag len
:param plaintext: plain text data
"""
assoclen = len(assoc)
ciphertext = None
tag = None
with create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG,
socket.ALG_SET_KEY, key)
        algo.setsockopt(socket.SOL_ALG,
                        socket.ALG_SET_AEAD_AUTHSIZE,
                        None,
                        taglen)  # AUTHSIZE is the tag length, not assoclen
op, _ = algo.accept()
with op:
msg = assoc + plaintext
op.sendmsg_afalg([msg],
op=socket.ALG_OP_ENCRYPT,
iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plaintext) + taglen)
ciphertext = res[assoclen:-taglen]
tag = res[-taglen:]
return ciphertext, tag
def decrypt(key, iv, assoc, tag, ciphertext):
""" doing aes-gcm decrypt
:param key: the AES symmetric key
:param iv: initial vector
:param assoc: associated data (integrity protection)
:param tag: the GCM authenticator tag
:param ciphertext: cipher text data
"""
plaintext = None
assoclen = len(assoc)
    taglen = len(tag)
    with create_alg('aead', 'gcm(aes)') as algo:
        algo.setsockopt(socket.SOL_ALG,
                        socket.ALG_SET_KEY, key)
        algo.setsockopt(socket.SOL_ALG,
                        socket.ALG_SET_AEAD_AUTHSIZE,
                        None,
                        taglen)  # AUTHSIZE is the tag length, not assoclen
op, _ = algo.accept()
with op:
msg = assoc + ciphertext + tag
op.sendmsg_afalg([msg],
op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
            res = op.recv(len(msg) - taglen)
plaintext = res[assoclen:]
return plaintext
key = os.urandom(16)
iv = os.urandom(12)
assoc = os.urandom(16)
plaintext = b"Hello AES-GCM"
ciphertext, tag = encrypt(key, iv, assoc, 16, plaintext)
plaintext = decrypt(key, iv, assoc, tag, ciphertext)
print(ciphertext.hex())
print(plaintext)
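# Hedged extra check (assumed AF_ALG behaviour, shown only as an illustration):
# flipping one ciphertext byte should make tag verification fail, which the
# kernel surfaces as an OSError (EBADMSG) during decrypt.
tampered = bytes([ciphertext[0] ^ 1]) + ciphertext[1:]
try:
    decrypt(key, iv, assoc, tag, tampered)
    print("tampering NOT detected (unexpected)")
except OSError as exc:
    print("tampering detected:", exc)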
|
[
"[email protected]"
] | |
fbb7b4596efb9ead919f079b83df0bfef29774c1
|
8b07a4a8f01a3cf9bf9e312a1e0716af72713e56
|
/Python/django/djpr/freefly/freefly/wsgi.py
|
bf2884d6d75fcd04c2f60bdaddadd182b491c6ae
|
[] |
no_license
|
milleniax/All-projects
|
68d1f6250c03cadff0ccd2a5a29f53b9dc91a7f2
|
d295a502c7e732ab222439b1e52d29fc2463bd9f
|
refs/heads/master
| 2020-09-25T17:10:14.464139 | 2020-03-28T20:02:18 | 2020-03-28T20:02:18 | 226,049,706 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
"""
WSGI config for freefly project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'freefly.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
d3411e33fed37f486fef508d9bc01e73d9d91107
|
70d762a49a33cb9896ac9946239e74c7751b2bac
|
/openstack_dashboard/enabled/_2200_WRS_dc_admin.py
|
db2f90a618208eb1a6aef86b8d23f156473bbcfb
|
[
"Apache-2.0"
] |
permissive
|
aleks-kozyrev/stx-horizon
|
638cde052e02da71be9f3d7bb0bb1718db0579c2
|
517d18255c714a4bd4d2476697947d3929d57039
|
refs/heads/master
| 2020-03-27T11:11:43.381274 | 2018-08-27T19:38:11 | 2018-08-27T19:38:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 497 |
py
|
# The slug of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'dc_admin'
# If set to True, this dashboard will be set as the default dashboard.
DEFAULT = False
# A dictionary of exception classes to be added to HORIZON['exceptions'].
ADD_EXCEPTIONS = {}
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = ['openstack_dashboard.dashboards.dc_admin']
ADD_ANGULAR_MODULES = [
'horizon.dashboard.dc_admin',
]
AUTO_DISCOVER_STATIC_FILES = True
|
[
"[email protected]"
] | |
7107ab73e45047060a6a8580092971ab13b86db0
|
ab616e26a623fe7e81d30ba7b86fabe4a3658794
|
/LibriSpeech/Get_Meta_LibriSpeech.py
|
39a801bf12ffad5efc95d8bb95ea6ef3ab2b9afa
|
[] |
no_license
|
ruclion/linears_decoder
|
1d2367fbfa8fdde3ae0a8c53e5e82ed7035d1eed
|
93cf874f87a601584c07ba5e4b673e401e9e7c90
|
refs/heads/master
| 2022-12-16T14:25:34.373534 | 2020-09-22T14:42:58 | 2020-09-22T14:42:58 | 289,808,115 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,973 |
py
|
import os
# import numpy as np
# from audio import wav2mfcc_v2, load_wav
wavs_dir = 'wavs'
ppgs_dir = 'alignments'
zhaoxt_train = 'train.txt'
zhaoxt_test = 'test.txt'
meta_list_fromWavs = []
meta_list_fromPPGs = []
meta_list_fromZhaoxt = []
meta_list = []
meta_path = 'meta.txt'
def main():
# 2391-145015-0048
f = open(zhaoxt_train, 'r')
a = [t.strip() for t in f.readlines()]
meta_list_fromZhaoxt.extend(a)
f = open(zhaoxt_test, 'r')
a = [t.strip() for t in f.readlines()]
meta_list_fromZhaoxt.extend(a)
print('Zhaoxts:', len(meta_list_fromZhaoxt), meta_list_fromZhaoxt[0])
# wavs
for second_dir in os.listdir(wavs_dir):
for third_dir in os.listdir(os.path.join(wavs_dir,second_dir)):
third_wavs_dir = os.path.join(os.path.join(wavs_dir,second_dir),third_dir)
wav_files = [f[:-4] for f in os.listdir(third_wavs_dir) if f.endswith('.wav')]
# print('Extracting MFCC from {}...'.format(third_wavs_dir))
meta_list_fromWavs.extend(wav_files)
print('Wavs:', len(meta_list_fromWavs), meta_list_fromWavs[0])
# 100-121669-0000 1 1 1 1 1 1 1
for f_path in os.listdir(ppgs_dir):
f = open(os.path.join(ppgs_dir, f_path), 'r')
a = f.readlines()
for line in a:
line = line.strip().split(' ')
meta_list_fromPPGs.append(line[0])
print('PPGs:', len(meta_list_fromPPGs), meta_list_fromPPGs[0])
    # Use Zhaoxt's train/test list as the primary source; keep an id only if it
    # also appears in both wavs and ppgs (brute-force scan, takes about a minute)
for idx in meta_list_fromZhaoxt:
if idx in meta_list_fromPPGs and idx in meta_list_fromWavs:
meta_list.append(idx)
else:
            print('skipped (missing in wavs or ppgs):', idx)
# break
f = open(meta_path, 'w')
for idx in meta_list:
f.write(idx + '\n')
return
if __name__ == '__main__':
main()
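# Hedged speed-up sketch: the `in` tests above scan Python lists, i.e. O(n*m)
# overall. Set lookups would make the same filter effectively linear:
#
#   ppg_set, wav_set = set(meta_list_fromPPGs), set(meta_list_fromWavs)
#   meta_list = [i for i in meta_list_fromZhaoxt if i in ppg_set and i in wav_set]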
|
[
"[email protected]"
] | |
80181757a1dd7012195372ab59e888c4ee85a6db
|
5c48be6df9bc5ef55725c4020d67d046e19cc9e2
|
/Practice(beginner)/ATM(HS08TEST).py
|
36d8423fb0e95b17d2744af9f4b2b3fad3a6fb07
|
[] |
no_license
|
aman171006/codeshef
|
9cd14a6cfee828adf71fdd847fc5dd3a4e0edd64
|
f2716d27b51bc02e5e932efd53b3ef05f665c146
|
refs/heads/master
| 2022-11-27T10:15:09.223209 | 2020-07-28T12:20:28 | 2020-07-28T12:20:28 | 283,109,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 772 |
py
|
### Third variable is not needed for the output.
### map(function, iterable) applies float() to every whitespace-separated token
### from input(), e.g. "30 120.00" -> with_draw=30.0, total_balance=120.0
with_draw, total_balance = map(float, input().split())
# 0.50 is the bank charge, paid from the account on a successful withdrawal
if with_draw % 5 == 0 and (with_draw + 0.50) <= total_balance:
    print("{0:0.2f}".format(total_balance - with_draw - 0.50))  # {0:0.2f} keeps two decimals
else:
    print("{0:0.2f}".format(total_balance))
|
[
"[email protected]"
] | |
39b4713bb06e115f5fef7f696c1b2c73fcf47adf
|
1ed536ef1527e6655217e731f622d643ece49c2b
|
/scripts/align_all_vs_all.py
|
c152b8b783b8dffd40812fc5cb7771efc2c163fb
|
[] |
no_license
|
siping/cgat
|
de0f7af124eb38c72d7dece78fff83ff92ddbf96
|
aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e
|
refs/heads/master
| 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,781 |
py
|
'''
align_all_vs_all.py - all-vs-all pairwise alignment
===================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script computes all-vs-all alignments between
sequences in a :term:`fasta` formatted file.
Currently only Smith-Waterman protein alignment is
implemented.
Usage
-----
Example::
python align_all_vs_all.py --help
Type::
python align_all_vs_all.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
import CGAT.Experiment as E
import alignlib
import CGAT.FastaIterator as FastaIterator
""" program $Id: align_all_vs_all.py 2782 2009-09-10 11:40:29Z andreas $
"""
if __name__ == "__main__":
parser = E.OptionParser( version = "%prog version: $Id: align_all_vs_all.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-s", "--sequences", dest="filename_sequences", type="string",
help="input file with sequences" )
parser.set_defaults(
filename_sequences = None,
gop = -10.0,
gep = -1.0,
)
(options, args) = E.Start( parser, add_pipe_options = True )
if options.filename_sequences:
infile = open(options.filename_sequences, "r")
else:
infile = sys.stdin
    iterator = FastaIterator.FastaIterator( infile )
sequences = []
while 1:
cur_record = iterator.next()
if cur_record is None: break
sequences.append( (cur_record.title, alignlib.makeSequence(re.sub( " ", "", cur_record.sequence)) ) )
if options.filename_sequences:
infile.close()
alignator = alignlib.makeAlignatorFullDP( options.gop, options.gep )
map_a2b = alignlib.makeAlignataVector()
nsequences = len(sequences)
for x in range(0,nsequences-1):
for y in range(x+1, nsequences):
alignator.Align( sequences[x][1], sequences[y][1], map_a2b)
row_ali, col_ali = alignlib.writeAlignataCompressed( map_a2b )
options.stdout.write( "%s\t%s\t%i\t%i\t%i\t%s\t%i\t%i\t%s\t%i\t%i\t%i\t%i\n" % (\
sequences[x][0], sequences[y][0],
map_a2b.getScore(),
map_a2b.getRowFrom(),
map_a2b.getRowTo(),
row_ali,
map_a2b.getColFrom(),
map_a2b.getColTo(),
col_ali,
map_a2b.getScore(),
100 * alignlib.calculatePercentIdentity( map_a2b, sequences[x][1], sequences[y][1]),
sequences[x][1].getLength(),
sequences[y][1].getLength() ))
E.Stop()
|
[
"[email protected]"
] | |
1ebf265851834e1dbde7d2c4c549c1c82252350c
|
02a535bbe64f52c112aef2b6b2abce5e2bf46933
|
/Alquileres/Alquileres/urls.py
|
ae334f9df5de595c8a6305904a4afacd6ebe427a
|
[] |
no_license
|
RicardoWebProject/BackendAlquileres
|
ff05e8a46c9cdf0c12edc9e8ff04c07d3e9b4961
|
72b12a459830bd4348caf763afa3d65ad60b8603
|
refs/heads/master
| 2022-11-22T04:07:45.020079 | 2020-07-27T01:10:05 | 2020-07-27T01:10:05 | 282,762,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,028 |
py
|
"""Alquileres URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('clientes.urls')),
path('', include('maquinaria.urls')),
path('', include('malquileres.urls')),
] + static (settings.MEDIA_URL, document_root= settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
a0325f9b5660f6f88458256b3e32f7e6882e5fc0
|
365358cab54e175ea0cef0d32f012cf74d9d4546
|
/hipo/settings.py
|
ee8cffb404143470799ebf85ed83ceed72a11e85
|
[] |
no_license
|
baranugur/hipo
|
613692ad24cc8793c4ffd1e721e3d51a2464edc2
|
006425bb28ec69d3dad2b2514f0f0a16db3815c2
|
refs/heads/master
| 2020-04-13T00:29:18.093216 | 2019-01-17T22:27:16 | 2019-01-17T22:27:16 | 162,846,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,294 |
py
|
"""
Django settings for hipo project.
Generated by 'django-admin startproject' using Django 2.0.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!ogj6y$_n=w4w42!bi_bc(_85=3hixqq2-tcd_*7$7vi%pbkre'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['flickr-baranugur.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hipo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hipo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'photo',
'USER': 'postgres',
'PASSWORD': '1234567890',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Istanbul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Activate Django-Heroku.
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
28c0e80eb8d54321a71795b13c75ff070a6ccf3c
|
8f755bf2147b2ec74fdf26fd3fb583256be5993d
|
/project_1.py
|
13e07e1e20bae1397f808f1e3a042cc54064f4a5
|
[] |
no_license
|
vladroolit/euler_project
|
b8ad3dc85cf08d382d4449f56b5626cb8441861b
|
cd35027317e9f420756b27773631bd2a976e141d
|
refs/heads/master
| 2022-12-11T04:02:08.178119 | 2020-08-28T17:44:20 | 2020-08-28T17:44:20 | 290,271,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
print('If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below 1000.')
total = 0
for i in range(1000):
    if i % 3 == 0 or i % 5 == 0:
        total += i
print('\n\n')
print(total)
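# Cross-check via inclusion-exclusion: the sum of multiples of k below n is
# k * m * (m + 1) / 2 with m = (n - 1) // k, so S(3) + S(5) - S(15) must match.
def sum_multiples(k, n=1000):
    m = (n - 1) // k
    return k * m * (m + 1) // 2

assert total == sum_multiples(3) + sum_multiples(5) - sum_multiples(15)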
|
[
"[email protected]"
] | |
8ec9adeaa2aae769320e408697df1517ee7f8b4e
|
0511cc9421df293ef72ad92f01b0e08d8e9300d9
|
/src/chap_08/48_udpbroadcast_py/server/main.py
|
97cf42057a7901ecb4732385095652cc01c68bc5
|
[] |
no_license
|
github-jxm/LinuxQt_Exambel
|
7cdb234a27e3e81827ceb7266b23cb80e3922e4a
|
309fde83c6492638a76713e3950395326fd00eef
|
refs/heads/master
| 2020-05-21T20:43:59.562848 | 2016-12-25T13:28:43 | 2016-12-25T13:28:43 | 65,841,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 470 |
py
|
#!/usr/bin/env python
#coding=utf-8
import sys
from PyQt4.QtCore import QTranslator
from PyQt4.QtGui import QApplication, QFont
from udpserver import UdpServer
if __name__ == "__main__":
font = QFont("ZYSong18030",12)
QApplication.setFont(font)
app = QApplication(sys.argv)
translator = QTranslator(None)
translator.load("udpserver_zh",".")
app.installTranslator(translator)
udpserver = UdpServer()
udpserver.show()
app.exec_()
|
[
"[email protected]"
] | |
c03f53db650c68d623e1743dedfe7d8f5d004a94
|
a202ad5ef0fd96487d544a860c863c825954e9f8
|
/conf/Effnet_Adam_512.py
|
427de1cddc0462c1d5670bdfb100b96da0ce2685
|
[] |
no_license
|
aakash-saboo/kaggle_rsna_19
|
d86195a84b08af4cce06110ef82de8ad7fa0fb59
|
c81c5d3a612207567d5b291ce86ad108c185c5aa
|
refs/heads/master
| 2021-10-07T17:16:02.597109 | 2021-10-04T14:44:09 | 2021-10-04T14:44:09 | 222,275,048 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,792 |
py
|
workdir = './model/oct_resnet101'
seed = 20
apex = True
n_fold = 5
epoch = 12
# resume_from = './model/oct_resnet101/fold0_ep7.pt'
resume_from = None
batch_size = 28
num_workers =4
imgsize = (512, 512) #(height, width)
loss = dict(
name='BCEWithLogitsLoss',
params=dict(),
)
optim = dict(
name='Adam',
params=dict(
lr=6e-5,
),
)
model = dict(
name='Effnet_b5',
pretrained='imagenet',
n_output=6,
)
scheduler = dict(
name='MultiStepLR',
params=dict(
milestones=[1,2],
gamma=2/3,
),
)
#normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}
normalize = None
crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
resize = dict(name='Resize', params=dict(height=imgsize[0], width=imgsize[1]))
hflip = dict(name='HorizontalFlip', params=dict(p=0.5,))
vflip = dict(name='VerticalFlip', params=dict(p=0.5,))
contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.08, contrast_limit=0.08, p=0.5))
totensor = dict(name='ToTensor', params=dict(normalize=normalize))
rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))
window_policy = 2
data = dict(
train=dict(
dataset_type='CustomDataset',
annotations='./cache/train_folds.pkl',
imgdir='/home/centos/deepTB/stage_1_train_images',
imgsize=imgsize,
n_grad_acc=1,
loader=dict(
shuffle=True,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop, hflip, rotate, contrast, totensor],
dataset_policy='all',
window_policy=window_policy,
),
valid = dict(
dataset_type='CustomDataset',
annotations='./cache/train_folds.pkl',
imgdir='/home/centos/deepTB/stage_1_train_images',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop, hflip, rotate, contrast, totensor],
dataset_policy='all',
window_policy=window_policy,
),
test = dict(
dataset_type='CustomDataset',
annotations='./cache/test.pkl',
imgdir='/home/centos/deepTB/stage_1_test_images',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop, hflip, rotate, contrast, totensor],
dataset_policy='all',
window_policy=window_policy,
),
)
|
[
"[email protected]"
] | |
c4fe4853bddb6eebac1cbb2a01d92829ed3e2e45
|
0b920b9ff703ccb988d91cb2221233f4ad02ce30
|
/chapel/prayers/tests/test_view_success.py
|
570ba6a6b9039429cdd321aba80dce8db894cc8c
|
[] |
no_license
|
unisagrado/virtual_chapel
|
539a27da8adb3cc856e31d78f392e14f8c8a4cf1
|
700b2f65649e0368471b28574973ac74d5b60f18
|
refs/heads/master
| 2023-02-06T10:48:32.097842 | 2020-12-04T12:10:56 | 2020-12-04T12:10:56 | 308,651,028 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 735 |
py
|
from django.test import TestCase
from django.shortcuts import resolve_url as r
class PrayerSuccessGet(TestCase):
def setUp(self):
self.resp = self.client.get(r('prayers:success'))
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_template(self):
self.assertTemplateUsed(self.resp, 'prayers/success.html')
def test_html(self):
contents = [
'A Bem-Aventurada Clélia Merloni interceda por você e por suas intenções.',
'<div class="success-message"',
'Clique aqui para acender sua vela'
]
for expected in contents:
with self.subTest():
self.assertContains(self.resp, expected)
|
[
"[email protected]"
] | |
87e06c5f092bc078e57470af3c2e97ccb8e14638
|
6c137e70bb6b1b618fbbceddaeb74416d387520f
|
/lantz/lantz/drivers/agilent/__init__.py
|
6cb05fee840da445ceb8ceea76d2bfa2c2dd3fe9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
zhong-lab/code
|
fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15
|
b810362e06b44387f0768353c602ec5d29b551a2
|
refs/heads/master
| 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 |
BSD-2-Clause
| 2022-12-08T21:46:15 | 2019-05-02T23:37:39 |
Python
|
UTF-8
|
Python
| false | false | 593 |
py
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.agilent
~~~~~~~~~~~~~~~~~~~~~~
:company: Agilent Technologies.
:description: Manufactures test instruments for research and industrial applications
:website: http://www.agilent.com/home
----
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .n51xx import N51xx
from .ag33220A import Ag33220A
from .ag81130a import Ag81130A
from .e8257c import E8257C
from .AG33522a import AG33522A
__all__ = ['N51xx', 'Ag33220A', 'Ag81130A', 'AG33522A', 'E8257C']
|
[
"none"
] |
none
|
c885cec2649947128049fd1f7ac70e2aa9de1124
|
258f4e0357ab5ce98272ce7c9c63a7812b783afa
|
/notes-catalog/api/utils.py
|
3db412a3c970a3a6895dcc506c3778524a67cae8
|
[] |
no_license
|
apanariello4/lab-sac
|
fb741a63b9528083404cf71753d42a65e7e8c6fb
|
734544ffb9e671ebc0141e14be2268f632f5b58c
|
refs/heads/master
| 2022-12-20T06:41:14.919932 | 2020-10-17T13:33:20 | 2020-10-17T13:33:20 | 244,681,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
class Notes(object):
def __init__(self):
pass
def get_note_by_id(self, owner_id, note_id, db):
try:
owner_ref = db.collection(u'owners').document(owner_id)
note_ref = owner_ref.collection(u'notes').document(note_id)
note = note_ref.get().to_dict().get('note')
        except Exception:
return False
return note
def insert_note(self, owner_id, note_id, note, db):
try:
owner_ref = db.collection(u'owners').document(owner_id)
notes_ref = owner_ref.collection(u'notes')
        except Exception:
return False
notes_ref.document(note_id).set({u'note': note})
return True
class NoteShare(object):
    def __init__(self):
        pass
    # these methods take no instance state, so they are static methods
    @staticmethod
    def check_first_note(owner_id, db):
        try:
            owner_ref = db.collection(u'owners').document(owner_id)
        except Exception:
            return False
        return True
    @staticmethod
    def share_note(recipient_id, shared_note_id, note, db):
        owner_ref = db.collection(u'owners').document(recipient_id)
        owner_ref.collection(u'notes').document(shared_note_id).set({u'note': note})
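# Hedged usage sketch (the Firestore client setup is assumed, not part of this
# module; ids and text are placeholders):
#
#   from google.cloud import firestore
#   db = firestore.Client()
#   Notes().insert_note('owner-1', 'note-1', 'hello', db)
#   text = Notes().get_note_by_id('owner-1', 'note-1', db)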
|
[
"[email protected]"
] | |
ede6a7910e34d87a8089ec7a7a792cc145ae0a44
|
ec700463d9af81f68a477535ac233646f4d262f7
|
/python/__main__.py
|
2966615ac56651c81b277b34316ddc91361aca73
|
[] |
no_license
|
gregjhansell97/grid-map
|
36579afa7beadb78a4b8cc53e2c7f45c75ac28a2
|
7d4c25b583474ec45265b01e524ed0884aaa2937
|
refs/heads/master
| 2020-03-16T20:42:24.156940 | 2018-10-02T23:29:58 | 2018-10-02T23:29:58 | 132,969,337 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
from grid_map import GridMap
import timeit
if __name__ == "__main__":
gm = GridMap(5, bit_depth=10)
for x in range(1000):
for y in range(1000):
gm.add(x, y, "loc:" + str((x, y)))
gm = gm.sub_grids[1][0]
print(gm)
gm = gm.sub_grids[0][0]
print(gm)
gm = gm.sub_grids[0][0]
print(gm)
gm = gm.sub_grids[0][0]
print(gm)
gm = gm.sub_grids[0][0]
print(gm)
gm = gm.sub_grids[0][0]
|
[
"[email protected]"
] | |
ec472ac3246fec9dfbc5a3f26fd7ea6c268117ea
|
2aee8ada5a5212de8708dee7fe685d745872addc
|
/test_project/test_app/tests.py
|
49b2b00ec4825e129b2bad7adfa0ee7092c7d001
|
[
"BSD-3-Clause"
] |
permissive
|
chrischambers/django-audited-models
|
ec9b715b1cbdec9db76717eeb0a4076405ba2ce9
|
481523e014036be8ed1c3210b06f6dc3b4e0c858
|
refs/heads/master
| 2021-01-23T14:05:02.165571 | 2012-05-09T11:36:06 | 2012-05-09T11:36:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,594 |
py
|
from datetime import date
from django.test import TestCase
from nose.tools import *
from django.contrib.auth.models import User
from test_app.models import TestDefaultAuditedModel, TestCustomRelatedName
from threaded_multihost.threadlocals import get_current_user, set_current_user
class DefaultAuditedModelTests(TestCase):
def setUp(self):
self.original_user = get_current_user()
self.user = User.objects.create(username='TestyMcTesterson')
set_current_user(self.user)
self.test_instance = TestDefaultAuditedModel.objects.create(name='foo')
def tearDown(self):
set_current_user(self.original_user)
def test_auditing_fields_auto_populated_accurately(self):
today = date.today()
assert_equal(self.test_instance.creator, self.user)
assert_equal(self.test_instance.editor, self.user)
datetime_created = self.test_instance.datetime_created
datetime_modified = self.test_instance.datetime_modified
assert_equal(datetime_created.date(), today)
assert_equal(datetime_modified.date(), today)
self.test_instance.save()
assert_equal(datetime_created, self.test_instance.datetime_created)
assert_true(datetime_modified < self.test_instance.datetime_modified)
assert_equal(self.test_instance.creator, self.user)
assert_equal(self.test_instance.editor, self.user)
def test_get_latest_by_propagates_to_children(self):
assert_equal(TestDefaultAuditedModel.objects.latest(), self.test_instance)
class CustomPluralFormTests(TestCase):
def setUp(self):
self.original_user = get_current_user()
self.user = User.objects.create(username='TestyMcTesterson')
set_current_user(self.user)
self.test_instance = TestCustomRelatedName.objects.create(name='foo')
def tearDown(self):
set_current_user(self.original_user)
def test_correct_related_field_names(self):
assert_equal(
set(self.user.tests_created.all()),
set(TestCustomRelatedName.objects.all())
)
assert_equal(
set(self.user.tests_last_modified.all()),
set(TestCustomRelatedName.objects.all())
)
self.test_instance.save()
self.user = User.objects.get(pk=self.user.pk)
assert_equal(
set(self.user.tests_last_modified.all()),
set(TestCustomRelatedName.objects.all())
)
assert_equal(
set(self.user.tests_created.all()),
set(TestCustomRelatedName.objects.all())
)
|
[
"[email protected]"
] | |
5cfd366441a944b811950d03eeedc493da0afb82
|
2185217abc9d39919d4e7efd796f0dfb4dc70303
|
/advent_of_code_2019/day_14.py
|
fbbf78fb91f1870c85c57361403d969fbeb03c46
|
[] |
no_license
|
HappyTreeBeard/Advent_of_Code_2019
|
78b6061da74bb427e1b2b70c17eb6e630a0618e4
|
7d6cb8c04c6d509095b8c61bcd5b1a93f19a68b4
|
refs/heads/master
| 2020-11-24T17:42:01.344355 | 2020-01-08T02:34:03 | 2020-01-08T02:34:03 | 228,277,688 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,938 |
py
|
import unittest
from pathlib import Path
from typing import List, Dict
from dataclasses import dataclass, field
@dataclass
class ProductionMethod(object):
input_chemicals: List[str]
input_quantities: List[int]
output_chemical: str
output_quantity: int
@classmethod
def build_from_reaction_str(cls, reaction: str) -> 'ProductionMethod':
""" Create a ProductionMethod object from a reaction definition.
Example: '10 ORE => 10 A' or '7 A, 1 E => 1 FUEL'
:param reaction: The reaction definition
:type reaction: str
:return: The ProductionMethod
:rtype: ProductionMethod
"""
# Split the string into inputs and output chemicals
# '7 A, 1 E => 1 FUEL' to ['7 A, 1 E', '1 FUEL']
multi_inputs_str, output_str = reaction.split(' => ')
# Separate each chemical from the multi_inputs_str
# '7 A, 1 E' to ['7 A', '1 E']
input_str_list = multi_inputs_str.split(', ')
# Split the input_str_list into a list of chemicals and a list of quantities
input_chemicals = list()
input_quantities = list()
for input_str in input_str_list:
# '7 A' to [7, 'A']
quantity_str, chemical = input_str.split(' ')
input_chemicals.append(chemical)
input_quantities.append(int(quantity_str))
output_quantity_str, output_chemical = output_str.split(' ')
return ProductionMethod(
input_chemicals=input_chemicals,
input_quantities=input_quantities,
output_chemical=output_chemical,
output_quantity=int(output_quantity_str),
)
@classmethod
def build_inf_oar_production(cls) -> 'ProductionMethod':
return ProductionMethod(
input_chemicals=[],
input_quantities=[],
output_chemical='ORE',
output_quantity=1,
)
@dataclass
class ChemicalSupply(object):
chemical_name: str
production_method: ProductionMethod
total_production: int = 0
total_consumption: int = 0
@property
def current_inventory(self) -> int:
return self.total_production - self.total_consumption
@dataclass
class FuelFactory(object):
inventory: Dict[str, ChemicalSupply] = field(default_factory=dict)
def add_production_method(self, method: ProductionMethod):
chem_name = method.output_chemical
assert chem_name not in self.inventory
chem_supply = ChemicalSupply(
chemical_name=chem_name,
production_method=method,
total_production=0,
total_consumption=0,
)
self.inventory[chem_name] = chem_supply
def produce_chemical(self, chemical_name: str, count: int):
chem_supply = self.inventory[chemical_name]
while chem_supply.current_inventory < count:
# Produce and consume the required amount of the precursor / input chemical
input_chemicals = chem_supply.production_method.input_chemicals
input_quantities = chem_supply.production_method.input_quantities
for input_chem, input_quantity in zip(input_chemicals, input_quantities):
# Note a ProductionMethod could create more precursor than necessary
self.produce_chemical(chemical_name=input_chem, count=input_quantity)
self.consume_chemical(chemical_name=input_chem, count=input_quantity)
# print(f"+{chem_supply.production_method.output_quantity} {chemical_name}")
chem_supply.total_production += chem_supply.production_method.output_quantity
def consume_chemical(self, chemical_name: str, count: int):
chem_supply = self.inventory[chemical_name]
if chem_supply.current_inventory < count:
raise ValueError(f'Insufficient supply. Requested: {count}, Current inventory: '
f'{chem_supply.current_inventory}, {chem_supply}')
# print(f"-{count} {chemical_name}")
chem_supply.total_consumption += count
class Day14Tests(unittest.TestCase):
def test_build_from_reaction_str1(self):
reaction_str = '10 ORE => 10 A'
expected_production = ProductionMethod(
input_chemicals=['ORE'],
input_quantities=[10],
output_chemical='A',
output_quantity=10,
)
actual_method = ProductionMethod.build_from_reaction_str(reaction=reaction_str)
self.assertEqual(expected_production, actual_method)
def test_build_from_reaction_str2(self):
reaction_str = '7 A, 1 E => 1 FUEL'
expected_production = ProductionMethod(
input_chemicals=['A', 'E'],
input_quantities=[7, 1],
output_chemical='FUEL',
output_quantity=1,
)
actual_method = ProductionMethod.build_from_reaction_str(reaction=reaction_str)
self.assertEqual(expected_production, actual_method)
def test_part1_example1(self):
reaction_str_list = [
'10 ORE => 10 A',
'1 ORE => 1 B',
'7 A, 1 B => 1 C',
'7 A, 1 C => 1 D',
'7 A, 1 D => 1 E',
'7 A, 1 E => 1 FUEL',
]
fuel_factory = self.build_factory(reaction_str_list=reaction_str_list)
self.assertEqual(
len(fuel_factory.inventory),
len(reaction_str_list) + 1, # This should include the additional ProductionMethod for ORE
)
fuel_str = 'FUEL'
fuel_supply = fuel_factory.inventory[fuel_str]
self.assertEqual(fuel_supply.chemical_name, fuel_str)
self.assertListEqual(fuel_supply.production_method.input_chemicals, ['A', 'E'])
self.assertListEqual(fuel_supply.production_method.input_quantities, [7, 1])
# To produce 1 FUEL, a total of 31 ORE is required
expected_ore_requirement = 31
fuel_factory.produce_chemical(chemical_name=fuel_str, count=1)
actual_ore_requirement = fuel_factory.inventory['ORE'].total_consumption
self.assertEqual(actual_ore_requirement, expected_ore_requirement)
def test_part1_example2(self):
reaction_str_list = [
'9 ORE => 2 A',
'8 ORE => 3 B',
'7 ORE => 5 C',
'3 A, 4 B => 1 AB',
'5 B, 7 C => 1 BC',
'4 C, 1 A => 1 CA',
'2 AB, 3 BC, 4 CA => 1 FUEL',
]
# The above list of reactions requires 165 ORE to produce 1 FUEL:
expected_ore_requirement = 165
fuel_factory = self.build_factory(reaction_str_list=reaction_str_list)
fuel_factory.produce_chemical(chemical_name='FUEL', count=1)
actual_ore_requirement = fuel_factory.inventory['ORE'].total_consumption
self.assertEqual(actual_ore_requirement, expected_ore_requirement)
def test_part1_example5(self):
reaction_str_list = [
'171 ORE => 8 CNZTR',
'7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL',
'114 ORE => 4 BHXH',
'14 VRPVC => 6 BMBT',
'6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL',
'6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT',
'15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW',
'13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW',
'5 BMBT => 4 WPTQ',
'189 ORE => 9 KTJDG',
'1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP',
'12 VRPVC, 27 CNZTR => 2 XDBXC',
'15 KTJDG, 12 BHXH => 5 XCVML',
'3 BHXH, 2 VRPVC => 7 MZWV',
'121 ORE => 7 VRPVC',
'7 XCVML => 6 RJRHP',
'5 BHXH, 4 VRPVC => 5 LTCX',
]
# 2210736 ORE for 1 FUEL:
expected_ore_requirement = 2210736
fuel_factory = self.build_factory(reaction_str_list=reaction_str_list)
fuel_factory.produce_chemical(chemical_name='FUEL', count=1)
actual_ore_requirement = fuel_factory.inventory['ORE'].total_consumption
self.assertEqual(actual_ore_requirement, expected_ore_requirement)
@staticmethod
def build_factory(reaction_str_list: List[str]) -> FuelFactory:
production_methods = [ProductionMethod.build_from_reaction_str(reaction=x) for x in reaction_str_list]
fuel_factory = FuelFactory()
for method in production_methods:
fuel_factory.add_production_method(method=method)
        # Add the production method that provides an infinite supply of ore
fuel_factory.add_production_method(method=ProductionMethod.build_inf_oar_production())
return fuel_factory
def day_14(txt_path: Path) -> list:
# Load puzzle input. Multiple rows with reaction definitions on each row.
with open(str(txt_path), mode='r', newline='') as f:
rows = [x.strip() for x in f.readlines()]
production_methods = [ProductionMethod.build_from_reaction_str(reaction=x) for x in rows]
fuel_factory = FuelFactory()
for method in production_methods:
fuel_factory.add_production_method(method=method)
    # Add the production method that provides an infinite supply of ore
fuel_factory.add_production_method(method=ProductionMethod.build_inf_oar_production())
# Part 1: What is the minimum amount of ORE required to produce exactly 1 FUEL?
fuel_factory.produce_chemical(chemical_name='FUEL', count=1)
part_1_answer = fuel_factory.inventory['ORE'].total_consumption
part_2_answer = None
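    # Hedged sketch for part 2 (the puzzle asks: with 1_000_000_000_000 ORE,
    # how much FUEL can be produced?). One assumed approach, not implemented
    # here because produce_chemical() works one reaction at a time and would
    # be too slow at that scale:
    #   binary-search the fuel count; for each candidate build a fresh factory,
    #   produce that much fuel, and compare ORE consumption against 1e12.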
return [part_1_answer, part_2_answer]
def main():
txt_path = Path(Path(__file__).parent, 'input_data', 'day_14_input.txt')
answer = day_14(txt_path=txt_path)
print(f'Day 14 Answers: {repr(answer)}')
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(Day14Tests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
main()
|
[
"[email protected]"
] | |
7dad0e339648e460c7894fadad7d824066d514b6
|
3457ba6617f9eb2dd136b133deb62a906c5fcc45
|
/run_project.py
|
a44a28f19df3e0c94a3d5b6875c776e4ffeab47c
|
[] |
no_license
|
ludosessa/Data-an-AI-project
|
81ac40b1ffdf7089dfd547ba9b6e826f12c9d4cb
|
3b5c76aef16cab62a742e2102498101867e492e1
|
refs/heads/master
| 2020-03-09T23:01:28.023186 | 2018-05-28T15:59:56 | 2018-05-28T15:59:56 | 129,048,913 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,190 |
py
|
# import the necessary packages
import readTrafficSigns
import argparse
import numpy as np
import pickle
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import optim
from conv_layers import ConvNet
import scipy.misc
#########################################################################
# TODO: #
# This is used to input our test dataset to your model in order to #
# calculate your accuracy #
# Note: The input to the function is similar to the output of the method#
# "get_CIFAR10_data" found in the notebooks. #
#########################################################################
def predict(X):
#########################################################################
# TODO: #
    # - Load your saved model                                               #
# - Do the operation required to get the predictions #
# - Return predictions in a numpy array #
# Note: For the predictions, you have to return the index of the max #
# value #
#########################################################################
images = Variable(torch.from_numpy(X).type(torch.FloatTensor))
CNN_project = ConvNet(n_input_channels=3, n_output=43)
CNN_project.load_state_dict(torch.load('./project.pt'))
output = CNN_project.predict(images)
_, prediction = torch.max(output.data, 1)
pred_y = prediction.data.cpu().numpy().squeeze()
#########################################################################
# END OF YOUR CODE #
#########################################################################
return pred_y
def main():
trafficSigns_test = './data/test/Final_Test/Images'
    # readTrafficSigns is assumed to expose a function of the same name
    X, y = readTrafficSigns.readTrafficSigns(trafficSigns_test)
#X_resized = []
#for i in range(len(X)):
#image_resized = scipy.misc.imresize(X[i], (47, 47))
#X_resized.append(image_resized)
#X_test = X_resized
X_test = np.asarray(X)
#X_test = np.transpose(X_test,[0,3,1,2])
y_test = np.asarray(y, dtype=np.int64)
    # predict() does the numpy -> torch conversion itself, so keep numpy here
    prediction_project = predict(X_test)
    acc_project = (prediction_project == y_test).mean()
    print("Accuracy %s" % (acc_project))
if __name__ == "__main__":
main()
#ap = argparse.ArgumentParser()
#ap.add_argument('--no-cuda', action='store_true', default=False, help='enables CUDA training')
#ap.add_argument("-t", "--test", required=True, help="path to test file")
#ap.add_argument("-g", "--group", required=True, help="group number")
#args = vars(ap.parse_args())
#args.cuda = not args.no_cuda and torch.cuda.is_available()
#main(args["test"],args["group"])
|
[
"[email protected]"
] | |
d2856e764575cdb8308c02b69d2303ddf1692b83
|
c6d852e5842cf6f74123445d20ff03876377ae26
|
/lemon/python22/lemon_14_190918_测试框架_unittest/test_练习相减02.py
|
447882bd4b22fb5aed635fbc7eb95a77abf6e076
|
[] |
no_license
|
songyongzhuang/PythonCode_office
|
0b3d35ca5d58bc305ae90fea8b1e8c7214619979
|
cfadd3132c2c7c518c784589e0dab6510a662a6c
|
refs/heads/master
| 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,436 |
py
|
# --*-- coding : utf-8 --*--
# Project : python22
# Current file : test_练习相减02.py
# Author : Administrator
# Create time : 2019-09-19 10:22
# IDE : PyCharm
# TODO Growth is hard, progress is sweet. Keep going!
import unittest


def minus(a, b):
    """Subtract b from a."""
    return a - b


x = 3
y = 5
expected = -2


class TestMinus(unittest.TestCase):
    # Class-level fixtures: run only once per test class
    @classmethod
    def setUpClass(cls):
        print('Runs once before any test in this class')

    @classmethod
    def tearDownClass(cls):
        print('Runs once after all tests in this class')

    # Test case design
    # Per-test setup
    def setUp(self):
        """Setup: runs automatically before each test method."""
        print('Per-test setup')

    # Per-test teardown
    def tearDown(self):
        """Teardown: runs automatically after each test method."""
        print('Per-test teardown')

    def test_add_success(self):
        """Assert that the expression is true."""
        self.assertTrue(expected == minus(x, y))

    def test_add_error(self):
        """Fail if the two objects are not equal."""
        try:
            self.assertEqual(-2, minus(x, y))
        except AssertionError:  # a failed assertEqual raises AssertionError, not SyntaxError
            pass


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
290d45e64c050e5ee0c6d5b685bc0da322de1ca5
|
be33f0d7a59527322d554baf2b2d4886fd07062c
|
/ipython/ipython_kernel_config.py
|
9359c12d0a98f51e11555d19ee677cc8d0998cdc
|
[] |
no_license
|
eddienko/jimaxt
|
9aeab9064c6ecd157aa0172d1bde055a73f36069
|
01d780ad350711cd1916099f1625e6931971d125
|
refs/heads/master
| 2020-04-04T12:05:24.871848 | 2019-01-15T07:52:32 | 2019-01-15T07:52:32 | 155,913,813 | 0 | 0 | null | 2018-12-13T17:06:49 | 2018-11-02T19:42:10 |
Python
|
UTF-8
|
Python
| false | false | 133 |
py
|
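# Run these startup files and default to inline matplotlib in every IPython kernel session.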
c.InteractiveShellApp.exec_files = ['/etc/ipython/startup.py', '/etc/ipython/custom.py']
c.InteractiveShellApp.matplotlib = "inline"
|
[
"[email protected]"
] | |
7513d02b9cd8d88736fc954a4e3e47772a53f50b
|
756f9b38889035f5e8c2d96c6898263e427237c6
|
/rename_inventory/rename_inventory.py
|
0983691591fc0875a8620853105b141151683c78
|
[] |
no_license
|
fantas1st0/network_scripts
|
335e189351f13009f1ddaeb80b4017eedb6c6fa0
|
9270754f0bd04c1c6d081e7d7f22357102062595
|
refs/heads/main
| 2023-02-21T14:58:43.620562 | 2021-01-22T12:49:43 | 2021-01-22T12:49:43 | 329,067,767 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 773 |
py
|
import os
from pprint import pprint
import re
def rename_dir(directory):
os.chdir("./" + directory)
arr = os.listdir()
ip_dev = {}
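# Directory names appear to follow "<ip>___<hostname>"; rename each to just the hostname.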
for device in arr:
if "___" in device:
k = device.split("___")[0]
v = device.split("___")[1]
ip_dev[k] = v
os.rename(device, v)
for device in ip_dev.values():
os.chdir("./" + device)
files = os.listdir()
for file in files:
regex = "(?P<ip_add>\S+)-(?P<type>\S+)"
match = re.search(regex, file)
if match:
newname = device + "-" + match.group("type")
os.rename(file, newname)
os.chdir("../")
os.chdir("../")
if __name__ == "__main__":
for directory in os.listdir():
if "Raw" in directory:
raw_inv = directory
os.chdir("./" + raw_inv)
for directory in os.listdir():
rename_dir(directory)
|
[
"[email protected]"
] | |
a4c59543e02e12f49dee77f257032c2830b52aa0
|
4a3fefc163ec80de58b1843f86e7fd301c6a0700
|
/profiles_api/views.py
|
97dc9e997df91a4e27410be27501d7084158af2a
|
[
"MIT"
] |
permissive
|
nawaz-t/profiles-rest-api
|
6115feefcb5d32e79ee545742d7f7f1431614425
|
67398ef50a98b7188119ffa50e2f24a99c671cc6
|
refs/heads/master
| 2022-05-16T07:36:05.538536 | 2020-03-25T15:43:25 | 2020-03-25T15:43:25 | 249,365,074 | 0 | 0 |
MIT
| 2022-04-22T23:08:12 | 2020-03-23T07:39:39 |
Python
|
UTF-8
|
Python
| false | false | 3,051 |
py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from profiles_api import serializers
class HelloAPIView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIVIews features"""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over you application logic',
'Is mapped manually to URLs',
]
return Response({'message':'Hello!','an_apiview': an_apiview})
def post(self,request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method':'PUT'})
def patch(self, request, pk=None):
"""Handle a partial update of an object"""
return Response({'method':'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method':'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset=[
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLS using Routers',
'Provides more functionality with less code'
]
return Response({'message': 'Hello!', 'a_viewset':a_viewset})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message=f'Hello {name}!'
return Response({'message':message})
else:
return Response(  # fixed: Response, not response
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method':'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method':'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
return Response({'http_method':'PATCH'})
def destroy (self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method':'DELETE'})
|
[
"[email protected]"
] | |
4cb569f1636bfc4eae939e6f9a0744d37db16326
|
20899d453bc61c169153338ac9d22d324df089c1
|
/abc/abc162/B.py
|
9eb9826bfab9e83ccd7c92096c9c66a9611d1f39
|
[] |
no_license
|
mui-nyan/AtCoder
|
b2d926b113963915426af679bf9b28430569707c
|
a702280f11a5b0b1b29dd099dbfc7b1c31fb89fd
|
refs/heads/master
| 2022-07-04T16:32:41.164564 | 2022-06-19T07:24:11 | 2022-06-19T07:24:11 | 182,425,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 778 |
py
|
import math
from functools import reduce
from collections import deque
import sys
sys.setrecursionlimit(10**7)
# Read space-separated input and return it as a list of ints.
def get_nums_l():
return [ int(s) for s in input().split(" ")]
# Read n lines of input and return them as a list of ints.
def get_nums_n(n):
return [ int(input()) for _ in range(n)]
# Read all remaining input (newline- or space-separated) and return an iterator of ints.
def get_all_int():
return map(int, open(0).read().split())
def log(*args):
print("DEBUG:", *args, file=sys.stderr)
n = int(input())
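# ABC162 B: sum the integers from 1 to n that are multiples of neither 3 nor 5.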
ans = 0
for i in range(1, n+1):
if i%3 == 0 or i%5 == 0:
continue
ans += i
print(ans)
|
[
"[email protected]"
] | |
25103e52b26fe0ced7a652490d6fdbd9bd4f524e
|
82dae0c6d535a48bcddcfae27d48238e9c7021d6
|
/tests/test_accountsettings.py
|
e5104c144e678969f13873b027ddadc847e5f89c
|
[] |
no_license
|
eventure-interactive/eventure_django
|
130a369ecfdf79a46c0fc99aeda345d28fe60893
|
0d5912eb2800eeb095df9aec19045e3916ba0d13
|
refs/heads/master
| 2023-06-08T19:33:10.185289 | 2016-03-31T18:07:37 | 2016-03-31T18:07:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,574 |
py
|
from pprint import pformat
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIRequestFactory, APIClient, APITestCase
from core.models import Account, AccountSettings, AccountStatus, EventPrivacy
class AccountSettingsModelTests(TestCase):
"AccountSettings model test cases."
def test_account_settings_auto(self):
"Creating an account automatically creates account settings."
acct = Account.objects.create_user(phone="+18005551212", password="secret", email="[email protected]",
name="Testy McTesterson")
# Already saved, can check id
self.assertIsNotNone(acct.id)
settings = acct.accountsettings
self.assertEqual(acct.id, settings.account_id)
class AccountSettingsAPITests(APITestCase):
def setUp(self):
# log in
self.user = Account.objects.create_user(phone="+18005551212", password="secret", email="[email protected]",
name="Testy McTesterson", status=AccountStatus.ACTIVE)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def testAccessTypes(self):
"AccountSettings should only allow PUT and GET (not DELETE or POST)."
url = reverse('self-settings')
# GET OK
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
# POST NOT ALLOWED
response = self.client.post(url, {'email_promotions': False})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED, response.data)
# PUT OK
response = self.client.put(url, {'email_promotions': False})
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
# DELETE NOT ALLOWED
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED, response.data)
def testSaveAccountSettings(self):
url = reverse('self-settings')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
expect = response.data
test_params = (
{'email_rsvp_updates': False},
{'email_rsvp_updates': True},
{'email_social_activity': False},
{'email_promotions': False},
{'text_rsvp_updates': False},
{'text_rsvp_updates': True},
{'text_social_activity': True},
{'text_promotions': True},
{'default_event_privacy': EventPrivacy.PRIVATE},
{'default_event_privacy': EventPrivacy.PUBLIC},
{'profile_privacy': Account.PRIVATE},
{'profile_privacy': Account.PUBLIC},
{'email_rsvp_updates': False, 'email_social_activity': False,
'profile_privacy': Account.PRIVATE},
)
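# Apply each payload as a cumulative PATCH and verify the full settings document after every change.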
for params in test_params:
expect.update(params)
response = self.client.patch(url, params)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(expect, response.data,
"\nExpect: \n{}\nResponse: \n{}\nparams: {}".format(pformat(expect),
pformat(response.data),
pformat(params)))
|
[
"[email protected]"
] | |
9eb155ab168b320e301794c6d06721d8159379c8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/329/usersdata/297/91364/submittedfiles/dec2bin.py
|
f499b6f8e6c0b866d68629df150aa2c83d3d617b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
# -*- coding: utf-8 -*-
while True:  # fixed: Python's boolean literal is 'True', not 'true'
    p = int(input('enter a number p: '))
    q = int(input('enter a number q: '))
    if q >= p:
        break
if str(p) in str(q):
    print('S')
else:
    print('N')
|
[
"[email protected]"
] | |
51fde9dbea96a836afe90553f903269ec803cf6e
|
634635afd0e9e0b5b33d0e2a0ff6c54f1b60805d
|
/dependecyparser/tokens.py
|
2427d513893d407d6c72cc0b05ece0ef6b12fc7e
|
[] |
no_license
|
Pashovich/parser
|
116e32a69775bcdda8aa91494b6bc9ac36a80e9f
|
4dbcbf049eb137b473908a6b98248fa4d72168b3
|
refs/heads/master
| 2022-11-16T23:00:47.098834 | 2020-07-08T09:10:03 | 2020-07-08T09:10:03 | 278,032,958 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 56 |
py
|
class TokenSpecialCase:
    """Sentinel token used where the parser needs an empty token."""
    text = ''
    children = []  # note: class-level list, shared by every instance
|
[
"[email protected]"
] | |
e35cce8f90ca96866959109432451fecbde33194
|
e7515012ccb999a499947bea3ef01e82b9a2b15f
|
/plaso/parsers/winjob.py
|
f971a8822e0f8e1fe7a3a3157e3264dd4c4eaa56
|
[
"Apache-2.0"
] |
permissive
|
vonnopsled/plaso
|
ebfe5af84b955b9e40610bd76598671256ddea4f
|
c14e3a0c1db0b05280ff58219d33f487c0a40a6f
|
refs/heads/master
| 2020-12-31T04:07:25.047511 | 2016-01-04T07:07:01 | 2016-01-04T07:07:01 | 46,817,224 | 0 | 0 | null | 2015-11-24T20:38:20 | 2015-11-24T20:38:20 | null |
UTF-8
|
Python
| false | false | 10,311 |
py
|
# -*- coding: utf-8 -*-
"""Parser for Windows Scheduled Task job files."""
import construct
from plaso.events import time_events
from plaso.lib import binary
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = 'Brian Baskin ([email protected])'
class WinJobEvent(time_events.TimestampEvent):
"""Convenience class for a Windows Scheduled Task event.
Attributes:
application: string that contains the path to job executable.
comment: string that contains the job description.
parameter: string that contains the application command line parameters.
trigger: an integer that contains the event trigger, e.g. DAILY.
username: string that contains the username that scheduled the job.
working_dir: string that contains the working path for task.
"""
DATA_TYPE = u'windows:tasks:job'
def __init__(
self, timestamp, timestamp_description, application, parameter,
working_dir, username, trigger, description):
"""Initializes the event object.
Args:
timestamp: the timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timestamp_description: the usage string for the timestamp value.
application: string that contains the path to job executable.
parameter: string that contains the application command line parameters.
working_dir: string that contains the working path for task.
username: string that contains the username that scheduled the job.
trigger: an integer that contains the event trigger, e.g. DAILY.
description: string that contains the job description.
"""
super(WinJobEvent, self).__init__(timestamp, timestamp_description)
self.application = application
self.comment = description
self.parameter = parameter
self.trigger = trigger
self.username = username
self.working_dir = working_dir
class WinJobParser(interface.FileObjectParser):
"""Parse Windows Scheduled Task files for job events."""
NAME = u'winjob'
DESCRIPTION = u'Parser for Windows Scheduled Task job (or At-job) files.'
_PRODUCT_VERSIONS = {
0x0400: u'Windows NT 4.0',
0x0500: u'Windows 2000',
0x0501: u'Windows XP',
0x0600: u'Windows Vista',
0x0601: u'Windows 7',
0x0602: u'Windows 8',
0x0603: u'Windows 8.1'
}
_JOB_FIXED_STRUCT = construct.Struct(
u'job_fixed',
construct.ULInt16(u'product_version'),
construct.ULInt16(u'format_version'),
construct.Bytes(u'job_uuid', 16),
construct.ULInt16(u'application_length_offset'),
construct.ULInt16(u'trigger_offset'),
construct.ULInt16(u'error_retry_count'),
construct.ULInt16(u'error_retry_interval'),
construct.ULInt16(u'idle_deadline'),
construct.ULInt16(u'idle_wait'),
construct.ULInt32(u'priority'),
construct.ULInt32(u'max_run_time'),
construct.ULInt32(u'exit_code'),
construct.ULInt32(u'status'),
construct.ULInt32(u'flags'),
construct.ULInt16(u'ran_year'),
construct.ULInt16(u'ran_month'),
construct.ULInt16(u'ran_weekday'),
construct.ULInt16(u'ran_day'),
construct.ULInt16(u'ran_hour'),
construct.ULInt16(u'ran_minute'),
construct.ULInt16(u'ran_second'),
construct.ULInt16(u'ran_millisecond'),
)
# Using Construct's utf-16 encoding here will create strings with their
# null terminators exposed. Instead, we'll read these variables raw and
# convert them using Plaso's ReadUTF16() for proper formatting.
_JOB_VARIABLE_STRUCT = construct.Struct(
u'job_variable',
construct.ULInt16(u'running_instance_count'),
construct.ULInt16(u'application_length'),
construct.String(
u'application',
lambda ctx: ctx.application_length * 2),
construct.ULInt16(u'parameter_length'),
construct.String(
u'parameter',
lambda ctx: ctx.parameter_length * 2),
construct.ULInt16(u'working_dir_length'),
construct.String(
u'working_dir',
lambda ctx: ctx.working_dir_length * 2),
construct.ULInt16(u'username_length'),
construct.String(
u'username',
lambda ctx: ctx.username_length * 2),
construct.ULInt16(u'comment_length'),
construct.String(
u'comment',
lambda ctx: ctx.comment_length * 2),
construct.ULInt16(u'userdata_length'),
construct.String(
u'userdata',
lambda ctx: ctx.userdata_length),
construct.ULInt16(u'reserved_length'),
construct.String(
u'reserved',
lambda ctx: ctx.reserved_length),
construct.ULInt16(u'test'),
construct.ULInt16(u'trigger_size'),
construct.ULInt16(u'trigger_reserved1'),
construct.ULInt16(u'sched_start_year'),
construct.ULInt16(u'sched_start_month'),
construct.ULInt16(u'sched_start_day'),
construct.ULInt16(u'sched_end_year'),
construct.ULInt16(u'sched_end_month'),
construct.ULInt16(u'sched_end_day'),
construct.ULInt16(u'sched_start_hour'),
construct.ULInt16(u'sched_start_minute'),
construct.ULInt32(u'sched_duration'),
construct.ULInt32(u'sched_interval'),
construct.ULInt32(u'trigger_flags'),
construct.ULInt32(u'trigger_type'),
construct.ULInt16(u'trigger_arg0'),
construct.ULInt16(u'trigger_arg1'),
construct.ULInt16(u'trigger_arg2'),
construct.ULInt16(u'trigger_padding'),
construct.ULInt16(u'trigger_reserved2'),
construct.ULInt16(u'trigger_reserved3'))
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Windows job file-like object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
header_struct = self._JOB_FIXED_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Windows Task Job file with error: {0:s}'.format(
exception))
if not header_struct.product_version in self._PRODUCT_VERSIONS:
raise errors.UnableToParseFile((
u'Unsupported product version in: 0x{0:04x} Scheduled Task '
u'file').format(header_struct.product_version))
if not header_struct.format_version == 1:
raise errors.UnableToParseFile(
u'Unsupported format version in: {0:d} Scheduled Task file'.format(
header_struct.format_version))
try:
job_variable_struct = self._JOB_VARIABLE_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Windows Task Job file with error: {0:s}'.format(
exception))
try:
last_run_date = timelib.Timestamp.FromTimeParts(
header_struct.ran_year,
header_struct.ran_month,
header_struct.ran_day,
header_struct.ran_hour,
header_struct.ran_minute,
header_struct.ran_second,
microseconds=header_struct.ran_millisecond * 1000,
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
last_run_date = None
parser_mediator.ProduceParseError(
u'unable to determine last run date with error: {0:s}'.format(
exception))
try:
scheduled_date = timelib.Timestamp.FromTimeParts(
job_variable_struct.sched_start_year,
job_variable_struct.sched_start_month,
job_variable_struct.sched_start_day,
job_variable_struct.sched_start_hour,
job_variable_struct.sched_start_minute,
0, # Seconds are not stored.
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
scheduled_date = None
parser_mediator.ProduceParseError(
u'unable to determine scheduled date with error: {0:s}'.format(
exception))
application = binary.ReadUTF16(job_variable_struct.application)
description = binary.ReadUTF16(job_variable_struct.comment)
parameter = binary.ReadUTF16(job_variable_struct.parameter)
username = binary.ReadUTF16(job_variable_struct.username)
working_dir = binary.ReadUTF16(job_variable_struct.working_dir)
if last_run_date is not None:
event_object = WinJobEvent(
last_run_date, eventdata.EventTimestamp.LAST_RUNTIME, application,
parameter, working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
if scheduled_date is not None:
event_object = WinJobEvent(
scheduled_date, u'Scheduled To Start', application, parameter,
working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
# TODO: create a timeless event object if last_run_date and scheduled_date
# are None? What should be the description of this event?
if job_variable_struct.sched_end_year:
try:
scheduled_end_date = timelib.Timestamp.FromTimeParts(
job_variable_struct.sched_end_year,
job_variable_struct.sched_end_month,
job_variable_struct.sched_end_day,
0, # Hours are not stored.
0, # Minutes are not stored.
0, # Seconds are not stored.
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
scheduled_end_date = None
parser_mediator.ProduceParseError(
u'unable to determine scheduled end date with error: {0:s}'.format(
exception))
if scheduled_end_date is not None:
event_object = WinJobEvent(
scheduled_end_date, u'Scheduled To End', application, parameter,
working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(WinJobParser)
|
[
"[email protected]"
] | |
4ce1c8ce1eb5a7d9d1ab89effe73b1635dc0f452
|
a2066f7e4ac0ac95761f12bb697e506c2444e52d
|
/linked_list/leetcode234.py
|
c81579c8d1eeca1488cc135a3a75f513a8acaddd
|
[] |
no_license
|
zhengjiawen/leetcode_python
|
ec53f09515510c4da327a9d2c7ccc85d5fd59d26
|
f24d29a1717a58fffc68dfab3806f71f2a8b388c
|
refs/heads/master
| 2021-06-20T11:32:21.431726 | 2021-04-09T03:14:32 | 2021-04-09T03:14:32 | 202,135,141 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,147 |
py
|
'''
Determine whether a linked list is a palindrome.
'''
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
if not head or not head.next:
return True
res = list()
while head:
res.append(head.val)
head = head.next
# left, right = 0, len(res)-1
# while left<right:
# if res[left] != res[right]:
# return False
# left+=1
# right-=1
# return True
return res == res[::-1]  # palindrome check against the reversed copy (O(n) extra space)
def init_list():
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(2)
node5 = ListNode(1)
node1.next = node2
# node2.next = node3
# node3.next = node4
# node4.next = node5
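# With only node1 -> node2 linked, the list is [1, 2], so isPalindrome returns False.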
return node1
def traverse(root):
print(root.val)
if root.next:
traverse(root.next)
if __name__ == '__main__':
solution = Solution()
root = init_list()
# traverse(root)
result_root = solution.isPalindrome(root)
print(result_root)
|
[
"[email protected]"
] |