# File: learn/ajax.py (repo: ericmok/eri53, license: Unlicense)
import django.http
import django.template
import django.utils.simplejson
from django.shortcuts import render_to_response
import views
import models
import svm
import svmutil
def json(data):
    """Serialize a dict into an HTTP JSON response."""
    ret = django.utils.simplejson.dumps(data)
    return django.http.HttpResponse(ret, mimetype='application/json')
def add_action(request):
'''Adds a point'''
new_point = models.Point2d()
new_point.x = float( request.POST.get("x", 0) )
new_point.y = float( request.POST.get("y", 0) )
new_point.x = 1. * new_point.x
new_point.y = 1. * new_point.y
print("Label: " + request.POST.get("label"))
new_point.label = request.POST.get("label", -1)
new_point.save()
data = {'x': new_point.x, 'y': new_point.y, 'label': new_point.label}
return json(data)
class PointEncoder(django.utils.simplejson.JSONEncoder):
def default(self, obj):
if not isinstance(obj, models.Point2d):
return super(PointEncoder, self).default(obj)
return {"x": obj.x, "y": obj.y, "label": obj.label}
def read_action(request):
points = models.Point2d.objects.all()
data = {}
data["length"] = len(points)
i = 0
for point in points:
data[i] = [point.x, point.y, point.label]
i = i + 1
response_data = django.utils.simplejson.dumps(data, cls=PointEncoder)
return django.http.HttpResponse(response_data, mimetype='application/json')
def predict(request):
predictX = float( request.POST.get("x", -1) )
predictY = float( request.POST.get("y", -1) )
predictLabel = int( request.POST.get("label", -1) )
if predictX == -1 or predictY == -1 or predictLabel == -1:
return django.http.HttpResponse("Missing Params")
points = models.Point2d.objects.all()
# Storing the information to be presented to SVM
labels = []
inputs = []
# For each point, store the information into arrays
#for p in points:
# labels.append( p.label )
# inputs.append([p.x, p.y])
#prob = svm.svm_problem(labels, inputs)
#param = svm.svm_parameter('-t 2 -c 100')
#model = svmutil.svm_train(prob, param)
#svmutil.svm_save_model('libsvm.model', model)
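    # The commented-out block above is the inline-training variant; as written,
    # predict() instead loads the model that a prior call to train() saved.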
model = svmutil.svm_load_model('libsvm.model')
p_label , acc, val = svmutil.svm_predict([0], [[predictX, predictY]], model)
data = {'x': predictX, 'y': predictY, 'label': int( p_label[0] ) }
return json(data)
def predict_all(request):
'''Predicts points in an array'''
width = float( request.POST.get("width", "None") )
height = float( request.POST.get("height", "None") )
model = svmutil.svm_load_model('libsvm.model')
# Get grid of points to query
points = []
for counterY in [ 1.0 / 15.0 * y for y in range(0, 15) ]:
for counterX in [ 1.0 / 15.0 * x for x in range(0, 15) ]:
points.append([counterX, counterY])
#for counterY in [ 1.0 / 10.0 * x for x in range(0, 10) ]:
# for counterX in [ 1.0 / 10.0 * y for y in range(0, 10) ]:
# label , acc, val = svmutil.svm_predict( [0], [[counterX, counterY]], model )
# results[i] = [counterX, counterY, label]
# i = i + 1
#results["length"] = i
# Get labels
labels, acc, val = svmutil.svm_predict([0] * len(points), points, model)
results = {}
for index, value in enumerate(points):
results[index] = { "x" : points[index][0],
"y" : points[index][1],
"label" : labels[index] }
results["length"] = len(points)
return json(results)
def train(request):
points = models.Point2d.objects.all()
# Storing the information to be presented to SVM
labels = []
inputs = []
# For each point, store the information into arrays
for p in points:
labels.append( p.label )
inputs.append([p.x, p.y])
prob = svm.svm_problem(labels, inputs)
param = svm.svm_parameter('-t 2 -c 100')
model = svmutil.svm_train(prob, param)
try:
svmutil.svm_save_model('libsvm.model', model)
except Exception as e:
print "error: ", e, "\n"
data = {"status": "trained"}
return json(data)
def handle_ajax_action(request):
'''Routes ajax calls based on action parameter'''
if request.POST.get("action", "None") == "add":
return add_action(request)
elif request.POST.get("action", "None") == "read":
return read_action(request)
elif request.POST.get("action", "None") == "predict":
return predict(request)
elif request.POST.get("action", "None") == "train":
return train(request)
elif request.POST.get("action", "None") == "predictAll":
return predict_all(request)
else:
data = {'status': 'Invalid action'}
return json(data)
def handle_requests(request):
'''Handles ajax requests directed to this learn/ajax/
If not post, then goes to index page'''
if request.method == 'POST':
return handle_ajax_action(request)
return views.index(request)

# File: test/merge_sort.py (repo: matiTechno/various, no license)
"""
algorithm: merge sort
input: (a_1, ..., a_n)
output: sorted sequence
step <- 2
half <- 1
while half < n
do i <- 0
while i + half <= n
do l <- ( a_i, ..., a_(i + half - 1) )
end <- min(i + step - 1, n)
r <- ( a_(i + half), ..., a_(end) )
(a_i, ..., a_(end)) <- Merge(l, r)
i <- i + step
step <- 2 step
half <- 2 half
Merge
input: l, r - sorted sequences
output: o - merged (sorted) sequence
o <- empty
while |l| and |r|
do if first(l) < first(r)
then o <- (o_1, ..., o_n, first(l))
pop_first(l)
else o <- (o_1, ..., o_n, first(r))
pop_first(r)
o <- (o_1, ..., o_n, l_1, ..., l_n, r_1, ..., r_n)
return o
"""
def merge_sort(values):
step = 2
half = 1
while half < len(values):
i = 0
while i + half < len(values):
l = values[i : i + half]
end = int( min(i + step, len(values)) )
r = values[i + half : end]
m = merge(l, r)
for k in range(len(m)):
values[i + k] = m[k]
i += step
step *= 2
half *= 2
def merge(l, r):
o = []
while len(l) and len(r):
if l[0] < r[0]:
o.append(l[0])
l.pop(0)
else:
o.append(r[0])
r.pop(0)
return o + l + r
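# e.g. merge([1, 4], [2, 3]) returns [1, 2, 3, 4]; the leftovers of whichever
# side is longer are appended once the other side is exhausted.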
import random
for i in range(1, 100):
values = [random.randint(1,100) for _ in range(i)]
merge_sort(values)
for k in range(0, len(values) - 1):
assert values[k] <= values[k+1]
print("success")

# File: Admin_Panel/asgi.py (repo: susmitha009/Admin-Panel, no license)
"""
ASGI config for Admin_Panel project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Admin_Panel.settings')
application = get_asgi_application()

# File: droid.py (repo: vaiper79/probe_droid_diorama, no license)
# Python3
# Version 7, 20.12.2019, OleA aka vaiper79
# Volume configuration by Adafruit:
# Hardware: MCP3008 DAC Chip
# Code: https://learn.adafruit.com/reading-a-analog-in-and-controlling-audio-volume-with-the-raspberry-pi/overview
# Amplifier configuration by Adafruit:
# Hardware: TPA2016 i2c amplifier
# Code: https://learn.adafruit.com/adafruit-tpa2016-2-8w-agc-stereo-audio-amplifier/python-circuitpython
# Logging Code: https://gist.github.com/sweenzor/1782457
# Shutdown: https://gpiozero.readthedocs.io/en/stable/recipes.html#shutdown-button
import pygame, random, time, os, busio, digitalio, board, adafruit_tpa2016, logging, logging.handlers
from gpiozero import Button, PWMLED, LED
from subprocess import check_call
from signal import pause
## Some variables
# Shutdown
shDwn = False
# Audio
volume = 0.1
maxVolume = 0.3
started = 0
rndmChatterMillis = 0
lastChatterMillis = 0
rndmTelemetryMillis = 0
lastTelemetryMillis = 0
buttonTriggered = True
audioState = 0
# LEDs
lastLEDMillis = 0
lastCountDMillis = 0
LEDMillis = 60
brList = [0.001, 0.02, 0.005, 0.01, 0.008, 0.012, 0.015, 0.002, 0.007, 0.017, 0.011, 0.009] # Brightnesses to use for flickering lights
shDwnBr = 0.1
# Set up logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
# Amplifier code
i2c = busio.I2C(board.SCL, board.SDA)
tpa = adafruit_tpa2016.TPA2016(i2c)
tpa.fixed_gain = 0 # Anything above and the way it is connected now causes clipping. Perhaps separate of more powerful PSU = more oomf!
# GPIO Pins defined
# GPIO19 is busted. Terminal wont hold.
button_top = Button(17, hold_time=3)
button_volUp = Button(4)
button_volDwn = Button(27)
led_front1 = PWMLED(5)
led_front2 = PWMLED(6)
led_front3 = PWMLED(13)
led_front4 = PWMLED(24)
led_button = PWMLED(26)
#led_droidRed = GPIO.PWM(18, 1000) # HW PWM
led_droidRed = PWMLED(18)
led_droidYlw = PWMLED(25)
pygame.init() # Required to get_ticks() since start
bg_music = "/home/pi/droid/audio/background_music.wav"
#set up the mixer
freq = 44100 # audio CD quality
bitsize = -16 # unsigned 16 bit
channels = 2 # 1 is mono, 2 is stereo
buffer = 2048 # number of samples (experiment to get right sound)
pygame.mixer.init(freq, bitsize, channels, buffer)
pygame.mixer.set_num_channels(10) # Not to be confused with the number of channels..right..this is # of "voices"
# Load up the bg music for continous playback
pygame.mixer.music.load(bg_music)
#Create a Channel for each type of audio track
musicChannel = pygame.mixer.Channel(1)
chatterChannel = pygame.mixer.Channel(2)
hoverChannel = pygame.mixer.Channel(3)
telemetryChannel = pygame.mixer.Channel(4)
# Set the volume for all channels separately.. start silent
pygame.mixer.music.set_volume(0)
pygame.mixer.Channel(1).set_volume(0)
pygame.mixer.Channel(2).set_volume(0)
pygame.mixer.Channel(3).set_volume(0)
pygame.mixer.Channel(4).set_volume(0)
# Set the droids lights
led_droidRed.value = 0.1
led_droidYlw.value = 0.2
def volumeChange(volume):
print(volume)
pygame.mixer.music.set_volume(volume)
pygame.mixer.Channel(1).set_volume(volume)
pygame.mixer.Channel(2).set_volume(volume)
pygame.mixer.Channel(3).set_volume(volume)
pygame.mixer.Channel(4).set_volume(volume)
def doIt(): # Could do one thing on the first press..something else on the next..etc...but what..
global audioState
global buttonTriggered
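    # Four-step cycle: each button release advances audioState
    # 0 -> 1 (sound fx on), 1 -> 2 (music on), 2 -> 3 (music off),
    # 3 -> 0 (sound fx off).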
if(audioState == 0 and buttonTriggered == True):
log.debug("Turning on soundfx")
pygame.mixer.Channel(1).set_volume(volume)
pygame.mixer.Channel(2).set_volume(volume)
pygame.mixer.Channel(3).set_volume(volume)
pygame.mixer.Channel(4).set_volume(volume)
audioState = 1
buttonTriggered = False
if (audioState == 1 and buttonTriggered == True):
log.debug("Turning on music")
pygame.mixer.music.set_volume(volume)
audioState = 2
buttonTriggered = False
if (audioState == 2 and buttonTriggered == True):
log.debug("Turning off music")
pygame.mixer.music.set_volume(0)
audioState = 3
buttonTriggered = False
if(audioState == 3 and buttonTriggered == True):
log.debug("Turning off audiofx")
pygame.mixer.Channel(1).set_volume(0)
pygame.mixer.Channel(2).set_volume(0)
pygame.mixer.Channel(3).set_volume(0)
pygame.mixer.Channel(4).set_volume(0)
audioState = 0
buttonTriggered = False
buttonTriggered = True # Might seem counter productive, but we need to reset the value as we exit the function.
def shutDown():
global shDwn
shDwn = True
log.debug("Shutting down amplifier")
tpa.amplifier_shutdown = True
log.debug("Counting down..")
led_front1.value = shDwnBr
led_front2.value = shDwnBr
led_front3.value = shDwnBr
led_front4.value = shDwnBr
time.sleep(0.5)
led_front4.value = 0
time.sleep(0.5)
led_front3.value = 0
time.sleep(0.5)
led_front2.value = 0
time.sleep(0.5)
led_front1.value = 0
    # Use .value so the LEDs are actually switched off (a plain assignment
    # would only rebind the local names to 0).
    led_droidRed.value = 0
    led_droidYlw.value = 0
    led_button.value = 0
log.debug("Shutting down droid controller")
check_call(['sudo', 'poweroff']) # Shutsdown the OS. Will leave this out until prod..
time.sleep(10)
def volDwn():
log.debug("Volume Down")
global volume
volume = volume - 0.05
volumeChange(volume)
def volUp():
log.debug("Volume Up")
global volume
global maxVolume
volume = volume + 0.05
if (volume >= maxVolume):
volume = maxVolume # Limit volume
volumeChange(volume)
# Start amplifier
log.debug("Switching on the amplifier")
tpa.amplifier_shutdown = False # Not strickly necessary..but for completeness.
while True:
## BUTTONS ## - More or less done
button_top.when_held = shutDown # Hold for 3 seconds to shut down..requires power cycle.
button_top.when_released = doIt
button_volUp.when_released = volUp
button_volDwn.when_released = volDwn
## LIGHTS ## - FAR from done..
if (shDwn == False):
if (pygame.time.get_ticks() - lastLEDMillis >= LEDMillis):
led_front1.value = random.choice(brList)
led_front2.value = random.choice(brList)
led_front3.value = random.choice(brList)
led_front4.value = random.choice(brList)
led_button.value = random.choice(brList)
lastLEDMillis = pygame.time.get_ticks()
## MUSIC ## - More or less done
# Background Music Playing
if started == 0:
log.debug("Playing BG music indefinately")
pygame.mixer.music.play(loops=-1) # Looping the loaded music file indef..
started = 1
# Droid Hover, randomized in content
if hoverChannel.get_busy() == False:
rand = str(random.randrange(1, 8))
log.debug("Playing hover:" + str(rand))
hoverChannel.play(pygame.mixer.Sound("/home/pi/droid/audio/hover"+rand+".wav"))
# Droid Chatter, randomized in content and time
if (pygame.time.get_ticks() - lastChatterMillis >= rndmChatterMillis) and (chatterChannel.get_busy() == False) and (telemetryChannel.get_busy() == False):
rand = str(random.randrange(1, 19))
rndmChatterMillis = random.randrange(800, 8000)
log.debug("Playing chatter:" + str(rand) + ", " + str(rndmChatterMillis) + " since last")
lastChatterMillis = pygame.time.get_ticks()
chatterChannel.play(pygame.mixer.Sound("/home/pi/droid/audio/chatter"+rand+".wav"))
# Droid Chatter, randomized in content and time
if (pygame.time.get_ticks() - lastChatterMillis >= rndmChatterMillis) and (chatterChannel.get_busy() == False) and (telemetryChannel.get_busy() == False):
rand = str(random.randrange(1, 8))
rndmTelemetryMillis = random.randrange(1000, 15000)
log.debug("Playing telemetry:" + str(rand) + ", " + str(rndmTelemetryMillis) + " since last")
lastTelemetryMillis = pygame.time.get_ticks()
        telemetryChannel.play(pygame.mixer.Sound("/home/pi/droid/audio/telemetry" + rand + ".wav"))

# File: object/get-item.py (repo: lynndotconfig/python-analysis, no license)
"""Plain objects have no dict-like item access by default."""
class Vhost(object):
def __init__(self, name, permission):
self.name = name
self.permission = permission
if __name__ == '__main__':
vhost1 = {
"name": "test2",
"permissions": "partily"
}
print "name:", vhost1["name"]
print "permissions:", vhost1["permissions"]
    vhost = Vhost("test1", "all")
    # These lookups raise TypeError ('Vhost' object is not subscriptable):
    # the class defines no __getitem__, which is the point of this example.
    print "name:", vhost["name"]
    print "permission:", vhost["permission"]

# File: venv/Scripts/pip-script.py (repo: orjantonnessen/TDT4136IntroAI, no license)
#!C:\GitHub\TDT4136IntroAI\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)

# File: tests/test_environment_variables.py (repo: decobots/sentence_sentences, no license)
import pytest
from preparation.environment_variables import get_env
def test_environment_variables_correct(global_variable):
assert get_env(global_variable[0]) == global_variable[1]
def test_environment_variables_not_defined():
with pytest.raises(OSError):
get_env("undefined_variable_name")

# File: dsl_parser/exceptions.py (repo: mahak/cloudify-dsl-parser, license: Apache-2.0)
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
class MissingRequiredInputError(Exception):
"""
An error raised when a deployment is created and a required input
was not specified on its creation.
"""
def __init__(self, *args, **kwargs):
super(MissingRequiredInputError, self).__init__(*args, **kwargs)
class UnknownInputError(Exception):
"""
An error raised when an unknown input is specified on deployment creation.
"""
def __init__(self, *args, **kwargs):
super(UnknownInputError, self).__init__(*args, **kwargs)
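
# Illustrative only (not part of the original module): how a caller might use
# the two exception types when checking deployment inputs.
def _validate_inputs_example(declared_inputs, supplied_inputs):
    for name in declared_inputs:
        if name not in supplied_inputs:
            raise MissingRequiredInputError('missing required input: %s' % name)
    for name in supplied_inputs:
        if name not in declared_inputs:
            raise UnknownInputError('unknown input: %s' % name)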

# File: IndiaFacts/skill_env/ask_sdk_model/interfaces/videoapp/metadata.py
# (repo: JeremieBou/AlexaSkills, license: Apache-2.0)
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class Metadata(object):
"""
:param title:
:type title: (optional) str
:param subtitle:
:type subtitle: (optional) str
"""
deserialized_types = {
'title': 'str',
'subtitle': 'str'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle'
}
def __init__(self, title=None, subtitle=None):
# type: (Optional[str], Optional[str]) -> None
"""
:param title:
:type title: (optional) str
:param subtitle:
:type subtitle: (optional) str
"""
self.__discriminator_value = None
self.title = title
self.subtitle = subtitle
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Metadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
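    # Illustrative usage (not part of the generated file):
    # Metadata(title="Trailer", subtitle="Official").to_dict()
    # returns {'title': 'Trailer', 'subtitle': 'Official'}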

# File: final_module_iDIL_django/image_analysis/chat_engine.py (repo: nabajitdey/iDIL_final_module, no license)
from .models import *
from cv2 import cv2
import numpy as np
from .forms import *
import glob
class ChatEngine:
def chat_engine(self,question):
        x = 1  # placeholder: the chat-engine logic is not implemented yet

# File: pvae/datasets/__init__.py (repo: emilemathieu/pvae, license: MIT)
from .datasets import *

# File: hrl_fabric_based_tactile_sensor/src/hrl_fabric_based_tactile_sensor/pr2_tactile_sleeve_pps_driver_node.py
# (repo: JuChanyoung/hrl-haptic-manip, license: Apache-2.0)
#!/usr/bin/python
import sys
import math, numpy as np
import copy
import roslib; roslib.load_manifest('hrl_fabric_based_tactile_sensor')
import rospy
import hrl_lib.util as ut
import hrl_lib.transforms as tr
from m3skin_ros.msg import RawTaxelArray
from geometry_msgs.msg import Transform
from m3skin_ros.srv import None_TransformArray, None_TransformArrayResponse
from m3skin_ros.srv import None_String
from pr2_tactile_sleeve_driver_node import Tactile_Sleeve
roslib.load_manifest('pr2_msgs')
from pr2_msgs.msg import PressureState
def pps_cb(msg):
global l_fingertip, r_fingertip
l_fingertip = copy.copy(msg.l_finger_tip)
r_fingertip = copy.copy(msg.r_finger_tip)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser()
p.add_option('--arm_to_use', action='store',
dest='arm', type='string',
help='l or r')
opt, args = p.parse_args()
if opt.arm != 'r' and opt.arm != 'l':
rospy.logerr('Unsupported arm_to_use')
raise RuntimeError('Unsupported arm_to_use')
ts = Tactile_Sleeve(opt.arm)
raw_data_left_pps_pub = rospy.Publisher('pr2_pps_left_sensor/taxels/raw_data',
RawTaxelArray)
raw_data_right_pps_pub = rospy.Publisher('pr2_pps_right_sensor/taxels/raw_data',
RawTaxelArray)
rospy.Service('pr2_pps_left_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_pps_left_cb)
rospy.Service('pr2_pps_left_sensor/taxels/srv/link_name', None_String,
ts.link_name_pps_left_cb)
rospy.Service('pr2_pps_right_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_pps_right_cb)
rospy.Service('pr2_pps_right_sensor/taxels/srv/link_name', None_String,
ts.link_name_pps_right_cb)
global l_fingertip, r_fingertip
l_fingertip = None
r_fingertip = None
if opt.arm == 'l':
input_topic = '/pressure/l_gripper_motor'
if opt.arm == 'r':
input_topic = '/pressure/r_gripper_motor'
rospy.Subscriber(input_topic, PressureState, pps_cb)
rospy.init_node('pps_driver_node')
rospy.loginfo('waiting for fingertip data')
while r_fingertip == None:
rospy.sleep(0.1)
rospy.loginfo('Started publishing data')
rta_left = RawTaxelArray()
rta_right = RawTaxelArray()
import time
start = time.time()
while not rospy.is_shutdown():
l = l_fingertip
r = r_fingertip
#front, bottom, top is order of taxels
data_left = [l[3]+l[4], l[5]+l[6], l[1]+l[2]]
rta_left.val_z = data_left
#front, bottom, top is order of taxels
data_right = [r[3]+r[4], r[1]+r[2], r[5]+r[6]]
rta_right.val_z = data_right
raw_data_left_pps_pub.publish(rta_left)
raw_data_right_pps_pub.publish(rta_right)
rospy.sleep(0.02)

# File: blog/migrations/0003_avatarimages_wallpaperimages.py (repo: xw0235/django-blog-tutorial, no license)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-10-12 05:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_qkcookies'),
]
operations = [
migrations.CreateModel(
name='avatarImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.TextField()),
],
),
migrations.CreateModel(
name='wallpaperImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.TextField()),
],
),
]

# File: runtests.py (repo: iambibhas/imgee, no license)
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import nose
nose.main()

# File: leakage-sort.py (repo: YuXinFan/prototype, no license)
#!/usr/bin/python
from z3 import *
#semantic consequence
def imply(range,lh, rh):
return ForAll(range, Implies(lh, rh))
def declassifyBranchCondition(s, range, constrain, condition):
s.push()
s.add(imply(range, constrain, condition))
then_branch = s.check()
s.pop(num=1)
if (then_branch == sat):
return 1
s.push()
s.add(imply(range, constrain, Not(condition)))
else_branch = s.check()
s.pop(num=1)
if (else_branch == sat):
return -1
return 0
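# Return contract for declassifyBranchCondition:
#   1  -> the constraint entails the condition (only the then-branch is reachable)
#  -1  -> the constraint entails its negation (only the else-branch is reachable)
#   0  -> neither is entailed, i.e. the branch outcome depends on the secret
#         ordering and would leak through control flow.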
# def declassifyBranchCondition(s, range, constrain, condition):
# return If(
# condition,
# ForAll(range, Implies(constrain, condition)),
# ForAll(range, Implies(constrain, Not(condition))))
I = IntVector('I', 8)
n = len(I)
s = Solver()
constrain = And(I[1] > I[2],
I[2] > I[3],
I[3] > I[4],
I[4] > I[5],
I[5] > I[6],
I[6] > I[7],
I[0] > I[1])
inputs = [I[i] for i in range(8)]
def partition(s, arr,low,high):
i = ( low-1 ) # index of smaller element
pivot = arr[high] # pivot
for j in range(low , high):
c = declassifyBranchCondition(s, inputs, constrain, arr[j] < pivot)
if (c == 1):
#print("Branch to Then")
i = i+1
arr[i],arr[j] = arr[j],arr[i]
elif ( c == -1):
#print("Branch to Else")
pass
else:
print("Verification failed at condition: arr[j] < pivot")
arr[i+1],arr[high] = arr[high],arr[i+1]
return ( i+1 )
# The main function that implements QuickSort
# arr[] --> Array to be sorted,
# low --> Starting index,
# high --> Ending index
# Function to do Quick sort
def quickSort(s, arr,low,high):
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(s, arr,low,high)
# Separately sort elements before
# partition and after partition
quickSort(s, arr, low, pi-1)
quickSort(s, arr, pi+1, high)
#arr = [10, 7, 8, 9, 1, 5]
quickSort(s,I,0,n-1)
print ("Sorted array is:")
for i in range(n):
    print(I[i])
"[email protected]"
] | |
5d18fad5753e2a8aa91824a074334bac59808902 | a2d885e1e700fe0107e5f9a34f006b854ea304d0 | /messageBomber using amazon/messageBomber.py | f169a45380621afa7da90bce1edb55fea7ff1a6c | [] | no_license | sayon-moulik/automation-with-python | eec7c5d4435d91ae5fcbd4aa530671b856c781af | bb9c786953e9ed2154090ad8e01310122f9d34d3 | refs/heads/master | 2022-12-03T18:59:38.923668 | 2020-08-29T16:08:48 | 2020-08-29T16:08:48 | 291,299,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | from selenium import webdriver
number=input("give the phone number:")
times = int(input('give the number of times to bomb:'))
browser = webdriver.Firefox(executable_path='./geckodriver')
browser.get('https://www.amazon.in/ap/signin?_encoding=UTF8&ignoreAuthState=1&openid.assoc_handle=inflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.ns.pape=http%3A%2F%2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.in%2F%3Fref_%3Dnav_custrec_signin&switch_account=')
phoneno=browser.find_element_by_id('ap_email')
cont=browser.find_element_by_id('continue')
phoneno.send_keys(number)
cont.click()
otp=browser.find_element_by_id('continue')
otp.click()
for i in range(times-1):
otplink = browser.find_element_by_link_text('Resend OTP')
otplink.click()

# File: prac_06/car.py (repo: Brendan-Hill-00/CP1404_Pracs, no license)
"""CP1404/CP5632 Practical - Car class example."""
from .models import Post
class PostForm(forms.ModelForm):
picture = forms.ImageField(required = False)
class Meta:
model = Post
fields = ('title', 'text', 'picture',)
| [
"[email protected]"
] | |
ccc71337af5b2333284f5be54fd753ba6fb5cd6b | fd00425844776d26ddbea9af8b6cbcf2a0679131 | /polpos/admin.py | f83d55d346eb7c5b6f78cb00c5c36f4c2baeedc7 | [
"MIT"
] | permissive | InformaticsResearchCenter/kabayan | 6066fd43d6b50148dfa64b8ded790f4a019ca8b5 | 98139958d6b42098c233f7a6e74bef8a0edde60d | refs/heads/master | 2022-12-04T06:50:30.574503 | 2020-07-15T15:46:49 | 2020-07-15T15:46:49 | 276,532,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from django.contrib import admin
from .models import PenjadwalanProdi, PenjadwalanBAAK
admin.site.register(PenjadwalanProdi)
admin.site.register(PenjadwalanBAAK) | [
"[email protected]"
] | |
f9311c1afbc3f5b617fcd14c1e8ab7262aeba117 | fb33ebb63b5b293eefbb7125532cc94269361df6 | /prac_06/car.py | 7804abf2224c6ecac18db3b8bc618c682bef199d | [] | no_license | Brendan-Hill-00/CP1404_Pracs | 318ee76a103f98b5b78b9352e57d6ceb9cc85784 | 344d1bd73d63e759c4a241302562f2fecfea9190 | refs/heads/master | 2020-03-26T07:47:26.168018 | 2018-10-23T05:51:48 | 2018-10-23T05:51:48 | 144,670,916 | 0 | 0 | null | 2018-09-04T01:32:05 | 2018-08-14T05:03:42 | Python | UTF-8 | Python | false | false | 954 | py | """CP1404/CP5632 Practical - Car class example."""
class Car:
"""Represent a Car object."""
def __init__(self, name="", fuel=0):
"""Initialise a Car instance.
fuel: float, one unit of fuel drives one kilometre
"""
self.name = name
self.fuel = fuel
self.odometer = 0
def __str__(self):
return "{}, fuel={}, odometer={}".format(self.name, self.fuel, self.odometer)
def add_fuel(self, amount):
"""Add amount to the car's fuel."""
self.fuel += amount
def drive(self, distance):
"""Drive the car a given distance.
Drive given distance if car has enough fuel
or drive until fuel runs out return the distance actually driven.
"""
if distance > self.fuel:
distance = self.fuel
self.fuel = 0
else:
self.fuel -= distance
self.odometer += distance
return distance
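
# A short usage sketch (not part of the original practical):
if __name__ == "__main__":
    car = Car("Toyota", 100)   # 100 units of fuel = 100 km of range
    car.add_fuel(20)           # fuel is now 120
    driven = car.drive(150)    # only 120 km are possible before fuel runs out
    print(car)                 # -> Toyota, fuel=0, odometer=120
    print("driven:", driven)   # -> driven: 120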

# File: "similarity matrix producer/similarity.py" (repo: avaspataru/Dissertation, no license)
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
def readData(fileName):
fp = open( fileName, 'r')
line = fp.readline() #ignore headers
cnt = 1
array = []
while line:
line = fp.readline()
if line == "":
continue
qcnt = 0 #number of "
dcnt = 0 #number of $
cluster_id_s = ""
gene = ""
avg_expr_s = ""
for c in line:
if c == '"':
qcnt=qcnt+1
continue
if c == '$':
dcnt=dcnt+1
continue
if c== ' ':
continue
if(qcnt == 3 and dcnt == 0): #in gene name
gene = gene +c
if(qcnt == 3 and dcnt == 1 ): #in cluster id
cluster_id_s = cluster_id_s + c
if(qcnt == 3 and dcnt == 2 ): # in avg value
avg_expr_s = avg_expr_s + c
avg_expr = float(avg_expr_s)
cluster_id = int(cluster_id_s)
elem = [gene, cluster_id, avg_expr]
array.append(elem)
cnt += 1
fp.close()
d = ['gene', 'cluster_id', 'avg_expr']
df = pd.DataFrame(array, columns=d)
return df
def main():
print "Computing similarities for pre and post identified clusters."
pre_data = readData('preGenesClusters.txt')
post_data = readData('postGenesClusters.txt')
pre_clusters = pre_data['cluster_id'].unique()
post_clusters = post_data['cluster_id'].unique()
top_n = input ("Enter the number of genes to look at (-1 if all):")
for i in pre_clusters:
print "-------------------------------------------------------------------"
losses = []
for j in post_clusters:
pre_cluster = pre_data.loc[pre_data['cluster_id'] == i]
post_cluster = post_data.loc[post_data['cluster_id']==j]
if(top_n == -1):
lookup_genes = pre_cluster['gene'].unique()
else:
top_genes = pre_cluster.nlargest(top_n,'avg_expr')
lookup_genes = top_genes['gene'].unique()
loss = 0
ngene = lookup_genes.size
for gene in lookup_genes:
avg_expr_pre = pre_cluster.loc[pre_cluster['gene'] == gene]['avg_expr'].item()
if(post_cluster.loc[post_cluster['gene'] == gene].empty): #gene doesn't exist in post cluster
loss = loss + avg_expr_pre*avg_expr_pre
continue
avg_expr_post = post_cluster.loc[post_cluster['gene'] == gene]['avg_expr'].item()
#print avg_expr_pre
loss = loss + (avg_expr_pre - avg_expr_post) * (avg_expr_pre - avg_expr_post)
loss = loss / ngene
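            # 'loss' is now the mean squared difference between pre- and post-
            # average expression over the genes examined; a gene missing from
            # the post cluster contributes its full squared pre value.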
losses = losses + [loss]
print "compare pre_" + str(i) + ", post_" + str(j) + ": loss " + str(loss) + "; looked at " + str(ngene) + " genes."
#losses = scale( losses, axis=0, with_mean=True, with_std=True, copy=True )
#print losses
main()

# File: exercise_7/utils.py (repo: HaLamUs/machine-learning-coursera, no license)
import sys
import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
sys.path.append('..')
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data in a nice grid.
Parameters
----------
X : array_like
The input data of size (m x n) where m is the number of examples and n is the number of
features.
example_width : int, optional
THe width of each 2-D image in pixels. If not provided, the image is assumed to be square,
and the width is the floor of the square root of total number of pixels.
figsize : tuple, optional
A 2-element tuple indicating the width and height of figure in inches.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_height, example_width, order='F'), cmap='gray')
ax.axis('off')
def featureNormalize(X):
"""
Normalizes the features in X returns a normalized version of X where the mean value of each
feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when
working with learning algorithms.
Parameters
----------
X : array_like
An dataset which is a (m x n) matrix, where m is the number of examples,
and n is the number of dimensions for each example.
Returns
-------
X_norm : array_like
The normalized input dataset.
mu : array_like
A vector of size n corresponding to the mean for each dimension across all examples.
sigma : array_like
A vector of size n corresponding to the standard deviations for each dimension across
all examples.
"""
mu = np.mean(X, axis=0)
X_norm = X - mu
sigma = np.std(X_norm, axis=0, ddof=1)
X_norm /= sigma
return X_norm, mu, sigma
def plotProgresskMeans(i, X, centroid_history, idx_history):
"""
A helper function that displays the progress of k-Means as it is running. It is intended for use
only with 2D data. It plots data points with colors assigned to each centroid. With the
previous centroids, it also plots a line between the previous locations and current locations
of the centroids.
Parameters
----------
i : int
Current iteration number of k-means. Used for matplotlib animation function.
X : array_like
The dataset, which is a matrix (m x n). Note since the plot only supports 2D data, n should
be equal to 2.
centroid_history : list
A list of computed centroids for all iteration.
idx_history : list
A list of computed assigned indices for all iterations.
"""
K = centroid_history[0].shape[0]
pyplot.gcf().clf()
cmap = pyplot.cm.rainbow
norm = mpl.colors.Normalize(vmin=0, vmax=2)
for k in range(K):
current = np.stack([c[k, :] for c in centroid_history[:i+1]], axis=0)
pyplot.plot(current[:, 0], current[:, 1],
'-Xk',
mec='k',
lw=2,
ms=10,
mfc=cmap(norm(k)),
mew=2)
pyplot.scatter(X[:, 0], X[:, 1],
c=idx_history[i],
cmap=cmap,
marker='o',
s=8**2,
linewidths=1,)
pyplot.grid(False)
pyplot.title('Iteration number %d' % (i+1))
def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
max_iters=10, plot_progress=False):
"""
Runs the K-means algorithm.
Parameters
----------
X : array_like
The data set of size (m, n). Each row of X is a single example of n dimensions. The
data set is a total of m examples.
centroids : array_like
Initial centroid location for each clusters. This is a matrix of size (K, n). K is the total
number of clusters and n is the dimensions of each data point.
findClosestCentroids : func
A function (implemented by student) reference which computes the cluster assignment for
each example.
computeCentroids : func
A function(implemented by student) reference which computes the centroid of each cluster.
max_iters : int, optional
Specifies the total number of interactions of K-Means to execute.
plot_progress : bool, optional
A flag that indicates if the function should also plot its progress as the learning happens.
This is set to false by default.
Returns
-------
centroids : array_like
A (K x n) matrix of the computed (updated) centroids.
idx : array_like
A vector of size (m,) for cluster assignment for each example in the dataset. Each entry
in idx is within the range [0 ... K-1].
anim : FuncAnimation, optional
A matplotlib animation object which can be used to embed a video within the jupyter
notebook. This is only returned if `plot_progress` is `True`.
"""
K = centroids.shape[0]
idx = None
idx_history = []
centroid_history = []
for i in range(max_iters):
idx = findClosestCentroids(X, centroids)
if plot_progress:
idx_history.append(idx)
centroid_history.append(centroids)
centroids = computeCentroids(X, idx, K)
if plot_progress:
fig = pyplot.figure()
anim = FuncAnimation(fig, plotProgresskMeans,
frames=max_iters,
interval=500,
repeat_delay=2,
fargs=(X, centroid_history, idx_history))
return centroids, idx, anim
return centroids, idx
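
# The two callbacks runkMeans expects are written by the student elsewhere in
# the exercise; a minimal sketch of what they would look like (assumed, not
# part of this utils module):
def _find_closest_centroids_sketch(X, centroids):
    # index of the nearest centroid (L2 distance) for every example
    dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
    return np.argmin(dists, axis=1)

def _compute_centroids_sketch(X, idx, K):
    # each new centroid is the mean of its assigned examples
    # (assumes every cluster keeps at least one member)
    return np.array([X[idx == k].mean(axis=0) for k in range(K)])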

# File: main.py (repo: fatmaerciyas/restaurant-reservation, no license)
import json
bilgiler = dict()
def musteri_ekle():
global bilgiler
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file)
            # pull the saved records from the file into the 'bilgiler' dict
except ValueError:
bilgiler = {}
    musteri = dict()  # 'bilgiler' will hold one 'musteri' dict per customer with that customer's details
    bayrak = 0  # 'a' and 'bayrak' let the while loop re-prompt when invalid details are entered
a = "bos"
while (bayrak == 0):
print("\n" * 20)
tarih = input("Lütfen rezervasyon yapacağınız tarihi giriniz:(Örn. :2 = Bu ayın 2 si):")
        yerler = dict()  # this dict has no real role; it only shows the customer the prices as a menu
yerler = {"kapalı_alan": {"2": 50, "4": 60, "6": 70, "8": 80},
"balkon": {"2": 75, "4": 90, "6": 100, "8": 125},
"teras": {"2": 90, "4": 100, "6": 150, "8": 200}}
print("\n" * 3)
for i, j in yerler.items():
print("{} masaya göre fiyatları : {}".format(i, j))
print("\n")
print("Üç çeşit mekanımız bulunmaktadır.Teras, Balkon ve Kapalı alan.")
yer = input("Lütfen oturacağınız mekanı yazınız:")
        bayrak2 = 0  # re-prompt while the requested party size is an odd number
while (bayrak2 == 0):
print("\n")
print("2,4,6 ve 8 kişilik olmak üzere masalarımız bulunmaktadır.")
kisi_sayisi = input("Kaç kişilik masada yer ayırtmak istediğiniz giriniz:")
if (int(kisi_sayisi) % 2 != 0):
print("Lütfen mevcut masalardan yer ayırtınız")
else:
bayrak2 = 1
        for i in range(1, len(bilgiler) + 1):  # +1 so the last saved reservation is checked too
if (bilgiler[str(i)]["Tarih"] == tarih and bilgiler[str(i)]["Mekan"] == yer and bilgiler[str(i)][
"Masa"] == kisi_sayisi):
print("Seçmek istediğiniz masa o tarihte dolu lütfen başka bir yer seçiniz")
a = "bulundu"
if (a == "bulundu"):
bayrak = 0
elif (a == "bos"):
bayrak = 1
print("\n")
print("Seçmek istediğiniz yer o tarihte uygun")
print("\n" * 3)
isim = input("Müşterinin adını giriniz:")
musteri["Ad"] = isim
soyisim = input("Müşterinin soyadını giriniz:")
musteri["Soyad"] = soyisim
musteri["Tarih"] = tarih
musteri["Mekan"] = yer
musteri["Masa"] = kisi_sayisi
    bilgiler[len(bilgiler) + 1] = musteri  # append the 'musteri' dict as the last entry of 'bilgiler'
print("\n" * 7)
print("Ayın {}'inde {} de {} kişilik masanın rezervasyonu {} {} adına yapılmıştır.".format(tarih, yer, kisi_sayisi,
isim, soyisim))
with open("ornek_dosya_icerigi.txt", 'w', encoding="utf-8") as file:
        json.dump(bilgiler, file, ensure_ascii=False, indent=4)  # write the new customer to the file
ana_menu()
def musteri_ara():
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file) # load
except ValueError:
bilgiler = {}
print("\n" * 20)
isim = input("Aranacak müşterinin adını giriniz:")
soyisim = input("Soyadını giriniz:")
print("\n")
    for i in range(1, len(bilgiler) + 1):  # 'bilgiler' has no key 0, so start at 1; +1 because range excludes its end
if (isim == bilgiler[str(i)]["Ad"] and soyisim == bilgiler[str(i)]["Soyad"]):
print("{} {} adlı müşterinin, ayın {} ında {} de {} kişilik masa rezervasyonu bulunmaktadır.".format(isim,
soyisim,
bilgiler[
str(
i)][
"Tarih"],
bilgiler[
str(
i)][
"Mekan"],
bilgiler[
str(
i)][
"Masa"]))
# para hesaplaması için fonk çağırılır ve rezervasyon yaptırdığı masanın ücret bilgisi de gösterilir
ana_menu()
def musteri_guncelle():
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file) # load
except ValueError:
bilgiler = {}
print("\n" * 20)
isim = input("Bilgilerini güncellemek istediğiniz müşterinin;\nAdını giriniz:")
soyisim = input("Soyadını giriniz:")
    bayrak = 0  # used to report when no such customer can be found
    dolu = 0  # checks whether the requested new slot is already reserved by another customer
    a = "bos"
    musteri_indisi = list()  # holds the indices of this customer's reservations
for i in range(1, len(bilgiler) + 1):
if (isim == bilgiler[str(i)]["Ad"] and soyisim == bilgiler[str(i)]["Soyad"]):
            bayrak = 1  # such a customer exists
musteri_indisi.append(i)
    for i in range(1, len(bilgiler) + 1):  # 'bilgiler' has no key 0, so start at 1
if (isim == bilgiler[str(i)]["Ad"] and soyisim == bilgiler[str(i)]["Soyad"]):
if (len(musteri_indisi) == 1):
                # the customer has exactly one reservation
print("\n")
print("Müşteri bulundu")
print("\n" * 2)
print("{} {} adlı müşteriye, ayın {} ında {} de {} kişilik masanın rezervasyonu yapılmıştır"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
else:
                # the customer has more than one reservation,
                # so ask which date's reservation should be updated
for i in musteri_indisi:
print(
"{} {} adlı müşteriye, ayın {} ında {} de {} kişilik masanın rezervasyonu yapılmıştır".format(
isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
bul = input(
"Müşteri rezervasyonlarına sahip. Hangi tarihteki rezervasyonda güncelleme yapmak istediğinizi giriniz:")
for i in range(1, len(bilgiler) + 1):
                    # the chosen date ('bul') is searched for in the customer's reservations
if (isim == bilgiler[str(i)]["Ad"] and soyisim == bilgiler[str(i)]["Soyad"] and bul ==
bilgiler[str(i)]["Tarih"]):
print("Müşteri bulundu\n")
print("{} {} adlı müşteriye, ayın {} ında {} de {} kişilik masanın rezervasyonu yapılmıştır"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
break
print("Hangi bilgiyi değiştirmek istediğinizi girin")
print("\n")
guncel = input(
"Müşteri adı değiştirmek için: 1\nTarih değiştirmek için: 2\nMekan değiştirmek için: 3\nMasa değiştirmek için: 4 ' e basınız:")
if (guncel == "1"):
print("\n")
yeni_ad = input("Yeni adı giriniz:")
yeni_soyad = input("Yeni soyadı giriniz:")
print("\n")
bilgiler[str(i)]["Ad"] = yeni_ad
bilgiler[str(i)]["Soyad"] = yeni_soyad
print("Müşteri bilgileri güncellenmiştir.")
print(
"{} {} adlı müşterinin rezervasyon bilgileri ayın {} ında {} de {} kişilik masa olmak üzere güncellenmiştir"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
break
elif (guncel == "2"):
            # We did not need this check for the name change, since one customer may hold several reservations;
            # here the other reservations must be scanned, because the requested slot may already be taken.
while (dolu == 0):
print("\n")
yeni_tarih = input("Yeni tarihi giriniz:")
                # check whether the requested slot is already taken
for j in range(1, len(bilgiler) + 1):
if (bilgiler[str(j)]["Tarih"] == yeni_tarih and bilgiler[str(j)]["Mekan"] == bilgiler[str(i)][
"Mekan"] and bilgiler[str(j)]["Masa"] == bilgiler[str(i)]["Masa"]):
print("Seçmek istediğiniz masa o tarihte dolu lütfen başka bir yer seçiniz")
a = "bulundu"
break
else:
a = "bos"
if (a == "bulundu"):
dolu = 0
elif (a == "bos"):
dolu = 1
            # the table is free, so apply the requested update
bilgiler[str(i)]["Tarih"] = yeni_tarih
print("\n")
print("Müşteri bilgileri güncellenmiştir.")
print(
"{} {} adlı müşterinin rezervasyon bilgileri ayın {} ında {} de {} kişilik masa olmak üzere güncellenmiştir"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
break
elif (guncel == "3"):
            # We did not need this check for the name change, since one customer may hold several reservations;
            # here the other reservations must be scanned, because the requested slot may already be taken.
while (dolu == 0):
print("\n")
yeni_mekan = input("Yeni mekan giriniz:")
                # check whether the requested slot is already taken
for j in range(1, len(bilgiler) + 1):
if (bilgiler[str(j)]["Tarih"] == bilgiler[str(i)]["Tarih"] and bilgiler[str(j)][
"Mekan"] == yeni_mekan
and bilgiler[str(j)]["Masa"] == bilgiler[str(i)]["Masa"]):
print("Seçmek istediğiniz masa o tarihte dolu lütfen başka bir yer seçiniz")
a = "bulundu"
break
else:
a = "bos"
if (a == "bulundu"):
dolu = 0
elif (a == "bos"):
dolu = 1
            # the table is free, so apply the requested update
bilgiler[str(i)]["Mekan"] = yeni_mekan
print("\n")
print("Müşteri bilgileri güncellenmiştir.")
print(
"{} {} adlı müşterinin rezervasyon bilgileri ayın {} ında {} de {} kişilik masa olmak üzere güncellenmiştir"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
break
elif (guncel == "4"):
            # We did not need this check for the name change, since one customer may hold several reservations;
            # here the other reservations must be scanned, because the requested slot may already be taken.
while (dolu == 0):
print("\n")
yeni_masa = input("Kaç kişilik masa istediğinizi giriniz:")
if (int(yeni_masa) % 2 != 0):
print("Lütfen geçerli masa sayısı giriniz")
continue
                # check whether the requested slot is already taken
for j in range(1, len(bilgiler) + 1):
if (bilgiler[str(j)]["Tarih"] == bilgiler[str(i)]["Tarih"] and bilgiler[str(j)]["Mekan"] ==
bilgiler[str(i)]["Mekan"]
and bilgiler[str(j)]["Masa"] == yeni_masa):
print("Seçmek istediğiniz masa o tarihte dolu lütfen başka bir yer seçiniz")
a = "bulundu"
break
else:
a = "bos"
if (a == "bulundu"):
dolu = 0
elif (a == "bos"):
dolu = 1
            # the table is free, so apply the requested update
bilgiler[str(i)]["Masa"] = yeni_masa
print("\n")
print("Müşteri bilgileri güncellenmiştir.")
print(
"{} {} adlı müşterinin rezervasyon bilgileri ayın {} ında {} de {} kişilik masa olmak üzere güncellenmiştir"
.format(isim, soyisim, bilgiler[str(i)]["Tarih"], bilgiler[str(i)]["Mekan"],
bilgiler[str(i)]["Masa"]))
break
with open("ornek_dosya_icerigi.txt", 'w', encoding="utf-8") as file:
        json.dump(bilgiler, file, ensure_ascii=False, indent=4)  # write the updates back to the file
if (bayrak == 0):
print("Öyle bir müşteri rezervasyonu bulunamadı.")
ana_menu()
def musteri_sil():
global bilgiler
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file) # load
except ValueError:
bilgiler = {}
print("\n" * 20)
isim = input("Silmek istediğiniz müşterinin adını giriniz:")
soyisim = input("Soyadını giriniz:")
rezervasyon_adeti = list()
print("\n")
for i in range(1, len(bilgiler) + 1):
if (bilgiler[str(i)]["Ad"] == isim and bilgiler[str(i)]["Soyad"] == soyisim):
rezervasyon_adeti.append(i)
if (len(rezervasyon_adeti) == 1):
        # the customer has a single reservation
anahtar = rezervasyon_adeti[0]
for i in range(1, len(bilgiler) + 1):
if (i == anahtar):
del bilgiler[str(anahtar)]
print("\n" * 2)
print("Müşteri rezervasyonu iptal edildi")
else:
if (i > anahtar):
a = i - 1
bilgiler[str(a)] = bilgiler[str(i)]
del bilgiler[str(i)]
elif (len(rezervasyon_adeti) > 1):
        # the customer has more than one reservation
for i in rezervasyon_adeti:
print(bilgiler[str(i)])
print("Olmak üzere rezervasyonlarınız bulunmaktadır.")
tarih = input("Hangi tarihteki rezervasyonu iptal ettirmek istiyorsanız o tarihi giriniz:")
        for i in range(1, len(bilgiler) + 1):  # +1 so the last reservation can be matched too
if (bilgiler[str(i)]["Ad"] == isim and bilgiler[str(i)]["Soyad"] == soyisim and bilgiler[str(i)][
"Tarih"] == tarih):
                rezervasyon_adeti.clear()  # drop the customer's other reservations from the list
                rezervasyon_adeti.append(i)  # keep only the reservation for the chosen date
anahtar = rezervasyon_adeti[0]
for i in range(1, len(bilgiler) + 1):
if (i == anahtar):
del bilgiler[str(anahtar)]
print("\n" * 2)
print("Müşteri rezervasyonu iptal edildi")
else:
if (i > anahtar):
a = i - 1
bilgiler[str(a)] = bilgiler[str(i)]
del bilgiler[str(i)]
with open("ornek_dosya_icerigi.txt", 'w', encoding="utf-8") as file:
        json.dump(bilgiler, file, ensure_ascii=False, indent=4)  # write the remaining reservations back to the file
ana_menu()
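# The delete-and-shift logic above appears twice (single- and multi-reservation
# paths). Below is a minimal sketch of a shared helper; the name
# `_sil_ve_kaydir` is hypothetical and the helper is not wired into the
# functions above. It removes entry `anahtar` and renumbers the remaining
# string keys so they stay contiguous from "1".
def _sil_ve_kaydir(kayitlar, anahtar):
    del kayitlar[str(anahtar)]
    for i in range(anahtar + 1, len(kayitlar) + 2):
        kayitlar[str(i - 1)] = kayitlar[str(i)]  # shift each later record down by one
        del kayitlar[str(i)]
    return kayitlar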
def fiyat_hesapla():
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file)
except ValueError:
bilgiler = {}
print("\n" * 20)
isim = input("Rezervasyon fiyat bilgisini öğrenmek istediğiniz müşterinin \nAdını giriniz:")
soyisim = input("Soyadını giriniz:")
kapalı_alan = {"2": 50, "4": 60, "6": 70, "8": 80}
balkon = {"2": 75, "4": 90, "6": 100, "8": 125}
teras = {"2": 90, "4": 100, "6": 150, "8": 200}
    # venue prices per table size are kept in the dictionaries above
    musteri_rezervasyon_sayaci = 0  # counts how many reservations this customer has
for i in range(1, len(bilgiler) + 1):
if (bilgiler[str(i)]["Ad"] == isim and bilgiler[str(i)]["Soyad"] == soyisim):
musteri_rezervasyon_sayaci += 1
    tutar = 0  # total amount to be paid
    sayac = 0  # ensures the customer is asked only once which date's total they want
for i in range(1, len(bilgiler) + 1):
if (bilgiler[str(i)]["Ad"] == isim and bilgiler[str(i)]["Soyad"] == soyisim):
if (musteri_rezervasyon_sayaci > 1):
                # if the customer has more than one reservation, ask which date they mean
if (sayac == 0):
tarih = input("Hangi tarihteki tutarı istiyorsanız o tarihi giriniz:")
sayac += 1
else:
if (bilgiler[str(i)]["Ad"] == isim and bilgiler[str(i)]["Soyad"] == soyisim and bilgiler[str(i)][
"Tarih"] == tarih):
pass
else:
continue
if (bilgiler[str(i)]["Mekan"] == "kapalı alan" or bilgiler[str(i)]["Mekan"] == "Kapalı alan"):
a = int(bilgiler[str(i)]["Masa"])
tutar = kapalı_alan[str(a)]
print("\n" * 2)
print("Ödemeniz gereken tutar {} TL.".format(tutar))
elif (bilgiler[str(i)]["Mekan"] == "balkon" or bilgiler[str(i)]["Mekan"] == "Balkon"):
a = int(bilgiler[str(i)]["Masa"])
tutar = balkon[str(a)]
print("\n" * 2)
print("Ödemeniz gereken tutar {} TL.".format(tutar))
elif (bilgiler[str(i)]["Mekan"] == "teras" or bilgiler[str(i)]["Mekan"] == "Teras"):
a = int(bilgiler[str(i)]["Masa"])
tutar = teras[str(a)]
print("\n" * 2)
print("Ödemeniz gereken tutar {} TL.".format(tutar))
if (1 < musteri_rezervasyon_sayaci < 5):
print("\n")
print("1'den fazla rezervasyon yaptırdığınız için size özel %5 indirimimiz mevcuttur :) ")
print("Yaptırdığınız tüm rezervasyonların fiyatlarına %5 indirim yapılır")
yeni_tutar = tutar - (tutar * (5 / 100))
print("\n")
print("{} tarihinde indirimli ödemeniz gereken tutar {} TL.".format(bilgiler[str(i)]["Tarih"],
yeni_tutar))
break
            elif (5 <= musteri_rezervasyon_sayaci < 10):
                print("\n")
                print("Daha önce rezervasyon yaptırdığınız için size özel %10 indirimimiz mevcuttur :) ")
                yeni_tutar = tutar - (tutar * (10 / 100))
print("\n")
print("{} tarihinde indirimli ödemeniz gereken tutar {} TL.".format(bilgiler[str(i)]["Tarih"],
yeni_tutar))
break
    if (cocuk_ozel_sayac != 0):
        # called from cocuk_ozel(); in that case return the total instead of going back to the main menu
        return tutar
    elif (cocuk_ozel_sayac == 0):
        # called directly from the main menu; go back to the main menu afterwards
        ana_menu()
cocuk_ozel_sayac = 0  # this flag keeps fiyat_hesapla() from returning to the main menu when called from cocuk_ozel()
def cocuk_ozel():  # special discounts and offers for children
global cocuk_ozel_sayac
with open("ornek_dosya_icerigi.txt", "r", encoding="utf-8") as file:
try:
bilgiler = json.load(file)
except ValueError:
bilgiler = {}
print("\n" * 20)
    cocuk_ozel_sayac += 1  # tells fiyat_hesapla() it is being called from here, so it returns the total
    tutar = fiyat_hesapla()
    cocuk_ozel_sayac = 0  # reset right away so later direct calls to fiyat_hesapla() behave normally
    def eglence(yas):
        # if the child is younger than 10, special offers are presented;
        # returns the extra cost (0 when no offer is taken)
        fiyat = 0
        if (yas < 10):
            oyun_park = input(
                " Restoranımızda çocuklara özel oyun parkı bulunmaktadır.\n"
                " İsterseniz rezervasyon ödemenize ek sadece 100 TL'ye oyun parkında dilediğinizce zaman geçirebilirsiniz.\n"
                " Bu tekliften yararlanmak istiyor musunuz (Evet veya Hayır) yazınız:")
            if (oyun_park == "evet" or oyun_park == "Evet"):
                fiyat = fiyat + 100
                print("\n")
                print("Oyun parkında iyi eğlenceler dileriz :)")
                print("\n" * 2)
                print("Bizim için en önemlisi sizin ve çocuklarınızın rahatlığı")
                print("\n")
                bakici = input(
                    " Yemek yerken sürekli çocuklarınıza göz kulak olmanıza gerek yok.\n"
                    " İsterseniz rezervasyon ödemenize ek sadece 100 TL'ye çocuklarınıza bakıcı hizmeti sunuyoruz.\n"
                    " Bu tekliften yararlanmak istiyor musunuz (Evet veya Hayır) yazınız:")
                if (bakici == "evet" or bakici == "Evet"):
                    fiyat = fiyat + 100
                    print("\n")
                    print("Bizi seçtiğiniz için teşekkür eder, iyi eğlenceler dileriz :)")
                    # print("Ödemeniz gereken tutar {} Tl".format(tutar))
                elif (bakici == "hayır" or bakici == "Hayır"):
                    print("\n")
                    print("Bizi seçtiğiniz için teşekkür ederiz :)")
                    # print("Ödemeniz gereken tutar {} Tl".format(tutar))
            elif (oyun_park == "hayır" or oyun_park == "Hayır"):
                print("\n")
                print("Bizi seçtiğiniz için teşekkür ederiz :)")
            else:
                pass
        return fiyat
    cocuk_sayisi = int(input("Kaç adet çocuğunuz olduğunu giriniz:"))
    for i in range(cocuk_sayisi):
        print("\n")
        cocuk_yas = int(input("{}. çocuğunuzun yaşını giriniz:".format(i + 1)))
        tutar = tutar + eglence(cocuk_yas)
if (1 <= cocuk_sayisi <= 2):
cocuk_ozel_sayac = 0
print("\n" * 2)
print("Çocuklara özel %5 indirimimizden yararlanıyorsunuz.")
tutar = tutar - (tutar * (5 / 100))
print("Ödemeniz gereken indirimli tutar: {} TL".format(tutar))
elif (cocuk_sayisi > 2):
cocuk_ozel_sayac = 0
print("\n" * 2)
print("Çocuklara özel %10 indirimimizden yararlanıyorsunuz.")
tutar = tutar - (tutar * (10 / 100))
print("Ödemeniz gereken indirimli tutar: {} TL".format(tutar))
ana_menu()
def ana_menu():
print("\n")
islem = input("Ne tür bir işlem yapmak istediğinizi yazınız.\n"
"Yeni bir müşteri rezervasyonu eklemek için -ekle- \n"
"Müşteri aramak için -ara-\n"
"Müşteri bilgilerini güncellemek için -güncelle-\n"
"İptal olan rezervasyonları silmek için -sil-\n"
"Müşteri rezervasyon fiyatı hesaplamak için -hesapla-\n"
"Çocuklu müşterilerimize özel indirimler ve çocuklara özel eğlencelerden yararlanmak için -çocuk-\n"
"Sistemden çıkış yapmak için -çıkış- yazınız:")
if (islem == "ekle" or islem == "Ekle"):
musteri_ekle()
elif (islem == "ara" or islem == "Ara"):
musteri_ara()
elif (islem == "güncelle" or islem == "Güncelle"):
musteri_guncelle()
elif (islem == "sil" or islem == "Sil"):
musteri_sil()
elif (islem == "hesapla" or islem == "Hesapla"):
fiyat_hesapla()
elif (islem == "çocuk" or islem == "Çocuk"):
cocuk_ozel()
elif (islem == "çıkış" or islem == "Çıkış"):
return None
ana_menu() | [
"[email protected]"
] | |
7c41c9497b4849dcb62f2a90fddadf53879e5b91 | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/python/estimator/canned/head.py | 44c0a891298d270c410658bb4a603de4ff466789 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61,632 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# The above default is defined by TF Serving, but these next three are just
# a local convention without any special meaning.
_CLASSIFY_SERVING_KEY = 'classification'
_REGRESS_SERVING_KEY = 'regression'
_PREDICT_SERVING_KEY = 'predict'
# A LossSpec contains
# * a scalar `Tensor` representing reduced weighted training loss
# * a scalar `Tensor` representing the unreduced unweighted loss
# * a scalar `Tensor` representing the example weights
# * possibly processed labels (e.g. vocabulary lookup, shape manipulation, etc)
LossSpec = collections.namedtuple(
'LossSpec', ['training_loss', 'unreduced_loss', 'weights',
'processed_labels'])
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
class _Head(object):
"""Interface for the head/top of a model.
Given logits (or output of a hidden layer), a Head knows how to compute
predictions, loss, train_op, metrics and export outputs. It is meant to:
1. Simplify writing model_fn and to make model_fn more configurable
2. Support wide range of machine learning models. Since most heads can work
with logits, they can support DNN, RNN, Wide, Wide&Deep,
Global objectives, Gradient boosted trees and many other types
of machine learning models.
Common usage:
Here is simplified model_fn to build a DNN regression model.
```python
def _my_dnn_model_fn(features, labels, mode, params, config=None):
# Optionally your callers can pass head to model_fn as a param.
head = tf.contrib.learn.regression_head(...)
input = tf.contrib.layers.input_from_feature_columns(features, ...)
last_hidden_layer_out = tf.contrib.layers.stack(
input, tf.contrib.layers.fully_connected, [1000, 500])
logits = tf.contrib.layers.fully_connected(
last_hidden_layer_out, head.logits_dimension, activation_fn=None)
def _train_op_fn(loss):
return optimizer.minimize(loss)
return head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=_train_op_fn)
```
  There are cases where computing and applying gradients cannot be meaningfully
  captured with the train_op_fn we support (for example, with a sync optimizer).
  In such cases, you can take on the responsibility yourself. Here is a common
  use case,
```python
estimator_spec = head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=tf.contrib.learn.no_op_train_fn)
if mode == model_fn.ModeKeys.TRAIN:
optimizer = ...
sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
loss=estimator_spec.loss, ...)
hooks = [sync.make_session_run_hook(is_chief)]
... update train_op and hooks in EstimatorSpec and return
```
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def name(self):
"""The name of this head.
Returns:
A string.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractproperty
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractmethod
def create_loss(self, features, mode, logits, labels):
"""Returns a loss Tensor from provided logits.
This function is designed to be used by framework developers. Almost all
users should use create_estimator_spec(), which calls this internally.
`mode` and `features` are most likely not used, but some Head
implementations may require them.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used for loss construction.
labels: Labels `Tensor`, or `dict` of same.
Returns:
A LossSpec that contains
* the scalar `Tensor` representing reduced weighted training loss
* the scalar `Tensor` representing the unreduced unweighted loss
* the scalar `Tensor` representing the example weights
* possibly processed labels (e.g. vocabulary lookup, shape manipulation,
etc.)
To be extendable in the future.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractmethod
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None,
regularization_losses=None):
"""Returns `EstimatorSpec` that a model_fn can return.
Please note that,
+ All args must be passed via name.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used by the head.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
to optimize the model with the loss. This is used in TRAIN mode and
must not be None. None is allowed in other modes. If you want to
optimize loss yourself you can pass `no_op_train_fn` and then use
EstimatorSpec.loss to compute and apply gradients.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses.
Returns:
`EstimatorSpec`.
"""
raise NotImplementedError('Calling an abstract method.')
def _check_dense_labels_match_logits_and_reshape(
labels, logits, expected_labels_dimension):
"""Checks that labels shape matches logits and reshapes if needed.
Consider logits of shape [D0, D1, ... DN, logits_dimension]. Then labels
shape must be [D0, D1, ... DN, expected_labels_dimension].
If expected_labels_dimension=1, labels could be [D0, D1, ... DN] and this
method reshapes them to [D0, D1, ... DN, 1].
Args:
labels: labels Tensor.
logits: logits Tensor.
expected_labels_dimension: Integer.
Returns:
Validated and reshaped labels Tensor.
Raises:
ValueError: If labels is a SparseTensor.
ValueError: If labels shape is statically defined and fails validation.
OpError: If labels shape is not statically defined and fails validation.
"""
if labels is None:
raise ValueError(
'You must provide a labels Tensor. Given: None. '
'Suggested troubleshooting steps: Check that your data contain '
'your label feature. Check that your input_fn properly parses and '
'returns labels.')
with ops.name_scope(None, 'labels', (labels, logits)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
raise ValueError(
'SparseTensor labels are not supported. '
'labels must be a Tensor of shape [D0, D1, ..., DN, %s], '
'e.g. [batch_size, %s]. '
'Suggested Fix (1): Check the label feature in your data. '
'Each example must contain %s value(s). If not, your choice of label '
'was probably incorrect. '
'Suggested Fix (2): In your input_fn, use '
'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
'' % (expected_labels_dimension, expected_labels_dimension,
expected_labels_dimension))
if (labels.shape.ndims is not None and logits.shape.ndims is not None and
labels.shape.ndims == logits.shape.ndims - 1):
labels = array_ops.expand_dims(labels, -1)
labels_shape = array_ops.shape(labels)
logits_shape = array_ops.shape(logits)
err_msg = (
'labels shape must be [D0, D1, ... DN, {}]. '
'Suggested Fix: check your n_classes argument to the estimator '
'and/or the shape of your label.'.format(expected_labels_dimension))
assert_rank = check_ops.assert_rank_at_least(labels, 2, message=err_msg)
with ops.control_dependencies([assert_rank]):
static_shape = labels.shape
if static_shape.ndims is not None:
dim1 = static_shape[-1]
if (dim1 is not None) and (dim1 != expected_labels_dimension):
raise ValueError(
'Mismatched label shape. '
'Classifier configured with n_classes=%s. Received %s. '
'Suggested Fix: check your n_classes argument to the estimator '
'and/or the shape of your label.' %
(expected_labels_dimension, dim1))
expected_labels_shape = array_ops.concat(
[logits_shape[:-1], [expected_labels_dimension]], axis=0)
assert_dimension = check_ops.assert_equal(
expected_labels_shape, labels_shape, message=err_msg,
data=['expected_labels_shape: ', expected_labels_shape,
'labels_shape: ', labels_shape])
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(labels, name=scope)
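# Illustrative shape walk-through for the check above (added commentary, not
# part of the original module). With expected_labels_dimension=1:
#
#   logits: shape [32, 10, 3]  ->  expected labels shape [32, 10, 1]
#   labels: shape [32, 10]     ->  reshaped to [32, 10, 1] and returned
#   labels: shape [32, 10, 3]  ->  fails the assert_equal dimension check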
def _get_weights_and_check_match_logits(
features, weight_column, logits, allow_per_logit_weights=False):
"""Fetches weights from features and checks that the shape matches logits.
Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
can be either:
* [D0, D1, ... DN, logits_dimension] if `allow_per_logit_weights=True`.
* [D0, D1, ... DN, 1]
* [D0, D1, ... DN]: In this case, weights is reshaped into
[D0, D1, ... DN, 1] to work with weight broadcasting rules.
Args:
features: The features dict that contains weights.
weight_column: The weight column. If not given, this method returns 1.
logits: logits Tensor.
allow_per_logit_weights: Boolean. Whether we allow weights along the logits
dimension, namely shape `[D0, D1, ... DN, logits_dimension]`.
Returns:
Validated and reshaped weights Tensor.
Raises:
ValueError: If the weights `Tensor` cannot be cast into float.
"""
if allow_per_logit_weights:
err_msg = (
'weights shape must be [D0, D1, ... DN], [D0, D1, ... DN, 1] or '
'[D0, D1, ... DN, logits_dimension]')
else:
err_msg = (
'weights shape must be [D0, D1, ... DN] or [D0, D1, ... DN, 1]')
with ops.name_scope(
None, 'weights',
values=tuple(six.itervalues(features)) + (logits,)) as scope:
# Fetch the weights.
if weight_column is None:
return 1.
if isinstance(weight_column, six.string_types):
weight_column = feature_column_lib.numeric_column(
key=weight_column, shape=(1,))
if not isinstance(weight_column, feature_column_lib._NumericColumn): # pylint: disable=protected-access
raise TypeError('Weight column must be either a string or _NumericColumn.'
' Given type: {}.'.format(type(weight_column)))
weights = weight_column._get_dense_tensor( # pylint: disable=protected-access
feature_column_lib._LazyBuilder(features)) # pylint: disable=protected-access
if not (weights.dtype.is_floating or weights.dtype.is_integer):
raise ValueError('Weight column should be castable to float. '
'Given dtype: {}'.format(weights.dtype))
weights = math_ops.to_float(weights, name='weights')
# Validate the weights shape.
weights_shape = array_ops.shape(weights, name='weights_shape')
logits_shape = array_ops.shape(logits, name='logits_shape')
if (weights.shape.ndims is not None and logits.shape.ndims is not None and
weights.shape.ndims == logits.shape.ndims - 1):
assert_dimension = check_ops.assert_equal(
logits_shape[:-1], weights_shape, message=err_msg,
data=['logits_shape: ', logits_shape,
'weights_shape: ', weights_shape])
with ops.control_dependencies([assert_dimension]):
return array_ops.expand_dims(weights, -1, name=scope)
supported_weights_shape = array_ops.concat([logits_shape[:-1], [1]], axis=0)
if allow_per_logit_weights:
condition = math_ops.reduce_any(
[math_ops.reduce_all(math_ops.equal(logits_shape, weights_shape)),
math_ops.reduce_all(math_ops.equal(
supported_weights_shape, weights_shape))])
assert_dimension = control_flow_ops.Assert(
condition=condition,
data=[err_msg, 'logits_shape: ', logits_shape,
'weights_shape: ', weights_shape])
else:
assert_dimension = check_ops.assert_equal(
supported_weights_shape, weights_shape, message=err_msg,
data=['logits_shape: ', logits_shape,
'weights_shape: ', weights_shape])
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(weights, name=scope)
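# Illustrative weight shapes for the check above (added commentary). With
# logits of shape [batch_size, 5] and allow_per_logit_weights=True, the
# accepted weight shapes are [batch_size], [batch_size, 1] (broadcast over the
# logits dimension) and [batch_size, 5] (one weight per logit); anything else
# trips the assertion.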
def _check_logits_final_dim(logits, expected_logits_dimension):
"""Checks that logits shape is [D0, D1, ... DN, logits_dimension]."""
with ops.name_scope(None, 'logits', (logits,)) as scope:
logits = math_ops.to_float(logits)
logits_shape = array_ops.shape(logits)
assert_rank = check_ops.assert_rank_at_least(
logits, 2, data=[logits_shape],
message='logits shape must be [D0, D1, ... DN, logits_dimension]')
with ops.control_dependencies([assert_rank]):
static_shape = logits.shape
if static_shape.ndims is not None and static_shape[-1] is not None:
if static_shape[-1] != expected_logits_dimension:
raise ValueError(
'logits shape must be [D0, D1, ... DN, logits_dimension], '
'got %s.' % (static_shape,))
return logits
assert_dimension = check_ops.assert_equal(
expected_logits_dimension, logits_shape[-1], data=[logits_shape],
message='logits shape must be [D0, D1, ... DN, logits_dimension]')
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(logits, name=scope)
def _validate_loss_fn_args(loss_fn):
"""Validates loss_fn arguments.
Required arguments: labels, logits.
Optional arguments: features.
Args:
loss_fn: The loss function.
Raises:
ValueError: If the signature is unexpected.
"""
loss_fn_args = util.fn_args(loss_fn)
for required_arg in ['labels', 'logits']:
if required_arg not in loss_fn_args:
raise ValueError(
'loss_fn must contain argument: {}. '
'Given arguments: {}'.format(required_arg, loss_fn_args))
invalid_args = list(set(loss_fn_args) - set(['labels', 'logits', 'features']))
if invalid_args:
raise ValueError('loss_fn has unexpected args: {}'.format(invalid_args))
def _call_loss_fn(loss_fn, labels, logits, features, expected_loss_dim=1):
"""Calls loss_fn and checks the returned shape.
Args:
loss_fn: The loss function.
labels: Processed labels Tensor.
logits: Logits Tensor of shape [D0, D1, ... DN, logits_dimension].
features: Features dict.
expected_loss_dim: The expected last dimension of loss Tensor.
Returns:
Loss Tensor with shape [D0, D1, ... DN, expected_loss_dim].
"""
loss_fn_args = util.fn_args(loss_fn)
kwargs = {}
if 'features' in loss_fn_args:
kwargs['features'] = features
with ops.name_scope(
None, 'call_loss_fn',
values=[labels, logits] + list(six.itervalues(features))):
unweighted_loss = loss_fn(labels=labels, logits=logits, **kwargs)
logits_shape = array_ops.shape(logits, name='logits_shape')
expected_loss_shape = array_ops.concat(
[logits_shape[:-1], [expected_loss_dim]], axis=0,
name='expected_loss_shape')
loss_shape = array_ops.shape(unweighted_loss, name='loss_shape')
check_loss_shape_op = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(loss_shape, expected_loss_shape)),
data=[
'loss_fn must return Tensor of shape '
'[D0, D1, ... DN, {}]. '.format(expected_loss_dim),
'logits_shape: ', logits_shape, 'loss_shape: ', loss_shape],
name='check_loss_shape')
with ops.control_dependencies([check_loss_shape_op]):
return array_ops.identity(unweighted_loss)
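# A minimal sketch of a custom `loss_fn` satisfying the shape contract checked
# above (hypothetical example, not part of the original module):
#
#   def example_abs_loss(labels, logits):
#     # must return unreduced loss of shape [D0, D1, ... DN, expected_loss_dim]
#     return math_ops.abs(math_ops.to_float(labels) - logits)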
def _indicator_labels_mean(labels, weights=None, name=None):
with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
labels = math_ops.to_float(labels, name='labels')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
return metrics_lib.mean(labels, weights=weights, name=scope)
def _classification_output(scores, n_classes, label_vocabulary=None):
batch_size = array_ops.shape(scores)[0]
if label_vocabulary:
export_class_list = label_vocabulary
else:
export_class_list = string_ops.as_string(math_ops.range(n_classes))
export_output_classes = array_ops.tile(
input=array_ops.expand_dims(input=export_class_list, axis=0),
multiples=[batch_size, 1])
return export_output.ClassificationOutput(
scores=scores,
# `ClassificationOutput` requires string classes.
classes=export_output_classes)
def _accuracy_baseline(labels_mean):
"""Return accuracy baseline based on labels mean.
This is the best the model could do by always predicting one class.
Args:
labels_mean: Tuple of value and update op.
Returns:
Tuple of value and update op.
"""
with ops.name_scope(None, 'accuracy_baseline', labels_mean):
value, update_op = labels_mean
return (
math_ops.maximum(value, 1. - value, name='value'),
math_ops.maximum(update_op, 1 - update_op, name='update_op'))
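# Worked example for the baseline above (added commentary): if 30% of labels
# are positive, labels_mean = 0.3 and the baseline is max(0.3, 0.7) = 0.7,
# i.e. the accuracy of always predicting the majority class.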
def _predictions_mean(predictions, weights=None, name=None):
with ops.name_scope(
name, 'predictions_mean', (predictions, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.mean(predictions, weights=weights, name=scope)
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.auc(
labels=labels, predictions=predictions, weights=weights, curve=curve,
name=scope)
def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'accuracy_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.accuracy(
labels=labels, predictions=threshold_predictions, weights=weights,
name=scope)
def _precision_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'precision_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
precision_tensor, update_op = metrics_lib.precision_at_thresholds(
labels=labels, predictions=predictions, thresholds=(threshold,),
weights=weights, name=scope)
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _recall_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'recall_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
    recall_tensor, update_op = metrics_lib.recall_at_thresholds(
        labels=labels, predictions=predictions, thresholds=(threshold,),
        weights=weights, name=scope)
    return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)
def _multi_class_head_with_softmax_cross_entropy_loss(
n_classes,
weight_column=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
"""Creates a '_Head' for multi class classification.
The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`.
In many applications, the shape is `[batch_size, n_classes]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
`[D0, D1, ... DN, 1]`. If `label_vocabulary` given, `labels` must be a string
`Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
`labels` must be an integer `Tensor` with values specifying the class index.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support integer `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHeadWithSigmoidCrossEntropyLoss`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, that means labels are already encoded as an
integer within [0, n_classes). If given, labels must be of string type and
have any value in `label_vocabulary`. Note that errors will be raised if
`label_vocabulary` is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for multi class classification.
Raises:
ValueError: If `n_classes`, `label_vocabulary` or `loss_reduction` is
invalid.
"""
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError(
'label_vocabulary should be a list or a tuple. Given type: {}'.format(
type(label_vocabulary)))
if (loss_reduction not in losses.Reduction.all() or
loss_reduction == losses.Reduction.NONE):
raise ValueError('Invalid loss_reduction: {}'.format(loss_reduction))
if loss_fn:
_validate_loss_fn_args(loss_fn)
return _MultiClassHeadWithSoftmaxCrossEntropyLoss(
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
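# A minimal usage sketch for the factory above (the estimator wiring is an
# assumption for illustration; it is not shown in this file):
#
#   head = _multi_class_head_with_softmax_cross_entropy_loss(
#       n_classes=3, label_vocabulary=['cat', 'dog', 'bird'])
#   # inside a model_fn, with logits of shape [batch_size, 3] and string labels:
#   spec = head.create_estimator_spec(
#       features=features, mode=mode, logits=logits, labels=labels,
#       train_op_fn=lambda loss: optimizer.minimize(loss))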
class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
"""See `_multi_class_head_with_softmax_cross_entropy_loss`."""
def __init__(self,
n_classes,
weight_column=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
if (n_classes is None) or (n_classes <= 2):
raise ValueError('n_classes must be > 2: %s.' % n_classes)
self._n_classes = n_classes
self._weight_column = weight_column
self._label_vocabulary = label_vocabulary
self._loss_reduction = loss_reduction
self._loss_fn = loss_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return self._n_classes
def _eval_metric_ops(
self, labels, class_ids, weights, unreduced_loss, regularization_loss):
"""Returns the Eval metric ops."""
with ops.name_scope(
None, 'metrics',
(labels, class_ids, weights, unreduced_loss, regularization_loss)):
keys = metric_keys.MetricKeys
metric_ops = {
# Estimator already adds a metric for loss.
# TODO(xiejw): Any other metrics?
_summary_key(self._name, keys.LOSS_MEAN):
metrics_lib.mean(
values=unreduced_loss,
weights=weights,
name=keys.LOSS_MEAN),
_summary_key(self._name, keys.ACCURACY):
metrics_lib.accuracy(
labels=labels,
predictions=class_ids,
weights=weights,
name=keys.ACCURACY),
}
if regularization_loss is not None:
metric_ops[_summary_key(self._name, keys.LOSS_REGULARIZATION)] = (
metrics_lib.mean(
values=regularization_loss,
name=keys.LOSS_REGULARIZATION))
return metric_ops
def _label_ids(self, labels):
"""Converts labels to integer id space."""
if self._label_vocabulary is None:
if not labels.dtype.is_integer:
raise ValueError('Labels dtype should be integer. Instead got {}.'.
format(labels.dtype))
label_ids = labels
else:
if labels.dtype != dtypes.string:
raise ValueError('Labels dtype should be string if there is a '
'vocabulary. Instead got {}'.format(labels.dtype))
label_ids = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
return _assert_range(label_ids, self._n_classes)
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
logits = ops.convert_to_tensor(logits)
labels = _check_dense_labels_match_logits_and_reshape(
labels=labels, logits=logits, expected_labels_dimension=1)
label_ids = self._label_ids(labels)
if self._loss_fn:
unweighted_loss = _call_loss_fn(
loss_fn=self._loss_fn, labels=label_ids, logits=logits,
features=features, expected_loss_dim=1)
else:
unweighted_loss = losses.sparse_softmax_cross_entropy(
labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
# Restore the squeezed dim, so unweighted_loss matches the weights shape.
unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=-1)
weights = _get_weights_and_check_match_logits(
features=features, weight_column=self._weight_column, logits=logits)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=self._loss_reduction)
return LossSpec(
training_loss=training_loss,
unreduced_loss=unweighted_loss,
weights=weights,
processed_labels=label_ids)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None,
regularization_losses=None):
"""Returns an `EstimatorSpec`.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
For many applications, the shape is `[batch_size, logits_dimension]`.
labels: Labels integer or string `Tensor` with shape matching `logits`,
namely `[D0, D1, ... DN, 1]` or `[D0, D1, ... DN]`. `labels` is
required argument when `mode` equals `TRAIN` or `EVAL`.
train_op_fn: Function that takes a scalar loss `Tensor` and returns
`train_op`. Required in TRAIN mode.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses. These losses are
usually expressed as a batch average, so for best results users need to
set `loss_reduction=SUM_OVER_BATCH_SIZE` or
`loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
avoid scaling errors.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If `train_op_fn` is `None` in TRAIN mode.
"""
with ops.name_scope(self._name, 'head'):
logits = _check_logits_final_dim(logits, self.logits_dimension)
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
# class_ids's shape is [D0, D1, ... DN].
class_ids = math_ops.argmax(logits, axis=-1, name=pred_keys.CLASS_IDS)
class_ids = array_ops.expand_dims(class_ids, axis=-1)
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
predictions = {
pred_keys.LOGITS: logits,
pred_keys.PROBABILITIES: probabilities,
# Expand to [batch_size, 1]
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
classifier_output = _classification_output(
scores=probabilities, n_classes=self._n_classes,
label_vocabulary=self._label_vocabulary)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
_DEFAULT_SERVING_KEY: classifier_output,
_CLASSIFY_SERVING_KEY: classifier_output,
_PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
})
training_loss, unreduced_loss, weights, label_ids = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
if regularization_losses:
regularization_loss = math_ops.add_n(regularization_losses)
regularized_training_loss = math_ops.add_n(
[training_loss, regularization_loss])
else:
regularization_loss = None
regularized_training_loss = training_loss
# Eval.
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=regularized_training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=label_ids,
class_ids=class_ids,
weights=weights,
unreduced_loss=unreduced_loss,
regularization_loss=regularization_loss))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn cannot be None.')
# Only summarize mean_loss for SUM reduction to preserve backwards
# compatibility. Otherwise skip it to avoid unnecessary computation.
if self._loss_reduction == losses.Reduction.SUM:
example_weight_sum = math_ops.reduce_sum(
weights * array_ops.ones_like(unreduced_loss))
mean_loss = training_loss / example_weight_sum
else:
mean_loss = None
with ops.name_scope(''):
keys = metric_keys.MetricKeys
summary.scalar(
_summary_key(self._name, keys.LOSS),
regularized_training_loss)
if mean_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_MEAN),
mean_loss)
if regularization_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_REGULARIZATION),
regularization_loss)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=regularized_training_loss,
train_op=train_op_fn(regularized_training_loss))
def _binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
"""Creates a `_Head` for single label binary classification.
This head uses `sigmoid_cross_entropy_with_logits` loss.
The head expects `logits` with shape `[D0, D1, ... DN, 1]`.
In many applications, the shape is `[batch_size, 1]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
`[D0, D1, ... DN, 1]`. If `label_vocabulary` given, `labels` must be a string
  `Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
  `labels` must be a float `Tensor` with values in the interval `[0, 1]`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support float `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
thresholds: Iterable of floats in the range `(0, 1)`. For binary
classification metrics such as precision and recall, an eval metric is
generated for each threshold value. This threshold is applied to the
logistic values to determine the binary classification (i.e., above the
      threshold is `true`, below is `false`).
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, that means labels are already encoded within
[0, 1]. If given, labels must be string type and have any value in
`label_vocabulary`. Note that errors will be raised if `label_vocabulary`
is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for binary classification.
Raises:
ValueError: If `thresholds` contains a value outside of `(0, 1)`.
ValueError: If `loss_reduction` is invalid.
"""
thresholds = tuple(thresholds) if thresholds else tuple()
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError(
'label_vocabulary should be a list or tuple. Given type: {}'.format(
type(label_vocabulary)))
for threshold in thresholds:
if (threshold <= 0.0) or (threshold >= 1.0):
      raise ValueError('thresholds not in (0, 1): {}.'.format(thresholds))
if (loss_reduction not in losses.Reduction.all() or
loss_reduction == losses.Reduction.NONE):
raise ValueError('Invalid loss_reduction: {}'.format(loss_reduction))
if loss_fn:
_validate_loss_fn_args(loss_fn)
return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
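# Sketch (added commentary): passing thresholds makes eval report extra
# per-threshold metrics, e.g.
#
#   head = _binary_logistic_head_with_sigmoid_cross_entropy_loss(
#       thresholds=(0.25, 0.5, 0.75))
#   # eval_metric_ops then include accuracy/precision/recall at each threshold.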
class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):
"""See `_binary_logistic_head_with_sigmoid_cross_entropy_loss`."""
def __init__(self,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
self._weight_column = weight_column
self._thresholds = thresholds
self._label_vocabulary = label_vocabulary
self._loss_reduction = loss_reduction
self._loss_fn = loss_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return 1
def _eval_metric_ops(self, labels, logits, logistic, class_ids, weights,
unreduced_loss, regularization_loss):
with ops.name_scope(None, 'metrics',
(labels, logits, logistic, class_ids, weights,
unreduced_loss, regularization_loss)):
keys = metric_keys.MetricKeys
labels_mean = _indicator_labels_mean(
labels=labels, weights=weights, name=keys.LABEL_MEAN)
metric_ops = {
# Estimator already adds a metric for loss.
_summary_key(self._name, keys.LOSS_MEAN):
metrics_lib.mean(
values=unreduced_loss,
weights=weights,
name=keys.LOSS_MEAN),
_summary_key(self._name, keys.ACCURACY):
metrics_lib.accuracy(
labels=labels,
predictions=class_ids,
weights=weights,
name=keys.ACCURACY),
_summary_key(self._name, keys.PREDICTION_MEAN):
_predictions_mean(
predictions=logistic,
weights=weights,
name=keys.PREDICTION_MEAN),
_summary_key(self._name, keys.LABEL_MEAN):
labels_mean,
_summary_key(self._name, keys.ACCURACY_BASELINE):
_accuracy_baseline(labels_mean),
_summary_key(self._name, keys.AUC):
_auc(
labels=labels,
predictions=logistic,
weights=weights,
name=keys.AUC),
_summary_key(self._name, keys.AUC_PR):
_auc(
labels=labels,
predictions=logistic,
weights=weights,
curve='PR',
name=keys.AUC_PR)
}
if regularization_loss is not None:
metric_ops[_summary_key(self._name, keys.LOSS_REGULARIZATION)] = (
metrics_lib.mean(
values=regularization_loss,
name=keys.LOSS_REGULARIZATION))
for threshold in self._thresholds:
accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._name,
accuracy_key)] = _accuracy_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=accuracy_key)
# Precision for positive examples.
precision_key = keys.PRECISION_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._name,
precision_key)] = _precision_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=precision_key)
# Recall for positive examples.
recall_key = keys.RECALL_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._name,
recall_key)] = _recall_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=recall_key)
return metric_ops
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
logits = ops.convert_to_tensor(logits)
labels = _check_dense_labels_match_logits_and_reshape(
labels=labels, logits=logits, expected_labels_dimension=1)
if self._label_vocabulary is not None:
labels = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
labels = math_ops.to_float(labels)
labels = _assert_range(labels, 2)
if self._loss_fn:
unweighted_loss = _call_loss_fn(
loss_fn=self._loss_fn, labels=labels, logits=logits,
features=features, expected_loss_dim=1)
else:
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
weights = _get_weights_and_check_match_logits(
features=features, weight_column=self._weight_column, logits=logits)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=self._loss_reduction)
return LossSpec(
training_loss=training_loss,
unreduced_loss=unweighted_loss,
weights=weights,
processed_labels=labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None,
regularization_losses=None):
"""Returns an `EstimatorSpec`.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` with shape `[D0, D1, ... DN, 1]`. For many
applications, the shape is `[batch_size, 1]`.
labels: Labels integer or string `Tensor` with shape matching `logits`,
namely `[D0, D1, ... DN, 1]` or `[D0, D1, ... DN]`. `labels` is required
argument when `mode` equals `TRAIN` or `EVAL`.
train_op_fn: Function that takes a scalar loss `Tensor` and returns
`train_op`. Required in TRAIN mode.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses. These losses are
usually expressed as a batch average, so for best results users need to
set `loss_reduction=SUM_OVER_BATCH_SIZE` or
`loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
avoid scaling errors.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If `train_op_fn` is `None` in TRAIN mode.
"""
# Predict.
with ops.name_scope(self._name, 'head'):
with ops.name_scope(None, 'predictions', (logits,)):
pred_keys = prediction_keys.PredictionKeys
logits = _check_logits_final_dim(logits, self.logits_dimension)
logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
two_class_logits = array_ops.concat(
(array_ops.zeros_like(logits), logits),
axis=-1, name='two_class_logits')
probabilities = nn.softmax(
two_class_logits, name=pred_keys.PROBABILITIES)
class_ids = math_ops.argmax(
two_class_logits, axis=-1, name=pred_keys.CLASS_IDS)
class_ids = array_ops.expand_dims(class_ids, axis=-1)
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
predictions = {
pred_keys.LOGITS: logits,
pred_keys.LOGISTIC: logistic,
pred_keys.PROBABILITIES: probabilities,
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
classifier_output = _classification_output(
scores=probabilities, n_classes=2,
label_vocabulary=self._label_vocabulary)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
_DEFAULT_SERVING_KEY: classifier_output,
_CLASSIFY_SERVING_KEY: classifier_output,
_REGRESS_SERVING_KEY: export_output.RegressionOutput(
value=logistic),
_PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
})
(training_loss, unreduced_loss, weights, processed_labels) = (
self.create_loss(
features=features, mode=mode, logits=logits, labels=labels))
if regularization_losses:
regularization_loss = math_ops.add_n(regularization_losses)
regularized_training_loss = math_ops.add_n(
[training_loss, regularization_loss])
else:
regularization_loss = None
regularized_training_loss = training_loss
# Eval.
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=regularized_training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=processed_labels,
logits=logits,
logistic=logistic,
class_ids=class_ids,
weights=weights,
unreduced_loss=unreduced_loss,
regularization_loss=regularization_loss))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
# Only summarize mean_loss for SUM reduction to preserve backwards
# compatibility. Otherwise skip it to avoid unnecessary computation.
if self._loss_reduction == losses.Reduction.SUM:
example_weight_sum = math_ops.reduce_sum(
weights * array_ops.ones_like(unreduced_loss))
mean_loss = training_loss / example_weight_sum
else:
mean_loss = None
with ops.name_scope(''):
keys = metric_keys.MetricKeys
summary.scalar(
_summary_key(self._name, keys.LOSS),
regularized_training_loss)
if mean_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_MEAN), mean_loss)
if regularization_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_REGULARIZATION),
regularization_loss)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=regularized_training_loss,
train_op=train_op_fn(regularized_training_loss))
def _regression_head_with_mean_squared_error_loss(
weight_column=None,
label_dimension=1,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
inverse_link_fn=None,
name=None):
"""Creates a `_Head` for regression using the `mean_squared_error` loss.
The loss is the weighted sum over all input dimensions. Namely, if the input
labels have shape `[batch_size, label_dimension]`, the loss is the weighted
sum over both `batch_size` and `label_dimension`.
The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
In many applications, the shape is `[batch_size, label_dimension]`.
The `labels` shape must match `logits`, namely
`[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
`[D0, D1, ... DN]` is also supported.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
`[D0, D1, ... DN, label_dimension]`.
Supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, label_dimension]`.
Also supports custom `inverse_link_fn`, also known as 'mean function'.
`inverse_link_fn` takes `logits` as argument and returns predicted values.
This function is the inverse of the link function defined in
https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function
Namely, for poisson regression, set `inverse_link_fn=tf.exp`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function. Defaults to `mean_squared_error`.
inverse_link_fn: Optional inverse link function, also known as 'mean
function'. Defaults to identity.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for linear regression.
Raises:
ValueError: If `label_dimension` or `loss_reduction` is invalid.
"""
if (loss_reduction not in losses.Reduction.all() or
loss_reduction == losses.Reduction.NONE):
raise ValueError('Invalid loss_reduction: {}'.format(loss_reduction))
if loss_fn:
_validate_loss_fn_args(loss_fn)
return _RegressionHeadWithMeanSquaredErrorLoss(
weight_column=weight_column,
label_dimension=label_dimension,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
inverse_link_fn=inverse_link_fn,
name=name)
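# Sketch of a Poisson-style regression head via `inverse_link_fn` (added
# commentary; `my_poisson_loss` is a hypothetical custom loss):
#
#   head = _regression_head_with_mean_squared_error_loss(
#       inverse_link_fn=math_ops.exp, loss_fn=my_poisson_loss)
#   # predictions are exp(logits); logits are also exposed when a link is set.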
class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
"""`Head` for regression using the mean squared loss."""
def __init__(
self,
label_dimension,
weight_column=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
inverse_link_fn=None,
name=None):
"""`Head` for regression."""
if label_dimension < 1:
raise ValueError('Invalid label_dimension %s.' % label_dimension)
self._logits_dimension = label_dimension
self._weight_column = weight_column
self._loss_reduction = loss_reduction
self._loss_fn = loss_fn
self._inverse_link_fn = inverse_link_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return self._logits_dimension
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
logits = ops.convert_to_tensor(logits)
labels = _check_dense_labels_match_logits_and_reshape(
labels=labels, logits=logits,
expected_labels_dimension=self._logits_dimension)
labels = math_ops.to_float(labels)
if self._loss_fn:
unweighted_loss = _call_loss_fn(
loss_fn=self._loss_fn, labels=labels, logits=logits,
features=features, expected_loss_dim=self._logits_dimension)
else:
unweighted_loss = losses.mean_squared_error(
labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
weights = _get_weights_and_check_match_logits(
features=features, weight_column=self._weight_column, logits=logits,
allow_per_logit_weights=True)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=self._loss_reduction)
return LossSpec(
training_loss=training_loss,
unreduced_loss=unweighted_loss,
weights=weights,
processed_labels=labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None,
regularization_losses=None):
"""Returns an `EstimatorSpec`.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
For many applications, the shape is `[batch_size, logits_dimension]`.
labels: Labels `Tensor` with shape matching `logits`, namely
`[D0, D1, ... DN, logits_dimension]`. When `logits_dimension=1`, shape
`[D0, D1, ... DN]` is also supported. `labels` is required argument when
`mode` equals `TRAIN` or `EVAL`.
train_op_fn: Function that takes a scalar loss `Tensor` and returns
`train_op`. Required in TRAIN mode.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses. These losses are
usually expressed as a batch average, so for best results users need to
set `loss_reduction=SUM_OVER_BATCH_SIZE` or
`loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
avoid scaling errors.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If `train_op_fn` is `None` in TRAIN mode.
"""
# Predict.
with ops.name_scope(self._name, 'head'):
logits = _check_logits_final_dim(logits, self._logits_dimension)
if self._inverse_link_fn:
predicted_value = self._inverse_link_fn(logits)
predictions = {
prediction_keys.PredictionKeys.PREDICTIONS: predicted_value,
prediction_keys.PredictionKeys.LOGITS: logits,
}
else:
predicted_value = logits
predictions = {
prediction_keys.PredictionKeys.PREDICTIONS: predicted_value}
if mode == model_fn.ModeKeys.PREDICT:
regression_output = export_output.RegressionOutput(
value=predicted_value)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
_DEFAULT_SERVING_KEY: regression_output,
_REGRESS_SERVING_KEY: regression_output,
_PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
})
training_loss, unreduced_loss, weights, _ = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
if regularization_losses:
regularization_loss = math_ops.add_n(regularization_losses)
regularized_training_loss = math_ops.add_n(
[training_loss, regularization_loss])
else:
regularization_loss = None
regularized_training_loss = training_loss
# Eval.
if mode == model_fn.ModeKeys.EVAL:
keys = metric_keys.MetricKeys
# Estimator already adds a metric for loss.
eval_metric_ops = {
_summary_key(self._name, keys.LOSS_MEAN):
metrics_lib.mean(
values=unreduced_loss,
weights=weights)
}
if regularization_loss is not None:
regularization_loss_key = _summary_key(
self._name, keys.LOSS_REGULARIZATION)
eval_metric_ops[regularization_loss_key] = metrics_lib.mean(
values=regularization_loss,
name=keys.LOSS_REGULARIZATION)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=regularized_training_loss,
eval_metric_ops=eval_metric_ops)
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
# Only summarize mean_loss for SUM reduction to preserve backwards
# compatibility. Otherwise skip it to avoid unnecessary computation.
if self._loss_reduction == losses.Reduction.SUM:
example_weight_sum = math_ops.reduce_sum(
weights * array_ops.ones_like(unreduced_loss))
mean_loss = training_loss / example_weight_sum
else:
mean_loss = None
with ops.name_scope(''):
keys = metric_keys.MetricKeys
summary.scalar(
_summary_key(self._name, keys.LOSS),
regularized_training_loss)
if mean_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_MEAN), mean_loss)
if regularization_loss is not None:
summary.scalar(
_summary_key(self._name, keys.LOSS_REGULARIZATION),
regularization_loss)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=regularized_training_loss,
train_op=train_op_fn(regularized_training_loss))
def _assert_range(labels, n_classes, message=None):
with ops.name_scope(None, 'assert_range', (labels,)):
assert_less = check_ops.assert_less(
labels,
ops.convert_to_tensor(n_classes, dtype=labels.dtype),
        message=message or 'Label IDs must be < n_classes')
assert_greater = check_ops.assert_non_negative(
        labels, message=message or 'Label IDs must be >= 0')
with ops.control_dependencies((assert_less, assert_greater)):
return array_ops.identity(labels)
# TODO(b/69000400): Delete this method.
def _weights(features, weight_column):
"""Fetches weights from features."""
with ops.name_scope(None, 'weights', values=features.values()):
if weight_column is None:
return 1.
if isinstance(weight_column, six.string_types):
weight_column = feature_column_lib.numeric_column(
key=weight_column, shape=(1,))
if not isinstance(weight_column, feature_column_lib._NumericColumn): # pylint: disable=protected-access
raise TypeError('Weight column must be either a string or _NumericColumn.'
' Given type: {}.'.format(type(weight_column)))
weights = weight_column._get_dense_tensor( # pylint: disable=protected-access
feature_column_lib._LazyBuilder(features)) # pylint: disable=protected-access
if not (weights.dtype.is_floating or weights.dtype.is_integer):
raise ValueError('Weight column should be castable to float. '
'Given dtype: {}'.format(weights.dtype))
return math_ops.to_float(weights, name='weights')
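# --- Illustrative usage sketch (added; not part of the original TensorFlow
# source). A custom model_fn typically delegates to a head like the one
# above; `my_head`, `net`, and `optimizer` are hypothetical names:
#   def model_fn(features, labels, mode):
#     logits = tf.layers.dense(net, units=my_head.logits_dimension)
#     return my_head.create_estimator_spec(
#         features=features, mode=mode, logits=logits, labels=labels,
#         train_op_fn=lambda loss: optimizer.minimize(
#             loss, global_step=tf.train.get_global_step()))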
| [
"[email protected]"
] | |
e59112aa5411caae42368ea3b747fbc042137c92 | e6a53434fe18c6f4a9871dd8be9333e0f2800d0d | /redis_demo.py | 49ec2212d905264d58d1015449b6624798bbd4cf | [] | no_license | 13794521695/python_base | 02754263f0f6d98bd46a54e8f7dc21c42a495c07 | 88469624d27d3f7790e4fbe7ec7d96be5ec8b46f | refs/heads/master | 2020-04-07T13:31:08.857195 | 2018-11-20T15:30:06 | 2018-11-20T15:30:06 | 158,410,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import redis
# Create the connection
re = redis.Redis(host='192.168.237.131', port='6379', password='123456')
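# NOTE (added): naming the connection `re` shadows Python's built-in re
# module; a name such as `r` or `client` avoids confusion.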
########### Strings
# key value
# re.set('py_name', '你好')
# b ==> get() returns bytes (b'...')
# print(re.get('py_name').decode('utf8'))  # non-ASCII values come back as bytes and must be decoded as utf8, otherwise they print garbled
# print(re.get('py_name'))
# re.mset('s_name', 'which', 'age', 18) # does not work: redis-py's mset takes a mapping, not positional pairs
# re.mset(s_name='hehe', age=19) # differs from the native redis-cli MSET syntax
# print(re.mget('s_name', 'age', 'py_name'))
# re.expire('name', 20)
# print(re.ttl('name'))
# re.set('read_count1', 2)
# re.incr('read_count1')
# print(re.get('read_count1'))
#### Lists
# re.lpush('py_list', 1, 2, 3, 'which')
# print(re.lrange('py_list', 0, -1))
# re.hset('py_hash', 'username', 'which')
# print(re.hget('py_hash', 'username'))
# differs: hmset takes a dict of field/value pairs
# re.hmset('py_hash', {"age":"18", "abc":"qwe"})
# print(re.hmget('py_hash', 'username', 'age', 'abc'))
# print(re.hkeys('py_hash'))
print(re.keys())
# re.sadd('py_set', 1, 2 , 3 ,1 , 5, 5)
# print(re.smembers('py_set'))
# re.spop('py_set')
# print(re.smembers('py_set'))
# re.zadd('py_zset', 'a', 11, 'z', 10, 'zz', 1)
# print(re.zrange('py_zset', 0, -1, withscores=True, score_cast_func=int))
# print(re.zrevrange('py_zset', 0, -1, withscores=True, score_cast_func=int))
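# Extra (added, illustrative): a pipeline batches several commands into one
# network round trip; `re` is the connection object created above.
# pipe = re.pipeline()
# pipe.set('k1', 'v1')
# pipe.incr('read_count1')
# print(pipe.execute())  # one result per queued command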
# Set up a subscription
# p_s = re.pubsub()
# # subscribe to a channel
# p_s.subscribe('fm915.8')
# while True:
# # start listening for messages
# p_s.parse_response()
#
#
# # publish a message
# p_l = re.publish('fm915.8', 'hello') | [
"[email protected]"
] | |
6f3ed7062a5c9335a152c8f552a54604184fda09 | 586767ee351748702c04b1dfa9d1d5377640cdc3 | /evaluate.py | 7e3a2b489519b70133cbe1cdb7718b7fd1d62f02 | [] | no_license | timkambic/FaceRecognitionAndPresentationAttackDetection | e781b20d6329ae8108aaf25d5c28161d1ef5f692 | fd76d1f76b8fdcc6138dc10d9e706a87d84512a1 | refs/heads/master | 2020-04-05T18:21:56.872758 | 2018-12-28T14:23:00 | 2018-12-28T14:23:00 | 157,098,833 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | import numpy as np
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--scores", required=True, help="numpy array with scores")
ap.add_argument("-l", "--true_labels", required=True, help="true labels")
ap.add_argument("-t", "--threshold", required=True, help="classification threshold for score [-1,1]")
args = vars(ap.parse_args())
score_list = np.loadtxt(args["scores"])
true_labels = np.loadtxt(args["true_labels"])
THRESHOLD = float(args["threshold"])
NC = np.where(true_labels >= 0)[0].size  # number of genuine (client) samples
NI = np.where(true_labels < 0)[0].size  # number of impostor samples
print(NC, NI)
FA = FR = 0
for i in range(score_list.size):
true_label = true_labels[i]
label = 1 if score_list[i] > THRESHOLD else -1
if label == -1 and true_label == 1:
FR += 1
elif label == 1 and true_label == -1:
FA += 1
print(FA, FR)
FAR = FA / NI
FRR = FR / NC
HTER = (FAR + FRR) / 2
print("FAR:", FAR)
print("FRR:", FRR)
print("HTER:", HTER)
# -------------------------------------------------------------------
fpr, tpr, threshold = roc_curve(true_labels, score_list)
roc_auc = auc(fpr, tpr)
fnr = 1 - tpr
eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
EER = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
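# Added note: the equal error rate (EER) is the operating point where the
# false positive rate equals the false negative rate; eer_threshold above is
# the score threshold that (approximately) realizes it on this data.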
print("EER:", EER)
plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.grid(True)
plt.show()
| [
"[email protected]"
] | |
4b45f7810f155b74596cdf39bc9d09a05fe545f6 | 4fe726c9fe9073f6f5774a0d0376389771f1aa06 | /src/6_StrAndRe/9_strformat_startwith.py | f0ac1dda93342efff51dba6ee44c13de6fcc6a0c | [] | no_license | wangkangreg/LearnPython | 3ebf8ac6f1e4e0a40873b37576b0db2de75f5219 | 59785b2caa36796c5c93f90c02630d5cfdc0d3f1 | refs/heads/master | 2021-01-10T01:35:38.204786 | 2015-12-03T07:18:01 | 2015-12-03T07:18:01 | 46,340,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding:utf-8 -*-
'''
Created on November 11, 2015
@author: Administrator
'''
word = 'hello world'
print 'hello' == word[0:5]
print word.startswith('hello')
print word.endswith('ld', 6)
print word.endswith('ld', 6, 10) # False: the end index 10 is exclusive, so only word[6:10] == 'worl' is checked
print word.endswith('ld', 6, len(word)) | [
"[email protected]"
] | |
d1b78ac3b11c174a585c1148fdeb6c9e05f1caa4 | b1cbace1dc6ad8519d983060ad0f974ccfd49560 | /actions/O3E.py | dbb3d48ebfe1d2201ce01e9a09b974cedf9d642f | [] | no_license | saifilali/Crescendia | 40665426f1952d329d2a709c2abdff578cfacc45 | 3eb6656ec53f6e8223baef2cc17ce37856123c26 | refs/heads/master | 2021-01-20T01:34:58.893394 | 2018-08-02T21:03:34 | 2018-08-02T21:03:34 | 89,301,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,053 | py | import argparse
import pymysql
import configparser
import action_helper
config = configparser.ConfigParser()
config.read("/var/www/config.ini")
sqlhost = config.get("configuration", "sqlhost")
sqluser = config.get("configuration", "sqluser")
sqlpassword = config.get("configuration", "sqlpassword")
sqldatabase = config.get("configuration", "sqldatabase")
parser = argparse.ArgumentParser()
parser.add_argument("-battle_id", type=str, dest="battle_id")
parser.add_argument("-team", type=str, dest="team")
parser.add_argument("-turn", type=str, dest="turn")
parser.add_argument("-unit", type=str, dest="unit")
parser.add_argument("-unit_key", type=str, dest="unit_key")
parser.add_argument("-unit_speed", type=str, dest="unit_speed")
parser.add_argument("-action_target_team", type=str,
dest="action_target_team")
parser.add_argument("-action_target_unit", type=str,
dest="action_target_unit")
parser.add_argument("-turn_expire", type=str,
dest="turn_expire")
args = parser.parse_args()
battle_id = args.battle_id
team = args.team
turn = args.turn
unit = args.unit
unit_key = args.unit_key
unit_speed = args.unit_speed
action_target_team = args.action_target_team
action_target_unit = args.action_target_unit
turn_expire = args.turn_expire
balance = action_helper.get_balance("O3")
cost = balance["cost"]
scale = balance["scale"]
target_team = action_helper.get_enemy(team)
print("EVERYBODY HURTS IS GOIN YALLL")
connection = pymysql.connect(host='localhost', user=sqluser, password=sqlpassword,
db=sqldatabase, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
sql = "SELECT * FROM battle_unit_stats WHERE battle_id = %s AND team = %s AND slot = %s"
params = (battle_id, team, unit)
cursor.execute(sql, params)
unit_stats = cursor.fetchone()
if(unit_stats["energy_current"] < cost):
action_helper.exhausted_action_receipt(battle_id, team, unit, unit_stats["title"], turn)
else:
action_helper.spend_energy(battle_id, team, unit, cost)
sql = "SELECT * FROM battle_unit_stats WHERE battle_id = %s AND team = %s AND slot = %s"
params = (battle_id, target_team, action_target_unit)
cursor.execute(sql, params)
target_stats = cursor.fetchone()
power_multiplied = action_helper.key_bonus_enemy(
action_helper.key_difference(target_stats["song_key"], unit_stats["song_key"])) * unit_stats["power_current"] * scale
damage_percent = (power_multiplied * 0.01 + 0.05)
print(damage_percent)
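        # Worked example (illustrative numbers): power_multiplied = 20 gives
        # damage_percent = 20 * 0.01 + 0.05 = 0.25, i.e. 25% of the target's
        # current health before defense is subtracted.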
damage = damage_percent * target_stats["health_current"]
if(target_stats["defense_current"] > damage):
damage = 1
else:
damage = damage - target_stats["defense_current"]
if(action_helper.is_immune(target_stats["immune"], battle_id, turn, target_stats["team"], target_stats["slot"]) != 0):
damage = 0
newhealth = target_stats["health_current"] - damage
        # Build the receipt: pick the effect text and animation from the
        # damage dealt. Defaults cover the zero-damage (immune) case so the
        # summary below never references an unset variable.
        effective_text = "It did nothing!"
        target_animation = "offensive_action_effect_nothing"
if(damage > target_stats["health_default"] * 0.2):
effective_text = "It was extremely strong!"
target_animation = "offensive_action_effect_strong"
elif(damage > target_stats["health_default"] * 0.1):
effective_text = "It was strong!"
target_animation = "offensive_action_effect_moderate"
elif(damage > target_stats["health_default"] * 0.05):
effective_text = "It was weak!"
target_animation = "offensive_action_effect_weak"
elif(damage > 0):
effective_text = "It did barely anything!"
target_animation = "offensive_action_effect_nothing"
if(newhealth < 0):
effective_text = target_stats["title"] + " died!"
target_animation = target_animation + " offensive_action_effect_nothing"
sql = "UPDATE battle_unit_stats SET health_current = %s WHERE battle_id = %s AND team = %s AND slot = %s"
params = (newhealth, battle_id, target_team, action_target_unit)
cursor.execute(sql, params)
source_animation = ""
background_animation = ""
target_animation = "offensive_action_effect"
summary_code = "t{}u{} -{}".format(target_team, action_target_unit, damage)
summary_text = "{} continues to hurt {} for {} damage with Everybody Hurts".format(
unit_stats["title"], target_stats["title"], damage) + effective_text
sql = "UPDATE battle_action_queue SET source_animation=%s, target_animation=%s, background_animation=%s, summary_code=%s, summary_text=%s, processed=1 WHERE battle_id=%s AND unit=%s AND team=%s AND turn =%s"
params = (source_animation, target_animation, background_animation,
summary_code, summary_text, battle_id, unit, team, turn)
cursor.execute(sql, params)
connection.commit()
connection.close()
| [
"[email protected]"
] | |
5d9bedd621589e37661167f4141544fdf25ba0ba | 462385714beddead618fe0193a68685365793e5c | /Python/Python_Problems/Rosalind-master/036_KMER.py | 27ad7ed199f2fa44d9af243b80a5829c416182b6 | [] | no_license | 0n1udra/Learning | 7e3f83a3a1ed850cc476ef738c76759616d9e12e | 69e656327c782465318db85abdee519e2f79dab0 | refs/heads/master | 2022-12-04T06:29:29.894406 | 2020-08-17T20:47:47 | 2020-08-17T20:47:47 | 73,001,058 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: k-Mer Composition
Rosalind ID: KMER
Rosalind #: 036
URL: http://rosalind.info/problems/kmer/
'''
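# Worked example (added): for dna = 'AAAACG' the 4-mers are AAAA, AAAC and
# AACG, so those three entries of kmer_count (computed below) become 1 and
# the remaining 253 of the 256 entries stay 0.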
from itertools import product
from scripts import ReadFASTA
dna = ReadFASTA('data/rosalind_kmer.txt')[0][1]
# Get a list of all 4-mers in lexicographic order.
kmer_list = [''.join(kmer) for kmer in list(product('ACGT', repeat = 4))]
# Initialize the count of each 4-mer at zero.
kmer_count = [0]*(4**4)
# Count each 4-mer
for i in range(len(dna)-3):
kmer_count[kmer_list.index(dna[i:i+4])] += 1
print ' '.join(map(str,kmer_count))
with open('output/036_KMER.txt', 'w') as output_data:
output_data.write(' '.join(map(str,kmer_count)))
| [
"[email protected]"
] | |
5112b4e87a70c071052e9dc0329d0df3a393061f | 98763b94e214d56806456a43b008ac3042fe97ad | /LinkedList.py | 729de55824af51e30aa29ad2ee80a5a93fe76500 | [] | no_license | sjshashank31/DSA-Using-Python | aedf19d9d78e59fe80548148430b717cba180e20 | f5112e5a841c73b96f4be317b74274b1df050f8c | refs/heads/main | 2023-05-04T09:44:51.642792 | 2021-05-17T12:49:23 | 2021-05-17T12:49:23 | 368,183,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | class LinkedList:
def __init__(self):
self.node = None
def listPrint(self):
l = []
node = self.node
while node is not None:
l.append(node.data)
node = node.next
return l
def insertAtBeginning(self, data):
# create a newnode with data
newnode = Node(data)
# insert the first node of current Linkedlist in the node.next
newnode.next = self.node
# make the new node as the first node
self.node = newnode
def insertAtEnd(self, data):
newNode = Node(data)
if self.node is None:
self.node = newNode
return
node = self.node
while node.next:
node = node.next
node.next = newNode
    def insertInBetween(self, nextval, data):
        start = self.node
        if start is None:
            return
        while start.next:
if start.data == nextval:
print("Match Found")
newNode = Node(data)
newNode.next = start.next
start.next = newNode
return
else:
start = start.next
    def removeNode(self, Removekey):
        start = self.node
        if (start is not None):
            if (start.data == Removekey):
                self.node = start.next
                start = None
                return
        prev = None
        while (start is not None):
            if start.data == Removekey:
                break
            prev = start
            start = start.next
        if start is None:
            return  # key not found anywhere in the list
        prev.next = start.next
        start = None
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
def getNext(self):
return self.next
llist = LinkedList()
llist.node = Node(1)
new = Node(2)
new1 = Node(3)
new2 = Node(2)
new3 = Node(4)
llist.node.next = new
new.next = new1
new1.next = new2
new2.next = new3
print(llist.listPrint())
llist.removeNode(2)
print(llist.listPrint())
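# Expected output (added note): the list is 1 -> 2 -> 3 -> 2 -> 4 and
# removeNode(2) deletes only the first matching node:
#   [1, 2, 3, 2, 4]
#   [1, 3, 2, 4]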
| [
"[email protected]"
] | |
d86deeb04414323dde6f6b016d006bd60a386e97 | 477ed010c5c0154fa6d2f2fae05403da982c684f | /raas/http/http_client.py | da1096892cf36c200468d9d34c0b75162a88a311 | [
"MIT"
] | permissive | fahimahmedmasood/raas-v2-sdk-python | 37235f3460c21bf1f80d08ee6965a64d9d55f4dc | 300b4649e945aecc761c6f25cdbf829ff951788b | refs/heads/master | 2020-03-26T08:40:40.890471 | 2018-08-14T12:52:55 | 2018-08-14T12:52:55 | 144,714,917 | 0 | 0 | null | 2018-08-14T12:07:34 | 2018-08-14T12:07:34 | null | UTF-8 | Python | false | false | 7,094 | py | # -*- coding: utf-8 -*-
"""
raas.http.http_client
This file was automatically generated for Tango Card, Inc. by APIMATIC v2.0 ( https://apimatic.io ).
"""
from .http_request import HttpRequest
from .http_method_enum import HttpMethodEnum
class HttpClient(object):
"""An interface for the methods that an HTTP Client must implement
This class should not be instantiated but should be used as a base class
for HTTP Client classes.
"""
def execute_as_string(self, request):
"""Execute a given HttpRequest to get a string response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def execute_as_binary(self, request):
"""Execute a given HttpRequest to get a binary response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def convert_response(self, response, binary):
"""Converts the Response object of the HttpClient into an
HttpResponse object.
Args:
response (dynamic): The original response object.
Returns:
HttpResponse: The converted HttpResponse object.
"""
raise NotImplementedError("Please Implement this method")
def get(self, query_url,
headers={},
query_parameters={}):
"""Create a simple GET HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.GET,
query_url,
headers,
query_parameters,
None,
None)
def head(self, query_url,
headers={},
query_parameters={}):
"""Create a simple HEAD HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.HEAD,
query_url,
headers,
query_parameters,
None,
None)
def post(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple POST HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
parameters (dict, optional): Form or body parameters to be included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.POST,
query_url,
headers,
query_parameters,
parameters,
files)
def put(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PUT HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
parameters (dict, optional): Form or body parameters to be included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PUT,
query_url,
headers,
query_parameters,
parameters,
files)
def patch(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PATCH HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
parameters (dict, optional): Form or body parameters to be included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PATCH,
query_url,
headers,
query_parameters,
parameters,
files)
def delete(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple DELETE HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the URL.
parameters (dict, optional): Form or body parameters to be included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.DELETE,
query_url,
headers,
query_parameters,
parameters,
files)
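# Illustrative sketch (added; not generated by APIMATIC): a concrete client
# subclasses HttpClient and implements the execute_* methods, e.g. with the
# third-party `requests` package. The attribute names used on `request`
# (http_method, query_url, ...) are assumed from HttpRequest's constructor.
#   import requests
#   class RequestsClient(HttpClient):
#       def execute_as_string(self, request):
#           resp = requests.request(request.http_method, request.query_url,
#                                   headers=request.headers,
#                                   params=request.query_parameters,
#                                   data=request.parameters,
#                                   files=request.files)
#           return self.convert_response(resp, binary=False)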
| [
"[email protected]"
] | |
8b67ca6610cbc4bf66c2a1463e124df29e04d9bb | c3c864a8383b1b7fcb1a7f6743c82e1deb4b2b8b | /MyDjango/user/views.py | b9fbdcfecdceccdb1f9d44fa1b38f07f65e62849 | [] | no_license | ytkz11/Playing-Django2.0 | d7c2481737213f06923d60ab07f968034d7915b9 | 7232e376f6621e62974993056028f5a5d32ae819 | refs/heads/master | 2023-03-21T18:17:01.569154 | 2019-03-04T00:56:45 | 2019-03-04T00:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth import login, logout, authenticate
# Create your views here.
def loginView(request):
    # Set the page title and the two alternate-action links
    title = 'Log In'
    unit_2 = '/user/register.html'
    unit_2_name = 'Register now'
    unit_1 = '/user/setpassword.html'
    unit_1_name = 'Change password'
if request.method == 'POST':
username = request.POST.get('username', '')
password = request.POST.get('password', '')
if User.objects.filter(username=username):
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return redirect('/')
else:
                tips = 'Wrong username or password, please try again'
        else:
            tips = 'User does not exist, please register'
return render(request, 'user.html', locals())
def registerView(request):
    # Set the page title and the two alternate-action links
    title = 'Register'
    unit_2 = '/user/login.html'
    unit_2_name = 'Log in now'
    unit_1 = '/user/setpassword.html'
    unit_1_name = 'Change password'
if request.method == 'POST':
username = request.POST.get("username", '')
password = request.POST.get("password", '')
if User.objects.filter(username=username):
            tips = 'User already exists'
else:
user = User.objects.create_user(username=username, password=password)
user.save()
            tips = 'Registration successful, please log in'
return render(request, 'user.html', locals())
def setpasswordView(request):
    # Set the page title and the two alternate-action links
    title = 'Change Password'
    unit_2 = '/user/login.html'
    unit_2_name = 'Log in now'
    unit_1 = '/user/register.html'
    unit_1_name = 'Register now'
new_password = True
if request.method == 'POST':
username = request.POST.get('username', '')
old_password = request.POST.get('password', '')
new_password = request.POST.get('new_password', '')
        if User.objects.filter(username=username):
            user = authenticate(username=username, password=old_password)
            if user:
                user.set_password(new_password)
                user.save()
                tips = 'Password changed successfully'
            else:
                # authenticate() returns None when the old password is wrong
                tips = 'Wrong old password, please try again'
        else:
            tips = 'User does not exist'
return render(request, 'user.html', locals())
def logoutView(request):
logout(request)
return redirect('/')
| [
"[email protected]"
] | |
f2827e6f18278dc41439a58eb8e284eea0b23b2f | cfdf08cb9592760a1b676282b458e58779abb0dd | /20180103/zq805_tur_v5.py | 52a83eceb88f46bfe7739529197dcad1b844ff5b | [
"MIT"
] | permissive | webturing/Python3 | d830c22921fe1d1e6182e9481bcd0a930309f65c | 6fcf6ee6cc5afba0225bd56e6928b01882eb9ecb | refs/heads/master | 2021-06-12T13:13:44.421511 | 2020-06-28T11:56:01 | 2020-06-28T11:56:01 | 128,641,162 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,741 | py | # -*- coding: utf-8 -*-
'''
zw_tur.py
tur: turtle (Donchian channel breakout) strategy
'''
import numpy as np
import pandas as pd
# zwQuant
import zwSys as zw
import zwTools as zwt
import zwQTBox as zwx
import zwQTDraw as zwdr
import zwBacktest as zwbt
import zwStrategy as zwsta
import zw_talib as zwta
# =======================
# ---- strategy function
def tur10(qx):
    '''
    Turtle strategy tur10:
    when today's close is higher than the highest price of the past n trading
    days, buy at the close; after buying, when the close falls below the
    lowest price of the past n trading days, sell at the close.
    '''
stknum = 0;
xtim, xcod = qx.xtim, qx.stkCode
dprice = qx.xbarWrk['dprice'][0];
x9 = qx.xbarWrk['xhigh'][0];
x1 = qx.xbarWrk['xlow'][0];
dcash = qx.qxUsr['cash'];
dnum0 = zwx.xusrStkNum(qx, xcod)
if dprice > x9:
if dnum0 == 0:
stknum = int(dcash * 0.9 / dprice); # dsum=stknum*kprice
# stknum = 500
# print(xtim,stknum,dnum,'++b,%.2f,%.2f,%.2f,$,%.2f,%.2f' %(dprice,dlow,dup,kprice,dsum))
# print(xtim,stknum,'++xd',xcod,dprice,x9,x1)
elif (dprice < x1):
# stknum = -500
stknum = -1
# stknum = -1;dsum=dnum*kprice
if stknum != 0:
# print(xtim,stknum,'xd',xcod,dprice,x9,x1)
pass;
return stknum
def tur10_dataPre(qx, xnam0, ksgn0):
    '''
    Turtle strategy tur10: data pre-processing function.
    Buy at the close when today's close exceeds the past-n-day high; after
    buying, sell at the close when it falls below the past-n-day low.
    Args:
        qx (zwQuantX): zwQuantX data bundle
        xnam0 (str): strategy label
        ksgn0 (str): price column name, usually 'adj close'
    '''
zwx.sta_dataPre0xtim(qx, xnam0);
    # ---- pre-process each stock's data to speed up the later back-test
ksgn, qx.priceCalc = ksgn0, ksgn0; # 'adj close';
for xcod in zw.stkLibCode:
d20 = zw.stkLib[xcod];
        # compute the trade price kprice and the analysis price dprice; kprice normally uses the next day's open
# d20['dprice']=d20['open']*d20[ksgn]/d20['close']
# d20['kprice']=d20['dprice'].shift(-1)
d20['dprice'] = d20['close']
d20['kprice'] = d20['dprice']
#
d = qx.staVars[0];
ksgn = 'xhigh0';
d20[ksgn] = pd.rolling_max(d20['high'], d)
d = qx.staVars[1];
ksgn = 'xlow0';
d20[ksgn] = pd.rolling_min(d20['low'], d)
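        # NOTE (added): pd.rolling_max / pd.rolling_min were removed in newer
        # pandas; the modern equivalent would be d20['high'].rolling(d).max()
        # and d20['low'].rolling(d).min().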
d20['xhigh'] = d20['xhigh0'].shift(1)
d20['xlow'] = d20['xlow0'].shift(1)
#
zw.stkLib[xcod] = d20;
if qx.debugMod > 0:
print(d20.tail())
# ---
fss = 'tmp\\' + qx.prjName + '_' + xcod + '.csv'
d20.to_csv(fss)
def bt_endRets(qx):
    # --- ok, back-test finished
    # save the test data: qxLib holds daily returns etc.; xtrdLib is the trade list
# qx.qxLib=qx.qxLib.round(4)
    qx.qxLib.to_csv(qx.fn_qxLib, index=False, encoding='utf-8')
    qx.xtrdLib.to_csv(qx.fn_xtrdLib, index=False, encoding='utf-8')
qx.prQLib()
#
    # ------- compute trade-return statistics
zwx.zwRetTradeCalc(qx)
zwx.zwRetPr(qx)
    # ------- draw the charts; different templates can be used
    # initialize the plotting template: dr_quant3x
zwdr.dr_quant3x_init(qx, 12, 8);
    # set the related parameters
xcod = zw.stkLibCode[0];
ksgn = qx.priceBuy;
# xcod='glng';ksgn=qx.priceBuy;
# kmid8=[['aeti',ksgn],['egan',ksgn],['glng',ksgn,'ma_5','ma_30'],['simo',ksgn,'ma_5','ma_30']]
kmid8 = [[xcod, ksgn, 'xhigh', 'xlow']]
    # draw the chart
zwdr.dr_quant3x(qx, xcod, 'val', kmid8, '')
    # optionally set the legend of the middle chart window
# qx.pltMid.legend([]);
#
print('')
    print('Daily trade recommendations')
print('::xtrdLib', qx.fn_xtrdLib)
print(qx.xtrdLib.tail())
# print(qx.xtrdLib)
# ==================main
# -------- init: set parameters
# rss='\\zwdat\\cn\\day\\'
rss = 'dat\\'
xlst = ['600401'] # 600401, *ST Hairun (*ST海润), *SThr
qx = zwbt.bt_init(xlst, rss, 'tur10', 10000);
#
# --- set the strategy parameters
# qx.staVars=[163,'2014-01-01','']
qx.staVars = [35, 15, '2014-01-01', ''] # 30,15,=14339.67,43.40 %
qx.debugMod = 1
qx.staFun = tur10; # --- bind the strategy function; the back-test main routine runs below
# qx.staFun=zwsta.tur10; # --- bind the library version of the strategy instead
# --- pre-process the data for the current strategy
# zwsta.tur10_dataPre(qx,'sta00','close')
tur10_dataPre(qx, 'tur10', 'close')
# ---- run the back-test main routine
zwbt.zwBackTest(qx)
# ---- output the back-test results
bt_endRets(qx)
'''
params (n-high, n-low), final asset value, return rate
30,10,=9325.77, -6.74 %
20,10,=$12407.49, 24.07 %
10,10,=$12544.90,25.45 %
5,10,=$15057.73, 50.58 %
5,5,=$19511.12,95.11 %
'''
| [
"[email protected]"
] | |
9d676d7395968293c805875f15eb1dc736e81eb2 | d27af9d58b91b8cd998ac0eb87d980d304ff0670 | /Grand-Contest/AGC004/AGC004_D.py | 030a7c23d767d81445f1a33236c56c006519f836 | [] | no_license | mongesan/Atcoder-m0_ngesan-py | 29dd79daab149003ffc8b6b6bad5fa2e7daa9646 | 6654af034d4ff4cece1be04c2c8b756976d99a4b | refs/heads/master | 2023-08-20T19:50:04.547025 | 2021-10-27T12:24:51 | 2021-10-27T12:24:51 | 258,486,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9 | py | #AGC004_D | [
"[email protected]"
] | |
34d8d98cd8234d0e633ad686d0b4fcf7e3530d30 | 2109f71920e7b5783ae0070b2629f2138dfb20c1 | /tmp/demo/create_service.py | cecf74824b367d75266b18550eea3f261835c954 | [] | no_license | caocheng7979/selenium_test | d2d214b9bd84ae43e3fd0afe7815ba97538f4e86 | 67de0d1183a54caf7b70d0790ca962d0cfc5a84d | refs/heads/master | 2020-03-11T07:35:00.143023 | 2018-05-15T12:57:31 | 2018-05-15T12:57:31 | 129,860,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import common
def main(url):
# login
    browser = common.login(url)
# Let the page load
browser.implicitly_wait(30)
browser.find_element_by_css_selector('#li_topologyComp > span').click()
browser.find_element_by_xpath("//body[@id='ext-element-1']/div[3]/div/div/div[2]/div/div/div/div/div/div/div/div/a/span/span").click()
browser.find_element_by_xpath("//body[@id='ext-element-1']/div[3]/div/div/div[2]/div/div/div/div/div[2]/div/div/div/div/div/a/span/span").click()
common.set_service_name(browser,service_name='autotest')
# state: Active, Pending
common.set_service_state(browser,state='Active')
# service_linerate: 100G, 10G, 10G-e, 200G
common.set_service_linerate(browser,service_linerate='100G')
# service_rtObj: Least Cost, Least Hops, Least Latency, Manual
common.set_service_rtObj(browser,service_rtObj='Manual')
# service Source Node
common.set_service_tidA(browser,service_tidA='kanagawa-T310-4')
# service Source End Point
common.set_service_ctpAidA(browser)
# service Target Node
common.set_service_tidZ(browser,service_tidZ='tokyo-T310-3')
# service Target End Point
common.set_service_ctpAidZ(browser)
# save
browser.find_element_by_link_text('Next').click()
# # linkcombo
# linkcombo = browser.find_elements_by_name('linkCombo')
# linkcombo_2 = linkcombo[0]
# linkcombo_2.click()
# # linkcombo_2.send_keys(Keys.DOWN)
# linkcombo_2.send_keys(Keys.ENTER)
# create
browser.find_element_by_link_text('Create').click()
if __name__ == '__main__':
main(url='https://192.168.10.30:8443/virtuoranc/login.html') | [
"[email protected]"
] | |
34419ff2a426f39a2c5fcf8d970df3a306bcab8c | 532964fa286f0ffd9a6d1ee5efad313fa91bcbc3 | /core/main.py | 31558605d9168029285351ae6d5949cd265f520c | [] | no_license | dalzymendoza/python-repo-template | 39268a05419daba312fc7b818cafa18927920175 | afea892810e7ade2bdc2773b9a16cee8c633d517 | refs/heads/main | 2023-02-03T00:03:01.382643 | 2020-12-22T04:08:31 | 2020-12-22T04:08:31 | 323,513,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | def main():
print("Hello World")
return 0
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
7a692e40b1fd5219c7accd3ce76fc3dad4deb111 | 0f07b4ae2557578dcc4fd96048930aa79b56736e | /gpe_mcmc_tools/priors.py | a7a0a497edb8719a36cdac1225ad45b4dcfaf533 | [] | no_license | swarder/GPE_MCMC_tools | 8b661dd2a02f7ba863a475a54c9b309f34076ff2 | eba4c34d10c81534125ec9ab2670b72e01e5b029 | refs/heads/main | 2023-05-06T21:37:47.734892 | 2021-05-26T16:17:15 | 2021-05-26T16:17:15 | 338,350,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | import numpy as np
class Prior:
""" Base class for priors """
def __init__(self, log=False):
self.log = log
def evaluate(self, x):
""" Evaluate prior in non-log space"""
pass
def evaluate_log(self, x):
"""Evaluate prior in log space"""
p = self.evaluate(x)
if p == 0:
return -np.inf
else:
return np.log(p)
def __call__(self, x):
"""Call relevant evaluation function"""
if self.log:
return self.evaluate_log(x)
else:
return self.evaluate(x)
class FlatPrior(Prior):
"""Flat prior, always returns 1"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def evaluate(self, x):
return 1
class UniformPrior(Prior):
"""Uniform prior between specified min and max values"""
def __init__(self, min_val, max_val, **kwargs):
super().__init__(**kwargs)
self.min_val = min_val
self.max_val = max_val
def evaluate(self, x):
if x >= self.min_val and x <= self.max_val:
return 1/(self.max_val - self.min_val)
else:
return 0
class GaussianPrior(Prior):
"""Gaussian prior for specified mean and standard deviation"""
def __init__(self, mu, sigma, **kwargs):
super().__init__(**kwargs)
self.mu = mu
self.sigma = sigma
def evaluate(self, x):
return np.exp(-0.5 * (x - self.mu)**2 / self.sigma**2) / (self.sigma * np.sqrt(2*np.pi))
def evaluate_log(self, x):
return -0.5 * (x - self.mu)**2 / self.sigma**2 - np.log(self.sigma * np.sqrt(2*np.pi))
class JeffreysPrior(Prior):
"""Jeffreys prior, to be used for hyperparameters"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def evaluate(self, x):
if x > 0:
return 1/x**2
else:
return 0
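# Added notes (illustrative, not part of the original module):
# - The usual Jeffreys prior for a scale parameter is proportional to 1/x;
#   the 1/x**2 returned above is instead equivalent to a flat prior on 1/x,
#   so check that it matches the intended parameterization.
# - Usage sketch:
#     prior = GaussianPrior(mu=0.0, sigma=1.0, log=True)
#     prior(0.5)  # returns the log-density, since log=True routes to evaluate_log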
| [
"[email protected]"
] | |
0efd0348cc1782a86ccfa09781d30cca1fe25e65 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/103/23187/submittedfiles/av2_p3_civil.py | 4f8ae6b97b467691b490965b5681026eb0e327a2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
linhas=input('Enter the matrix dimension:')
colunas=linhas
x=input('Enter the row containing the cell in question:')
y=input('Enter the column containing the cell in question:')
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
        a[i,j]=input('Enter a value:')
somal=0
somac=0
for i in range(0,a.shape[0],1):
somal=somal+a[i,y]
for j in range(0,a.shape[1],1):
somac=somac+a[x,j]
Peso=somac+somal
print Peso | [
"[email protected]"
] | |
2e506c50df709df29fe6ff6d203aaa9cb0127915 | c9e8089d2dfd7e6fd207388c149645a509d03762 | /users/serializers.py | c31feb779821ce08f047978c7b454d2143abe428 | [] | no_license | fahad1226/Fahads-Blog | c554d7c06433dccbb5140bc0e55d80e6733155eb | e0ed2e5798e34788e70dd799a2208ba006ac4bf9 | refs/heads/master | 2020-05-15T15:04:43.552455 | 2019-04-20T06:09:15 | 2019-04-20T06:09:15 | 182,358,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from django.urls import reverse_lazy
User = get_user_model()
class UserDisplaySerializer(serializers.ModelSerializer):
follower_count = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
class Meta:
model = User
fields = [
'username',
'first_name',
'last_name',
'follower_count',
'url'
]
    def get_follower_count(self, obj):
        return 0
    def get_url(self, obj):
        return reverse_lazy('Post:post-detail', kwargs={'username': obj.username})
| [
"[email protected]"
] | |
26fa565e2057c6211af8a22ed06099b2f55222c3 | 0d73194c6fc5aedb0a7bcecc4dd551746139d174 | /tools/CameraCapture.py | 1464a6abf98b41dd26c477173eee9943497d5352 | [] | no_license | t109368038/Specail_topic | c4474bbfc662b9326541ce95fca1b7893c7b5656 | cfea70cac25682b10a6bfaf1307e6cecd7735a98 | refs/heads/master | 2023-05-02T06:51:00.541011 | 2021-05-19T10:44:47 | 2021-05-19T10:44:47 | 368,833,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | import cv2
import threading as th
import time
import mediapipe as mp
import sys
class CamCapture(th.Thread):
    def __init__(self, thread_id, name, counter, th_lock, cam_queue=None, save_queue=None, status=0, mode=0, mp4_path=''):
th.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.counter = counter
self.fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
self.lock = th_lock
self.mode = mode
self.cam_queue = cam_queue
self.save_queue = save_queue
self.status = status
self.save_mp4_path = mp4_path
print('Camera Capture Mode:{}'.format(mode))
print('========================================')
def run(self):
if self.mode == 1:
##-------------------------
self.cam = cv2.VideoCapture(self.counter)
self.cam.set(cv2.CAP_PROP_FPS, 20)
fps = int(self.cam.get(5))
print('FPS:{}'.format(fps))
sz = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.fourcc = cv2.VideoWriter_fourcc(*'mp4v')
self.vout = cv2.VideoWriter()
self.vout.open(self.save_mp4_path + 'output'+str(self.counter)+'.mp4', self.fourcc, 20, sz, True)
ret, frame = self.cam.read()
# tmp_frame = frame
# tmp_frame = cv2.cvtColor(cv2.flip(tmp_frame, 1), cv2.COLOR_BGR2RGB)
# self.cam_queue.put(tmp_frame)
# cv2.imshow(self.name, frame)
print('Camera is opened')
print("Camera[%s] open time: %s" % (self.counter, time.ctime(time.time())))
print('========================================')
while self.cam.isOpened():
# print('fps', fps)
# print(int(cam.get(cv2.CAP_PROP_FRAME_WIDTH)))
                ret, frame = self.cam.read()
                if not ret:  # read failed: camera unplugged or stream ended
                    break
                cv2.imshow(self.name, frame)
tmp_frame = frame
tmp_frame = cv2.cvtColor(tmp_frame, cv2.COLOR_BGR2RGB)
self.copy_frame = tmp_frame.copy()
if self.status == 1:
# print(self.status)
# self.save_queue.put(tmp_frame)
# self.cam_queue.put(tmp_frame)
self.vout.write(frame)
pass
# self.cam_queue.put(tmp_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyWindow(self.name)
self.cam.release()
print('Close process')
print("%s: %s" % (self.name, time.ctime(time.time())))
else:
raise ValueError('CamCapture does not have this mode.')
def close(self):
self.cam.release()
self.vout.release()
def get_frame(self):
return self.copy_frame
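# Illustrative usage sketch (added; the argument values are assumptions):
#   import threading, queue
#   cam = CamCapture(thread_id=1, name='cam-0', counter=0,
#                    th_lock=threading.Lock(), cam_queue=queue.Queue(),
#                    save_queue=queue.Queue(), status=1, mode=1, mp4_path='./')
#   cam.start()  # opens device 0 and records to ./output0.mp4 while running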
| [
"[email protected]"
] | |
06f41b4f9db3b0d72109d8dbbfaa840b8c33c73e | 690759d4fa4e5c66d89cd72dea84036a022ee745 | /saxo/spiders/spider.py | f61a43ba38ef5396b52ef63a4c5f12a20d242acb | [] | no_license | SimeonYS/saxo | a2810f661c8ac7bb92986dad192b7f65553dd216 | 570ca7431b81f0edc0cd4189311900de076ebf83 | refs/heads/main | 2023-03-21T12:00:36.768452 | 2021-03-05T08:59:54 | 2021-03-05T08:59:54 | 344,751,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import SaxoItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class SaxoSpider(scrapy.Spider):
name = 'saxo'
start_urls = ['https://www.home.saxo/about-us/press-releases']
def parse(self, response):
post_links = response.xpath('//section[@data-styles="media-element"]//a/@href').getall() + response.xpath('//div[@class="v2-bbox"]//a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
date = response.xpath('//time/@datetime').get()
date = ''.join(re.findall(r'\d+\-\d+\-\d+',date))
title = response.xpath('//h1/text()').get()
content = response.xpath('//div[@class="v2-wrapper v2-wrapper--small"]//text()').getall()
content = [p.strip() for p in content if p.strip()]
content = re.sub(pattern, "",' '.join(content))
item = ItemLoader(item=SaxoItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('link', response.url)
item.add_value('content', content)
item.add_value('date', date)
yield item.load_item()
| [
"[email protected]"
] | |
12c8ac23f61c67f952c29319a037c8df56823a73 | 4500172cf203b078bd1826062821af16018747ee | /project/staff/migrations/0009_auto__add_field_aimtransaction_transaction_type.py | e9cf52cfec33962500d863ca0e268b48223c4a83 | [] | no_license | chrisblythe812/gamemine | 726ad338279a80676eee9693a6ecc6bfca5eaf13 | 7c3acc39a24c38ae2ee06b71104a24cfbbde8453 | refs/heads/master | 2020-04-05T22:53:46.622902 | 2012-01-26T00:50:28 | 2012-01-26T00:50:28 | 3,269,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,724 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AimTransaction.transaction_type'
db.add_column('staff_aimtransaction', 'transaction_type', self.gf('django.db.models.fields.CharField')(max_length=32, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'AimTransaction.transaction_type'
db.delete_column('staff_aimtransaction', 'transaction_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'staff.aimrequest': {
'Meta': {'object_name': 'AimRequest'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'data': ('django_snippets.models.blowfish_field.BlowfishField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'})
},
'staff.aimresponse': {
'Meta': {'object_name': 'AimResponse'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'data': ('django_snippets.models.blowfish_field.BlowfishField', [], {'null': 'True'}),
'email_address': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'response_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'response_reason_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'response_reason_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True'}),
'response_subcode': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'})
},
'staff.aimtransaction': {
'Meta': {'object_name': 'AimTransaction'},
'card_num': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'db_index': 'True'}),
'card_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['staff.AimRequest']", 'unique': 'True'}),
'response': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['staff.AimResponse']", 'unique': 'True'}),
'response_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'response_subcode': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'staff.muzeupdatelog': {
'Meta': {'object_name': 'MuzeUpdateLog'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
}
}
complete_apps = ['staff']
| [
"[email protected]"
] | |
978c0dd3aaec7962333cd38cc22995d9eb6f0e50 | 7d6a5d79fb1443b019bf135ade812e504e2e9a9c | /match_flann_orb.py | 724425aeea1ab2edc88af2b97a0605aee94583ab | [] | no_license | tdrops/opencv | 154a97d5a9ff394a2df6b67ef81112029185e67a | 76fb94cfbb7746b6db3603ba1594a5876bc8376d | refs/heads/main | 2023-06-28T11:32:23.556429 | 2021-08-03T05:59:35 | 2021-08-03T05:59:35 | 380,880,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | """
opencv
Source
Title: "OpenCV Projects with Python" (파이썬으로 만드는 OpenCV 프로젝트)
Author: Lee Se-woo (이세우)
Publisher: Programming Insight
"""
"""
Summary
[Example 8-18] Matching with FlannBasedMatcher and ORB
"""
import cv2
import numpy as np
img1 = cv2.imread(filename="../img/taekwonv1.jpg")
img2 = cv2.imread(filename="../img/figures.jpg")
gray1 = cv2.cvtColor(src=img1, code=cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(src=img2, code=cv2.COLOR_BGR2GRAY)
detector = cv2.ORB_create()
kp1, desc1 = detector.detectAndCompute(gray1, None)
kp2, desc2 = detector.detectAndCompute(gray2, None)
index_params = dict(algorithm=6, table_number=6, key_size=12, multi_probe_level=1)
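# Note (added): algorithm=6 is FLANN_INDEX_LSH, the FLANN index type meant
# for binary descriptors such as ORB; the KD-tree index (algorithm=1) is for
# float descriptors like SIFT.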
search_params = dict(checks=32)
matcher = cv2.FlannBasedMatcher(index_params, search_params)
matches = matcher.match(queryDescriptors=desc1, trainDescriptors=desc2)
res = cv2.drawMatches(img1=img1, keypoints1=kp1, img2=img2, keypoints2=kp2, matches1to2=matches, outImg=None,
flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
cv2.imshow(winname="result", mat=res)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
604d167ee22f327db5a232ca5cfcf97e45146276 | 32114bed9535bcecf4c05f6a3546dbd95fc1b57a | /logdog/roles/formatters/base.py | a72420564f7fabfd44fd8e34f2a49e5314b7153c | [] | no_license | miphreal/python-logdog | 8bd6e4067d73deecd7e2fe1debdeb6e007ef3b55 | 44194b199f1906a156aa15bb97a40bb8b13f3d52 | refs/heads/master | 2020-05-18T04:42:08.143627 | 2015-07-04T23:12:09 | 2015-07-04T23:12:09 | 30,718,313 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from __future__ import absolute_import, unicode_literals
import logging
from logdog.core.base_role import BaseRole
logger = logging.getLogger(__name__)
class BaseFormatter(BaseRole):
pass | [
"[email protected]"
] | |
caf749c110411a6f35abe9cdfac7b7aea7d76df8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /grorumaEjyFDmZQCx_8.py | fe3acccd6fdd436692863a4d9ddb364c77a9a3e9 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py |
def is_wristband(lst):
a = check_horizontal(lst)
b = check_vertical(lst)
c = check_left_diagonal(lst)
d = check_right_diagonal(lst)
    return (a + b + c + d) > 0
def check_horizontal(lst):
for i in range(0,len(lst)):
for j in range(0,len(lst[i])-1):
if(lst[i][j] != lst[i][j+1]):
return 0
return 1
def check_vertical(lst):
for i in range(0,len(lst[0])):
for j in range(0,len(lst)-1):
if(lst[j][i] != lst[j+1][i]):
return 0
return 1
def check_left_diagonal(lst):
for i in range(0,len(lst)-1):
for j in range(0,len(lst[0])-1):
if(lst[i][j] != lst[i+1][j+1]):
return 0
return 1
def check_right_diagonal(lst):
for i in range(0,len(lst)-1):
for j in range(0,len(lst[0])-1):
if(lst[len(lst)-i-1][j] != lst[len(lst)-i-2][j+1]):
return 0
return 1
| [
"[email protected]"
] | |
9ff96557e800b32b0ff952b4c19545cc2d773048 | fc81adee79f12d06e65283c1cb276e49292cc214 | /myquora/urls.py | 835bcf80b36713529715224f9a17829af550e8fc | [] | no_license | Joy-Cse/Q-A-forum-for-exam-preparationweb-app-like-quora-in-Django | 335d1ac3733a365f0d515cd170b819c7fd0685ea | 11476a58439bc4ab724cb95856440ec2a2c73fb4 | refs/heads/master | 2022-12-19T02:14:00.247944 | 2020-09-22T11:13:11 | 2020-09-22T11:13:11 | 297,620,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('question/add/', views.QuestionCreate.as_view(), name='question-add'),
path('questions/', views.QuestionListView.as_view(), name='questions'),
path('answers/', views.AnswerListView.as_view(), name='answers'),
path('question/<int:pk>', views.QuestionDetailView.as_view(), name='question-detail'),
path('question/<int:pk>/answer/', views.AnswerCreate.as_view(), name='answer-add'),
path('question/<int:pk>/update/', views.UpdateQuestion.as_view(), name='question-update'),
path('answer/<int:pk>/update/', views.UpdateAnswer.as_view(), name='answer-update'),
path('answer/<int:pk>/comment/', views.CommentCreate.as_view(), name='comment-add'),
path('answer/upvote/<int:pk>', views.UpvoteCreate.as_view(), name='answer-upvote'),
path('answer/downvote/<int:pk>', views.DownvoteCreate.as_view(), name='answer-downvote'),
path('author/<int:pk>', views.AuthorDetailView.as_view(), name='author-detail'),
path('author/add/', views.AuthorCreate.as_view(), name='author-add'),
path('author/<int:pk>/', views.AuthorUpdate.as_view(), name='author-update')
]
| [
"[email protected]"
] | |
6303afc4ff3f1b115da67e485f03d84430a0e26a | 492e2bed8aaee614c37811fe99fa11b31cf858ca | /ship.py | 393394b86291b7d931389b313af747be514dea6d | [] | no_license | foxyol/Cat_alien_invasion | 93e5358a357b790ec6e2dcfb73a42ec6a4221ab4 | 2b100750d696a24b4aa3aa2a598e64a3bd20a6a8 | refs/heads/master | 2022-12-22T10:54:35.331762 | 2020-09-20T19:27:55 | 2020-09-20T19:27:55 | 294,505,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    '''A class to manage the ship.'''
def __init__(self, ai_game):
        '''Initialize the ship and set its starting position.'''
super().__init__()
self.screen = ai_game.screen
self.screen_rect = ai_game.screen.get_rect()
self.settings = ai_game.settings
        # Load the ship image and get its rect.
self.image = pygame.image.load('images/cat1.png')
self.rect = self.image.get_rect()
        # Start each new ship at the bottom center of the screen.
self.rect.midbottom = self.screen_rect.midbottom
        # Store a float for the ship's exact horizontal position.
self.x = float(self.rect.x)
        # Movement flags.
self.moving_right = False
self.moving_left = False
def update(self):
        '''Update the ship's position based on the movement flags.'''
if self.moving_right and self.rect.right < self.screen_rect.right:
self.x += self.settings.ship_speed
if self.moving_left and self.rect.left > 0:
self.x -= self.settings.ship_speed
self.rect.x = self.x
def blitme(self):
        '''Draw the ship at its current location.'''
self.screen.blit(self.image, self.rect)
def center_ship(self):
        '''Center the ship at the bottom of the screen.'''
self.rect.midbottom = self.screen_rect.midbottom
self.x = float(self.rect.x)
| [
"[email protected]"
] | |
80b153cbb7ed5da530a851223186462c4bdc392c | 4e3518947811d63025a18f5ed96081aa82fe1da1 | /getpaid/wiretransfer/tests.py | 0efe16f594b22070cea395402e140340f3dded74 | [] | no_license | collective/getpaid.wiretransfer | d8d8af1c3a273c1711c8946a3c00ef27cd7311c7 | e6f87ee286d07fa446a4bcaa955e2091aacc7276 | refs/heads/master | 2023-08-24T23:45:03.992203 | 2009-11-14T12:57:22 | 2009-11-14T12:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | import unittest
from zope.testing import doctestunit
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import getpaid.wiretransfer
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(getpaid.wiretransfer)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='getpaid.wiretransfer',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='getpaid.wiretransfer.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='getpaid.wiretransfer',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='getpaid.wiretransfer',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"[email protected]@5c657d5f-f92f-0410-bf4e-417a11dd3c0b"
] | [email protected]@5c657d5f-f92f-0410-bf4e-417a11dd3c0b |
bb55fecab0a1fc36dfb41251345cbbc5e4f4a6d8 | 7a9e7fb91b344983cf2a986d60cddb49b348ba5a | /image/img_client.py | 141bc13c7cd300007d1dbc43a608bdf9318a2ac4 | [] | no_license | ivantrw/client---server | 5d9463813f85ff399c12876c896ddd6888c5f06b | 0396d9b8174fc7a76cbfc19c33293037019bde00 | refs/heads/master | 2022-07-30T17:08:03.790985 | 2020-05-20T06:59:11 | 2020-05-20T06:59:11 | 265,478,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # A very simple Flask Hello World app for you to get started with...
from __future__ import print_function
from flask import Flask
import requests
import json
import cv2
app = Flask(__name__)
@app.route('/')
def hello_world():
    addr = 'http://ivanserver.pythonanywhere.com'  # no trailing slash; the path below starts with one
test_url = addr + '/api/test'
# prepare headers for http request
content_type = 'image/jpeg'
headers = {'content-type': content_type}
img = cv2.imread('slideshow.jpg')
# encode image as jpeg
_, img_encoded = cv2.imencode('.jpg', img)
# send http request with image and receive response
    response = requests.post(test_url, data=img_encoded.tobytes(), headers=headers)
# decode response
return json.loads(response.text) | [
"[email protected]"
] | |
90ca5761541b1fa1bb8c2f881a15e991a3e39327 | 39cb4512737bafa6e6c8b33834420c4a3cbd0617 | /tools/readable.py | bd64fa99c5ec7a8b8368584c5b97c237090e7e2e | [] | no_license | byui-cse/cse111-course | e691ab311ddf1880c3a8dc1d36b8affd6e30cb6c | 113fcbcd545a03407d217ba33df482cca94ef2bb | refs/heads/master | 2023-05-24T19:24:54.882191 | 2023-05-19T20:24:33 | 2023-05-19T20:24:33 | 215,662,254 | 4 | 8 | null | 2023-05-19T20:20:11 | 2019-10-16T23:26:14 | HTML | UTF-8 | Python | false | false | 4,238 | py | import os
import os.path as path
import re
import sys
import readability
def main(argv):
argv.pop(0)
if len(argv) == 0:
argv = ["."]
if len(argv) == 2 and path.isfile(argv[0]) and path.isfile(argv[1]):
srcpath = argv[0]
dstpath = argv[1]
measure = measure_file(srcpath, dstpath)
print(measure, srcpath)
else:
measures = []
for srcpath in argv:
if path.isdir(srcpath):
measures.extend(process_dir(srcpath))
else:
measures.append(process_dir(srcpath))
for measure in sorted(measures, key=lambda elem: elem[0], reverse=True):
print(measure)
def process_dir(dirpath):
measures = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in filenames:
suffix = path.splitext(filename)[1]
if suffix == '.html':
srcpath = path.join(root, filename)
measures.append(process_file(srcpath))
return measures
def process_file(srcpath, dstpath=None):
if dstpath is None:
parts = path.split(srcpath)
dirname = parts[0]
filename = parts[1]
basename = path.splitext(filename)[0]
dstpath = path.join(dirname, f"{basename}.txt")
return measure_file(srcpath, dstpath), srcpath
patterns = [
# Extract the article part of the HTML document.
(re.compile('.*<article>(.*)</article>.*', re.I|re.S), r'\1'),
# Remove all python and console preformatted content.
(re.compile('<pre class="(python|console)">.*?</pre>', re.I|re.S), ''),
# Remove all tables.
(re.compile('<table>(.*?)</table>', re.I|re.S), ' \u00b6 '),
(re.compile('<h[1-6]>', re.I), ' \u00b6 '),
(re.compile('<li>', re.I), ' \u00b6 '),
(re.compile('<p>', re.I), ' \u00b6 '),
# Remove all mathematical expressions.
(re.compile('<div class="[^"]*expr[^"]*">.*', re.I), ''),
# Remove all remaing tags.
(re.compile('</?[^>]+>'), ''),
# HTML entities
    (re.compile('&larr;', re.I), ' <- '),
    (re.compile('&nbsp;', re.I), ' '),
    (re.compile('&mdash;', re.I), ' -- '),
    (re.compile('&minus;', re.I), ' - '),
    (re.compile('&ndash;', re.I), ' - '),
    (re.compile('&pi;', re.I), 'PI'),
    (re.compile('&rarr;', re.I), ' -> '),
    (re.compile('&lt;', re.I), ' < '),
    (re.compile('&gt;', re.I), ' > '),
    (re.compile('&amp;', re.I), ' & '),
(re.compile('&[^;]+;'), ' '),
# Tokenize the text so that the readability module can process it.
# Convert all multiple blank lines to a single paragraph symbol.
(re.compile(r'(\r?\n){2,}'), ' \u00b6 '),
# Remove all tab and newline characters.
(re.compile(r'\s+'), ' '),
# Insert a space before and a newline after each [:;.?!].
(re.compile('[:;.?!]'), r' \g<0>\n'),
# Insert a space before each comma.
(re.compile(','), r' \g<0>'),
# Insert a space before and after each [_"()[]{}].
(re.compile('[-"()[\]{}_]'), r' \g<0> '),
# Clean up extra spaces.
(re.compile(' {2,}'), ' '),
# Replace all paragraph symbols with a blank line.
(re.compile('( ?\u00b6 ?)+'), r'\n\n'),
(re.compile(r'(\r?\n){3,}'), r'\n\n'),
# Remove spaces at the start and end of each line.
(re.compile(r'^[\t ]+', re.M), ''),
(re.compile(r'[\t ]+$', re.M), ''),
]
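
# Rough effect of the pipeline above (illustrative note, not from the tool):
# '<p>Hello&nbsp;world!</p>' reduces to the tokenized text 'Hello world !'
# plus a blank paragraph line, which is what readability.getmeasures expects.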
def measure_file(srcpath, dstpath):
print(srcpath)
with open(srcpath, 'rt', encoding='utf-8') as srcfile:
text = srcfile.read()
for pat, repl in patterns:
text = re.sub(pat, repl, text)
results = readability.getmeasures(text, lang='en')
measure = results['readability grades']['GunningFogIndex']
with open(dstpath, 'wt', encoding='utf-8') as dstfile:
for key in results:
print('\t', key, sep='', file=dstfile)
value = results[key]
for key2 in value:
value2 = value[key2]
if isinstance(value2, float):
value2 = round(value2, 2)
print('\t\t', f"{key2} {value2}", sep='', file=dstfile)
dstfile.write(text)
#os.remove(dstpath)
return round(measure, 2)
if __name__ == "__main__":
main(sys.argv)
| [
"[email protected]"
] | |
03a23ef5796f720ff8f501a87a913af9728598fc | e2bd39106992b592de686e5bd79002edc05cc8bc | /429-N 叉树的层序遍历/LevelOrder.py | add08fd51114e2c7edf52a4c81ce76d29533ee4c | [] | no_license | Mumulhy/LeetCode | 9b8ad3af9f9a3b838bdd54727cf8f33401292d27 | 269419ba2a2840fcf100fa217c5275029ffa229e | refs/heads/master | 2022-10-28T23:06:54.081073 | 2022-10-23T07:48:49 | 2022-10-23T07:48:49 | 212,135,892 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # -*- coding: utf-8 -*-
# LeetCode 429 - Level Order Traversal of an N-ary Tree
"""
Created on Fri Apr 8 10:18 2022
@author: _Mumu
Environment: py38
"""
from typing import List
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
if root is None:
return []
stack = [root]
ans = []
while stack:
ans.append([])
new_stack = []
for node in stack:
ans[-1].append(node.val)
                if node.children:  # children may be None/empty for leaf nodes
                    new_stack.extend(node.children)
stack = new_stack
return ans
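
# Minimal usage sketch (hypothetical tree, not part of the original file):
#   root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
#   Solution().levelOrder(root)  # -> [[1], [3, 2, 4], [5, 6]]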
| [
"[email protected]"
] | |
ff3266e467c727ae0981e545f66227850fd75da1 | 3dc393ba07cfb57cdb1efbcd3824817a1bed9eb6 | /src/my_package/todelete/functions/functions_pytorch0.2/InterpolationChLayer.py | f840fb7931113fd513ee4553899655db90cd63d4 | [
"MIT"
] | permissive | laomao0/AIM_DAIN | c7ebf96db7b8b305d6574bc2252a7ca10ab2fc95 | 8322569498d675d3b2c1f35475c1299cad580bde | refs/heads/master | 2020-07-23T07:34:51.043470 | 2019-09-10T13:20:58 | 2019-09-10T13:20:58 | 207,486,949 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | # this is for wrapping the customized layer
import torch
from torch.autograd import Function
import _ext.my_lib as my_lib
#Please check how the STN FUNCTION is written :
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/gridgen.py
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/stn.py
class InterpolationChLayer(Function):
def __init__(self,ch):
super(InterpolationChLayer,self).__init__()
self.ch = ch
def forward(self, input1,input2):
# assert(input1.is_contiguous())
# assert(input2.is_contiguous())
self.input1 = input1.contiguous() # need to use in the backward process, so we need to cache it
self.input2 = input2.contiguous() # TODO: Note that this is simply a shallow copy?
if input1.is_cuda:
self.device = torch.cuda.current_device()
else:
self.device = -1
# output = torch.zeros(input1.size())
if input1.is_cuda :
# output = output.cuda()
output = torch.cuda.FloatTensor().resize_(self.input1.size()).zero_()
my_lib.InterpolationChLayer_gpu_forward(input1, input2, output)
else:
# output = torch.cuda.FloatTensor(input1.data.size())
output = torch.FloatTensor().resize_(self.input1.size()).zero_()
my_lib.InterpolationChLayer_cpu_forward(input1, input2, output)
# the function returns the output to its caller
return output
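
    # Caching input1/input2 on self (rather than save_for_backward) is the
    # old pre-0.4 autograd.Function style; it assumes backward() runs before
    # the next forward() overwrites the cached tensors.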
#TODO: if there are multiple outputs of this function, then the order should be well considered?
def backward(self, gradoutput):
# print("Backward of Interpolation Layer")
# gradinput1 = input1.new().zero_()
# gradinput2 = input2.new().zero_()
# gradinput1 = torch.zeros(self.input1.size())
# gradinput2 = torch.zeros(self.input2.size())
if self.input1.is_cuda:
# print("CUDA backward")
# gradinput1 = gradinput1.cuda(self.device)
# gradinput2 = gradinput2.cuda(self.device)
gradinput1 = torch.cuda.FloatTensor().resize_(self.input1.size()).zero_()
gradinput2 = torch.cuda.FloatTensor().resize_(self.input2.size()).zero_()
# the input1 image should not require any gradients
# print("Does input1 requires gradients? " + str(self.input1.requires_grad))
err = my_lib.InterpolationChLayer_gpu_backward(self.input1,self.input2,gradoutput,gradinput1,gradinput2)
if err != 0 :
print(err)
else:
# print("CPU backward")
# print(gradoutput)
gradinput1 = torch.FloatTensor().resize_(self.input1.size()).zero_()
gradinput2 = torch.FloatTensor().resize_(self.input2.size()).zero_()
err = my_lib.InterpolationChLayer_cpu_backward(self.input1, self.input2, gradoutput, gradinput1, gradinput2)
# print(err)
if err != 0 :
print(err)
# print(gradinput1)
# print(gradinput2)
# print(gradinput1)
return gradinput1, gradinput2 | [
"[email protected]"
] | |
6d273cadf77a130f8e83404e3653ad01bb4cae41 | 455cad4ee469c82f0f7f07e4f9964e72b5d8db59 | /formatForKeepass_Objects.py | 490f8406b93d584624fe8ab7375f6a7690b391fe | [] | no_license | RobertoChapa/Kaspersky_Keepass_import | 0114eb7732619c048c5df0a70bc8aac97c54afa3 | 5651ea91a4b3758120b2257eb7fc71250a18e30d | refs/heads/main | 2023-01-14T05:38:14.207872 | 2020-11-23T21:14:13 | 2020-11-23T21:14:13 | 315,440,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | """
Bobby Chapa 11-5-2020
Format for keepass
"""
# format output for keepass. returns text file read for import by keepass
class formatForKeepass:
def __init__(self, read_timport):
self.read_timport = read_timport
# input output file stream. writes, reads, or appends to text file
# arguments: name of text file, w/r/a, text
def parse_timport(self):
output = '"Account"' + ',' + '"Login Name"' + ',' + '"Password"' + ',' + '"Web Site"' + ',' + '"Comments"' + "\n"
        for key, value in self.read_timport.items():
            if key != '':  # skip the header/empty-key row
                output += self.format_write(key, True)
                ln = value[0]   # Login Name
                output += self.format_write(ln, True)
                pw = value[1]   # Password
                output += self.format_write(pw, True)
                ws = value[2]   # Web Site
                output += self.format_write(ws, True)
                com = value[3]  # Comment
output += self.format_write(com, False) # last value in row
return output
# adds a comma at the end of each value except for the last value in each row
@staticmethod
def format_write(kas_text, test):
s = '"' + kas_text + '"'
if test:
s += ','
else:
s += "\n"
return s
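
# Example (hypothetical Kaspersky export row, not from the original file):
#   rows = {'GitHub': ['alice', 'hunter2', 'https://github.com', 'work']}
#   print(formatForKeepass(rows).parse_timport())
# prints the CSV header line plus one quoted record per account.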
| [
"[email protected]"
] | |
b928a2599f378c149035d2366e023ce00be56d7c | 99f30c49517d39e3695bdc287aa91e3150342ac4 | /OpenAi-Gym/Basics/Basics_Reward.py | a57f2086e7c414aec3209b39aa10b14cd2150fcf | [] | no_license | Pavankunchala/Python | ab2c4786a60e66cd885a4ee257c5baf3ecba123f | 0c6e542ac14bab1b8345fc2ac869eea8e11f1b8d | refs/heads/master | 2020-08-01T01:46:08.167599 | 2020-05-27T04:54:40 | 2020-05-27T04:54:40 | 210,816,859 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 12:32:06 2020
@author: pavankunchala
"""
import gym
#creating the environment
env = gym.make("CartPole-v1")
env.reset()
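
# Note: this targets the classic gym API where env.step returns a 4-tuple;
# gym >= 0.26 returns (obs, reward, terminated, truncated, info) instead.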
#Play 10 games
for i in range(0, 10):
    #initializing the variables
done = False
game_rew = 0
while not done:
#choosing a random action
action = env.action_space.sample()
        # take a step in the environment
        new_obs, reward, done, info = env.step(action)
game_rew += reward
# printing the cumulative reward after done
if done:
print("Episode %d finished , Reward %d "%(i,game_rew))
env.reset() | [
"[email protected]"
] | |
332a75d5d3398638a690352b60ebc299c77435f9 | b0653128bc312878778f1392fdda26eab454901f | /Challenges/chall_04.py | 70f3a7f653b75675784b18413b89607c45cdcf13 | [
"MIT"
] | permissive | HKuz/PythonChallenge | aef9df8e02e9e2710a8d7c3f77dba5890d16e063 | e9d72616aa389483cab710bca3bccf708c0f99e0 | refs/heads/master | 2021-01-21T15:44:54.367043 | 2018-12-04T23:44:30 | 2018-12-04T23:44:30 | 91,855,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #!/usr/local/bin/python3
# Python Challenge - 4
# http://www.pythonchallenge.com/pc/def/linkedlist.html
# http://www.pythonchallenge.com/pc/def/linkedlist.php
# Keyword: peak
import urllib.request
import re
def main():
'''
html page shows: linkedlist.php
php page comment: urllib may help. DON'T TRY ALL NOTHINGS, since it will
never end. 400 times is more than enough.
Photo link: linkedlist.php?nothing=12345
http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=12345
44827
45439
...
'''
base_url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing='
nothing = '12345'
# nothing = '66831' # Cheat code for last nothing
pattern = re.compile(r'next nothing is (\d+)')
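
    # Each page says "and the next nothing is NNNNN"; follow that chain of
    # URLs until no match is found (the final page holds the keyword).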
while True:
with urllib.request.urlopen(base_url + nothing) as page:
data = page.read().decode('utf-8')
# print(data)
match = re.search(pattern, data)
if match:
nothing = match.group(1)
if nothing == '16044':
                    nothing = str(16044 // 2)  # integer division: '8022', not '8022.0'
else:
print('Hit break')
break
print('Last nothing found was: {}'.format(nothing))
return 0
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f1a76ee4f36025e73c4d4a367b9438d92d130164 | 4fa9c8ae53e13784d5602d97aa223cb2c904c213 | /Sliding Window/Longest Substring with maximum K Distinct Characters (medium)/solution.py | bc8555fe6a983694c07432709e179b966b4cb23b | [] | no_license | rykumar13/problem_solving- | e32c6f9a4d18fc53f2e37319fb0a6cd825d780f2 | 35e0c4a9c0b10bfefd83e0205fcd9a28c4c4d9d7 | refs/heads/main | 2023-07-23T11:02:20.435390 | 2021-08-27T16:30:05 | 2021-08-27T16:30:05 | 397,321,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | def longest_substring_with_k_distinct(str1, k):
window_start = 0
char_count = {}
longest_window_sofar = 0
for window_end in str1:
if window_end not in char_count:
char_count.update({window_end: 0})
char_count[window_end] += 1
while len(char_count.keys()) > k:
char_count[str1[window_start]] -= 1
if char_count[str1[window_start]] == 0:
del char_count[str1[window_start]]
window_start += 1
longest_window_sofar = max(longest_window_sofar, sum(char_count.values()))
return longest_window_sofar
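
# Each character enters the window once and leaves at most once, so the scan
# is O(n) time with O(k) extra space for the per-character counts.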
str1_in = "araaci"
k_in = 2
print(longest_substring_with_k_distinct(str1_in, k_in))
| [
"[email protected]"
] | |
6ef05a8b7d726e1033756a873bcf34f33297c403 | 49b43859c65a0a32ddedd5c19eec30be37423b17 | /test/unit/test_lr_scheduler.py | d94a55e850df7d92b1227f6589ce5854438ca91b | [
"Apache-2.0"
] | permissive | KellenSunderland/sockeye | 665b9bccc7e4cacb70c28c21dc46adc4514b83dd | 23a6fd3f3614f935fc9f5588eee2ca507aba3b20 | refs/heads/master | 2021-01-01T19:17:03.911163 | 2017-07-27T15:27:08 | 2017-07-27T15:45:09 | 98,554,080 | 1 | 0 | null | 2017-07-27T15:43:21 | 2017-07-27T15:43:21 | null | UTF-8 | Python | false | false | 1,120 | py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from sockeye.lr_scheduler import LearningRateSchedulerInvSqrtT, LearningRateSchedulerInvT
import pytest
def test_lr_scheduler():
updates_per_epoch = 13
half_life_num_epochs = 3
schedulers = [LearningRateSchedulerInvT(updates_per_epoch, half_life_num_epochs),
LearningRateSchedulerInvSqrtT(updates_per_epoch, half_life_num_epochs)]
for scheduler in schedulers:
scheduler.base_lr = 1.0
# test correct half-life:
assert scheduler(updates_per_epoch * half_life_num_epochs) == pytest.approx(0.5)
| [
"[email protected]"
] | |
340bf8ec998fe6a67518036364be5978b887cf81 | 83003007b7bc12493e2bca2b5c78be5ea86df56c | /Day56-Day70/Day57/shape.py | 5ecbd3f55132627ccb0aa7fca7c2378a713dbdcc | [] | no_license | a6361117/code | fa7fe2f33c522ad38d92e6c429b50ef8a271bb1e | bd8bf877416acc5400dbda90212b7e83020ff643 | refs/heads/master | 2022-09-07T22:22:24.765271 | 2020-05-26T14:27:47 | 2020-05-26T14:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py |
import turtle
sides=int(input('enter the number of sides:'))
angle=360.0/sides
length=400.0/sides
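
# the perimeter is fixed at 400 px, so each side shrinks as the count grows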
turtle.fillcolor('blue')
turtle.begin_fill()
for side in range(sides):
turtle.forward(length)
turtle.right(angle)
turtle.end_fill()
turtle.done()
| [
"[email protected]"
] | |
ef9bcffbb4f3daa7e4edb6dbf073902ecc54e597 | cbbbd77bc292287003e622a8ff9231070364fb9b | /Users/migrations/0003_remove_articles_click_num.py | 941cf01563c0fd2e8e31daddb65d777c4fb36f92 | [] | no_license | NicoNicoDouban/Douban | b62d62de8269c206a02b3b7de6694b50c4f0b113 | 951bdc2b2bfd5b73db06209b7f50b9e4f5994502 | refs/heads/classes | 2021-04-09T17:57:51.708536 | 2018-03-21T13:28:41 | 2018-03-21T13:28:41 | 125,724,814 | 0 | 1 | null | 2018-04-02T09:20:24 | 2018-03-18T12:54:32 | Python | UTF-8 | Python | false | false | 394 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-17 00:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Users', '0002_auto_20180317_0810'),
]
operations = [
migrations.RemoveField(
model_name='articles',
name='click_num',
),
]
| [
"[email protected]"
] | |
caff3311e78445f44ab4fc68b41dd9ce624974ac | 87cecfcdcbae7fa42a5a9dc9c4e56ce4b5ee0645 | /openstackM/OM/apps.py | a5cba4b4c394ac2da5b948500522338d451e9b25 | [] | no_license | pinzBro/Django-for-openstack-services | a79afba24b1795bdb1c0d20cfcf690053f640ade | dcb4b1e5bf5356c1a124d1899a7f30328a7cc8b0 | refs/heads/master | 2021-10-19T04:38:01.595104 | 2019-02-18T05:47:48 | 2019-02-18T05:47:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from django.apps import AppConfig
class OmConfig(AppConfig):
name = 'OM'
| [
"[email protected]"
] | |
97075b6a6a696ee1e610f9344173a920d147eac9 | 793b45444a35ac762ebba1f7145aa4137efd0ef2 | /iscsi_initiator/scandev.py | 504644e54ac174ffd10e238c6b63cafe88eb2c5c | [
"Apache-2.0"
] | permissive | MatheMatrix/SoftSAN | aa6165b178c20868e877407b1be53b4496981018 | 1b8ab2cae92b7aac34211909b27d4ebe595275d7 | refs/heads/master | 2020-09-24T04:09:33.789116 | 2014-09-22T06:43:10 | 2014-09-22T06:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | import os
# the block-device information about a target
# can be found under '/sys/class/iscsi_session/':
#
# iscsi_session
#   +-- session1 ... sessionN
#         +-- targetname               # check the target name here
#         +-- device
#               +-- targetNo:0:0
#                     +-- No:0:0:LunNo
#                           +-- block
#                                 +-- blockdeviceName
def check_targetname(targetfile_path, target):
    # use a context manager so the sysfs file handle is always closed
    with open(targetfile_path) as targetfile:
        for name in targetfile:
            if name.find(target) > -1:
                return True
    return False
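
# sysfs entries for a freshly logged-in target appear asynchronously, which
# is why get_blockdev_by_targetname below busy-waits for block/ to show up.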
def get_blockdev_by_targetname(target):
    path = '/sys/class/iscsi_session/'
    for session in os.listdir(path):
        if session.find('session') > -1:
            path_session = path + session + '/'
            if check_targetname(path_session + 'targetname', target) is True:
                path_session_dev = path_session + 'device/'
                for tar in os.listdir(path_session_dev):
                    if tar.find('target') > -1:
                        path_session_dev_tar = path_session_dev + tar + '/'
                        for fin in os.listdir(path_session_dev_tar):
                            # keep H:C:T:L entries whose LUN is non-zero
                            if fin.find(':0:0:') > -1 and fin.find(':0:0:0') == -1:
                                path_session_dev_tar_final = path_session_dev_tar + fin + '/block/'
                                # busy-wait until udev creates the block/ directory
                                while not os.path.isdir(path_session_dev_tar_final):
                                    pass
                                device = os.listdir(path_session_dev_tar_final)
                                return device[0]
    print 'target not found' | [
"[email protected]"
] | |
0751150ee28930c1d6705eb9e3833545c8d89bba | df6af3eb8800c8d417dec2025af5dc154c7d23ac | /python/day2.py | 240af16b046ffe0e15d0f014e858ff3e2a45e87d | [] | no_license | aravindsbr/GraduateTrainingProgram2018 | e8e7f213f43da16de1ab87d1e65be5091443be1f | c374f7d2082ba92d40a0700a28c860f4c5bb2b93 | refs/heads/master | 2020-03-27T21:36:18.594429 | 2018-10-04T06:44:59 | 2018-10-04T06:44:59 | 147,160,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | """
# 1)
inventory = {
'gold' : 500,
'pouch' : ['flint', 'twine', 'gemstone'],
'backpack' : ['xylophone','dagger', 'bedroll','bread loaf']
}
# qn 1
inventory["pocket"]=[]
print(inventory)
# qn 2
inventory["pocket"]=['seashell', 'strange berry','lint']
print(inventory)
# qn 3
inventory["backpack"].sort()
print(inventory)
#qn 4
del inventory["backpack"][2]
print(inventory)
#qn 5
inventory["gold"]=[500,50]
print(inventory)
"""
"""
2)
# qn 1
#student_details={'student1':[90,85, 80],
# 'student2':[70,80,60]}
student_details={}
n=input("Enter number of students")
s=input("Enter number of subjects")
i=1
while i<=n:
k=input("Enter key")
v=[]
j=1
while j<=s:
val=input("enter value for subject"+str(j))
v.append(val)
j=j+1
student_details[k]=v
i=i+1
print(student_details)
# qn 2
total=0
avg=0
for a in student_details.iterkeys():
total=str(sum(student_details[a]))
print("Total of "+a+" is "+total)
total=int(total)
avg=total/len(student_details[a])
print("Average of "+a+" is "+str(avg))
"""
| [
"[email protected]"
] | |
61bd7217fb13a193f920eb71fa014833220a44a0 | 2da68c7c34b19f6086c82e187a541a112e49bf91 | /src/node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | 63cfd5ebcf7ab6860c30292b251d339af6940579 | [
"MIT"
] | permissive | nuzelac/zavrsni | ad227514adef1baea35e1645734dbad47a6ab6fc | da2e488ba119a92012501c719ad8f04295f04378 | refs/heads/master | 2021-01-18T14:06:23.250395 | 2014-06-08T22:38:58 | 2014-06-08T22:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,055 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr/local/Cellar/node/0.10.26",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/Users/Nino/.node-gyp/0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/share/npm/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/share/npm/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/Nino/.npm-init.js",
"userconfig": "/Users/Nino/.npmrc",
"node_version": "v0.10.26",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/Nino/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "node/v0.10.26 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/s6/8nr3jnt52lq8sp4zgsjyjr1r0000gn/T/",
"unsafe_perm": "true",
"prefix": "/usr/local/share/npm",
"link": ""
}
}
| [
"[email protected]"
] | |
d6c55808982af7175dff9bb53cc42a74d57ef574 | f842e97b92f1e56354fe412e84ed86dc5b96af11 | /deoldify/utils.py | f2ae407d4b020e12f7c897c57959017781ece681 | [
"MIT"
] | permissive | teejs2012/DeOldify | c1af30f256bdbcee1bde5bed59a7a6654f6173f0 | 3aa97d2ad4f293dfe2e44f115beae425d9157c68 | refs/heads/master | 2022-11-29T21:01:58.277172 | 2020-08-12T08:31:09 | 2020-08-12T08:31:09 | 269,106,950 | 0 | 0 | MIT | 2020-06-03T14:15:44 | 2020-06-03T14:15:43 | null | UTF-8 | Python | false | false | 4,224 | py | import random
import numpy as np
# from skimage.measure import block_reduce
import cv2
from PIL import Image
import os
def mini_norm(x):
y = x.astype(np.float32)
y = 1 - y / 255.0
y -= np.min(y)
temp = np.max(y)
    if temp > 0:
        y /= temp  # reuse the max computed above
return (255.0 - y * 255.0).astype(np.uint8)
def sensitive(x, s=15.0):
y = x.astype(np.float32)
y -= s
y /= 255.0 - s * 2.0
y *= 255.0
return y.clip(0, 255).astype(np.uint8)
def my_resize(img, size, divisible=64):
    h, w = img.shape[0], img.shape[1]
    if h < w:
        target_w = w * size // h
        target_w = target_w // divisible * divisible  # honour the divisible arg
        return cv2.resize(img, (target_w, size))
    else:
        target_h = h * size // w
        target_h = target_h // divisible * divisible
        return cv2.resize(img, (size, target_h))
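
# generate_user_point scatters sparse colour "hints" sampled from the source
# image onto an RGBA canvas (alpha 255 marks a hint); the non-random branch
# instead lays the hints on a fixed 8x8 interior grid.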
def generate_user_point(image,img_size=-1, is_random=True):
h,w,_ = image.shape
if img_size==-1:
result = np.zeros((h, w, 4)).astype(np.uint8)
else:
result = np.zeros((img_size,img_size,4)).astype(np.uint8)
if is_random:
hint_number = int(np.random.normal(20, 10, 1)[0])
for i in range(hint_number):
# sample location
y = int(np.clip(np.random.normal(h/2., h/5.), 0, h-1))
x = int(np.clip(np.random.normal(w/2., w/5.), 0, w-1))
# add color point
color = image[y,x]
if img_size == -1:
cv2.circle(result, (x, y), 1, (int(color[0]), int(color[1]), int(color[2]), 255), -1)
else:
cv2.circle(result,(int(x*img_size/w),int(y*img_size/h)),1,(int(color[0]),int(color[1]),int(color[2]),255),-1)
else:
step = 9
x_interval = w//step
y_interval = h//step
for i in range(1,step):
for j in range(1,step):
x = i*x_interval
y = j*y_interval
# add color point
color = image[y,x]
if img_size == -1:
cv2.circle(result, (x, y), 1, (int(color[0]), int(color[1]), int(color[2]), 255), -1)
else:
cv2.circle(result,(int(x*img_size/w),int(y*img_size/h)),1,(int(color[0]),int(color[1]),int(color[2]),255),-1)
return Image.fromarray(cv2.cvtColor(result,cv2.COLOR_BGRA2RGBA))
class user_point_generator:
def __init__(self):
self.hint_number_mu = 20
self.hint_number_sigma = 10
self.sample_number = 1000
self.samples = np.clip(np.random.normal(self.hint_number_mu, self.hint_number_sigma, self.sample_number), 0, 500)
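        # pre-drawing 1000 clipped samples makes each generate() call a cheap
        # random index lookup instead of another np.random.normal call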
def generate(self,image,img_size=-1, is_random=True):
h,w,_ = image.shape
if img_size==-1:
result = np.zeros((h, w, 4)).astype(np.uint8)
else:
result = np.zeros((img_size,img_size,4)).astype(np.uint8)
if is_random:
hint_number = int(self.samples[random.randint(0,self.sample_number-1)])
for i in range(hint_number):
# sample location
y = int(np.clip(np.random.normal(h/2., h/5.), 0, h-1))
x = int(np.clip(np.random.normal(w/2., w/5.), 0, w-1))
# add color point
color = image[y,x]
if img_size == -1:
cv2.circle(result, (x, y), 1, (int(color[0]), int(color[1]), int(color[2]), 255), -1)
else:
cv2.circle(result,(int(x*img_size/w),int(y*img_size/h)),1,(int(color[0]),int(color[1]),int(color[2]),255),-1)
else:
step = 9
x_interval = w//step
y_interval = h//step
for i in range(1,step):
for j in range(1,step):
x = i*x_interval
y = j*y_interval
# add color point
color = image[y,x]
if img_size == -1:
cv2.circle(result, (x, y), 1, (int(color[0]), int(color[1]), int(color[2]), 255), -1)
else:
cv2.circle(result,(int(x*img_size/w),int(y*img_size/h)),1,(int(color[0]),int(color[1]),int(color[2]),255),-1)
return Image.fromarray(cv2.cvtColor(result,cv2.COLOR_BGRA2RGBA)) | [
"[email protected]"
] | |
51e417701a68ede865d60f6a5c4e6cc84ef5d56b | 5a281cb78335e06c631181720546f6876005d4e5 | /senlin-7.0.0/senlin/api/openstack/v1/webhooks.py | 06fdf36a4269553ce0d9c367326b9dc133fdc167 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 1,976 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Webhook endpoint for Senlin v1 REST API.
"""
from senlin.api.common import util
from senlin.api.common import wsgi
from senlin.objects import base as obj_base
class WebhookController(wsgi.Controller):
"""WSGI controller for webhooks resource in Senlin v1 API."""
REQUEST_SCOPE = 'webhooks'
@wsgi.Controller.api_version("1.0", "1.9")
@util.policy_enforce
def trigger(self, req, webhook_id, body=None):
if body is None:
body = {'params': None}
body = obj_base.SenlinObject.normalize_req(
'WebhookTriggerRequestBody', body)
obj = util.parse_request(
'WebhookTriggerRequest', req, {'identity': webhook_id,
'body': body})
res = self.rpc_client.call(req.context, 'webhook_trigger', obj)
location = {'location': '/actions/%s' % res['action']}
res.update(location)
return res
@wsgi.Controller.api_version("1.10") # noqa
@util.policy_enforce
def trigger(self, req, webhook_id, body=None):
obj = util.parse_request(
'WebhookTriggerRequestParamsInBody', req, {'identity': webhook_id,
'body': body})
res = self.rpc_client.call(req.context, 'webhook_trigger', obj)
location = {'location': '/actions/%s' % res['action']}
res.update(location)
return res
| [
"Wayne [email protected]"
] | Wayne [email protected] |
164a9e59f5de5e0e83c9c2b467c583b578612e3b | aedd5f417a49d61799ff35984419e5fee50b67a0 | /NST-laplacian/NST_lp.py | 223ce2eb8766e1d593e573ab54d54808d373fb26 | [] | no_license | maxencealluin/NST | 74ce96a3cc4ea25dd9240d5c539afe12be3920ee | d818f8ed00b3b0fffc2e315e7edc39b521f4bf4a | refs/heads/master | 2020-05-31T15:33:56.772551 | 2019-06-24T09:38:18 | 2019-06-24T09:38:18 | 190,359,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,052 | py | import copy
import os
import sys
import time
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser(description='Laplacian-Steered Neural Style Transfer')
parser.add_argument('--steps', type=int, default=800, metavar='N',
help='number of steps to train (default: 800)')
parser.add_argument('--sw', type=int, default=1000000, metavar='N',
help='Style weight (default: 1000000)')
parser.add_argument('--cw', type=int, default=20, metavar='N',
help='Content weight (default: 20)')
parser.add_argument('--lw', type=int, default=100, metavar='N',
help='Laplacian weight (default: 100)')
parser.add_argument('--style', type=str, default='starry_night.jpg', metavar='X.jpg',
help='Style image to use')
parser.add_argument('--content', type=str, default='lozere.jpg', metavar='X.jpg',
help='Content image to use')
parser.add_argument('--random', type=int, default=0 , metavar='0-1',
help='Initialize generated image at random (default 0: False)')
dargs = vars(parser.parse_args())
print(dargs['steps'])
#Timing program
begin = time.time()
#Defines directory for pretrained models download
# os.environ['TORCH_MODEL_ZOO'] = '/sgoinfre/goinfre/Perso/malluin'
#If GPU available use bigger image size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count():
print("Using GPU: {}\n".format(torch.cuda.get_device_name(device)))
else:
print("Using CPU")
imsize = 512 if torch.cuda.is_available() else 256
#Load input image to a Tensor and resize to desired shape, in line with device computational power.
loader = transforms.Compose([transforms.Resize((imsize, imsize)), transforms.ToTensor()])
#Unloader to transform back tensors to pillow images in order to save and plot output images.
unloader = transforms.ToPILImage()
def image_loader(image_name):
image_load = Image.open(image_name)
old_size = image_load.size
image_load = loader(image_load).unsqueeze(0)
new_size = (image_load.size()[2], image_load.size()[3])
print("Rescaling from {}x{} to {}x{}".format(old_size[0],
old_size[1], new_size[0], new_size[1]))
return image_load.to(device, torch.float)
def show_image(tensor, i, row = 1, col = 2):
tensor = tensor.squeeze(0)
image = unloader(tensor)
plt.subplot(row, col, i)
plt.imshow(image)
#Define content and style images from the parsed command-line options.
#(The old positional sys.argv handling clashed with argparse, which rejects
#unknown positionals and would otherwise feed option strings such as
#'--steps' in as file paths, so --style/--content are used directly.)
style_img_path = dargs['style']
content_img_path = dargs['content']
print("Content image: {} \nStyle image: {} \n".format(content_img_path, style_img_path))
#Load both style and content images, save original size for rescaling output
style_img = image_loader(style_img_path)
content_img = image_loader(content_img_path)
old_size = reversed(Image.open(content_img_path).size)
old_size = tuple(old_size)
# print(style_img.size(), content_img.size())
assert style_img.size() == content_img.size()
#loader to resize output image to its original size
load_resize = transforms.Compose([transforms.Resize(old_size), transforms.ToTensor()])
def scale_up_save(tensor, i):
resized = tensor.squeeze(0)
resized = unloader(resized.cpu())
resized = load_resize(resized)
resized = resized.squeeze(0)
resized = unloader(resized)
resized.save('results_NST/output' + str(i) +'.jpg')
print('saving results_NST/output' + str(i) +'.jpg...')
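
# Note: scale_up_save assumes a results_NST/ directory exists next to the
# script; create it first (e.g. os.makedirs('results_NST', exist_ok=True))
# or PIL's save call will raise.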
#Optional show style and content images
# show_image(style_img.cpu(), 1)
# show_image(content_img.cpu(), 2)
# plt.show()
#Content loss function which inherits from pytorch nn.nodule
#This function defines the mean squared error between the generated output and the input content image.
#It is computed at each iteration and is used in the loss function.
class ContentLoss(nn.Module):
def __init__(self, target,):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
# print(self.loss)
return input
#Implementation of laplacian Loss, a pooling layer and a laplacian filter are applied
# to the original content image and the generated image
# MSE loss is used in place of loss function in the paper, the only effect is on the order of magnitude of the loss
# and therefore on the laplacian coefficient used later on.
class LaplacianLoss(nn.Module):
def __init__(self, target):
super(LaplacianLoss, self).__init__()
self.target = target.detach()
def forward(self, input):
        # build the filter on the same device as the target so CPU runs work too
        lp_filter = torch.tensor([[0, -1, 0], [-1, 4, -1], [0, -1, 0]],
                                 dtype=torch.float, device=self.target.device)
lp_filter = lp_filter.view(1, 1, 3, 3).repeat(1, 3, 1, 1)
# print(lp_filter.shape)
# print(self.target.shape)
# print("\n\n\n\n\n")
p_size = 2
target_conv = F.avg_pool2d(self.target, (p_size,p_size))
content_conv = F.avg_pool2d(input, (p_size,p_size))
target_conv = F.conv2d(target_conv, lp_filter, stride = 1, padding = 0)
content_conv = F.conv2d(content_conv, lp_filter, stride = 1, padding = 0)
self.loss = F.mse_loss(target_conv, content_conv)
# self.loss = ((target_conv - content_conv) ** 2).sum()
return input
#Calculation of gram matrix to compute the style loss.
def gram_matrix(input):
a, b, c, d = input.size()
# a=batch size(=1) / b=number of feature maps / (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a * b, c * d)
#Torch.mm computes the gram product of the matruix by performing a dot product between the matrix and its transpose
G = torch.mm(features, features.t())
# we 'normalize' the values of the gram matrix by dividing by the number of element in each feature maps.
return G.div(a * b * c * d)
#Style loss function which inherits from pytorch nn.nodule
#This function calculate the mean squared difference between the gram matrix of the input and the gram matrix of the generated output image.
#It is computed at each iteration and is used in the loss function paired with the content loss.
class StyleLoss(nn.Module):
def __init__(self, target,):
super(StyleLoss, self).__init__()
# we 'detach' the target content from the tree used to dynamically compute the gradient: this is a stated value, not a variable.
# Otherwise the forward method of the criterion will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(gram_matrix(input), gram_matrix(self.target))
return input
#Loading of a pretrained vgg19 model, the model parameters are downloaded the first time this code is run.
cnn = models.vgg19(pretrained = True).features.to(device).eval()
#Normalization of input image is necessary for this pretrained network.
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, input):
return ((input - self.mean) / self.std)
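
# (mean and std are reshaped to C x 1 x 1 above so they broadcast cleanly
# over an N x C x H x W image batch)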
#Defining the content and style layers that will be used to compute the loss function. Best results are achieved with early conv layers.
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
# style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5', 'conv_6', 'conv_7', 'conv_8', 'conv_9', 'conv_10', 'conv_11', 'conv_12', 'conv_13', 'conv_14', 'conv_15', 'conv_16']
#Defining model
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img, content_layers=content_layers_default,
style_layers=style_layers_default):
#Copies instead of referencing CNN parameters, it seems that it doesnt have much effect on memory usage
cnn = copy.deepcopy(cnn)
cnn = cnn[0:16] #temp
# print(cnn)
normalization = Normalization(normalization_mean, normalization_std).to(device)
content_losses = []
style_losses = []
laplacian_Loss = 0
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential to put in modules
# that are supposed to be activated sequentially
model = nn.Sequential(normalization)
i = 0
#Iterating through pretrained CNN model layers, we add each layer to our own model
# and build additionnal style and content layers depending on previously defined layers
#adding laplacian loss
target = model(content_img).detach()
laplacian_Loss = LaplacianLoss(target)
model.add_module('laplacian_loss_1', laplacian_Loss)
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
# print(name)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
#Add CNN layer to our model
model.add_module(name, layer)
if name in content_layers:
#Add content loss layer to our model which is defined by passing through the content loss function
# the input image which has been processed by previous layers of the CNN
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
#Add style loss layer to our model
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
#Removing additional layers
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses, laplacian_Loss
#Define input image as a clone of content image to speed up convergence
if dargs['random'] == 0:
input_img = content_img.clone()
else:
input_img = torch.randn(content_img.data.size(), device=device)
#Define LBFGS optimizer which converges quickly and efficiently compared to other algorithms.
#Adam uses slightly less memory but is much slower and has trouble converging.
def get_input_optimizer(input_img):
optimizer = optim.LBFGS([input_img.requires_grad_()], lr = 1, max_iter = 20, history_size = 10)
# optimizer = optim.Adam([input_img.requires_grad_()], lr = 1e-2)
return optimizer
def run_style_transfer(cnn, normalization_mean, normalization_std, content_img, style_img, input_img,
num_steps=dargs['steps'], style_weight=dargs['sw'], content_weight=dargs['cw'], laplacian_weight=dargs['lw']):
print('\n\nBuilding the style transfer model..')
model, style_losses, content_losses, laplacian_loss = get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img)
# print(model)
optimizer = get_input_optimizer(input_img)
print('Optimizing....')
run = [0]
#Iterate on num_steps
### var2 = input("\nTrain Model? (y/n) (default:y)\n")
### if var2 == "n":
### num_steps = 100
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
#??
optimizer.zero_grad()
#Run the input image through the model
model(input_img)
#Compute sum of style and content losses.
style_score, content_score, laplacian_score = 0, 0, laplacian_loss.loss
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
laplacian_score *= laplacian_weight
loss = style_score + content_score + laplacian_score
#Compute gradients based on loss function
loss.backward()
run[0] += 1
if run[0] % 50 == 49:
print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f} Laplacian Loss: {:4f}'.format(
                    style_score.item(), content_score.item(), laplacian_score.item()))
print('Memory usage: {} Mo'.format(round(torch.cuda.memory_allocated(device) / 1000000, 2)))
print('Memory cached: {} Mo'.format(round(torch.cuda.memory_cached(device) / 1000000, 2)))
print()
#Save image every 50 iterations
if run[0] % 50 == 49:
tmp = copy.deepcopy(input_img.data.clamp_(0, 1))
                scale_up_save(tmp, run[0] // 5)  # integer division keeps filenames numeric
return style_score + content_score
optimizer.step(closure)
input_img.data.clamp_(0, 1)
print('Training Finished')
### save = input("Save model ? (y/n)(default:y)\n")
### if save != "n":
### torch.save(model.state_dict(), "model_save.txt")
return input_img
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std, content_img, style_img, input_img)
i = 1
#Show final output and print time
plt.figure(figsize=(20,20))
show_image(output.detach().cpu(), 1, 1, 1)
print("Program has taken {}s to compute.".format(round(time.time() - begin, 2)))
plt.show()
| [
"[email protected]"
] | |
286c7731d8f8b34e7e7b66859dbdceddead2240a | 9b01f7d430f7ee87217618cfa4567f42635e8923 | /22-06-2017/cloudformation/nginx-demo-1/ansible/.env/lib/python2.7/site-packages/ansible/plugins/lookup/sequence.py | 677d15319af9893132e24c8017e01a2a18639c86 | [] | no_license | awsusergroupsantiago/demos | ccb045545d2a407a39d865cf19800d2b6d284b8f | e7f0dc8d9a4e8f2547c33a5a294fd76bf3ac9c9c | refs/heads/master | 2022-04-30T23:43:30.646556 | 2020-08-08T01:35:40 | 2020-08-08T01:35:40 | 95,129,959 | 2 | 0 | null | 2022-03-29T21:54:09 | 2017-06-22T15:29:25 | Python | UTF-8 | Python | false | false | 7,113 | py | # (c) 2013, Jayson Vantuyl <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from re import compile as re_compile, IGNORECASE
from ansible.compat.six.moves import xrange
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
class LookupModule(LookupBase):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
Negative numbers are not supported.
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d"
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
_, start, end, _, stride, _, format = match.groups()
if start is not None:
try:
start = int(start, 0)
except ValueError:
raise AnsibleError("can't parse start=%s as integer" % start)
if end is not None:
try:
end = int(end, 0)
except ValueError:
raise AnsibleError("can't parse end=%s as integer" % end)
if stride is not None:
try:
stride = int(stride, 0)
except ValueError:
raise AnsibleError("can't parse stride=%s as integer" % stride)
if start is not None:
self.start = start
if end is not None:
self.end = end
if stride is not None:
self.stride = stride
if format is not None:
self.format = format
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError( "must specify count or end in with_sequence")
elif self.count is not None and self.end is not None:
raise AnsibleError( "can't specify both count and end in with_sequence")
elif self.count is not None:
# convert count to end
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride >= 0:
adjust = 1
else:
adjust = -1
numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % self.format
)
def run(self, terms, variables, **kwargs):
results = []
for term in terms:
try:
self.reset() # clear out things for this iteration
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
except Exception as e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
if self.stride != 0:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception as e:
raise AnsibleError(
"unknown error generating sequence: %s" % e
)
return results
| [
"[email protected]"
] | |
451282af5adab19e1e4c182523f7cc39af24bd1b | 8034b64a63e9c23e255a2171cd441512b60a8316 | /tasks.py | a3083f5a3b60dc8bc5418966c39c7972fd920447 | [
"MIT"
] | permissive | Design-Machine-Group/compas_roomacoustics | bb1e90bc5f0764209c44fc71831db101b90bc5a5 | 1eb166fa56a636222d985b542f5ce4cc4453ce62 | refs/heads/master | 2022-12-12T03:07:29.243427 | 2020-08-25T17:48:08 | 2020-08-25T17:48:08 | 210,474,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import contextlib
import glob
import os
import sys
from shutil import rmtree
from invoke import Exit
from invoke import task
try:
input = raw_input
except NameError:
pass
BASE_FOLDER = os.path.dirname(__file__)
class Log(object):
def __init__(self, out=sys.stdout, err=sys.stderr):
self.out = out
self.err = err
def flush(self):
self.out.flush()
self.err.flush()
def write(self, message):
self.flush()
self.out.write(message + '\n')
self.out.flush()
def info(self, message):
self.write('[INFO] %s' % message)
def warn(self, message):
self.write('[WARN] %s' % message)
log = Log()
def confirm(question):
while True:
response = input(question).lower().strip()
if not response or response in ('n', 'no'):
return False
if response in ('y', 'yes'):
return True
print('Focus, kid! It is either (y)es or (n)o', file=sys.stderr)
@task(default=True)
def help(ctx):
"""Lists available tasks and usage."""
ctx.run('invoke --list')
log.write('Use "invoke -h <taskname>" to get detailed help for a task.')
@task(help={
'docs': 'True to clean up generated documentation, otherwise False',
'bytecode': 'True to clean up compiled python files, otherwise False.',
'builds': 'True to clean up build/packaging artifacts, otherwise False.'})
def clean(ctx, docs=True, bytecode=True, builds=True):
"""Cleans the local copy from compiled artifacts."""
with chdir(BASE_FOLDER):
if builds:
ctx.run('python setup.py clean')
if bytecode:
for root, dirs, files in os.walk(BASE_FOLDER):
for f in files:
if f.endswith('.pyc'):
os.remove(os.path.join(root, f))
if '.git' in dirs:
dirs.remove('.git')
folders = []
if docs:
folders.append('docs/api/generated')
folders.append('dist/')
if bytecode:
for t in ('src', 'tests'):
folders.extend(glob.glob('{}/**/__pycache__'.format(t), recursive=True))
if builds:
folders.append('build/')
folders.append('src/compas_roomacoustics.egg-info/')
for folder in folders:
rmtree(os.path.join(BASE_FOLDER, folder), ignore_errors=True)
@task(help={
'rebuild': 'True to clean all previously built docs before starting, otherwise False.',
'doctest': 'True to run doctests, otherwise False.',
'check_links': 'True to check all web links in docs for validity, otherwise False.'})
def docs(ctx, doctest=False, rebuild=True, check_links=False):
"""Builds package's HTML documentation."""
if rebuild:
clean(ctx)
with chdir(BASE_FOLDER):
if doctest:
ctx.run('sphinx-build -E -b doctest docsource docs')
ctx.run('sphinx-build -E -b html docsource docs')
if check_links:
ctx.run('sphinx-build -E -b linkcheck docsource docs')
@task()
def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
with chdir(BASE_FOLDER):
log.write('Checking MANIFEST.in...')
ctx.run('check-manifest --ignore-bad-ideas=test.so,fd.so,smoothing.so,drx_c.so')
log.write('Checking metadata...')
ctx.run('python setup.py check --strict --metadata')
# log.write('Running flake8 python linter...')
# ctx.run('flake8 src tests setup.py')
# log.write('Checking python imports...')
# ctx.run('isort --check-only --diff --recursive src tests setup.py')
@task(help={
'checks': 'True to run all checks before testing, otherwise False.'})
def test(ctx, checks=False, doctest=False):
"""Run all tests."""
if checks:
check(ctx)
with chdir(BASE_FOLDER):
cmd = ['pytest']
if doctest:
cmd.append('--doctest-modules')
ctx.run(' '.join(cmd))
@task
def prepare_changelog(ctx):
"""Prepare changelog for next release."""
UNRELEASED_CHANGELOG_TEMPLATE = '## Unreleased\n\n### Added\n\n### Changed\n\n### Removed\n\n\n## '
with chdir(BASE_FOLDER):
# Preparing changelog for next release
with open('CHANGELOG.md', 'r+') as changelog:
content = changelog.read()
changelog.seek(0)
changelog.write(content.replace(
'## ', UNRELEASED_CHANGELOG_TEMPLATE, 1))
ctx.run('git add CHANGELOG.md && git commit -m "Prepare changelog for next release"')
@task(help={
'release_type': 'Type of release follows semver rules. Must be one of: major, minor, patch.'})
def release(ctx, release_type):
"""Releases the project in one swift command!"""
if release_type not in ('patch', 'minor', 'major'):
raise Exit('The release type parameter is invalid.\nMust be one of: major, minor, patch')
# Run checks
ctx.run('invoke check test')
# Bump version and git tag it
ctx.run('bumpversion %s --verbose' % release_type)
# Build project
ctx.run('python setup.py clean --all sdist bdist_wheel')
# Upload to pypi
if confirm('You are about to upload the release to pypi.org. Are you sure? [y/N]'):
files = ['dist/*.whl', 'dist/*.gz', 'dist/*.zip']
dist_files = ' '.join([pattern for f in files for pattern in glob.glob(f)])
if len(dist_files):
ctx.run('twine upload --skip-existing %s' % dist_files)
prepare_changelog(ctx)
else:
raise Exit('No files found to release')
else:
raise Exit('Aborted release')
@contextlib.contextmanager
def chdir(dirname=None):
current_dir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(current_dir)
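
# chdir is a small re-entrant context manager: tasks run from BASE_FOLDER and
# the caller's working directory is restored even when a task raises.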
| [
"[email protected]"
] | |
b48812a7cf6c9c96ef3032fea5de658e810a739d | 6527c6f9e0e3c81090a87894e1f9759d3ad9ca3e | /main_app/apps/delivery/delivery_exceptions.py | 3d0f1b6d469473870daf72f0eef318dd6684b4bd | [] | no_license | worlddeleteRin/eda_edet_backend | 7692611facc191b5a987dcc6a98b3c1e27ab1cff | 593a77bcc8f9619b9db6e99c92b2880716b450e3 | refs/heads/master | 2023-08-24T03:38:34.809418 | 2021-10-29T15:21:38 | 2021-10-29T15:21:38 | 405,609,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from fastapi import HTTPException, status
class DeliveryMethodNotExist(HTTPException):
    def __init__(self):
        # initialise the parent HTTPException so the status code and detail
        # flow through FastAPI's normal error handling
        super().__init__(status_code=status.HTTP_400_BAD_REQUEST,
                         detail="DeliveryMethod does not exist")
| [
"[email protected]"
] | |
4526c354576f6fc27aea54a02a8cf7891d6e97a3 | dc7daedfaa6207380873a5243888cc8dc9b410c4 | /CataractProject_New/InformTable/migrations/0005_auto_20190923_0922.py | 9200708197aa2fb151ad6e5049aee74f774a7725 | [
"MIT"
] | permissive | Pangxiaox/Cataract-Platform | bd25ca897be8e333a34a05c70e91cb612e34c187 | 605293093f8a9c28b33e29ab4253f8e5c407788f | refs/heads/master | 2020-07-15T08:33:35.589392 | 2019-11-04T16:53:25 | 2019-11-04T16:53:25 | 205,521,190 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # Generated by Django 2.2.5 on 2019-09-23 01:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('InformTable', '0004_auto_20190922_2231'),
]
operations = [
migrations.AlterField(
model_name='eyeexamine',
name='left_intraocularpressure',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='eyeexamine',
name='left_vision',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='eyeexamine',
name='right_intraocularpressure',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='eyeexamine',
name='right_vision',
field=models.FloatField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
5fe18f1d52089d6f4a506a552bc32aadb587f980 | ed73bdb29c31f776fe22f7cae545b03084515bc2 | /stack_usingLL.py | 102dead96b9f53104ffcc4eff9ab9ebc9d2bc8ac | [] | no_license | pratiyush23/lecture0 | 8eca4d8ce22de717a01a72be5b4fb1511a8c46e7 | 0ad901bdce37cfbf0c6d3df5fbee5dcd628cda37 | refs/heads/master | 2021-05-23T08:16:00.913054 | 2020-12-25T12:45:44 | 2020-12-25T12:45:44 | 253,193,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # class to create new node
class Node:
def __init__(self,data):
self.data = data
self.next = None
#class to implement stack
class stack:
    # to create a stack using a LL, we need to initialise the head
def __init__(self):
self.__head = None
self.__count = 0
def push(self,element):
newnode = Node(element)
newnode.next = self.__head
self.__head = newnode
self.__count = self.__count+1
def pop(self):
if self.isEmpty() is True:
print("stack is empty")
return
data = self.__head.data
self.__head = self.__head.next
        self.__count = self.__count - 1  # removing an element shrinks the stack
return data
def top(self):
if self.isEmpty() is True:
print("stack is empty")
return
data = self.__head.data
return data
def size(self):
return self.__count
def isEmpty(self):
return self.size() == 0
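
# Quick LIFO demo (illustrative, not part of the original file):
#   s = stack()
#   s.push(1); s.push(2)
#   print(s.pop(), s.pop())  # -> 2 1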
print("Hello world") | [
"[email protected]"
] | |
7fcee8f82bd039ff027fc4ed9a2739cd4af5e95b | fb4bdb96dc895fe3d0d59e8087682499446d3338 | /lab7/demoo/asgi.py | 3c5792859cf22232d77073fd9f0fa01deef08ed1 | [] | no_license | aiddana19/web | 649fe21880a761bab7a7c008c71352c601970c9b | 3554f2ea7397a1a2ef67c5e2258ba138f1964923 | refs/heads/main | 2023-04-06T21:11:56.891931 | 2021-04-14T21:27:50 | 2021-04-14T21:27:50 | 336,527,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for demoo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demoo.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
be0e4151b1eef7e095765d3b69de5b4a280c88ab | 7207c610f771a20c7afd4e5e48ba79156ac621a9 | /MultiObjectiveModel_YMPNet_Pavan/genrate_3dmodel/nwrite_ply_seg.py | 4794a4e5ccd85b609b3301199aef1831635f7611 | [
"MIT"
] | permissive | eva5covergence/EVA5_AI_Projects_new | 56694909da2a057f85f47ebce0d04217a730f8c5 | 7052373c52b6b9901cd0bc05a4758dd4b63f7480 | refs/heads/master | 2023-01-22T11:40:14.236296 | 2020-12-06T18:09:56 | 2020-12-06T18:09:56 | 293,308,541 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,853 | py | import os
import cv2
import random
import numpy as np
from PIL import Image
from distutils.version import LooseVersion
from sacred import Experiment
from easydict import EasyDict as edict
import torch
import torch.nn.functional as F
import torchvision.transforms as tf
from models.baseline_same import Baseline as UNet
from utils.disp import tensor_to_image
from utils.disp import colors_256 as colors
from bin_mean_shift import Bin_Mean_Shift
from modules import get_coordinate_map
from utils.loss import Q_loss
from instance_parameter_loss import InstanceParameterLoss
ex = Experiment()
folder = './outputs'
index = 0
@ex.main
def predict(_run, _log):
cfg = edict(_run.config)
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# build network
network = UNet(cfg.model)
if not (cfg.resume_dir == 'None'):
model_dict = torch.load(cfg.resume_dir, map_location=lambda storage, loc: storage)
network.load_state_dict(model_dict)
# load nets into gpu
if cfg.num_gpus > 1 and torch.cuda.is_available():
network = torch.nn.DataParallel(network)
network.to(device)
network.eval()
transforms = tf.Compose([
tf.ToTensor(),
tf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
bin_mean_shift = Bin_Mean_Shift(device=device)
k_inv_dot_xy1 = get_coordinate_map(device)
instance_parameter_loss = InstanceParameterLoss(k_inv_dot_xy1)
h, w = 192, 256
focal_length = 517.97
offset_x = 320
offset_y = 240
K = [[focal_length, 0, offset_x],
[0, focal_length, offset_y],
[0, 0, 1]]
K_inv = np.linalg.inv(np.array(K))
K_inv_dot_xy_1 = np.zeros((3, h, w))
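    # precompute the back-projected ray K^-1 * [x, y, 1] for every pixel
    # (pixel coordinates are rescaled from 192x256 to the 480x640 intrinsics)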
for y in range(h):
for x in range(w):
yy = float(y) / h * 480
xx = float(x) / w * 640
ray = np.dot(K_inv,
np.array([xx, yy, 1]).reshape(3, 1))
K_inv_dot_xy_1[:, y, x] = ray[:, 0]
with torch.no_grad():
image = cv2.imread(cfg.image_path)
# the network is trained with 192*256 and the intrinsic parameter is set as ScanNet
image = cv2.resize(image, (w, h))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = transforms(image)
image = image.to(device).unsqueeze(0)
# forward pass
logit, embedding, _, _, param = network(image)
prob = torch.sigmoid(logit[0])
# infer per pixel depth using per pixel plane parameter, currently Q_loss need a dummy gt_depth as input
_, _, per_pixel_depth = Q_loss(param, k_inv_dot_xy1, torch.ones_like(logit))
# fast mean shift
segmentation, sampled_segmentation, sample_param = bin_mean_shift.test_forward(
prob, embedding[0], param, mask_threshold=0.1)
# since GT plane segmentation is somewhat noise, the boundary of plane in GT is not well aligned,
# we thus use avg_pool_2d to smooth the segmentation results
b = segmentation.t().view(1, -1, h, w)
pooling_b = torch.nn.functional.avg_pool2d(b, (7, 7), stride=1, padding=(3, 3))
b = pooling_b.view(-1, h*w).t()
segmentation = b
# infer instance depth
        instance_loss, instance_depth, instance_abs_distance, instance_parameter = instance_parameter_loss(
segmentation, sampled_segmentation, sample_param, torch.ones_like(logit), torch.ones_like(logit), False)
# return cluster results
segmentation = segmentation.cpu().numpy().argmax(axis=1)
# mask out non planar region
segmentation[prob.cpu().numpy().reshape(-1) <= 0.1] = 20
segmentation = segmentation.reshape(h, w)
# visualization and evaluation
image = tensor_to_image(image.cpu()[0])
mask = (prob > 0.1).float().cpu().numpy().reshape(h, w)
depth = instance_depth.cpu().numpy()[0, 0].reshape(h, w)
per_pixel_depth = per_pixel_depth.cpu().numpy()[0, 0].reshape(h, w)
# use per pixel depth for non planar region
depth = depth * (segmentation != 20) + per_pixel_depth * (segmentation == 20)
# change non planar to zero, so non planar region use the black color
segmentation += 1
segmentation[segmentation == 21] = 0
pred_seg = cv2.resize(np.stack([colors[segmentation, 0],
colors[segmentation, 1],
colors[segmentation, 2]], axis=2), (w, h))
# blend image
blend_pred = (pred_seg * 0.4 + image * 0.6).astype(np.uint8)
mask = cv2.resize((mask * 255).astype(np.uint8), (w, h))
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# visualize depth map as PlaneNet
depth = 255 - np.clip(depth / 5 * 255, 0, 255).astype(np.uint8)
depth = cv2.cvtColor(cv2.resize(depth, (w, h)), cv2.COLOR_GRAY2BGR)
image_c = np.concatenate((image, pred_seg, blend_pred, mask, depth), axis=1)
imageFilename = str(index) + '_model_texture.png'
cv2.imwrite(folder + '/' + imageFilename, image_c)
# create face from segmentation
faces = []
for y in range(h-1):
for x in range(w-1):
segmentIndex = segmentation[y, x]
# ignore non planar region
if segmentIndex == 0:
continue
# add face if three pixel has same segmentatioin
depths = [depth[y][x], depth[y + 1][x], depth[y + 1][x + 1]]
if segmentation[y + 1, x] == segmentIndex and segmentation[y + 1, x + 1] == segmentIndex and np.array(depths).min() > 0 and np.array(depths).max() < 10:
faces.append((x, y, x, y + 1, x + 1, y + 1))
depths = [depth[y][x], depth[y][x + 1], depth[y + 1][x + 1]]
if segmentation[y][x + 1] == segmentIndex and segmentation[y + 1][x + 1] == segmentIndex and np.array(depths).min() > 0 and np.array(depths).max() < 10:
faces.append((x, y, x + 1, y + 1, x + 1, y))
with open(folder + '/' + str(index) + '_model.ply', 'w') as f:
header = """ply
format ascii 1.0
comment VCGLIB generated
comment TextureFile """
header += imageFilename
header += """
element vertex """
header += str(h * w)
header += """
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
element face """
header += str(len(faces))
header += """
property list uchar int vertex_indices
property list uchar float texcoord
end_header
"""
f.write(header)
for y in range(h):
for x in range(w):
segmentIndex = segmentation[y][x]
                    # non-planar pixels were remapped to 0 above; emit a dummy
                    # vertex with colour values so the count matches the header
                    if segmentIndex == 0:
                        f.write("0.0 0.0 0.0 0 0 0\n")
                        continue
ray = K_inv_dot_xy_1[:, y, x]
X, Y, Z = ray * depth[y, x]
R, G, B = image[y,x]
f.write(str(X) + ' ' + str(Y) + ' ' + str(Z) + ' ' + str(R) + ' ' + str(G) + ' ' + str(B) + '\n')
for face in faces:
f.write('3 ')
for c in range(3):
f.write(str(face[c * 2 + 1] * w + face[c * 2]) + ' ')
f.write('6 ')
for c in range(3):
f.write(str(float(face[c * 2]) / w) + ' ' + str(1 - float(face[c * 2 + 1]) / h) + ' ')
f.write('\n')
        return
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
'PyTorch>=0.4.0 is required'
ex.add_config('./configs/predict.yaml')
ex.run_commandline() | [
"[email protected]"
] | |
9766f5ae5551f7b344ac55fbab96416af2689fc8 | 0bcf4b5027080223031b3c697408cedd7798afaa | /common/xrd-ui-tests-python/tests/xroad_audit_log/XroadAuditLog.py | 3d0ad6aac1fad359d20d0a1adc0d4f58c919f9e3 | [
"MIT"
] | permissive | asaquality/X-Road-tests | 0fbfcf64a41ff4bc23f0083e35f834fab951ae24 | 51208cb7b06619f391c881e938a270c11ff00fab | refs/heads/develop | 2021-01-15T23:20:55.096511 | 2018-06-13T13:28:06 | 2018-06-13T13:28:06 | 99,924,513 | 0 | 2 | null | 2017-08-10T13:11:48 | 2017-08-10T13:11:48 | null | UTF-8 | Python | false | false | 3,303 | py | # coding=utf-8
import unittest
import tests.xroad_audit_log.audit_log as audit_log
from main.maincontroller import MainController
import json
import sys
class XroadAuditLog(unittest.TestCase):
"""
Stand-alone test for checking X-Road audit.log after running tests that generate log entries.
Gets the parameters of the server to connect to from the configuration, specified by parameter audit.server.
RIA URL: https://jira.ria.ee/browse/XTKB-8
Depends on finishing other test(s):
Requires helper scenarios:
X-Road version: 6.16.0
"""
def test_xroad_audit_log(self):
'''
audit.log checking test. Checks if audit.log of a specified server contains specified (in configuration or
command-line parameters) entries. Test succeeds if all of the entries are found; fails otherwise.
:return: None
'''
main = MainController(self)
# Set test name and number
main.test_number = 'XroadAuditLog'
main.test_name = self.__class__.__name__
# Get parameters from the configuration file.
# We can supply a "server" name to this test. This means that it uses this name as a category name and
# fetches ssh_host, ssh_user and ssh_pass of this category. For example, you can set audit.server=ss1 and
# the values that are used are ss1.ssh_host, ss1.ssh_user, and ss1.ssh_pass respectively.
audit_server = main.config.get('audit.server')
if audit_server is not None:
# audit.server was supplied so we're reading data from the sections
xroad_server = main.config.get('{0}.ssh_host'.format(audit_server))
ssh_username = main.config.get('{0}.ssh_user'.format(audit_server))
ssh_password = main.config.get('{0}.ssh_pass'.format(audit_server))
else:
# If audit.server was not supplied, we read each parameter separately
xroad_server = main.config.get('audit.ssh_host')
ssh_username = main.config.get('audit.ssh_user')
ssh_password = main.config.get('audit.ssh_pass')
# Get logfile
logfile = main.config.get('audit.logfile')
# Get data to be checked
check_json = main.config.get('audit.check-logs')
# Read data from this line
from_line = main.config.get_int('audit.from-line', 0)
# Because the supplied parameter may also be a string, use try-except
try:
check_entries = json.loads(check_json)
except (ValueError, TypeError):
check_entries = [check_json]
sys.exc_clear()
# Configure the service
test_audit_log = audit_log.test_audit_log(case=main, xroad_server=xroad_server, ssh_username=ssh_username,
ssh_password=ssh_password, logfile=logfile)
try:
# Run audit.log checking
test_audit_log(check_lines=check_entries, from_line=from_line)
except:
main.log('XroadAuditLog: audit.log check failed for: {0}'.format(check_json))
main.save_exception_data()
raise
finally:
# Test teardown
main.tearDown()
| [
"[email protected]"
] | |
3b830d17f916075a8bf9d0a4962c5d1436ba5103 | 3ae34c59cf0a0eb1ad02fc89bfa69acf32e3a581 | /code/utils/patch.py | d712cf7090aea1f399b5f8342817551a8199d278 | [] | no_license | hyliang96/pytorch_template | 883b06658521ca214a2ee7a74cf2227cf1d26047 | 3031e812b9e1bab5955256cb21d303ae6d5f7ffc | refs/heads/master | 2021-07-10T09:47:24.610579 | 2020-11-08T20:24:03 | 2020-11-08T20:24:03 | 203,366,515 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # __all__ = ["_tqdm", "_meter", "_state_dict", "_misc"]
# from ._tqdm import *
# from ._meter import *
# from ._state_dict import *
# from ._misc import *
# from _tqdm import *
# from _meter import *
# from _state_dict import *
# from _misc import *
from . import _tqdm
from . import _meter
from . import _state_dict
from . import _misc | [
"[email protected]"
] | |
8114a5dbf8b1606bf1387ec8f8939d36c5ea1eaf | 1abb5ab0580d69b11d1688424adf1e624c554a7a | /gitdemo/wsgi.py | 3ce89c28f219afa3c2dab7cad42bdda0fc01776a | [] | no_license | wzw5566/gitdemo | 2994e768168f840cf9bb4e19a32ef49025fdde14 | efe89493b1801830d3e445c049eabd52191fa527 | refs/heads/master | 2021-01-19T14:46:06.197242 | 2017-04-01T00:07:34 | 2017-04-01T00:07:34 | 86,638,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for gitdemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gitdemo.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
5288b19cab98fb5e5d327276ad479db412c99148 | 51c13596cb3f56380c847ee6e5b888963f887929 | /program.py | 9761142394236bf414d37280b0286b3f7c13dcfe | [] | no_license | j-skalicky/shamir | 9144c1d7cd12d040c7b88223ee4256fbf8c01750 | 9412255cc7c94e2311ab266a7d33a751c1995850 | refs/heads/main | 2023-07-05T14:48:44.639094 | 2021-09-04T12:10:24 | 2021-09-04T12:10:24 | 399,807,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,196 | py | from shamir import ShamirSecretShare
from base64 import b32decode, b32encode, decode
import os
class Program:
def __init__(self) -> None:
self.shamir = ShamirSecretShare()
def intro(self) -> None:
print('\nVítejte! Toto je jednoduchý program na Shamirovo sdílení tajemství.\n\n')
#print('Současné nastavení: ')
print('Možnosti:\n\t1 zadat tajné heslo a rozdělit ho mezi několik účastníků\n'
'\t2 zadat jednotlivé části a rekonstruovat celé tajemství\n')
self.main_menu()
def main_menu(self) -> None:
r = input("Co chcete dělat? Zadejte 1 nebo 2 a stiskněte ENTER: ")
if r == '1':
self.split_secret()
elif r == '2':
self.enter_secret()
else:
self.cls()
print('Musíte zadat 1 nebo 2, nic jiného. Zkuste to znovu.\n\n')
self.main_menu()
def split_secret(self) -> None:
self.cls()
secret = input("Zadejte své tajemství, které chcete rozdělit: ")
self.shamir.set_message(secret)
shares = self.shamir.split_secret()
self.cls()
print('Zadané tajemství bylo rozděleno na následující části:\n')
for i in range(1, len(shares)+1):
print('\t{:d}: {:s}'.format(i, self.share_to_string(shares[i])))
print('\nPamatujte, k obnovení tajemství stačí jen {:d} z {:d} částí.'.format(self.shamir.get_threshold(), self.shamir.get_holders()))
def enter_secret(self) -> None:
if self.shamir.is_solvable():
self.show_result()
return
print('Už mám {:d} část(i), potřebuji aspoň {:d}.\n'.format(self.shamir.get_shares_count(), self.shamir.get_threshold()))
s = input('Zadejte 1 a ENTER k zadání další části tajemství - anebo cokoliv jiného pro ukončení programu: ')
if s == '1':
# secret number
i = input('Zadejte číslo části tajemství: ')
try:
i = int(i)
# secret value
s = input('Zadejte část tajemství: ')
# convert the Base32-encoded value to int and add it to the ShamirShare object
b = b32decode(self.remove_human_readability(s))
r = int.from_bytes(b, byteorder='big')
self.cls()
self.shamir.add_share(i, r)
except:
print('Hmm, špatné zadání, zkuste to znovu.\n\n')
self.enter_secret()
else:
s = input('Opravdu opustit program? (y/n) ')
if s == 'y':
print('Ukončování...\n\n')
quit()
else:
self.enter_secret()
def show_result(self) -> None:
if self.shamir.is_solvable():
secret = self.shamir.reconstruct_secret()
print('HOORAY! Podařilo se odkrýt skryté tajemství!\n\nSkrytá tajná hodnota je: {:s}\n\nA nezapomeňte: with great power comes great responsibility! :-)\n\n\n'.format(secret))
def remove_human_readability(self, val: str):
noDashes = val.replace('-', '').upper()
padLen = (len(noDashes) * 5) % 8
if padLen > 0:
padLen = 8 - padLen
return noDashes + ('=' * padLen)
def string_to_human_readable(self, val: str):
val = val.strip('=')
r = ''
for i in range(0, len(val), 5):
r += val[i:i+5] + '-'
return r[:-1]
def share_to_string(self, share: int) -> str:
shareBytes = self.remove_zeros((share).to_bytes(self.shamir.get_max_bytes(), byteorder='big'))
return self.string_to_human_readable(b32encode(shareBytes).decode('utf-8'))
def remove_zeros(self, b: bytes) -> bytes:
if len(b) < 1:
return b
split = -1
for i in range(len(b)):
if b[i] == 0:
split = i
else:
break
if split >= 0:
b = b[split+1:]
return b
def cls(self):
os.system('cls' if os.name=='nt' else 'clear') | [
"[email protected]"
] | |
aef0c8480c917bc229594cae6ec37dddd5516166 | 546646a49b5cc0a19c3659ecab21743401c1f418 | /Problem 0010/main.py | 834113b49cbbf92ae9a33748e3f637e4a7b3583a | [] | no_license | Kippiii/Project-Euler | 12c2bd336c27e7408aee730f6a6094fc4afae8e0 | 2f1218929aab4c580882f8078572fb55e8fbc100 | refs/heads/master | 2020-07-07T06:38:46.627887 | 2019-08-23T02:44:55 | 2019-08-23T02:44:55 | 203,280,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | LENGTH = 2000000
def attempt_removal(n, nums):
for i in range(2 * n, LENGTH + 1, n):
nums[i] = 0
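# Sieve of Eratosthenes over [2, LENGTH]: every composite gets zeroed by its
# smallest prime factor before the scan below reaches it, so attempt_removal
# only ever runs for primes.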
nums = [0] * (LENGTH + 1)
for i in range(2, len(nums)):
    nums[i] = i
for n in nums:
if n != 0:
attempt_removal(n, nums)
total = 0
for i in nums:
    total += i
print(f'The sum is {total}.')
"[email protected]"
] | |
04bdd549e01ff64fbc6ecf237ca1619467cc6d2a | fb8dfbd2758ed01995687f2ce46923c96d86506f | /bikeshare.py | f302474ccac3f660854f7c568a587a0380684677 | [] | no_license | dhruv-3d/bikeshare-data-analysis | a23aab90b9d74b93d726bfc504ef65f419b0e658 | 1615c66e13cd916bc103e275379eaefcb109d075 | refs/heads/master | 2022-12-12T15:17:38.340860 | 2021-02-16T21:49:31 | 2021-02-16T21:49:31 | 143,008,522 | 0 | 0 | null | 2021-06-01T22:32:59 | 2018-07-31T12:02:39 | Python | UTF-8 | Python | false | false | 8,841 | py | import time
import pandas as pd
import numpy as np
CITY_DATA = {'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv'}
DAYS = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday', 'Saturday']
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June']
def get_filters():
"""
    Asks the user to specify a city, and optionally a month and day, to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('='*60)
print("\nHello! Let\'s explore some US bikeshare data!")
print("Available cities are:- \n \
chicago\n \
new york city\n \
washington")
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = ''
while city not in ['chicago', 'new york city', 'washington']:
if city != '':
print('Wrong input, please enter a city name again from above.')
city = input('Enter city name to analyze: ').lower()
more_filter = input(
        'Would you like to explore the bikeshare data for a particular month and day? \
Enter yes or no.\n').lower()
if more_filter == 'yes':
# get user input for month (all, january, february, ... , june)
print('\nThe months range from January to June.')
month = input(
"Enter name of the month to filter by, or 'all' to apply no month filter:\n")
# get user input for day of week (all, monday, tuesday, ... sunday)
day = input(
"Enter name of the day of week to filter by, or 'all' to apply no day filter:\n")
else:
month = 'all'
day = 'all'
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
print('\nPreparing the data...\n')
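    # the city CSV files are expected under ./web_app/ relative to the
    # working directory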
df = pd.read_csv('./web_app/' + CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour # hour column
df['routes'] = df['Start Station'] + ' to ' + \
df['End Station'] # station combination
try:
# filtering by month if applicable
if month != 'all':
# use the index of the MONTHS list to get the corresponding int
month = MONTHS.index(month.title()) + 1
df = df[df['month'] == month]
# filtering by day of week if applicable
if day != 'all':
df = df[df['day_of_week'] == day.title()]
return df
    except Exception:
        raise
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# Display the most common month
common_month = df['month'].mode()[0]
common_month = MONTHS[common_month-1].title()
print("\nThe most popular month is: ", common_month)
# Display the most common day of week
common_day = df['day_of_week'].mode()[0]
print("\nThe most popular day of the week is: ", common_day)
# Display the most common start hour
df['hour'] = df['Start Time'].dt.hour
common_hour = df['hour'].mode()[0]
print("\nThe most popular starting hour is: ", common_hour)
print("\nThis took %.5s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# Display most commonly used start station
popular_start_st = df['Start Station'].mode()[0]
print("\nThe most popular start station is: ", popular_start_st)
# Display most commonly used end station
popular_end_st = df['End Station'].mode()[0]
print("\nThe most popular end station is: ", popular_end_st)
# Display most frequent combination of start station and end station trip
pop = df['routes'].value_counts()
    # index of the most frequent value in the "routes" column
popular_route = pop.idxmax()
print('\nMost popular route from Start Station to End Station is from\n', popular_route)
print("\nThis took %.5s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# Display total travel time
total_time = df['Trip Duration'].sum()
print("\nTotal travel time of all the trips is %d hours and %d minutes" %
(total_time/3600, (total_time/60) % 60))
# Display mean travel time
mean_time = df['Trip Duration'].mean()
print("\nMean travel time of all the trips is %d minutes and %d seconds" %
(mean_time/60, mean_time % 60))
print("\nThis took %.5s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
if('User Type' in df):
# Display counts of user types
user_types = df['User Type'].value_counts()
print("The types of users and their counts:-\n")
for i in range(len(user_types)):
print("%s: %s" %
(user_types.index[i], user_types[user_types.index[i]]))
if('Gender' in df):
# Display counts of gender
gender_counts = df['Gender'].value_counts()
print("\nGender counts of user:-\n")
for i in range(len(gender_counts)):
print("%s: %s" %
(gender_counts.index[i], gender_counts[gender_counts.index[i]]))
if('Birth Year' in df):
# Display earliest, most recent, and most common year of birth
most_recent_yob = int(df['Birth Year'].max())
earliest_yob = int(df['Birth Year'].min())
most_common_yob = int(df['Birth Year'].mode()[0])
print(
"\n Earliest year of birth: {} \
\n Most recent year of birth: {} \
\n Most common year of birth: {} "
.format(earliest_yob, most_recent_yob, most_common_yob)
)
print("\nThis took %.5s seconds." % (time.time() - start_time))
print('-'*40)
def main():
while True:
city, month, day = get_filters()
try:
df = load_data(city, month, day)
stat_choice = input(
'For what do you want the insights for?\n \
1. Regarding the users\n \
2. Regarding popular stations\n \
3. Regarding the most frequent times of travel\n \
4. Regarding trip durations\n \
5. For all of the above\n \
6. Show me the Raw Data\n \
Enter a number of your choice. \n'
)
if int(stat_choice) == 1:
user_stats(df)
elif int(stat_choice) == 2:
station_stats(df)
elif int(stat_choice) == 3:
time_stats(df)
elif int(stat_choice) == 4:
trip_duration_stats(df)
elif int(stat_choice) == 5:
user_stats(df)
station_stats(df)
time_stats(df)
trip_duration_stats(df)
elif int(stat_choice) == 6:
print('-'*140 + '\nRaw data\n' + '-'*140)
print(df.head(10))
restart = input(
'\nWould you like to explore some more? Enter yes or no.\n')
if restart.lower() != 'yes':
break
        except Exception:
print(
'\nSomething went wrong! You might have entered something incorrectly.\n' +
'Things you might have entered incorrect:\n' +
'>> Enter a "Number" as your choice.\n' +
'>> Enter a full name of the "Month" or "Day".\n' +
'Please try again...'
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
432a1ed4d5c68641381ae0c6964b987b2d21b412 | f2357c7fded6032e4d488e8f4437a089bab5454b | /numpy예제/소스코드_예제/ch10/ex10-2.py | d4aa5e401bba0f5390baa4e761f2483250a57569 | [] | no_license | stufit/pycharm_test | 094ba681b3bc5d27a827aa1ed12305da8be7ce30 | aa98cd1d871596a71c52d068d08c1a529e5f13cd | refs/heads/master | 2023-02-07T12:41:04.881144 | 2020-12-28T04:05:14 | 2020-12-28T04:05:14 | 324,903,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | #!/usr/bin/env python
# coding: utf-8
# # 10.2 Numpy를 이용한 plot 기능
# In[1]:
#p164
get_ipython().run_line_magic('matplotlib', 'inline')
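# note: this script is a notebook export; the magic call above only works
# when run under IPython/Jupyter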
import numpy as np
points = np.array([[1,1], [1,2], [1,3], [2,1], [2,2], [2,3], [3,1], [3,2], [3,3]])
p = np.array([2.5, 2])
import matplotlib.pyplot as plt
plt.plot(points[:,0], points[:,1], "ro")
plt.plot(p[0], p[1], "bo")
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 5, 10)
y = x**2
plt.plot(x, y);
# In[3]:
#p166
x = np.linspace(0, 10, 20)
y = x**2.0
plt.plot(x, y, "bo-", linewidth=3, markersize=5);
# In[4]:
plt.plot(x, y, "gs-", linewidth=1, markersize=3);
# In[5]:
#p167
x = np.linspace(0, 10, 20)
y1 = x**2.0
y2 = x**1.5
plt.plot(x, y1, "bo-", linewidth=2, markersize=12, label="First")
plt.plot(x, y2, "gs-", linewidth=2, markersize=12, label="Second")
plt.xlabel("X")
plt.ylabel("Y")
plt.axis([-0.5, 10.5, -5, 105])
plt.legend(loc="upper left")
plt.savefig("mplot.pdf")
# In[6]:
x = np.logspace(-1, 1, 20)
y1 = x**2.0
y2 = x**1.5
plt.plot(x, y1, "bo-", linewidth=2, markersize=5, label="First")
plt.plot(x, y2, "gs-", linewidth=2, markersize=5, label="Second")
plt.xlabel("X")
plt.ylabel("Y")
plt.axis([-0.5, 10.5, -5, 105])
plt.legend(loc="upper left")
plt.savefig("mplot.pdf")
# In[7]:
#p169
import matplotlib.pyplot as plt
import numpy as np
x = np.random.standard_normal(size=1000)
plt.hist(x)
# In[8]:
plt.hist(x, density=True)
# In[9]:
#p170
import matplotlib.pyplot as plt
import numpy as np
x = np.random.randn(1000)
plt.hist(x)
# In[10]:
import numpy as np
import matplotlib.pyplot as plt
x = np.random.rand(30)
y = np.random.rand(30)
colors = np.random.rand(30)
shape = np.pi * (np.random.rand(30)*20) **2
plt.scatter(x, y, s=shape, c = colors, marker='*', alpha=0.7)
plt.show()
# In[ ]:
| [
"[email protected]"
] | |
9bbd0defc841cae9736b86805843e05884596ebc | f137f67d96a83cb698a8f1bfc4547c3a8acf324c | /Chapter01/keras_MINST_V3.py | 25d3357a9a695cd19e8e736ed33ed7d51dca0813 | [
"MIT"
] | permissive | PacktPublishing/Deep-Learning-with-Keras | 55740afe4680d9f8aaaa2232890cfcef462ec134 | 44b2b7e11e0db168ff861e91de832d4b22d80414 | refs/heads/master | 2023-01-23T02:23:23.560272 | 2023-01-18T09:24:20 | 2023-01-18T09:24:20 | 88,967,358 | 1,151 | 796 | MIT | 2018-10-05T16:43:05 | 2017-04-21T09:31:27 | Jupyter Notebook | UTF-8 | Python | false | false | 2,528 | py | from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import matplotlib.pyplot as plt
np.random.seed(1671) # for reproducibility
# network and training
NB_EPOCH = 250
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10 # number of outputs = number of digits
OPTIMIZER = SGD() # optimizer, explained later in this chapter
N_HIDDEN = 128
VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
DROPOUT = 0.3
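# DROPOUT is the fraction of hidden units randomly zeroed at each update,
# acting as a simple regulariser against overfitting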
# data: shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
RESHAPED = 784
#
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# normalize
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# M_HIDDEN hidden layers
# 10 outputs
# final stage is softmax
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=BATCH_SIZE, epochs=NB_EPOCH,
verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show() | [
"[email protected]"
] | |
0a46bde86ddac7465d01e383e35eba1b8671078b | f3fb71fa8477812578448001dd87ed965a5848cd | /wxgigo/wxmp/api/webauth.py | 77e1048da9cc794719964911b7bbf2dd86b86e2a | [
"MIT"
] | permissive | rfancn/wxgigo | 0da920c27f8db2c0d30f9573184ea254c8e3554b | 7ed64248e4df037dbb5ff0683f21f458d3f29a0e | refs/heads/master | 2021-01-12T04:19:47.041858 | 2017-03-31T06:01:41 | 2017-03-31T06:01:41 | 77,586,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,696 | py | #!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2010-2013, Ryan Fan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import absolute_import
import requests
import json
import urllib
from celery import shared_task, Task
from sdk.constants import *
from sdk.web.helper import WebHelper
class BaseWeb(Task):
abstract = True
web_helper = WebHelper(Task.app.db)
app_id, app_key = Task.app.db.hmget(WXMP_CONFIG, 'APP_ID', 'APP_KEY')
#class get_access_token(BaseWeb):
# def run(self, open_id):
# return self.web_helper.get_access_token(open_id)
class auth(BaseWeb):
"""
Authorization to obtain web access token
@param: code
@return: if succeed, returns openid
"""
def run(self, code):
if not self.app_id or not self.app_key:
print "No app_id or app_key when doing web authentication"
return None
url = 'https://api.weixin.qq.com/sns/oauth2/access_token?' \
'appid={0}&secret={1}&code={2}&' \
'grant_type=authorization_code'.format(self.app_id, self.app_key, code)
try:
resp = requests.get(url).json()
except Exception,e:
print "Failed to do web authentication because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when do web authentication"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when do web authentication: {0}".format(resp['errmsg'])
return None
if not self.web_helper.save_auth_info(resp):
return None
return resp['openid']
class get_auth_url(BaseWeb):
def run(self, redirect_url, scope):
if not self.app_id:
print "Failed to get app_id in get_auth_url()"
return None
auth_url = 'https://open.weixin.qq.com/connect/oauth2/authorize?' \
'appid={0}&redirect_uri={1}&response_type=code' \
'&scope={2}#wechat_redirect'.format(self.app_id, urllib.quote_plus(redirect_url), scope)
return auth_url
class get_user_info(BaseWeb):
def refresh_access_token(self, open_id):
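        # exchange the stored refresh_token for a new web access token,
        # persist the refreshed auth info, and return the token (or None)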
if not self.app_id:
print "Failed to get app_id when refresh web access token"
return None
refresh_token = self.web_helper.get_refresh_token(open_id)
if not refresh_token:
return None
url = 'https://api.weixin.qq.com/sns/oauth2/refresh_token?' \
'appid={0}&grant_type=refresh_token&refresh_token={1}'.format(
self.app_id, refresh_token
)
try:
resp = requests.get(url).json()
except Exception,e:
print "Failed to get refresh web access token because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when refresh web access token"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when refresh web access token: {0}".format(resp['errmsg'])
return None
# resp is a authentication info dict contains following:
#
# {
# "access_token":"ACCESS_TOKEN",
# "expires_in":7200,
# "refresh_token":"REFRESH_TOKEN",
# "openid":"OPENID",
# "scope":"SCOPE"
# }
if not self.web_helper.save_auth_info(resp):
return None
return resp['access_token']
def run(self, open_id):
access_token = self.web_helper.get_access_token(open_id)
# first time check if we can get valid access_token from db
if not access_token:
# may be access_token expired, try refresh it
print "Failed to get valid access_token from db, try to refresh it..."
access_token = self.refresh_access_token(open_id)
# second time check after refresh
if not access_token:
print "Failed to get access_token after refresh"
return None
url = 'https://api.weixin.qq.com/sns/userinfo?' \
'access_token={0}&openid={1}&lang=zh_CN'.format(access_token, open_id)
try:
resp = requests.get(url)
# Important: Must not use requests.response.json() method here
# otherwise, requests will doing ascii encode against the unicode string
resp = json.loads(resp.content)
except Exception,e:
print "Failed to get userinfo because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when get userinfo from Weixin server"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when get userinfo info from Weixin server: {0}".format(resp['errmsg'])
return None
return resp | [
"[email protected]"
] | |
d6e643149dea7d26fbc701c6232120ad1003e55e | beca83e67a86ec7c68f7b96d541938e31b2d1297 | /index.py | eb66115c2226639be978c88c71348cfcf67b8985 | [] | no_license | temilaj/opencv-object-tracking | 8c5fc2212df73985c8792ad13bc70f978a3a31b0 | 83fb2b4f442696bc2f676cd9c338278b5505916d | refs/heads/master | 2023-02-13T05:11:28.321623 | 2021-01-07T08:08:24 | 2021-01-07T08:08:24 | 326,603,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,546 | py | # Import Libraries
import cv2
import sys
from random import randint
# Tracker Types
tracker_types = ['BOOSTING',
'MIL',
'KCF',
'TLD',
'MEDIANFLOW',
'GOTURN',
'MOSSE',
'CSRT']
# Define trackers by name
def tracker_name(tracker_type):
# Create trackers by name with if statement
if tracker_type == tracker_types[0]:
tracker = cv2.TrackerBoosting_create()
elif tracker_type == tracker_types[1]:
tracker = cv2.TrackerMIL_create()
elif tracker_type == tracker_types[2]:
tracker = cv2.TrackerKCF_create()
elif tracker_type == tracker_types[3]:
tracker = cv2.TrackerTLD_create()
elif tracker_type == tracker_types[4]:
tracker = cv2.TrackerMedianFlow_create()
elif tracker_type == tracker_types[5]:
tracker = cv2.TrackerGOTURN_create()
elif tracker_type == tracker_types[6]:
tracker = cv2.TrackerMOSSE_create()
elif tracker_type == tracker_types[7]:
tracker = cv2.TrackerCSRT_create()
else:
tracker = None
print('No tracker found')
print('Choose from these trackers: ')
for tr in tracker_types:
print(tr)
# return
return tracker
if __name__ == '__main__':
print("Default tracking algorithm MOSSE \n"
"Available algorithms are: \n")
for tr in tracker_types:
print(tr)
tracker_type = 'MOSSE'
# Create a video capture
cap = cv2.VideoCapture('Video/Vehicles.mp4')
# Read first frame
success, frame = cap.read()
# Quit if failure
    if not success:
        print('Cannot read the video')
        sys.exit(1)
# Select boxes and colors
rects = []
colors = []
# While loop
while True:
# draw rectangles, select ROI, open new window
rect_box = cv2.selectROI('MultiTracker', frame)
rects.append(rect_box)
colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
print('Press q to stop selecting boxes and start multitracking')
print('Press any key to select another box')
        # 113 == ord('q'): finish selecting and start tracking
if cv2.waitKey(0) & 0xFF == 113:
break
# print message
print(f'Selected boxes {rects}')
# Create multitracker
multi_tracker = cv2.MultiTracker_create()
# Initialize multitracker
for rect_box in rects:
multi_tracker.add(tracker_name(tracker_type),
frame,
rect_box)
#Video and Tracker
# while loop
while cap.isOpened():
success, frame = cap.read()
if not success:
break
# update location objects
success, boxes = multi_tracker.update(frame)
# draw the objects tracked
for i, newbox in enumerate(boxes):
pts1 = (int(newbox[0]),
int(newbox[1]))
pts2 = (int(newbox[0] + newbox[2]),
int(newbox[1] + newbox[3]))
cv2.rectangle(frame,
pts1,
pts2,
colors[i],
2,
1)
# display frame
cv2.imshow('Multitracker', frame)
# Close the frame
if cv2.waitKey(30) & 0xFF == 27:
break
# Release and Destroy
cap.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
ec0a70d28ff06e456905b87458756bd729bde88f | f99062c0aed4794b2e8ea0ce72222694e822a39f | /Contents/Code/common.py | b8b7e9e51820fb61e31f59e10937a6e1c7bac0de | [] | no_license | mrhenko/SVT-Play.bundle | b4e8f7fc775f59c06e2fc30feba0ac9112c28e85 | 71a89d13bb00cfa2b45378e655e6c8ece89a5089 | refs/heads/master | 2021-01-18T03:29:03.184943 | 2012-06-19T13:34:43 | 2012-06-19T13:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | # -*- coding: utf-8 -*
# Global constants
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
VERSION="3.2b4"
PLUGIN_PREFIX = "/video/svt"
#URLs
URL_SITE = "http://www.svtplay.se"
URL_INDEX = URL_SITE + "/program"
URL_LIVE = URL_SITE + "/?live=1"
URL_LATEST_SHOWS = URL_SITE + "/?ep=1"
URL_LATEST_NEWS = URL_SITE + "/?en=1"
#Texts
TEXT_LIVE_SHOWS = u'Livesändningar'
TEXT_INDEX_SHOWS = u'Program A-Ö'
TEXT_TITLE = u'SVT Play'
#TEXT_NO_INFO = u'Ingen information hittades'
TEXT_PREFERENCES = u'Inställningar'
TEXT_LATEST_SHOWS = u'Senaste program'
TEXT_LATEST_NEWS = u'Senaste nyhetsprogram'
#The pagination helper only steps this many pages deep by default; it can be overridden per call.
MAX_PAGINATE_PAGES = 5
ART = "art-default.jpg"
THUMB = 'icon-default.png'
#CACHE_TIME_LONG = 60*60*24*30 # Thirty days
CACHE_TIME_SHORT = 60*10 # 10 minutes
CACHE_TIME_1DAY = 60*60*24
CACHE_TIME_SHOW = CACHE_TIME_1DAY
#CACHE_TIME_EPISODE = CACHE_TIME_LONG
#Prefs settings
PREF_PAGINATE_DEPTH = 'paginate_depth'
def GetPaginateUrls(url, dataname="pr", baseurl=None):
pageElement = HTML.ElementFromURL(url)
xpath = "//div[@class='svtXClearFix']//ul[@data-name='%s']//@data-lastpage" % dataname
urls = []
try:
noPages = int(pageElement.xpath(xpath)[0])
except IndexError:
return urls
args = "?%s=%d"
if(baseurl != None):
url = baseurl
for i in range(1, min(MAX_PAGINATE_PAGES, noPages + 1)):
suburl = url + args % (dataname, i)
urls.append(suburl)
Log(suburl)
return urls
| [
"[email protected]"
] | |
c4172ddda4b37e3b5899a65e1c84aa267a569a13 | ce39aefe6560e9671ab1979a94eb6768118dbbcc | /venv/bin/python-config | 6065aada329a0d461974cb5c3b8a462085ea3057 | [] | no_license | prakharrr/FlaskWebApp | 47b6e859e813c4e1452746158c1869d20af78779 | 4576ebe52f1b4ed0e1f6a7f36bb92d2786f03552 | refs/heads/master | 2021-04-15T04:44:02.040680 | 2018-03-25T05:32:45 | 2018-03-25T05:32:45 | 126,663,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | #!/Users/prakharrawat/PycharmProjects/untitled2/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
8bcc948639a7d8f437c507c88481025bef23509d | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/dataproc/v1/dataproc-v1-py/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py | 6ecc0cd0d9b184d60845ea13568c5783c926dc36 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,108 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.dataproc_v1.types import autoscaling_policies
class ListAutoscalingPoliciesPager:
"""A pager for iterating through ``list_autoscaling_policies`` requests.
This class thinly wraps an initial
:class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and
provides an ``__iter__`` method to iterate through its
``policies`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListAutoscalingPolicies`` requests and continue to iterate
through the ``policies`` field on the
corresponding responses.
All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse],
request: autoscaling_policies.ListAutoscalingPoliciesRequest,
response: autoscaling_policies.ListAutoscalingPoliciesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest):
The initial request object.
response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]:
for page in self.pages:
yield from page.policies
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListAutoscalingPoliciesAsyncPager:
"""A pager for iterating through ``list_autoscaling_policies`` requests.
This class thinly wraps an initial
:class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``policies`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListAutoscalingPolicies`` requests and continue to iterate
through the ``policies`` field on the
corresponding responses.
All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]],
request: autoscaling_policies.ListAutoscalingPoliciesRequest,
response: autoscaling_policies.ListAutoscalingPoliciesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest):
The initial request object.
response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]:
async def async_generator():
async for page in self.pages:
for response in page.policies:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3670b6fc6b5f37305e60b54993958638f85b4b0e | 6c67ba252649110d4fc59a67bb1a7bf4691d0502 | /backend/urls.py | 18e6540e725fe35773ab413e2bf91d12a86942fd | [] | no_license | zhongrunqiu/WechatApp_assistant | f632cc03f9c4597cd326e04cf0a27db04d469b25 | d8ed1cf5fe78e142d940907e77017586824ca7d5 | refs/heads/master | 2021-07-12T12:10:14.179701 | 2020-08-16T14:02:31 | 2020-08-16T14:02:31 | 192,075,746 | 0 | 0 | null | 2020-08-16T14:02:33 | 2019-06-15T12:24:36 | Python | UTF-8 | Python | false | false | 863 | py | """backend_ch1_sec1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
# path('weather/',include('apis.urls'))
path('api/v1.0/',include('backend.version_1_0'))
]
| [
"[email protected]"
] | |
dc71c972bd595603815a1a31980e602af8a47e8a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02775/s024011055.py | 5d23c1a3f899c3391966a9a6d01203faca1d9ba6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | N = list(map(int, list(input())))
dp = [[-1, -1] for i in range(len(N) + 1)]
dp[0] = [0, 1]
N.insert(0,0)
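# digit DP over the decimal digits (most significant first):
# dp[i][0] = min banknotes when the amount paid so far matches the prefix,
# dp[i][1] = min banknotes when we have overpaid the prefix by one (carry)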
for i in range(1, len(N)):
    up = 10 - N[i]
    dp[i][0] = min(dp[i-1][0] + N[i], dp[i-1][1] + up)
    dp[i][1] = min(dp[i-1][0] + N[i] + 1, dp[i-1][1] + up - 1)
print(dp[-1][0])
| [
"[email protected]"
] | |
91b0cc3fb2bd9ff42cf6883343d3e40cc279c24f | 2114ffa5fe09efaaf519b041a5c459561ba8f1ff | /work03/window.py | 755c3c9206bd65fa9373aec2b394405aa6fec829 | [] | no_license | wasabi-candy/python | daea3ff7d6265b156691bc4d5434e2cf9ef59b7a | ceec91be8eb12e50063bd4e3a065bdb8a764ab07 | refs/heads/master | 2020-05-01T12:44:29.211695 | 2015-07-09T05:11:30 | 2015-07-09T05:11:30 | 36,272,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import threading
import time
import urllib.request as ur
import tkinter as tk
class GetBoard():
def __init__(self):
self.url = "http://viper.2ch.net/news4vip/subback.html";
def get_data(self):
self.fp = ur.urlopen(self.url)
html = self.fp.read().decode("cp932");
self.fp.close()
return html;
def __del__(self):
pass
class LoopThread(threading.Thread):
def __init__(self,fnc):
super(LoopThread,self).__init__()
self.daemon = True
self.fnc = fnc
def run(self):
while True:
time.sleep(1)
self.fnc();
def __del__(self):
print("loopend");
class Frame(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self,master,width=600,height=800,bg="#999")
        # add and configure the label
self.label = tk.Label(self,height=55,width=80,bg="#ddd",anchor=tk.N)
self.label.pack(padx=5,pady=5)
#LoopThread
self.timer = LoopThread(self.reload)
#GetBoard
self.board = GetBoard()
self.pack()
self.timer.start()
self.reload()
def reload(self):
html = self.board.get_data()
self.label.configure(text=html)
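        # caution: this runs on the timer thread; Tk widgets are not
        # thread-safe, so a sturdier version would hand the update back to
        # the main loop (e.g. with after())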
def __del__(self):
pass
f = Frame()
f.mainloop()
| [
"[email protected]"
] | |
c39805898c26aa007db8eacb4fab4f796edbbadf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/52/usersdata/106/20724/submittedfiles/matriz1.py | add0d8a2bfdbd3cf2ff19de84bb0eee193c9d9e3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
# Functions
def colunaEsquerda(a):
    # leftmost column containing a 1; return on the first hit
    for j in range (0,a.shape[1],1):
        for i in range (0,a.shape[0],1):
            if a[i,j] == 1:
                return j
    return 0
def colunaDireita(a):
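    # rightmost column containing a 1 (keeps overwriting until the last hit)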
Direita = a.shape[1] -1
for j in range (0,a.shape[1],1):
for i in range (0,a.shape[0],1):
if a[i,j] == 1:
Direita = j
return Direita
def linhaCima(a):
    # topmost row containing a 1; return on the first hit
    for i in range (0,a.shape[0],1):
        for j in range (0,a.shape[1],1):
            if a[i,j] == 1:
                return i
    return 0
def linhaBaixo(a):
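    # bottommost row containing a 1 (keeps overwriting until the last hit)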
LB = a.shape[0]-1
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
if a[i,j] == 1:
LB = i
return LB
# Main code
linhas = input ('Digite a quantidade de linhas:')
colunas = input ('Digite a quantidade de colunas:')
a= np.zeros ((linhas,colunas))
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j] = input ('Digite um número:')
print (a[linhaCima(a):(linhaBaixo(a)+1),colunaEsquerda(a):(colunaDireita(a)+1)]) | [
"[email protected]"
] | |
492c10c00dde0cddb679dbe7cc3733120630ba5b | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/tests/unit/virt/libvirt/test_imagecache.py | 040ee0c223bedf396922f127ac400909a4e0bed7 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 87,385 | py | # Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import hashlib
import os
import time

import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import formatters
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from six.moves import cStringIO

from nova import conductor
import nova.conf
from nova import context
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils

CONF = nova.conf.CONF


@contextlib.contextmanager
def intercept_log_messages():
    try:
        mylog = logging.getLogger('nova')
        stream = cStringIO()
        handler = logging.logging.StreamHandler(stream)
        handler.setFormatter(formatters.ContextFormatter())
        mylog.logger.addHandler(handler)
        yield stream
    finally:
        mylog.logger.removeHandler(handler)


class ImageCacheManagerTestCase(test.NoDBTestCase):

    def setUp(self):
        super(ImageCacheManagerTestCase, self).setUp()
        self.stock_instance_names = set(['instance-00000001',
                                         'instance-00000002',
                                         'instance-00000003',
                                         'banana-42-hamster'])

    def test_read_stored_checksum_missing(self):
        self.stub_out('os.path.exists', lambda x: False)
        csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
        self.assertIsNone(csum)

    @mock.patch.object(os.path, 'exists', return_value=True)
    @mock.patch.object(time, 'time', return_value=2000000)
    @mock.patch.object(os.path, 'getmtime', return_value=1000000)
    def test_get_age_of_file(self, mock_getmtime, mock_time, mock_exists):
        image_cache_manager = imagecache.ImageCacheManager()
        exists, age = image_cache_manager._get_age_of_file('/tmp')
        self.assertTrue(exists)
        self.assertEqual(1000000, age)

    @mock.patch.object(os.path, 'exists', return_value=False)
    def test_get_age_of_file_not_exists(self, mock_exists):
        image_cache_manager = imagecache.ImageCacheManager()
        exists, age = image_cache_manager._get_age_of_file('/tmp')
        self.assertFalse(exists)
        self.assertEqual(0, age)

    def test_read_stored_checksum(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
            fname = os.path.join(tmpdir, 'aaa')
            info_fname = imagecache.get_info_filename(fname)
            f = open(info_fname, 'w')
            f.write(csum_input)
            f.close()

            csum_output = imagecache.read_stored_checksum(fname,
                                                          timestamped=False)
            self.assertEqual(csum_input.rstrip(),
                             '{"sha1": "%s"}' % csum_output)

    def test_read_stored_checksum_legacy_essex(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            fname = os.path.join(tmpdir, 'aaa')
            old_fname = fname + '.sha1'
            f = open(old_fname, 'w')
            f.write('fdghkfhkgjjksfdgjksjkghsdf')
            f.close()

            csum_output = imagecache.read_stored_checksum(fname,
                                                          timestamped=False)
            self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
            self.assertFalse(os.path.exists(old_fname))
            info_fname = imagecache.get_info_filename(fname)
            self.assertTrue(os.path.exists(info_fname))

    def test_list_base_images(self):
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
                   '00000004',
                   'swap_1000']
        images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                  'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                  'e97222e91fc4241f49a7f520d1dcf446751129b3',
                  '17d1b00b81642842e514494a78e804e9a511637c',
                  '17d1b00b81642842e514494a78e804e9a511637c_5368709120',
                  '17d1b00b81642842e514494a78e804e9a511637c_10737418240']
        listing.extend(images)

        self.stub_out('os.listdir', lambda x: listing)
        self.stub_out('os.path.isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        self.flags(instances_path='/var/lib/nova/instances')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)

        sanitized = []
        for ent in image_cache_manager.unexplained_images:
            sanitized.append(ent.replace(base_dir + '/', ''))

        self.assertEqual(sorted(sanitized), sorted(images))

        expected = os.path.join(base_dir,
                                'e97222e91fc4241f49a7f520d1dcf446751129b3')
        self.assertIn(expected, image_cache_manager.unexplained_images)

        expected = os.path.join(base_dir,
                                '17d1b00b81642842e514494a78e804e9a511637c_'
                                '10737418240')
        self.assertIn(expected, image_cache_manager.unexplained_images)

        unexpected = os.path.join(base_dir, '00000004')
        self.assertNotIn(unexpected, image_cache_manager.unexplained_images)

        for ent in image_cache_manager.unexplained_images:
            self.assertTrue(ent.startswith(base_dir))

        self.assertEqual(len(image_cache_manager.originals), 2)

        expected = os.path.join(base_dir,
                                '17d1b00b81642842e514494a78e804e9a511637c')
        self.assertIn(expected, image_cache_manager.originals)

        unexpected = os.path.join(base_dir,
                                  '17d1b00b81642842e514494a78e804e9a511637c_'
                                  '10737418240')
        self.assertNotIn(unexpected, image_cache_manager.originals)

        self.assertEqual(1, len(image_cache_manager.back_swap_images))
        self.assertIn('swap_1000', image_cache_manager.back_swap_images)

    def test_list_backing_images_small(self):
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'instance-00000001',
                                 'instance-00000002', 'instance-00000003'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('instance-') != -1)
        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_resized(self):
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'instance-00000001',
                                 'instance-00000002', 'instance-00000003'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('instance-') != -1)
        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
                                  '10737418240'))

        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_'
                             '10737418240')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_instancename(self):
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'banana-42-hamster'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('banana-42-hamster') != -1)
        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        found = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name,
                             'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = [found]
        image_cache_manager.instance_names = self.stock_instance_names

        inuse_images = image_cache_manager._list_backing_images()

        self.assertEqual(inuse_images, [found])
        self.assertEqual(len(image_cache_manager.unexplained_images), 0)

    def test_list_backing_images_disk_notexist(self):
        self.stub_out('os.listdir',
                      lambda x: ['_base', 'banana-42-hamster'])
        self.stub_out('os.path.exists',
                      lambda x: x.find('banana-42-hamster') != -1)

        def fake_get_disk(disk_path):
            raise processutils.ProcessExecutionError()

        self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = []
        image_cache_manager.instance_names = self.stock_instance_names

        self.assertRaises(processutils.ProcessExecutionError,
                          image_cache_manager._list_backing_images)

    def test_find_base_file_nothing(self):
        self.stub_out('os.path.exists', lambda x: False)

        base_dir = '/var/lib/nova/instances/_base'
        fingerprint = '549867354867'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        self.assertEqual(0, len(res))

    def test_find_base_file_small(self):
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        self.stub_out('os.path.exists',
                      lambda x: x.endswith('%s_sm' % fingerprint))

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_sm')
        self.assertEqual(res, [(base_file, True, False)])

    def test_find_base_file_resized(self):
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stub_out('os.listdir', lambda x: listing)
        self.stub_out('os.path.exists',
                      lambda x: x.endswith('%s_10737418240' % fingerprint))
        self.stub_out('os.path.isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file, False, True)])

    def test_find_base_file_all(self):
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stub_out('os.listdir', lambda x: listing)
        self.stub_out('os.path.exists', lambda x: True)
        self.stub_out('os.path.isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file1 = os.path.join(base_dir, fingerprint)
        base_file2 = os.path.join(base_dir, fingerprint + '_sm')
        base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file1, False, False),
                               (base_file2, True, False),
                               (base_file3, False, True)])

    @contextlib.contextmanager
    def _make_base_file(self, checksum=True, lock=True):
        """Make a base file for testing."""

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')
            fname = os.path.join(tmpdir, 'aaa')

            base_file = open(fname, 'w')
            base_file.write('data')
            base_file.close()

            if lock:
                lockdir = os.path.join(tmpdir, 'locks')
                lockname = os.path.join(lockdir, 'nova-aaa')
                os.mkdir(lockdir)
                lock_file = open(lockname, 'w')
                lock_file.write('data')
                lock_file.close()

            base_file = open(fname, 'r')

            if checksum:
                imagecache.write_stored_checksum(fname)

            base_file.close()
            yield fname

    def test_remove_base_file(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)

            lock_name = 'nova-' + os.path.split(fname)[-1]
            lock_dir = os.path.join(CONF.instances_path, 'locks')
            lock_file = os.path.join(lock_dir, lock_name)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))
            self.assertTrue(os.path.exists(lock_file))

            # Old files get cleaned up though
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))
            self.assertFalse(os.path.exists(lock_file))

    def test_remove_base_file_original(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.originals = [fname]
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # This file should stay longer than a resized image
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # Originals don't stay forever though
            os.utime(fname, (-1, time.time() - 3600 * 25))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))

    def test_remove_base_file_dne(self):
        # This test is solely to execute the "does not exist" code path. We
        # don't expect the method being tested to do anything in this case.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            fname = os.path.join(tmpdir, 'aaa')
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager._remove_base_file(fname)

    def test_remove_base_file_oserror(self):
        with intercept_log_messages() as stream:
            with utils.tempdir() as tmpdir:
                self.flags(instances_path=tmpdir)
                self.flags(image_info_filename_pattern=('$instances_path/'
                                                        '%(image)s.info'),
                           group='libvirt')

                fname = os.path.join(tmpdir, 'aaa')

                os.mkdir(fname)
                os.utime(fname, (-1, time.time() - 3601))

                # This will raise an OSError because of file permissions
                image_cache_manager = imagecache.ImageCacheManager()
                image_cache_manager._remove_base_file(fname)

                self.assertTrue(os.path.exists(fname))
                self.assertNotEqual(stream.getvalue().find('Failed to remove'),
                                    -1)

    def test_handle_base_image_unused(self):
        img = '123'

        with self._make_base_file() as fname:
            os.utime(fname, (-1, time.time() - 3601))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files,
                             [fname])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    @mock.patch.object(libvirt_utils, 'update_mtime')
    def test_handle_base_image_used(self, mock_mtime):
        img = '123'

        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            mock_mtime.assert_called_once_with(fname)
            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    @mock.patch.object(libvirt_utils, 'update_mtime')
    def test_handle_base_image_used_remotely(self, mock_mtime):
        img = '123'

        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            mock_mtime.assert_called_once_with(fname)
            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_absent(self):
        img = '123'

        with intercept_log_messages() as stream:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, None)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])
            self.assertNotEqual(stream.getvalue().find('an absent base file'),
                                -1)

    def test_handle_base_image_used_missing(self):
        img = '123'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            fname = os.path.join(tmpdir, 'aaa')

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    @mock.patch.object(libvirt_utils, 'update_mtime')
    def test_handle_base_image_checksum_fails(self, mock_mtime):
        self.flags(checksum_base_images=True, group='libvirt')

        img = '123'

        with self._make_base_file() as fname:
            with open(fname, 'w') as f:
                f.write('banana')

            d = {'sha1': '21323454'}
            with open('%s.info' % fname, 'w') as f:
                f.write(jsonutils.dumps(d))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            mock_mtime.assert_called_once_with(fname)
            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files,
                             [fname])

    @mock.patch.object(libvirt_utils, 'update_mtime')
    @mock.patch.object(lockutils, 'external_lock')
    def test_verify_base_images(self, mock_lock, mock_mtime):
        hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
        hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
        hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
        hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'

        self.flags(instances_path='/instance_path',
                   image_cache_subdirectory_name='_base')

        base_file_list = ['00000001',
                          'ephemeral_0_20_None',
                          'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                          'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                          hashed_42,
                          hashed_1,
                          hashed_21,
                          hashed_22,
                          '%s_5368709120' % hashed_1,
                          '%s_10737418240' % hashed_1,
                          '00000004']

        def fq_path(path):
            return os.path.join('/instance_path/_base/', path)

        # Fake base directory existence
        orig_exists = os.path.exists

        def exists(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_exists(path)

            if path in ['/instance_path',
                        '/instance_path/_base',
                        '/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk',
                        '/instance_path/instance-3/disk',
                        '/instance_path/_base/%s.info' % hashed_42]:
                return True

            for p in base_file_list:
                if path == fq_path(p):
                    return True
                if path == fq_path(p) + '.info':
                    return False

            if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
                                                                   hashed_21,
                                                                   hashed_22,
                                                                   hashed_42]]:
                return False

            self.fail('Unexpected path existence check: %s' % path)

        self.stub_out('os.path.exists', lambda x: exists(x))

        # Fake up some instances in the instances directory
        orig_listdir = os.listdir

        def listdir(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_listdir(path)

            if path == '/instance_path':
                return ['instance-1', 'instance-2', 'instance-3', '_base']

            if path == '/instance_path/_base':
                return base_file_list

            self.fail('Unexpected directory listed: %s' % path)

        self.stub_out('os.listdir', lambda x: listdir(x))

        # Fake isfile for these faked images in _base
        orig_isfile = os.path.isfile

        def isfile(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_isfile(path)

            for p in base_file_list:
                if path == fq_path(p):
                    return True

            self.fail('Unexpected isfile call: %s' % path)

        self.stub_out('os.path.isfile', lambda x: isfile(x))

        # Fake the database call which lists running instances
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'name': 'instance-1',
                      'uuid': '123',
                      'vm_state': '',
                      'task_state': ''},
                     {'image_ref': '1',
                      'kernel_id': '21',
                      'ramdisk_id': '22',
                      'host': CONF.host,
                      'name': 'instance-2',
                      'uuid': '456',
                      'vm_state': '',
                      'task_state': ''}]
        all_instances = [fake_instance.fake_instance_obj(None, **instance)
                         for instance in instances]
        image_cache_manager = imagecache.ImageCacheManager()

        # Fake the utils call which finds the backing image
        def get_disk_backing_file(path):
            if path in ['/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk']:
                return fq_path('%s_5368709120' % hashed_1)
            self.fail('Unexpected backing file lookup: %s' % path)

        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: get_disk_backing_file(x))

        # Fake out verifying checksums, as that is tested elsewhere
        self.stubs.Set(image_cache_manager, '_verify_checksum',
                       lambda x, y: True)

        # Fake getmtime as well
        orig_getmtime = os.path.getmtime

        def getmtime(path):
            if not path.startswith('/instance_path'):
                return orig_getmtime(path)

            return 1000000

        self.stub_out('os.path.getmtime', lambda x: getmtime(x))

        # Make sure we don't accidentally remove a real file
        orig_remove = os.remove

        def remove(path):
            if not path.startswith('/instance_path'):
                return orig_remove(path)

            # Don't try to remove fake files
            return

        self.stub_out('os.remove', lambda x: remove(x))

        self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                                 'bdms_by_instance_uuid')

        ctxt = context.get_admin_context()
        objects.block_device.BlockDeviceMappingList.bdms_by_instance_uuid(
            ctxt, ['123', '456']).AndReturn({})

        self.mox.ReplayAll()
        # And finally we can make the call we're actually testing...
        # The argument here should be a context, but it is mocked out
        image_cache_manager.update(ctxt, all_instances)

        # Verify
        active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
                  fq_path(hashed_21), fq_path(hashed_22)]
        for act in active:
            self.assertIn(act, image_cache_manager.active_base_files)
        self.assertEqual(len(image_cache_manager.active_base_files),
                         len(active))

        for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
                    fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
                    fq_path(hashed_42),
                    fq_path('%s_10737418240' % hashed_1)]:
            self.assertIn(rem, image_cache_manager.removable_base_files)

        # Ensure there are no "corrupt" images as well
        self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)

    def test_verify_base_images_no_base(self):
        self.flags(instances_path='/tmp/no/such/dir/name/please')
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.update(None, [])

    def test_is_valid_info_file(self):
        hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'

        self.flags(instances_path='/tmp/no/such/dir/name/please')
        self.flags(image_info_filename_pattern=('$instances_path/_base/'
                                                '%(image)s.info'),
                   group='libvirt')
        base_filename = os.path.join(CONF.instances_path, '_base', hashed)

        is_valid_info_file = imagecache.is_valid_info_file
        self.assertFalse(is_valid_info_file('banana'))
        self.assertFalse(is_valid_info_file(
            os.path.join(CONF.instances_path, '_base', '00000001'))
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'is_valid_info_file'
op|'('
name|'base_filename'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'is_valid_info_file'
op|'('
name|'base_filename'
op|'+'
string|"'.sha1'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'is_valid_info_file'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_configured_checksum_path
dedent|''
name|'def'
name|'test_configured_checksum_path'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'instances_path'
op|'='
name|'tmpdir'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'image_info_filename_pattern'
op|'='
op|'('
string|"'$instances_path/'"
nl|'\n'
string|"'%(image)s.info'"
op|')'
op|','
nl|'\n'
name|'group'
op|'='
string|"'libvirt'"
op|')'
newline|'\n'
nl|'\n'
comment|'# Ensure there is a base directory'
nl|'\n'
name|'os'
op|'.'
name|'mkdir'
op|'('
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'tmpdir'
op|','
string|"'_base'"
op|')'
op|')'
newline|'\n'
nl|'\n'
comment|'# Fake the database call which lists running instances'
nl|'\n'
name|'instances'
op|'='
op|'['
op|'{'
string|"'image_ref'"
op|':'
string|"'1'"
op|','
nl|'\n'
string|"'host'"
op|':'
name|'CONF'
op|'.'
name|'host'
op|','
nl|'\n'
string|"'name'"
op|':'
string|"'instance-1'"
op|','
nl|'\n'
string|"'uuid'"
op|':'
string|"'123'"
op|','
nl|'\n'
string|"'vm_state'"
op|':'
string|"''"
op|','
nl|'\n'
string|"'task_state'"
op|':'
string|"''"
op|'}'
op|','
nl|'\n'
op|'{'
string|"'image_ref'"
op|':'
string|"'1'"
op|','
nl|'\n'
string|"'host'"
op|':'
name|'CONF'
op|'.'
name|'host'
op|','
nl|'\n'
string|"'name'"
op|':'
string|"'instance-2'"
op|','
nl|'\n'
string|"'uuid'"
op|':'
string|"'456'"
op|','
nl|'\n'
string|"'vm_state'"
op|':'
string|"''"
op|','
nl|'\n'
string|"'task_state'"
op|':'
string|"''"
op|'}'
op|']'
newline|'\n'
nl|'\n'
name|'all_instances'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'instance'
name|'in'
name|'instances'
op|':'
newline|'\n'
indent|' '
name|'all_instances'
op|'.'
name|'append'
op|'('
name|'fake_instance'
op|'.'
name|'fake_instance_obj'
op|'('
nl|'\n'
name|'None'
op|','
op|'**'
name|'instance'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|function|touch
dedent|''
name|'def'
name|'touch'
op|'('
name|'filename'
op|')'
op|':'
newline|'\n'
indent|' '
name|'f'
op|'='
name|'open'
op|'('
name|'filename'
op|','
string|"'w'"
op|')'
newline|'\n'
name|'f'
op|'.'
name|'write'
op|'('
string|"'Touched'"
op|')'
newline|'\n'
name|'f'
op|'.'
name|'close'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'old'
op|'='
name|'time'
op|'.'
name|'time'
op|'('
op|')'
op|'-'
op|'('
number|'25'
op|'*'
number|'3600'
op|')'
newline|'\n'
name|'hashed'
op|'='
string|"'e97222e91fc4241f49a7f520d1dcf446751129b3'"
newline|'\n'
name|'base_filename'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'tmpdir'
op|','
name|'hashed'
op|')'
newline|'\n'
name|'touch'
op|'('
name|'base_filename'
op|')'
newline|'\n'
name|'touch'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|')'
newline|'\n'
name|'os'
op|'.'
name|'utime'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|','
op|'('
name|'old'
op|','
name|'old'
op|')'
op|')'
newline|'\n'
name|'touch'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|')'
newline|'\n'
name|'os'
op|'.'
name|'utime'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|','
op|'('
name|'old'
op|','
name|'old'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
nl|'\n'
name|'objects'
op|'.'
name|'block_device'
op|'.'
name|'BlockDeviceMappingList'
op|','
nl|'\n'
string|"'bdms_by_instance_uuid'"
op|')'
newline|'\n'
nl|'\n'
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
newline|'\n'
name|'objects'
op|'.'
name|'block_device'
op|'.'
name|'BlockDeviceMappingList'
op|'.'
name|'bdms_by_instance_uuid'
op|'('
nl|'\n'
name|'ctxt'
op|','
op|'['
string|"'123'"
op|','
string|"'456'"
op|']'
op|')'
op|'.'
name|'AndReturn'
op|'('
op|'{'
op|'}'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'update'
op|'('
name|'ctxt'
op|','
nl|'\n'
name|'all_instances'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'os'
op|'.'
name|'path'
op|'.'
name|'exists'
op|'('
name|'base_filename'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'os'
op|'.'
name|'path'
op|'.'
name|'exists'
op|'('
name|'base_filename'
op|'+'
string|"'.info'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_run_image_cache_manager_pass
dedent|''
dedent|''
name|'def'
name|'test_run_image_cache_manager_pass'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'was'
op|'='
op|'{'
string|"'called'"
op|':'
name|'False'
op|'}'
newline|'\n'
nl|'\n'
DECL|function|fake_get_all_by_filters
name|'def'
name|'fake_get_all_by_filters'
op|'('
name|'context'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'was'
op|'['
string|"'called'"
op|']'
op|'='
name|'True'
newline|'\n'
name|'instances'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'x'
name|'in'
name|'range'
op|'('
number|'2'
op|')'
op|':'
newline|'\n'
indent|' '
name|'instances'
op|'.'
name|'append'
op|'('
name|'fake_instance'
op|'.'
name|'fake_db_instance'
op|'('
nl|'\n'
name|'image_ref'
op|'='
string|"'1'"
op|','
nl|'\n'
name|'uuid'
op|'='
name|'x'
op|','
nl|'\n'
name|'name'
op|'='
name|'x'
op|','
nl|'\n'
name|'vm_state'
op|'='
string|"''"
op|','
nl|'\n'
name|'task_state'
op|'='
string|"''"
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'instances'
newline|'\n'
nl|'\n'
dedent|''
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'instances_path'
op|'='
name|'tmpdir'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_get_all_by_filters'"
op|','
nl|'\n'
name|'fake_get_all_by_filters'
op|')'
newline|'\n'
name|'compute'
op|'='
name|'importutils'
op|'.'
name|'import_object'
op|'('
name|'CONF'
op|'.'
name|'compute_manager'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'use_local'
op|'='
name|'True'
op|','
name|'group'
op|'='
string|"'conductor'"
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'conductor_api'
op|'='
name|'conductor'
op|'.'
name|'API'
op|'('
op|')'
newline|'\n'
name|'ctxt'
op|'='
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'_run_image_cache_manager_pass'
op|'('
name|'ctxt'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'was'
op|'['
string|"'called'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_store_swap_image
dedent|''
dedent|''
name|'def'
name|'test_store_swap_image'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'swap_'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'swap_123'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'swap_456'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'swap_abc'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'123_swap'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_store_swap_image'
op|'('
string|"'swap_129_'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'image_cache_manager'
op|'.'
name|'back_swap_images'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'expect_set'
op|'='
name|'set'
op|'('
op|'['
string|"'swap_123'"
op|','
string|"'swap_456'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'image_cache_manager'
op|'.'
name|'back_swap_images'
op|','
name|'expect_set'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'lockutils'
op|','
string|"'external_lock'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'libvirt_utils'
op|','
string|"'update_mtime'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'os.path.exists'"
op|','
name|'return_value'
op|'='
name|'True'
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'os.path.getmtime'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'os.remove'"
op|')'
newline|'\n'
DECL|member|test_age_and_verify_swap_images
name|'def'
name|'test_age_and_verify_swap_images'
op|'('
name|'self'
op|','
name|'mock_remove'
op|','
name|'mock_getmtime'
op|','
nl|'\n'
name|'mock_exist'
op|','
name|'mock_mtime'
op|','
name|'mock_lock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'expected_remove'
op|'='
name|'set'
op|'('
op|')'
newline|'\n'
name|'expected_exist'
op|'='
name|'set'
op|'('
op|'['
string|"'swap_128'"
op|','
string|"'swap_256'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'image_cache_manager'
op|'.'
name|'back_swap_images'
op|'.'
name|'add'
op|'('
string|"'swap_128'"
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'back_swap_images'
op|'.'
name|'add'
op|'('
string|"'swap_256'"
op|')'
newline|'\n'
nl|'\n'
name|'image_cache_manager'
op|'.'
name|'used_swap_images'
op|'.'
name|'add'
op|'('
string|"'swap_128'"
op|')'
newline|'\n'
nl|'\n'
DECL|function|getmtime
name|'def'
name|'getmtime'
op|'('
name|'path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'time'
op|'.'
name|'time'
op|'('
op|')'
op|'-'
number|'1000000'
newline|'\n'
nl|'\n'
dedent|''
name|'mock_getmtime'
op|'.'
name|'side_effect'
op|'='
name|'getmtime'
newline|'\n'
nl|'\n'
DECL|function|removefile
name|'def'
name|'removefile'
op|'('
name|'path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'not'
name|'path'
op|'.'
name|'startswith'
op|'('
string|"'/tmp_age_test'"
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'os'
op|'.'
name|'remove'
op|'('
name|'path'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'fn'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'split'
op|'('
name|'path'
op|')'
op|'['
op|'-'
number|'1'
op|']'
newline|'\n'
name|'expected_remove'
op|'.'
name|'add'
op|'('
name|'fn'
op|')'
newline|'\n'
name|'expected_exist'
op|'.'
name|'remove'
op|'('
name|'fn'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'mock_remove'
op|'.'
name|'side_effect'
op|'='
name|'removefile'
newline|'\n'
nl|'\n'
name|'image_cache_manager'
op|'.'
name|'_age_and_verify_swap_images'
op|'('
name|'None'
op|','
string|"'/tmp_age_test'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'expected_exist'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'expected_remove'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIn'
op|'('
string|"'swap_128'"
op|','
name|'expected_exist'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIn'
op|'('
string|"'swap_256'"
op|','
name|'expected_remove'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'utils'
op|','
string|"'synchronized'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|','
string|"'_get_age_of_file'"
op|','
nl|'\n'
name|'return_value'
op|'='
op|'('
name|'True'
op|','
number|'100'
op|')'
op|')'
newline|'\n'
DECL|member|test_lock_acquired_on_removing_old_enough_files
name|'def'
name|'test_lock_acquired_on_removing_old_enough_files'
op|'('
name|'self'
op|','
name|'mock_get_age'
op|','
nl|'\n'
name|'mock_synchronized'
op|')'
op|':'
newline|'\n'
indent|' '
name|'base_file'
op|'='
string|"'/tmp_age_test'"
newline|'\n'
name|'lock_path'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'CONF'
op|'.'
name|'instances_path'
op|','
string|"'locks'"
op|')'
newline|'\n'
name|'lock_file'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'split'
op|'('
name|'base_file'
op|')'
op|'['
op|'-'
number|'1'
op|']'
newline|'\n'
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'image_cache_manager'
op|'.'
name|'_remove_old_enough_file'
op|'('
nl|'\n'
name|'base_file'
op|','
number|'60'
op|','
name|'remove_sig'
op|'='
name|'False'
op|','
name|'remove_lock'
op|'='
name|'False'
op|')'
newline|'\n'
name|'mock_synchronized'
op|'.'
name|'assert_called_once_with'
op|'('
name|'lock_file'
op|','
name|'external'
op|'='
name|'True'
op|','
nl|'\n'
name|'lock_path'
op|'='
name|'lock_path'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|VerifyChecksumTestCase
dedent|''
dedent|''
name|'class'
name|'VerifyChecksumTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|setUp
indent|' '
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'VerifyChecksumTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'img'
op|'='
op|'{'
string|"'container_format'"
op|':'
string|"'ami'"
op|','
string|"'id'"
op|':'
string|"'42'"
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'checksum_base_images'
op|'='
name|'True'
op|','
name|'group'
op|'='
string|"'libvirt'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_make_checksum
dedent|''
name|'def'
name|'_make_checksum'
op|'('
name|'self'
op|','
name|'tmpdir'
op|')'
op|':'
newline|'\n'
indent|' '
name|'testdata'
op|'='
op|'('
string|"'OpenStack Software delivers a massively scalable cloud '"
nl|'\n'
string|"'operating system.'"
op|')'
newline|'\n'
nl|'\n'
name|'fname'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'tmpdir'
op|','
string|"'aaa'"
op|')'
newline|'\n'
name|'info_fname'
op|'='
name|'imagecache'
op|'.'
name|'get_info_filename'
op|'('
name|'fname'
op|')'
newline|'\n'
nl|'\n'
name|'with'
name|'open'
op|'('
name|'fname'
op|','
string|"'w'"
op|')'
name|'as'
name|'f'
op|':'
newline|'\n'
indent|' '
name|'f'
op|'.'
name|'write'
op|'('
name|'testdata'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'fname'
op|','
name|'info_fname'
op|','
name|'testdata'
newline|'\n'
nl|'\n'
DECL|member|_write_file
dedent|''
name|'def'
name|'_write_file'
op|'('
name|'self'
op|','
name|'info_fname'
op|','
name|'info_attr'
op|','
name|'testdata'
op|')'
op|':'
newline|'\n'
indent|' '
name|'f'
op|'='
name|'open'
op|'('
name|'info_fname'
op|','
string|"'w'"
op|')'
newline|'\n'
name|'if'
name|'info_attr'
op|'=='
string|'"csum valid"'
op|':'
newline|'\n'
indent|' '
name|'csum'
op|'='
name|'hashlib'
op|'.'
name|'sha1'
op|'('
op|')'
newline|'\n'
name|'csum'
op|'.'
name|'update'
op|'('
name|'testdata'
op|')'
newline|'\n'
name|'f'
op|'.'
name|'write'
op|'('
string|'\'{"sha1": "%s"}\\n\''
op|'%'
name|'csum'
op|'.'
name|'hexdigest'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'elif'
name|'info_attr'
op|'=='
string|'"csum invalid, not json"'
op|':'
newline|'\n'
indent|' '
name|'f'
op|'.'
name|'write'
op|'('
string|"'banana'"
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'f'
op|'.'
name|'write'
op|'('
string|'\'{"sha1": "banana"}\''
op|')'
newline|'\n'
dedent|''
name|'f'
op|'.'
name|'close'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|_check_body
dedent|''
name|'def'
name|'_check_body'
op|'('
name|'self'
op|','
name|'tmpdir'
op|','
name|'info_attr'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'instances_path'
op|'='
name|'tmpdir'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'image_info_filename_pattern'
op|'='
op|'('
string|"'$instances_path/'"
nl|'\n'
string|"'%(image)s.info'"
op|')'
op|','
nl|'\n'
name|'group'
op|'='
string|"'libvirt'"
op|')'
newline|'\n'
name|'fname'
op|','
name|'info_fname'
op|','
name|'testdata'
op|'='
name|'self'
op|'.'
name|'_make_checksum'
op|'('
name|'tmpdir'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_write_file'
op|'('
name|'info_fname'
op|','
name|'info_attr'
op|','
name|'testdata'
op|')'
newline|'\n'
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'return'
name|'image_cache_manager'
op|','
name|'fname'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum
dedent|''
name|'def'
name|'test_verify_checksum'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|','
name|'fname'
op|'='
name|'self'
op|'.'
name|'_check_body'
op|'('
name|'tmpdir'
op|','
string|'"csum valid"'
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
name|'self'
op|'.'
name|'img'
op|','
name|'fname'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum_disabled
dedent|''
dedent|''
name|'def'
name|'test_verify_checksum_disabled'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'checksum_base_images'
op|'='
name|'False'
op|','
name|'group'
op|'='
string|"'libvirt'"
op|')'
newline|'\n'
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|','
name|'fname'
op|'='
name|'self'
op|'.'
name|'_check_body'
op|'('
name|'tmpdir'
op|','
string|'"csum valid"'
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
name|'self'
op|'.'
name|'img'
op|','
name|'fname'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum_invalid_json
dedent|''
dedent|''
name|'def'
name|'test_verify_checksum_invalid_json'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'intercept_log_messages'
op|'('
op|')'
name|'as'
name|'stream'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|','
name|'fname'
op|'='
op|'('
nl|'\n'
name|'self'
op|'.'
name|'_check_body'
op|'('
name|'tmpdir'
op|','
string|'"csum invalid, not json"'
op|')'
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'img'
op|','
name|'fname'
op|','
name|'create_if_missing'
op|'='
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'res'
op|')'
newline|'\n'
name|'log'
op|'='
name|'stream'
op|'.'
name|'getvalue'
op|'('
op|')'
newline|'\n'
nl|'\n'
comment|'# NOTE(mikal): this is a skip not a fail because the file is'
nl|'\n'
comment|'# present, but is not in valid JSON format and therefore is'
nl|'\n'
comment|'# skipped.'
nl|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'log'
op|'.'
name|'find'
op|'('
string|"'image verification skipped'"
op|')'
op|','
op|'-'
number|'1'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum_invalid_repaired
dedent|''
dedent|''
dedent|''
name|'def'
name|'test_verify_checksum_invalid_repaired'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|','
name|'fname'
op|'='
op|'('
nl|'\n'
name|'self'
op|'.'
name|'_check_body'
op|'('
name|'tmpdir'
op|','
string|'"csum invalid, not json"'
op|')'
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'img'
op|','
name|'fname'
op|','
name|'create_if_missing'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum_invalid
dedent|''
dedent|''
name|'def'
name|'test_verify_checksum_invalid'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'intercept_log_messages'
op|'('
op|')'
name|'as'
name|'stream'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'image_cache_manager'
op|','
name|'fname'
op|'='
op|'('
nl|'\n'
name|'self'
op|'.'
name|'_check_body'
op|'('
name|'tmpdir'
op|','
string|'"csum invalid, valid json"'
op|')'
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
name|'self'
op|'.'
name|'img'
op|','
name|'fname'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'res'
op|')'
newline|'\n'
name|'log'
op|'='
name|'stream'
op|'.'
name|'getvalue'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotEqual'
op|'('
name|'log'
op|'.'
name|'find'
op|'('
string|"'image verification failed'"
op|')'
op|','
op|'-'
number|'1'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_verify_checksum_file_missing
dedent|''
dedent|''
dedent|''
name|'def'
name|'test_verify_checksum_file_missing'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'utils'
op|'.'
name|'tempdir'
op|'('
op|')'
name|'as'
name|'tmpdir'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'instances_path'
op|'='
name|'tmpdir'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'image_info_filename_pattern'
op|'='
op|'('
string|"'$instances_path/'"
nl|'\n'
string|"'%(image)s.info'"
op|')'
op|','
nl|'\n'
name|'group'
op|'='
string|"'libvirt'"
op|')'
newline|'\n'
name|'fname'
op|','
name|'info_fname'
op|','
name|'testdata'
op|'='
name|'self'
op|'.'
name|'_make_checksum'
op|'('
name|'tmpdir'
op|')'
newline|'\n'
nl|'\n'
name|'image_cache_manager'
op|'='
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|'('
op|')'
newline|'\n'
name|'res'
op|'='
name|'image_cache_manager'
op|'.'
name|'_verify_checksum'
op|'('
string|"'aaa'"
op|','
name|'fname'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
comment|'# Checksum requests for a file with no checksum now have the'
nl|'\n'
comment|'# side effect of creating the checksum'
nl|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'os'
op|'.'
name|'path'
op|'.'
name|'exists'
op|'('
name|'info_fname'
op|')'
op|')'
newline|'\n'
dedent|''
dedent|''
dedent|''
endmarker|''
end_unit
| [
"[email protected]"
] | |
368870cf443f5ff87fef8280e99d7f7666221e82 | ab80d1978214ff59dc1baa18e09b7ec602f09664 | /src/tp1/lab/repetidas.py | 25070ab8782f17aa9622dfea12c1ce9fd6115898 | [] | no_license | unmateo/7506-TP | 5accb6dc72202748dcb8cb5e7897cbfce0af2b0e | 3875906e3820c7eab597d71e08add5ab18297a61 | refs/heads/master | 2022-08-10T15:29:56.054968 | 2019-12-02T12:56:30 | 2019-12-02T12:56:30 | 204,795,454 | 0 | 0 | null | 2022-07-29T23:05:20 | 2019-08-27T21:43:01 | HTML | UTF-8 | Python | false | false | 1,455 | py | #!/usr/bin/env python
# coding: utf-8
# ### We analyse whether there are duplicate listings in the dataset
# In[35]:
import pandas as pd
# import the helper functions that load the dataframes
get_ipython().run_line_magic('run', '"../../utils/dataset_parsing.ipynb"')
df = levantar_datos("../../"+DATASET_RELATIVE_PATH)
# import the plotting helper functions
get_ipython().run_line_magic('run', '"../../utils/graphs.ipynb"')
# ### We consider one listing to be equal to another when they share city (and province), price, address, property type and total square metres.
# In[57]:
repetidas = df.groupby(['ciudad','provincia','precio','direccion','metrostotales','tipodepropiedad']).agg({"id":"count"})
repetidas=repetidas.loc[repetidas.id>1]
repetidas
# ### By our criterion, the number of duplicate listings is not significant relative to the full dataset. Note that different apartments with identical characteristics in the same building will also match as duplicates.
# In[58]:
repetidas=repetidas.groupby("tipodepropiedad").agg({"id":"count"})
repetidas=repetidas.rename(columns={"id":"total"})
get_barplot(repetidas["total"], title="Tipo de propiedad de las publicaciones repetidas", x_label="Tipo de propiedad", y_label="Total",)
# #### The chart shows that the number of duplicate apartments is very low, so the influence of identical apartments within a single building is almost nil.
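# In[ ]:
# Illustrative follow-up (not in the original notebook): actually dropping the
# duplicates found above, reusing the same key columns as the earlier groupby.
df_sin_repetidas = df.drop_duplicates(
    subset=['ciudad', 'provincia', 'precio', 'direccion', 'metrostotales', 'tipodepropiedad'],
    keep='first')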
| [
"[email protected]"
] | |
130edc3275a971600e9bc89ac9832da9afce3a92 | dee2d5c0d0308cab977856ebe5c3ac4b36a8f025 | /hello.py | d592b533e124f63f3b0bf443db0b22b297c51ebe | [] | no_license | ShubhamGarg2001/nonu1 | 346e7301290794e513a5b9e405250c3cc95f4b66 | 54aca1f26380ea5dd74794548c28a8a38c695a8b | refs/heads/master | 2022-04-26T04:10:45.974592 | 2020-04-29T15:55:05 | 2020-04-29T15:55:05 | 259,642,527 | 0 | 0 | null | 2020-04-28T13:49:34 | 2020-04-28T13:19:16 | null | UTF-8 | Python | false | false | 24 | py | print("Greetings Mummy") | [
"[email protected]"
] | |
738273917f078668e89f2bf2c5c54e511654c183 | e1e804221b203c50d49569a68539cf2e61414ebb | /tools/static_code_analysis.py | ee7ce44c1ce26f316a2ab7636a51b56018285a44 | [] | no_license | declankeyesbevan/api-skeleton | f6b5150d7edfd319f71e5b2891417ee5322ff736 | 432090cb2ed92939146ba9bda3aa447321d12de2 | refs/heads/master | 2023-05-07T19:35:02.413306 | 2021-05-27T04:44:57 | 2021-05-27T04:44:57 | 371,290,643 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,624 | py | import contextlib
import os
from pathlib import Path
import anybadge
from pylint.lint import Run
from radon.cli import CCHarvester, Config, RawHarvester
from radon.complexity import SCORE
from app.constants import FIRST
class StaticCodeAnalysis:
@property
def _paths(self):
return ['app']
@property
def _analyser(self):
raise NotImplementedError('Subclasses should implement this')
@property
def _thresholds(self):
raise NotImplementedError('Subclasses should implement this')
def run_test(self):
raise NotImplementedError('Subclasses should implement this')
def create_badge(self, score):
badge = anybadge.Badge(
self._analyser, score, thresholds=self._thresholds, value_prefix=' ', value_suffix=' '
)
filename = self._analyser.replace(' ', '-')
Path(f'{os.environ.get("BUILD_DIR", "build")}').mkdir(parents=True, exist_ok=True)
analyser_svg = f'{os.environ.get("BUILD_DIR", "build")}/{filename}.svg'
with contextlib.suppress(FileNotFoundError):
os.remove(analyser_svg)
badge.write_badge(analyser_svg)
class Lint(StaticCodeAnalysis):
@property
def _analyser(self):
return 'pylint'
@property
def _thresholds(self):
return {
2: 'red',
4: 'orange',
6: 'yellow',
10: 'green',
}
def run_test(self):
results = Run(['app'], do_exit=False)
score = round(results.linter.stats['global_note'], 2)
return score
class CyclomaticComplexity(StaticCodeAnalysis):
@property
def _analyser(self):
return 'cyclomatic complexity'
@property
def _thresholds(self):
return {
'F': 'red',
'E': 'red',
'D': 'red',
'C': 'orange',
'B': 'yellow',
'A': 'green',
}
@property
def _config(self):
return Config(
exclude=None,
ignore=None,
order=SCORE,
no_assert=False,
show_closures=False,
average=True,
total_average=True,
show_complexity=True,
min='A',
max='F',
)
def run_test(self):
harvester = CCHarvester(self._paths, self._config)
# Weird ripping apart of iterators because to_terminal() seems to be the only way to get the
# overall average. And it is only returned through iterators. Maybe I should do a pull
# request to the project.
*_, last = harvester.to_terminal()
_, mid, _ = last
_, score, *_ = mid
return score
class LogicalLinesOfCode(StaticCodeAnalysis):
@property
def _analyser(self):
return 'logical lines of code'
@property
def _thresholds(self):
return {
0: 'green',
}
@property
def _config(self):
return Config(
exclude=None,
ignore=None,
summary=True,
)
def run_test(self):
harvester = RawHarvester(self._paths, self._config)
# This is horrible but the code wasn't designed to be used this way. This is parsing the
# terminal output programmatically. I smells a pull request to the project.
target_idx = 0
lloc = 0
for idx, item in enumerate(harvester.to_terminal()):
if item[FIRST] == '** Total **':
target_idx = idx + 2
if target_idx == idx:
lloc = item
score = lloc[1][1]
return score
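# Illustrative usage sketch (not part of the original module): every analyser
# exposes run_test() and create_badge(), so a driver script could simply do:
#
#     for analyser in (Lint(), CyclomaticComplexity(), LogicalLinesOfCode()):
#         analyser.create_badge(analyser.run_test())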
| [
"[email protected]"
] | |
3b2bf47b29e824907b68e5277d4220df836181ad | 5ac3f3ea75065a5ba1649b4b869f4f98136b9c41 | /P1-Clasificación/ParseandoDatosConParametros.py | fa0522f2484ee9ba02cb4f536f6c9c1d62118f5f | [
"MIT"
] | permissive | jcgq/Inteligecia-de-negocio | 4d199eec0c1bc334d1418019fe76bddb624b8bcc | 78a993c339fb8eb399ef3513fc225bc951b293e6 | refs/heads/main | 2023-04-30T18:26:31.036977 | 2021-05-25T14:12:37 | 2021-05-25T14:12:37 | 369,571,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,462 | py | #!/usr/bin/env python
# coding: utf-8
# All the libraries for the different algorithms
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.svm import OneClassSVM
from sklearn.svm import SVC
from sklearn.svm import NuSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn import tree
from statistics import mode
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
import numpy as np
from sklearn import impute
import pandas as pd
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix

# First impute with the mode, and later with the mean, to see whether that improves things
mamografias = pd.read_csv("./mamografias.csv", na_values=["?"])
mamografias['Density'] = mamografias['Density'].fillna(mode(mamografias['Density']))
mamografias['BI-RADS'] = mamografias['BI-RADS'].fillna(mode(mamografias['BI-RADS']))
mamografias['Margin'] = mamografias['Margin'].fillna(mode(mamografias['Margin']))
mamografias['Age'] = mamografias['Age'].fillna(mode(mamografias['Age']))
mamografias['Shape'] = mamografias['Shape'].fillna(mode(mamografias['Shape']))

le = pp.LabelEncoder()
columna_codificada = le.fit_transform(mamografias['Shape'])
mamografias['Shape'] = le.fit_transform(mamografias['Shape'])
mamografias['Severity'] = le.fit_transform(mamografias['Severity'])
atributos = mamografias[['BI-RADS', 'Age', 'Shape', 'Margin', 'Density']]
target = mamografias['Severity']
data_train, data_test, target_train, target_test = train_test_split(
    atributos, target, test_size=0.8, random_state=5)


# Definition of the confusion-matrix helper
def matrizCruzada(prediccion):
    m = confusion_matrix(target_test, prediccion, normalize="all")
    tn, fp, fn, tp = m.ravel()
    print("TN ", tn * 100)
    print("FP ", fp * 100)
    print("FN ", fn * 100)
    print("TP ", tp * 100)
    print("FP-FN ", (fp - fn) * 100)
    print("---------------------------------")
    return m


# First algorithm: Naive Bayes
# Gaussian Naive Bayes
gnb = GaussianNB()
modeloNBgau = gnb.fit(data_train, target_train)
predNBgau = modeloNBgau.predict(data_test)
scoresGau = cross_val_score(modeloNBgau, atributos, target, cv=5, scoring='accuracy')

# Complement Naive Bayes
cnb = ComplementNB()
modeloNBcom = cnb.fit(data_train, target_train)
predNBcom = modeloNBcom.predict(data_test)
scoresCom = cross_val_score(modeloNBcom, atributos, target, cv=5, scoring='accuracy')

# Bernoulli Naive Bayes
bnb = BernoulliNB()
modelNBBer = bnb.fit(data_train, target_train)
predNBber = modelNBBer.predict(data_test)
scoresBer = cross_val_score(modelNBBer, atributos, target, cv=5, scoring='accuracy')

# Multinomial Naive Bayes
mnb = MultinomialNB()
modelNBMul = mnb.fit(data_train, target_train)
predNBmul = modelNBMul.predict(data_test)
scoresMul = cross_val_score(modelNBMul, atributos, target, cv=5, scoring='accuracy')

# Accuracy percentages
print("Using NB Gaussian the accuracy rate is ", np.mean(scoresGau) * 100)
print("Using NB Complement the accuracy rate is ", np.mean(scoresCom) * 100)
print("Using NB Bernoulli the accuracy rate is ", np.mean(scoresBer) * 100)
print("Using NB Multinomial the accuracy rate is ", np.mean(scoresMul) * 100)

# Validation matrices
print("Gaussian matrix: ", matrizCruzada(predNBgau))
print("Complement matrix: ", matrizCruzada(predNBcom))
print("Bernoulli matrix: ", matrizCruzada(predNBber))
print("Multinomial matrix: ", matrizCruzada(predNBmul))

# Second algorithm: decision trees
# Standard decision tree
arbNor = tree.DecisionTreeClassifier(random_state=2, max_depth=2)
arbNor = arbNor.fit(data_train, target_train)
predADnor = arbNor.predict(data_test)
scoresADnor = cross_val_score(arbNor, atributos, target, cv=5, scoring='accuracy')

# Extra (randomized) decision tree
arbEx = tree.ExtraTreeClassifier(random_state=4, max_depth=2)
arbEx = arbEx.fit(data_train, target_train)
predADex = arbEx.predict(data_test)
scoresADex = cross_val_score(arbEx, atributos, target, cv=5, scoring='accuracy')

# Accuracy percentages
print("Using the standard decision tree the accuracy rate is ", np.mean(scoresADnor) * 100)
print("Using the extra decision tree the accuracy rate is ", np.mean(scoresADex) * 100)

# Validation matrices
print("Standard tree matrix: ", matrizCruzada(predADnor))
print("Extra tree matrix: ", matrizCruzada(predADex))

# Plot the trees
tree.plot_tree(arbNor)
tree.plot_tree(arbEx)

# Third algorithm: support vector machines
# SVM - NuSVC
svr_nu = NuSVC(random_state=10, max_iter=3000)
svr_nu.fit(data_train, target_train)
predsvNu = svr_nu.predict(data_test)
scoresNu = cross_val_score(svr_nu, atributos, target, cv=5, scoring='accuracy')

# SVM - SVC
svr_svc = SVC(max_iter=3000)
svr_svc.fit(data_train, target_train)
predsvSvc = svr_svc.predict(data_test)
scoresSvc = cross_val_score(svr_svc, atributos, target, cv=5, scoring='accuracy')

# Accuracy percentages
print("Using NuSVC the accuracy rate is ", np.mean(scoresNu) * 100)
print("Using SVC the accuracy rate is ", np.mean(scoresSvc) * 100)

# Validation matrices
print("SVM - Nu matrix: ", matrizCruzada(predsvNu))
print("SVM - SVC matrix: ", matrizCruzada(predsvSvc))

# Fourth algorithm: ensemble methods
# Bagging meta-estimator
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5)
bagging.fit(data_train, target_train)
preBag = bagging.predict(data_test)
scoresBag = cross_val_score(bagging, atributos, target, cv=5, scoring='accuracy')

# Random Forests
forests = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)
forests.fit(data_train, target_train)
preFo = forests.predict(data_test)
scoresFo = cross_val_score(forests, atributos, target, cv=5, scoring='accuracy')

# Accuracy percentages
print("Using the EM meta-estimator the accuracy rate is ", np.mean(scoresBag) * 100)
print("Using EM Random Forests the accuracy rate is ", np.mean(scoresFo) * 100)

# Validation matrices
print("EM - meta-estimator matrix: ", matrizCruzada(preBag))
print("EM - Random Forests matrix: ", matrizCruzada(preFo))

# Fifth algorithm: neural networks
# MLPClassifier
# NOTE: fitting on data_test here reproduces the original script; the
# cross_val_score call below refits on the full data in any case.
modelMLP = MLPClassifier(activation='tanh', max_iter=10000)
modelMLP.fit(data_test, target_test)
preMLP = modelMLP.predict(data_test)
scoreMLP = cross_val_score(modelMLP, atributos, target, cv=5, scoring='accuracy')

# KNC
KNC = KNeighborsClassifier(n_neighbors=2)
KNC.fit(data_test, target_test)
preKNC = KNC.predict(data_test)
scoreKNC = cross_val_score(KNC, atributos, target, cv=5, scoring='accuracy')

# Accuracy percentages
print("Using the MLPClassifier neural network the accuracy rate is ", np.mean(scoreMLP) * 100)
print("Using KNC the accuracy rate is ", np.mean(scoreKNC) * 100)

# Validation matrices
print("MLPClassifier matrix: ", matrizCruzada(preMLP))
print("KNC matrix: ", matrizCruzada(preKNC))
| [
"[email protected]"
] | |
8c92a6705b031f5ec8052f1b6563088effcb9459 | a863483b9fa4e940718a3206340e698d9cdbc341 | /elpy/rpc-venv/lib/python3.8/site-packages/yapf/yapflib/yapf_api.py | 13b02a44f1dd80cfc0eb9d3899a867b92e56c35f | [
"BSD-2-Clause"
] | permissive | jhsygg/emacs.d | ebed42b52d4609e23ad9fa41fa873d2f52980bec | addf36f3e2fb3419f547df6180b835036a295542 | refs/heads/main | 2023-06-12T07:54:26.863128 | 2023-06-06T07:02:02 | 2023-06-06T07:02:02 | 124,079,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,937 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry points for YAPF.
The main APIs that YAPF exposes to drive the reformatting.
FormatFile(): reformat a file.
FormatCode(): reformat a string of code.
These APIs have some common arguments:
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
print_diff: (bool) Instead of returning the reformatted source, return a
diff that turns the formatted source into reformatter source.
verify: (bool) True if reformatted code should be verified for syntax.
"""
import difflib
import re
import sys

from yapf.pyparser import pyparser
from yapf.pytree import pytree_unwrapper
from yapf.pytree import pytree_utils
from yapf.pytree import blank_line_calculator
from yapf.pytree import comment_splicer
from yapf.pytree import continuation_splicer
from yapf.pytree import split_penalty
from yapf.pytree import subtype_assigner
from yapf.yapflib import errors
from yapf.yapflib import file_resources
from yapf.yapflib import identify_container
from yapf.yapflib import py3compat
from yapf.yapflib import reformatter
from yapf.yapflib import style


def FormatFile(filename,
               style_config=None,
               lines=None,
               print_diff=False,
               verify=False,
               in_place=False,
               logger=None):
  """Format a single Python file and return the formatted code.

  Arguments:
    filename: (unicode) The file to reformat.
    style_config: (string) Either a style name or a path to a file that contains
      formatting style settings. If None is specified, use the default style
      as set in style.DEFAULT_STYLE_FACTORY
    lines: (list of tuples of integers) A list of tuples of lines, [start, end],
      that we want to format. The lines are 1-based indexed. It can be used by
      third-party code (e.g., IDEs) when reformatting a snippet of code rather
      than a whole file.
    print_diff: (bool) Instead of returning the reformatted source, return a
      diff that turns the formatted source into reformatter source.
    verify: (bool) True if reformatted code should be verified for syntax.
    in_place: (bool) If True, write the reformatted code back to the file.
    logger: (io streamer) A stream to output logging.

  Returns:
    Tuple of (reformatted_code, encoding, changed). reformatted_code is None if
    the file is successfully written to (having used in_place). reformatted_code
    is a diff if print_diff is True.

  Raises:
    IOError: raised if there was an error reading the file.
    ValueError: raised if in_place and print_diff are both specified.
  """
  _CheckPythonVersion()

  if in_place and print_diff:
    raise ValueError('Cannot pass both in_place and print_diff.')

  original_source, newline, encoding = ReadFile(filename, logger)
  reformatted_source, changed = FormatCode(
      original_source,
      style_config=style_config,
      filename=filename,
      lines=lines,
      print_diff=print_diff,
      verify=verify)
  if newline != '\n':
    reformatted_source = reformatted_source.replace('\n', newline)
  if in_place:
    if changed:
      file_resources.WriteReformattedCode(filename, reformatted_source,
                                          encoding, in_place)
    return None, encoding, changed

  return reformatted_source, encoding, changed


def FormatTree(tree, style_config=None, lines=None, verify=False):
  """Format a parsed lib2to3 pytree.

  This provides an alternative entry point to YAPF.

  Arguments:
    tree: (pytree.Node) The root of the pytree to format.
    style_config: (string) Either a style name or a path to a file that contains
      formatting style settings. If None is specified, use the default style
      as set in style.DEFAULT_STYLE_FACTORY
    lines: (list of tuples of integers) A list of tuples of lines, [start, end],
      that we want to format. The lines are 1-based indexed. It can be used by
      third-party code (e.g., IDEs) when reformatting a snippet of code rather
      than a whole file.
    verify: (bool) True if reformatted code should be verified for syntax.

  Returns:
    The source formatted according to the given formatting style.
  """
  _CheckPythonVersion()
  style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))

  # Run passes on the tree, modifying it in place.
  comment_splicer.SpliceComments(tree)
  continuation_splicer.SpliceContinuations(tree)
  subtype_assigner.AssignSubtypes(tree)
  identify_container.IdentifyContainers(tree)
  split_penalty.ComputeSplitPenalties(tree)
  blank_line_calculator.CalculateBlankLines(tree)

  llines = pytree_unwrapper.UnwrapPyTree(tree)
  for lline in llines:
    lline.CalculateFormattingInformation()

  lines = _LineRangesToSet(lines)
  _MarkLinesToFormat(llines, lines)
  return reformatter.Reformat(_SplitSemicolons(llines), verify, lines)


def FormatAST(ast, style_config=None, lines=None, verify=False):
  """Format a parsed lib2to3 pytree.

  This provides an alternative entry point to YAPF.

  Arguments:
    unformatted_source: (unicode) The code to format.
    style_config: (string) Either a style name or a path to a file that contains
      formatting style settings. If None is specified, use the default style
      as set in style.DEFAULT_STYLE_FACTORY
    lines: (list of tuples of integers) A list of tuples of lines, [start, end],
      that we want to format. The lines are 1-based indexed. It can be used by
      third-party code (e.g., IDEs) when reformatting a snippet of code rather
      than a whole file.
    verify: (bool) True if reformatted code should be verified for syntax.

  Returns:
    The source formatted according to the given formatting style.
  """
  _CheckPythonVersion()
  style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))

  llines = pyparser.ParseCode(ast)
  for lline in llines:
    lline.CalculateFormattingInformation()

  lines = _LineRangesToSet(lines)
  _MarkLinesToFormat(llines, lines)
  return reformatter.Reformat(_SplitSemicolons(llines), verify, lines)


def FormatCode(unformatted_source,
               filename='<unknown>',
               style_config=None,
               lines=None,
               print_diff=False,
               verify=False):
  """Format a string of Python code.

  This provides an alternative entry point to YAPF.

  Arguments:
    unformatted_source: (unicode) The code to format.
    filename: (unicode) The name of the file being reformatted.
    style_config: (string) Either a style name or a path to a file that contains
      formatting style settings. If None is specified, use the default style
      as set in style.DEFAULT_STYLE_FACTORY
    lines: (list of tuples of integers) A list of tuples of lines, [start, end],
      that we want to format. The lines are 1-based indexed. It can be used by
      third-party code (e.g., IDEs) when reformatting a snippet of code rather
      than a whole file.
    print_diff: (bool) Instead of returning the reformatted source, return a
      diff that turns the formatted source into reformatter source.
    verify: (bool) True if reformatted code should be verified for syntax.

  Returns:
    Tuple of (reformatted_source, changed). reformatted_source conforms to the
    desired formatting style. changed is True if the source changed.
  """
  try:
    tree = pytree_utils.ParseCodeToTree(unformatted_source)
  except Exception as e:
    e.filename = filename
    raise errors.YapfError(errors.FormatErrorMsg(e))

  reformatted_source = FormatTree(
      tree, style_config=style_config, lines=lines, verify=verify)

  if unformatted_source == reformatted_source:
    return '' if print_diff else reformatted_source, False

  if print_diff:
    code_diff = _GetUnifiedDiff(
        unformatted_source, reformatted_source, filename=filename)
    return code_diff, code_diff.strip() != ''  # pylint: disable=g-explicit-bool-comparison # noqa

  return reformatted_source, True


def _CheckPythonVersion():  # pragma: no cover
  errmsg = 'yapf is only supported for Python 2.7 or 3.6+'
  if sys.version_info[0] == 2:
    if sys.version_info[1] < 7:
      raise RuntimeError(errmsg)
  elif sys.version_info[0] == 3:
    if sys.version_info[1] < 6:
      raise RuntimeError(errmsg)


def ReadFile(filename, logger=None):
  """Read the contents of the file.

  An optional logger can be specified to emit messages to your favorite logging
  stream. If specified, then no exception is raised. This is external so that it
  can be used by third-party applications.

  Arguments:
    filename: (unicode) The name of the file.
    logger: (function) A function or lambda that takes a string and emits it.

  Returns:
    The contents of filename.

  Raises:
    IOError: raised if there was an error reading the file.
  """
  try:
    encoding = file_resources.FileEncoding(filename)

    # Preserves line endings.
    with py3compat.open_with_encoding(
        filename, mode='r', encoding=encoding, newline='') as fd:
      lines = fd.readlines()

    line_ending = file_resources.LineEnding(lines)
    source = '\n'.join(line.rstrip('\r\n') for line in lines) + '\n'
    return source, line_ending, encoding
  except IOError as e:  # pragma: no cover
    if logger:
      logger(e)
    e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3]))
    raise
  except UnicodeDecodeError as e:  # pragma: no cover
    if logger:
      logger('Could not parse %s! Consider excluding this file with --exclude.',
             filename)
      logger(e)
    e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3]))
    raise


def _SplitSemicolons(lines):
  res = []
  for line in lines:
    res.extend(line.Split())
  return res


DISABLE_PATTERN = r'^#.*\b(?:yapf:\s*disable|fmt: ?off)\b'
ENABLE_PATTERN = r'^#.*\b(?:yapf:\s*enable|fmt: ?on)\b'


def _LineRangesToSet(line_ranges):
  """Return a set of lines in the range."""

  if line_ranges is None:
    return None

  line_set = set()
  for low, high in sorted(line_ranges):
    line_set.update(range(low, high + 1))

  return line_set


def _MarkLinesToFormat(llines, lines):
  """Skip sections of code that we shouldn't reformat."""
  if lines:
    for uwline in llines:
      uwline.disable = not lines.intersection(
          range(uwline.lineno, uwline.last.lineno + 1))

  # Now go through the lines and disable any lines explicitly marked as
  # disabled.
  index = 0
  while index < len(llines):
    uwline = llines[index]
    if uwline.is_comment:
      if _DisableYAPF(uwline.first.value.strip()):
        index += 1
        while index < len(llines):
          uwline = llines[index]
          line = uwline.first.value.strip()
          if uwline.is_comment and _EnableYAPF(line):
            if not _DisableYAPF(line):
              break
          uwline.disable = True
          index += 1
    elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE):
      uwline.disable = True
    index += 1


def _DisableYAPF(line):
  return (re.search(DISABLE_PATTERN,
                    line.split('\n')[0].strip(), re.IGNORECASE) or
          re.search(DISABLE_PATTERN,
                    line.split('\n')[-1].strip(), re.IGNORECASE))


def _EnableYAPF(line):
  return (re.search(ENABLE_PATTERN,
                    line.split('\n')[0].strip(), re.IGNORECASE) or
          re.search(ENABLE_PATTERN,
                    line.split('\n')[-1].strip(), re.IGNORECASE))


def _GetUnifiedDiff(before, after, filename='code'):
  """Get a unified diff of the changes.

  Arguments:
    before: (unicode) The original source code.
    after: (unicode) The reformatted source code.
    filename: (unicode) The code's filename.

  Returns:
    The unified diff text.
  """
  before = before.splitlines()
  after = after.splitlines()
  return '\n'.join(
      difflib.unified_diff(
          before,
          after,
          filename,
          filename,
          '(original)',
          '(reformatted)',
          lineterm='')) + '\n'
| [
"[email protected]"
] | |
00e9ff6636aa69231a33c1ca0e566fc09609f5cc | eb7ff98746500472a7e36f5fae666d68a76120e8 | /world-maze.py | 961b0444fd9eb9f83648c842d715392d3eb9d37a | [] | no_license | Jony-5/Python-2019 | 8d3744a0d595fcb9d59a49582b5810a78b968171 | 6a699f5962e2765ff2ff6f4432da77bca5ce7a7d | refs/heads/master | 2020-07-29T02:04:31.935828 | 2020-02-20T19:38:59 | 2020-02-20T19:38:59 | 209,627,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,527 | py | #cwc 200121
from mcpi.minecraft import Minecraft
from mcpi import block
from array import *
import random


def flowe(mc, x, y, z, total):
    done = 0
    while done < total:
        h = random.randint(0, 100)
        l = random.randint(0, 100)
        mc.setBlock(x + h, y, z + l, 37)
        done = done + 1


def init():
    # ipString = "192.168.1.73"
    ipString = "192.168.7.2"
    # mc = Minecraft.create("127.0.0.1", 4711)
    mc = Minecraft.create(ipString, 4711)
    mc.setting("world_immutable", False)
    # x, y, z = mc.player.getPos()
    return mc


numlist = [0, 1, 2, 3, 4, 64, 6, 7, 0, 0, 0, 12, 13, 14, 15, 16, 64, 18, 20,
           21, 22, 24, 26, 30, 31, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
           46, 47, 48, 49, 50, 51, 64, 54, 56, 57, 58, 60, 61, 62, 64, 65, 67,
           71, 73, 78, 79, 80, 81, 82, 83, 64, 89, 95, 98, 102, 103, 64,
           246, 247]

'''
mc.setBlocks(x,y, zz, x+4, y+4, zz, block.IRON_BLOCK.id)
mc.setBlocks(x-1,y, zz, x-1, y+4, zz+4, block.SANDSTONE .id)
mc.setBlocks(x-1,y, zz+4, x+4, y+4, zz+4, block.GOLD_ORE.id)
mc.setBlocks(x+4,y, zz+1, x+4, y+4, zz+4, block.STONE.id)
'''


def main():
    mc = flowe
    mc = init()
    x, y, z = mc.player.getPos()
    for h in range(0, 100):
        for l in range(0, 100):
            mc.setBlocks(x + h, y, z + l, x + h, y + 1, z + l,
                         numlist[random.randint(0, len(numlist) - 1)])
    print()
    # mc.setBlocks(x-1,y-5, z-1, x+11,y-5,z+11,89)
    # mc.setBlocks(x-1,y+10, z-1, x+11,y+10,z+11,89)
    # mc.setBlocks(x-1,y+20, z-1, x+11,y+20,z+11,89)
    mc.player.setPos(x, y + 20, z - 10)


if __name__ == "__main__":
    main()

# API Blocks
# ====================
# AIR                  0
# STONE                1
# GRASS                2
# DIRT                 3
# COBBLESTONE          4
# WOOD_PLANKS          5
# SAPLING              6
# BEDROCK              7
# WATER_FLOWING        8
# WATER                8
# WATER_STATIONARY     9
# LAVA_FLOWING         10
# LAVA                 10
# LAVA_STATIONARY      11
# SAND                 12
# GRAVEL               13
# GOLD_ORE             14
# IRON_ORE             15
# COAL_ORE             16
# WOOD                 17
# LEAVES               18
# GLASS                20
# LAPIS_LAZULI_ORE     21
# LAPIS_LAZULI_BLOCK   22
# SANDSTONE            24
# BED                  26
# COBWEB               30
# GRASS_TALL           31
# WOOL                 35
# FLOWER_YELLOW        37
# FLOWER_CYAN          38
# MUSHROOM_BROWN       39
# MUSHROOM_RED         40
# GOLD_BLOCK           41
# IRON_BLOCK           42
# STONE_SLAB_DOUBLE    43
# STONE_SLAB           44
# BRICK_BLOCK          45
# TNT                  46
# BOOKSHELF            47
# MOSS_STONE           48
# OBSIDIAN             49
# TORCH                50
# FIRE                 51
# STAIRS_WOOD          53
# CHEST                54
# DIAMOND_ORE          56
# DIAMOND_BLOCK        57
# CRAFTING_TABLE       58
# FARMLAND             60
# FURNACE_INACTIVE     61
# FURNACE_ACTIVE       62
# DOOR_WOOD            64
# LADDER               65
# STAIRS_COBBLESTONE   67
# DOOR_IRON            71
# REDSTONE_ORE         73
# SNOW                 78
# ICE                  79
# SNOW_BLOCK           80
# CACTUS               81
# CLAY                 82
# SUGAR_CANE           83
# FENCE                85
# GLOWSTONE_BLOCK      89
# BEDROCK_INVISIBLE    95
# STONE_BRICK          98
# GLASS_PANE           102
# MELON                103
# FENCE_GATE           107
# GLOWING_OBSIDIAN     246
# NETHER_REACTOR_CORE  247
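# Illustrative extra (not in the original script): flowe() above is defined but
# never actually called; scattering 50 dandelions (block id 37) near the player
# would look like this:
#
#     mc = init()
#     x, y, z = mc.player.getPos()
#     flowe(mc, x, y, z, 50)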
| [
"[email protected]"
] | |
873f3c5c2bfb0a1878a7e950aea4ecd21d965b33 | 732605a2bf9bc5470fcca8d8710440ad563ac452 | /23-day.py | 76a422ba2c6fc0d5f4bf816b2f449a84458e77e4 | [] | no_license | capJavert/advent-of-code-2017 | 6417f6b6fa16cc0c3383baa6bf0cab6edb47292a | 0ad7669ea00251e0cbf63c30b964b363d4270d2f | refs/heads/master | 2021-09-01T06:56:41.066536 | 2017-12-25T14:11:37 | 2017-12-25T14:11:37 | 112,718,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | h = 0
# NOTE: the original used range(..., step=17), but range() only takes the step
# positionally; the upper bound 123500 must itself be sampled, hence the +1.
for num in range(106500, 123500 + 1, 17):
    prime = True
    for i in range(2, num):
        if num % i == 0:
            prime = False
    if prime:
        h += 1
print(1001 - h)
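# A faster equivalent (illustrative): trial division only needs to run up to
# sqrt(num) and can stop at the first divisor found.
#
#     import math
#     composite = sum(1 for n in range(106500, 123501, 17)
#                     if any(n % i == 0 for i in range(2, int(math.sqrt(n)) + 1)))
#     print(composite)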
| [
"[email protected]"
] | |
42d4e5c41eb1c7f9addf3cd7d114f77ca73b49ea | 1f1b62a23e9267fba41a5f9dc757d2c107d3d7c1 | /nongji.butie/butie.1.py | 7cf3504f4412053a303b8464d1143667406520d5 | [
"Apache-2.0"
] | permissive | zzh-python/all-project | 0e45e2e542b9e63f6ed080ad47725d71f1590772 | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | refs/heads/master | 2020-07-24T01:47:19.924959 | 2019-09-11T08:48:17 | 2019-09-11T08:48:17 | 207,765,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py |
from bs4 import BeautifulSoup
import requests
import xlsxwriter
import re
page=226
privince='10内蒙古'
urlroot='http://222.143.21.233:2018/pub/GongShiSearch'
cook='__RequestVerificationToken=; __RequestVerificationToken_Lw__=4EDW+mpynwTkqW3DETpxdWrxn5W8nj6kMXUofKCESrXXkSYTWX4iH7MuETUbxuum0oVst5OqG1adH3bD5CcZHbYB/Wi/nKWUpPe6aKuBOPQ9QUbdZJ+YGMdsYzv4so8C9APWkGQV8G1anChDbon7Gc/mgNG6PDcIVRMAd79hLPg=; ASP.NET_SessionId=3xallifuxjx0o4d34efwznwj'
cook='__RequestVerificationToken=; CKIE_AD_ANGE='
dd='m+S6hUGtlQn2i+RhVMHrF2Am0045wGbq0cTIZLnTxHRD58cFn009D35pmOnJ8s2D2WwJs/cgQGdOX7soZYorHKQDQcEdOlCSZvmbbvHJbEVwzCbJVXC+Dw+LF6blmD84kkgr2189J4cuydeQR4vb/o0xtJFqrggQ0v0CMruTGKA='
header={
'Cookie':cook,
}
data={
'__RequestVerificationToken':dd,
'p':'打(压)捆机'
}
def get_data():
f=open(privince+'.txt','w+',encoding='utf-8')
for i in range(1,page+1):
        print('Page ' + str(i))
# url=urlroot+'?pageIndex='+str(i)
# req=requests.post(url,data=data,headers=header)
url='http://2018.nmnjbt2015.com/pub/gongshi?pageIndex='+str(i)+'&p=%E6%89%93%EF%BC%88%E5%8E%8B%EF%BC%89%E6%8D%86%E6%9C%BA'
req=requests.get(url,headers=header)
bsObj=BeautifulSoup(req.text,'html.parser')
trlist=bsObj.find('table',{'width':'1190'}).find_all('tr')[1:]
for tr in trlist:
row=[]
tdlist=tr.find_all('td')
# print(len(tdlist))
if len(tdlist)!=15:
                print('found a row whose length is not 15')
for td in tdlist:
row.append(td.get_text())
f.write(str(row)+'\n')
if len(trlist)<15:
break
# break
# print(req.text)
f.close()
def write_excel():
    # output filename: "<province> 农机购置补贴情况.xlsx" (agricultural machinery purchase subsidy report)
    workbook = xlsxwriter.Workbook(privince + '农机购置补贴情况.xlsx')  # create the workbook
    sheet = workbook.add_worksheet()
    workformat = workbook.add_format({
        'bold': True,  # bold font
    })
    # rowname=['序号','县','所在乡(镇)','所在村组','购机者姓名','机具品目','生产厂家','产品名称','购买机型','购买数量(台)','经销商','单台销售价格(元)','单台补贴额(元)','总补贴额(元)','状态',]
    # (column headers: serial no., county, township, village, buyer name, machine
    # category, manufacturer, product name, model, quantity, dealer, unit price,
    # unit subsidy, total subsidy, status)
# for m in range(15):
# sheet.write(0,m,rowname[m],workformat)
r=1
for line in open(privince+'.txt','r',encoding='utf-8'):
line=eval(line)
for m in range(len(line)):
sheet.write(r,m,line[m].replace('\r','').replace('\n','').replace('\t','').replace('\xa0','').strip())
r=r+1
workbook.close()
# get_data()
write_excel() | [
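
# --- Illustrative sketch (not part of the original script) ---
# Paginated scraping like get_data() benefits from one persistent connection
# with automatic retries. A hedged example built on requests' HTTPAdapter and
# urllib3's Retry (make_session is a hypothetical helper name):
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session():
    session = requests.Session()
    retry = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503])
    session.mount('http://', HTTPAdapter(max_retries=retry))
    return session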
"[email protected]"
] | |
1766d2cff2bfbb73ab2a5eaafc0d7abda24093a8 | 67b7e6d2c08f08403ec086c510622be48b8d26d8 | /src/test/tinc/tincrepo/mpp/gpdb/tests/package/metadata_track/__init__.py | 2b24a0179775914babb588e562817fc26853a33d | [
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6",
"NTP",
"W3C",
"metamail",
"Beerware",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-stream-benchmark",
"LicenseRef-scancode-openssl",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay-windows",
"Spencer-94",
"ISC",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"Python-2.0",
"curl",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"MIT-CMU",
"W3C-19980720"
] | permissive | sshyran/gpdb | 41012411d22b0294204dfb0fe67a1f4c8d1ecaf6 | 2d065ecdd2b5535cb42474f17a0ee6592b4e6837 | refs/heads/master | 2023-04-09T14:05:44.030212 | 2016-11-12T08:33:33 | 2016-11-12T08:34:36 | 73,544,159 | 0 | 0 | Apache-2.0 | 2023-04-04T00:30:10 | 2016-11-12T09:43:54 | PLpgSQL | UTF-8 | Python | false | false | 3,727 | py | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import shutil
import fileinput
import os, re
import socket
import tinctest
import getpass
from mpp.lib.mppUtil import getOpenPort
from mpp.lib.GPFDIST import GPFDIST
from tinctest.lib import local_path
class MDT:
def __init__(self):
self.host = str(socket.gethostbyname(socket.gethostname()))
self.port = str(getOpenPort())
self.gpfdist_dir = local_path('')
self.gpfdist = GPFDIST(self.port, self.host, directory=self.gpfdist_dir)
def setup_gpfdist(self):
self.gpfdist.killGpfdist()
self.gpfdist.startGpfdist()
def cleanup_gpfdist(self):
self.gpfdist.killGpfdist()
return True
def pre_process_sql(self, sql_path = local_path("sql")):
for dir in os.listdir(sql_path):
file = os.path.join(local_path('sql'), dir)
if os.path.isfile(file):
self.do_insert_select(file)
self.modify_sql_file(file)
def pre_process_ans(self, sql_path = local_path("expected")):
for dir in os.listdir(sql_path):
file = os.path.join(local_path('expected'), dir)
if os.path.isfile(file):
self.modify_ans_file(file)
    def do_insert_select(self, filename=None):
        """Inject a verification SELECT before the first 'drop ' statement
        in the sql file and again at the end of the file."""
        tmp_file = filename + '.tmp'
        a=0  # flips to 1 once the SELECT has been injected before a drop
#if (filename.find('alter_part_table')>=0) or (filename.find('create_table_partitions')>=0):
if (filename.find('part')>=0):
selectString='select classname,schemaname, objname, usestatus, usename, actionname, subtype, partitionlevel, parenttablename, parentschemaname from pg_stat_partition_operations where statime > ( select statime from pg_stat_partition_operations where objname =\'my_first_table\' and actionname =\'CREATE\') and objname not in (\'pg_stat_operations\',\'pg_stat_partition_operations\') order by statime;'
else:
selectString='select classname , schemaname , objname , usestatus , usename , actionname , subtype from pg_stat_operations where statime > ( select statime from pg_stat_operations where objname =\'my_first_table\' and actionname =\'CREATE\') and objname not in (\'pg_stat_operations\',\'pg_stat_partition_operations\') order by statime;'
f = open(filename,'r')
f1 = open(tmp_file, 'w')
for line in f:
if (line.find('drop ')!=-1) and (a==0):
f1.write(selectString)
f1.write('\n')
a = 1
f1.write(line)
f.close()
f1.write(selectString)
f1.write('\n')
f1.close()
shutil.move(tmp_file, filename)
    def modify_sql_file(self, file = None):
        # rewrite any hard-coded host:port in the sql file with this
        # gpfdist instance's host and port
        for line in fileinput.FileInput(file,inplace=1):
            line = re.sub('(\d+)\.(\d+)\.(\d+)\.(\d+)\:(\d+)', self.host+':'+self.port, line)
            print str(re.sub('\n','',line))
    def modify_ans_file(self, file = None):
        # replace the default 'gpadmin' user in the expected output with
        # the user actually running the tests
        for line in fileinput.FileInput(file,inplace=1):
            line = re.sub('gpadmin', getpass.getuser(), line)
            print str(re.sub('\n','',line))
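    # --- Illustrative sketch (not part of the original test library) ---
    # fileinput's inplace mode redirects stdout into the file being read, so
    # each print rewrites the corresponding line; that is why the two methods
    # above print every line after substitution. A hypothetical generic
    # helper in the same (Python 2) style:
    def replace_in_file(self, path, pattern, replacement):
        for line in fileinput.FileInput(path, inplace=1):
            print re.sub(pattern, replacement, line.rstrip('\n'))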
| [
"[email protected]"
] | |
b59d0204f4a30c0e448cfec7c2eaa61ec8c8b964 | db48843f666aed5d3c0a38a147e6b758fcac5376 | /config/development.py | e6fa47f24e6ba64dcd93223715acd8740d577262 | [
"Apache-2.0"
] | permissive | Cmlsltnl/mdpress | 6dcb5dff97139a6a490c7b9ad8a51bd0487d38c0 | 6fe44f36833443dc7a6ae1c7c4609137ef20b2e2 | refs/heads/master | 2021-01-11T11:25:08.695125 | 2016-10-31T15:02:14 | 2016-10-31T15:02:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # coding: utf-8
from .default import Config
class DevelopmentConfig(Config):
"""Base config class."""
TESTING = False
SECRET_KEY = "DevelopmentConfig"
# Site domain
SITE_TITLE = "mdpress"
REDIS_CONFIG = {
'HOST': 'localhost',
'PORT': 6379,
'DB': 10
}
    UPLOAD_FOLDER = "/tmp/upload"
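
# --- Illustrative usage sketch (not part of the original config) ---
# How a Flask app would typically load this class; app.config.from_object
# reads the uppercase attributes defined above.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.config.from_object(DevelopmentConfig)
    print(app.config["SITE_TITLE"])  # -> "mdpress"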
| [
"[email protected]"
] |