Dataset columns (one row per source file; ⌀ marks nullable fields):

| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, ⌀ |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
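The raw rows follow below, one source file per row in the column order above. As a quick orientation, here is a minimal sketch of how a dataset with this schema could be streamed with the Hugging Face `datasets` library; the hub path `user/code-dataset` is a hypothetical placeholder, not this dataset's actual name.

```python
from datasets import load_dataset

# "user/code-dataset" is a hypothetical placeholder for the dataset's hub path.
ds = load_dataset("user/code-dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each row carries repo provenance plus the full source file in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```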
blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
80c17d8f71b2c154e6a79fc0e019d8b3749683f8 | eaf54e6c022b748fd0d04f076634aafdfad5f69f | /motors/tools/enable_test.py | a873643226d5ae664ca70200712378dfb4e4abd4 | [] | no_license | PenguPilot/PenguPilot | 91f131effa11a3c1ef47abb161463772325ae63b | 9cb08836789cf17b9de57517040188c79765046b | refs/heads/ng-wip | 2020-03-28T00:41:06.634109 | 2015-11-19T16:06:00 | 2015-11-19T16:06:00 | 7,300,320 | 78 | 30 | null | 2015-11-19T16:06:00 | 2012-12-24T00:18:48 | C | UTF-8 | Python | false | false | 1,364 | py | #!/usr/bin/env python
"""
___________________________________________________
| _____ _____ _ _ _ |
| | __ \ | __ (_) | | | |
| | |__) |__ _ __ __ _ _ _| |__) || | ___ | |_ |
| | ___/ _ \ '_ \ / _` | | | | ___/ | |/ _ \| __| |
| | | | __/ | | | (_| | |_| | | | | | (_) | |_ |
| |_| \___|_| |_|\__, |\__,_|_| |_|_|\___/ \__| |
| __/ | |
| GNU/Linux based |___/ Multi-Rotor UAV Autopilot |
|___________________________________________________|
OMAP3-PWM Motor Test Program
Copyright (C) 2014 Tobias Simon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. """
from time import sleep
from sys import argv
from scl import scl_get_socket
socket = scl_get_socket('mot_en', 'push')
sleep(0.5)
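# Expected invocation (from the argv handling below; flag semantics assumed):
#   enable_test.py <m1> <m2> <m3> <m4>   -- one enable flag per motor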
m1 = int(argv[1])
m2 = int(argv[2])
m3 = int(argv[3])
m4 = int(argv[4])
socket.send([m1, m2, m3, m4])
| [
"[email protected]"
] | |
0690830ce3ac14f34a1c85b94348aab7a7d9b37f | 1db2e2238b4ef9c1b6ca3b99508693ee254d6904 | /develop/md_sim_holo_analysis/plot_cc_data.py | d5b6fd2975790d699c8e3b82243558e3a33f4170 | [] | no_license | pgreisen/pythonscripts | 8674e08095f76edf08ef2059300349218079724c | 0aadf8f96d19b306c1bc44a772e766a06fe3408b | refs/heads/master | 2021-07-06T23:54:57.774342 | 2021-06-08T19:36:36 | 2021-06-08T19:36:36 | 22,017,192 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from numpy import *
import sys
def get_matrix(datafile):
    # The original returned an undefined name (`matrixfile`); the assumed
    # intent is to load the whitespace-delimited file into an array.
    return loadtxt(datafile)
def main():
inputfile = sys.argv[1]
with open(inputfile, 'r') as f:
for line in f:
            print(len(line.split()))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f70a761e34b86161b69ddcd65bbc7765bcdabcb8 | 95540a155c043dd84ea6c0fb7d59ba06dc78b875 | /python/第三周:自助小练习/3.天堂图片网.py | a5fee2fb4afdc4fa8f2dc1cc71cc084044603f84 | [] | no_license | Lilenn/must | 41b95d8e80f48a6b82febb222936bbc3502cc01f | a510a8d0e58fde1bc97ab7ad9bd2738158dcba5e | refs/heads/master | 2020-04-09T23:09:20.116439 | 2018-12-06T09:02:09 | 2018-12-06T09:02:09 | 160,648,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from lxml import etree
import requests
import shutil,os
| [
"[email protected]"
] | |
f356b46e0e3ef8c8113b7799bab89f96ae631642 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /base/lib/python2.7/site-packages/wx-3.0-gtk2/wx/tools/Editra/src/extern/stcprint.py | e477e0d248b68e8f79b14a351f09dbd9616d09a7 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"WxWindows-exception-3.1",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 25,840 | py | #-----------------------------------------------------------------------------
# Name: stcprint.py
# Purpose: wx.StyledTextCtrl printing support
#
# Author: Rob McMullen
#
# Created: 2009
# RCS-ID: $Id: stcprint.py 67499 2011-04-15 20:33:40Z CJP $
# Copyright: (c) 2009 Rob McMullen <[email protected]>
# (c) 2007 Cody Precord <[email protected]>
# License: wxWidgets
#-----------------------------------------------------------------------------
"""Printing support for the wx.StyledTextCtrl
Concrete implementation of the wx.Printout class to generate a print preview
and paper copies of the contents of a wx.StyledTextCtrl. This was written
for U{Peppy<http://peppy.flipturn.org>} but has been designed as a standalone
class with no dependencies on peppy. It can be used for general purpose
printing or print preview of a wx.StyledTextCtrl. See the demo application at
the end of this file for more information.
I used code from U{Editra<http://www.editra.org>} as a starting point; other
pointers came from the wxPython mailing list, and lots was just pure ol'
trial and error because I couldn't find much specific documentation on the
FormatRange method of the STC.
NOTE: there are issues with certain scale factors when using print preview on
MSW. Some scale factors seem to work correctly, like 150%, but other smaller
scale factors cause the preview font size to fluctuate. Some zoom levels will
use very small fonts and render all the lines in the top half of the page,
while other zoom levels use an incorrectly large font and render lines off the
bottom of the page. The printed output is unaffected, however, and renders
the correct number of lines.
"""
import os
import wx
import wx.stc
_ = wx.GetTranslation
class STCPrintout(wx.Printout):
"""Specific printing support of the wx.StyledTextCtrl for the wxPython
framework
This class can be used for both printing to a printer and for print preview
functions. Unless otherwise specified, the print is scaled based on the
size of the current font used in the STC so that specifying a larger font
produces a larger font in the printed output (and correspondingly fewer
lines per page). Alternatively, you can eihdec specify the number of
lines per page, or you can specify the print font size in points which
produces a constant number of lines per inch regardless of the paper size.
Note that line wrapping in the source STC is currently ignored and lines
will be truncated at the right margin instead of wrapping. The STC doesn't
provide a convenient method for determining where line breaks occur within
a wrapped line, so it may be a difficult task to ever implement printing
with line wrapping using the wx.StyledTextCtrl.FormatRange method.
"""
debuglevel = 0
def __init__(self, stc, page_setup_data=None, print_mode=None, title=None,
border=False, lines_per_page=None, output_point_size=None,
job_title=None):
"""Constructor.
@param stc: wx.StyledTextCtrl to print
@kwarg page_setup_data: optional wx.PageSetupDialogData instance that
is used to determine the margins of the page.
@kwarg print_mode: optional; of the wx.stc.STC_PRINT_*
flags indicating how to render color text. Defaults to
wx.stc.STC_PRINT_COLOURONWHITEDEFAULTBG
@kwarg title: optional text string to use as the title which will be
centered above the first line of text on each page
@kwarg border: optional flag indicating whether or not to draw a black
border around the text on each page
@kwarg lines_per_page: optional integer that will force the page to
contain the specified number of lines. Either of C{output_point_size}
and C{lines_per_page} fully specifies the page, so if both are
specified, C{lines_per_page} will be used.
@kwarg output_point_size: optional integer that will force the output
text to be drawn in the specified point size. (Note that there are
72 points per inch.) If not specified, the point size of the text in
the STC will be used unless C{lines_per_page} is specified. Either of
C{output_point_size} and C{lines_per_page} fully specifies the page,
so if both are specified, C{lines_per_page} will be used.
"""
if not job_title:
job_title = wx.PrintoutTitleStr
wx.Printout.__init__(self, job_title)
self.stc = stc
if print_mode:
self.print_mode = print_mode
else:
self.print_mode = wx.stc.STC_PRINT_COLOURONWHITEDEFAULTBG
if title is not None:
self.title = title
else:
self.title = ""
if page_setup_data is None:
self.top_left_margin = wx.Point(15,15)
self.bottom_right_margin = wx.Point(15,15)
else:
self.top_left_margin = page_setup_data.GetMarginTopLeft()
self.bottom_right_margin = page_setup_data.GetMarginBottomRight()
try:
value = float(output_point_size)
if value > 0.0:
self.output_point_size = value
except (TypeError, ValueError):
self.output_point_size = None
try:
value = int(lines_per_page)
if value > 0:
self.user_lines_per_page = value
except (TypeError, ValueError):
self.user_lines_per_page = None
self.border_around_text = border
self.setHeaderFont()
def OnPreparePrinting(self):
"""Called once before a print job is started to set up any defaults.
"""
dc = self.GetDC()
self._calculateScale(dc)
self._calculatePageCount()
def _calculateScale(self, dc):
"""Scale the DC
This routine scales the DC based on the font size, determines the
number of lines on a page, and saves some useful pixel locations like
the top left corner and the width and height of the drawing area in
logical coordinates.
"""
if self.debuglevel > 0:
print
dc.SetFont(self.stc.GetFont())
# Calculate pixels per inch of the various devices. The dc_ppi will be
# equivalent to the page or screen PPI if the target is the printer or
# a print preview, respectively.
page_ppi_x, page_ppi_y = self.GetPPIPrinter()
screen_ppi_x, screen_ppi_y = self.GetPPIScreen()
dc_ppi_x, dc_ppi_y = dc.GetPPI()
if self.debuglevel > 0:
print("printer ppi: %dx%d" % (page_ppi_x, page_ppi_y))
print("screen ppi: %dx%d" % (screen_ppi_x, screen_ppi_y))
print("dc ppi: %dx%d" % (dc_ppi_x, dc_ppi_y))
# Calculate paper size. Note that this is the size in pixels of the
# entire paper, which may be larger than the printable range of the
# printer. We need to use the entire paper size because we calculate
# margins ourselves. Note that GetPageSizePixels returns the
# dimensions of the printable area.
px, py, pw, ph = self.GetPaperRectPixels()
page_width_inch = float(pw) / page_ppi_x
page_height_inch = float(ph) / page_ppi_y
if self.debuglevel > 0:
print("page pixels: %dx%d" % (pw, ph))
print("page size: %fx%f in" % (page_width_inch, page_height_inch))
dw, dh = dc.GetSizeTuple()
dc_pixels_per_inch_x = float(dw) / page_width_inch
dc_pixels_per_inch_y = float(dh) / page_height_inch
if self.debuglevel > 0:
print("device pixels: %dx%d" % (dw, dh))
print("device pixels per inch: %fx%f" % (dc_pixels_per_inch_x, dc_pixels_per_inch_y))
# Calculate usable page size
page_height_mm = page_height_inch * 25.4
margin_mm = self.top_left_margin[1] + self.bottom_right_margin[1]
usable_page_height_mm = page_height_mm - margin_mm
# Lines per page is then the number of lines (based on the point size
# reported by wx) that will fit into the usable page height
self.lines_pp = self._calculateLinesPerPage(dc, usable_page_height_mm)
# The final DC scale factor is then the ratio of the total height in
# pixels inside the margins to the number of pixels that it takes to
# represent the number of lines
dc_margin_pixels = float(dc_pixels_per_inch_y) * margin_mm / 25.4
dc_usable_pixels = dh - dc_margin_pixels
page_to_dc = self._calculateScaleFactor(dc, dc_usable_pixels, self.lines_pp)
dc.SetUserScale(page_to_dc, page_to_dc)
if self.debuglevel > 0:
print("Usable page height: %f in" % (usable_page_height_mm / 25.4))
print("Usable page pixels: %d" % dc_usable_pixels)
print("lines per page: %d" % self.lines_pp)
print("page_to_dc: %f" % page_to_dc)
self.x1 = dc.DeviceToLogicalXRel(float(self.top_left_margin[0]) / 25.4 * dc_pixels_per_inch_x)
self.y1 = dc.DeviceToLogicalXRel(float(self.top_left_margin[1]) / 25.4 * dc_pixels_per_inch_y)
self.x2 = dc.DeviceToLogicalXRel(dw) - dc.DeviceToLogicalXRel(float(self.bottom_right_margin[0]) / 25.4 * dc_pixels_per_inch_x)
self.y2 = dc.DeviceToLogicalYRel(dh) - dc.DeviceToLogicalXRel(float(self.bottom_right_margin[1]) / 25.4 * dc_pixels_per_inch_y)
page_height = self.y2 - self.y1
#self.lines_pp = int(page_height / dc_pixels_per_line)
if self.debuglevel > 0:
print("page size: %d,%d -> %d,%d, height=%d" % (int(self.x1), int(self.y1), int(self.x2), int(self.y2), page_height))
def _calculateLinesPerPage(self, dc, usable_page_height_mm):
"""Calculate the number of lines that will fit on the page.
@param dc: the Device Context
@param usable_page_height_mm: height in mm of the printable part of the
page (i.e. with the border height removed)
@returns: the number of lines on the page
"""
if self.user_lines_per_page is not None:
return self.user_lines_per_page
font = dc.GetFont()
if self.output_point_size is not None:
points_per_line = self.output_point_size
else:
points_per_line = font.GetPointSize()
# desired lines per mm based on point size. Note: printer points are
# defined as 72 points per inch
lines_per_inch = 72.0 / float(points_per_line)
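        # e.g. 10 pt output -> 72/10 = 7.2 lines per inch; a 9.5 in usable
        # column then holds about 68 lines.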
if self.debuglevel > 0:
print("font: point size per line=%d" % points_per_line)
print("font: lines per inch=%f" % lines_per_inch)
# Lines per page is then the number of lines (based on the point size
# reported by wx) that will fit into the usable page height
return float(usable_page_height_mm) / 25.4 * lines_per_inch
def _calculateScaleFactor(self, dc, dc_usable_pixels, lines_pp):
"""Calculate the scale factor for the DC to fit the number of lines
onto the printable area
@param dc: the Device Context
@param dc_usable_pixels: the number of pixels that defines usable
height of the printable area
@param lines_pp: the number of lines to fit into the printable area
@returns: the scale facter to be used in wx.DC.SetUserScale
"""
# actual line height in pixels according to the DC
dc_pixels_per_line = dc.GetCharHeight()
# actual line height in pixels according to the STC. This can be
# different from dc_pixels_per_line even though it is the same font.
# Don't know why this is the case; maybe because the STC takes into
# account additional spacing?
stc_pixels_per_line = self.stc.TextHeight(0)
if self.debuglevel > 0:
print("font: dc pixels per line=%d" % dc_pixels_per_line)
print("font: stc pixels per line=%d" % stc_pixels_per_line)
# Platform dependency alert: I don't know why this works, but through
# experimentation it seems like the scaling factor depends on
# different font heights depending on the platform.
if wx.Platform == "__WXMSW__":
# On windows, the important font height seems to be the number of
# pixels reported by the STC
page_to_dc = float(dc_usable_pixels) / (stc_pixels_per_line * lines_pp)
else:
# Linux and Mac: the DC font height seems to be the correct height
page_to_dc = float(dc_usable_pixels) / (dc_pixels_per_line * lines_pp)
return page_to_dc
def _calculatePageCount(self, attempt_wrap=False):
"""Calculates offsets into the STC for each page
This pre-calculates the page offsets for each page to support print
preview being able to seek backwards and forwards.
"""
page_offsets = []
page_line_start = 0
lines_on_page = 0
num_lines = self.stc.GetLineCount()
line = 0
while line < num_lines:
if attempt_wrap:
wrap_count = self.stc.WrapCount(line)
if wrap_count > 1 and self.debuglevel > 0:
print("found wrapped line %d: %d" % (line, wrap_count))
else:
wrap_count = 1
# If the next line pushes the count over the edge, mark a page and
# start the next page
if lines_on_page + wrap_count > self.lines_pp:
start_pos = self.stc.PositionFromLine(page_line_start)
end_pos = self.stc.GetLineEndPosition(page_line_start + lines_on_page - 1)
if self.debuglevel > 0:
print("Page: line %d - %d" % (page_line_start, page_line_start + lines_on_page))
page_offsets.append((start_pos, end_pos))
page_line_start = line
lines_on_page = 0
lines_on_page += wrap_count
line += 1
if lines_on_page > 0:
start_pos = self.stc.PositionFromLine(page_line_start)
end_pos = self.stc.GetLineEndPosition(page_line_start + lines_on_page)
page_offsets.append((start_pos, end_pos))
self.page_count = len(page_offsets)
self.page_offsets = page_offsets
if self.debuglevel > 0:
print("page offsets: %s" % self.page_offsets)
def _getPositionsOfPage(self, page):
"""Get the starting and ending positions of a page
@param page: page number
@returns: tuple containing the start and end positions that can be
passed to FormatRange to render a page
"""
page -= 1
start_pos, end_pos = self.page_offsets[page]
return start_pos, end_pos
def GetPageInfo(self):
"""Return the valid page ranges.
Note that pages are numbered starting from one.
"""
return (1, self.page_count, 1, self.page_count)
def HasPage(self, page):
"""Returns True if the specified page is within the page range
"""
return page <= self.page_count
def OnPrintPage(self, page):
"""Draws the specified page to the DC
@param page: page number to render
"""
dc = self.GetDC()
self._calculateScale(dc)
self._drawPageContents(dc, page)
self._drawPageHeader(dc, page)
self._drawPageBorder(dc)
return True
def _drawPageContents(self, dc, page):
"""Render the STC window into a DC for printing.
Force the right margin of the rendered window to be huge so the STC
won't attempt word wrapping.
@param dc: the device context representing the page
@param page: page number
"""
start_pos, end_pos = self._getPositionsOfPage(page)
render_rect = wx.Rect(self.x1, self.y1, 32000, self.y2)
page_rect = wx.Rect(self.x1, self.y1, self.x2, self.y2)
self.stc.SetPrintColourMode(self.print_mode)
edge_mode = self.stc.GetEdgeMode()
self.stc.SetEdgeMode(wx.stc.STC_EDGE_NONE)
end_point = self.stc.FormatRange(True, start_pos, end_pos, dc, dc,
render_rect, page_rect)
self.stc.SetEdgeMode(edge_mode)
def _drawPageHeader(self, dc, page):
"""Draw the page header into the DC for printing
@param dc: the device context representing the page
@param page: page number
"""
# Set font for title/page number rendering
dc.SetFont(self.getHeaderFont())
        dc.SetTextForeground("black")
dum, yoffset = dc.GetTextExtent(".")
yoffset /= 2
if self.title:
title_w, title_h = dc.GetTextExtent(self.title)
dc.DrawText(self.title, self.x1, self.y1 - title_h - yoffset)
# Page Number
page_lbl = _("Page: %d") % page
pg_lbl_w, pg_lbl_h = dc.GetTextExtent(page_lbl)
dc.DrawText(page_lbl, self.x2 - pg_lbl_w, self.y1 - pg_lbl_h - yoffset)
def setHeaderFont(self, point_size=10, family=wx.FONTFAMILY_SWISS,
style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_NORMAL):
"""Set the font to be used as the header font
@param point_size: point size of the font
@param family: one of the wx.FONTFAMILY_* values, e.g.
wx.FONTFAMILY_SWISS, wx.FONTFAMILY_ROMAN, etc.
@param style: one of the wx.FONTSTYLE_* values, e.g.
wxFONTSTYLE_NORMAL, wxFONTSTYLE_ITALIC, etc.
@param weight: one of the wx.FONTWEIGHT_* values, e.g.
wx.FONTWEIGHT_NORMAL, wx.FONTWEIGHT_LIGHT, etc.
"""
self.header_font_point_size = point_size
self.header_font_family = family
self.header_font_style = style
self.header_font_weight = weight
def getHeaderFont(self):
"""Returns the font to be used to draw the page header text
@returns: wx.Font instance
"""
point_size = self.header_font_point_size
font = wx.Font(point_size, self.header_font_family,
self.header_font_style, self.header_font_weight)
return font
def _drawPageBorder(self, dc):
"""Draw the page border into the DC for printing
@param dc: the device context representing the page
"""
if self.border_around_text:
dc.SetPen(wx.BLACK_PEN)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(self.x1, self.y1, self.x2 - self.x1 + 1, self.y2 - self.y1 + 1)
if __name__ == "__main__":
import sys
import __builtin__
__builtin__._ = unicode
# Set up sample print data
top_left_margin = wx.Point(15,15)
bottom_right_margin = wx.Point(15,15)
def wrap(text, width=80):
"""A word-wrap function that preserves existing line breaks
and most spaces in the text.
Expects that existing line breaks are posix newlines (\n).
http://code.activestate.com/recipes/148061/
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
class TestSTC(wx.stc.StyledTextCtrl):
def __init__(self, *args, **kwargs):
wx.stc.StyledTextCtrl.__init__(self, *args, **kwargs)
self.SetMarginType(0, wx.stc.STC_MARGIN_NUMBER)
self.SetMarginWidth(0, 32)
class Frame(wx.Frame):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.stc = TestSTC(self, -1)
self.CreateStatusBar()
menubar = wx.MenuBar()
self.SetMenuBar(menubar) # Adding the MenuBar to the Frame content.
menu = wx.Menu()
menubar.Append(menu, "File")
self.menuAdd(menu, "Open", "Open File", self.OnOpenFile)
menu.AppendSeparator()
self.menuAdd(menu, "Print Preview", "Display print preview", self.OnPrintPreview)
self.menuAdd(menu, "Print", "Print to printer or file", self.OnPrint)
menu.AppendSeparator()
self.menuAdd(menu, "Quit", "Exit the pragram", self.OnQuit)
self.print_data = wx.PrintData()
self.print_data.SetPaperId(wx.PAPER_LETTER)
def loadFile(self, filename, word_wrap=False):
fh = open(filename)
text = fh.read()
if word_wrap:
text = wrap(text)
            self.stc.SetText(text)  # the original re-read a consumed handle, setting empty text
            fh.close()
def loadSample(self, paragraphs=10, word_wrap=False):
lorem_ipsum = u"""\
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Vivamus mattis
commodo sem. Phasellus scelerisque tellus id lorem. Nulla facilisi.
Suspendisse potenti. Fusce velit odio, scelerisque vel, consequat nec,
dapibus sit amet, tortor.
Vivamus eu turpis. Nam eget dolor. Integer at elit. Praesent mauris. Nullam non nulla at nulla tincidunt malesuada. Phasellus id ante. Sed mauris. Integer volutpat nisi non diam.
Etiam elementum. Pellentesque interdum justo eu risus. Cum sociis natoque
penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc
semper.
In semper enim ut odio. Nulla varius leo commodo elit. Quisque condimentum, nisl eget elementum laoreet, mauris turpis elementum felis, ut accumsan nisl velit et mi.
And some Russian: \u041f\u0438\u0442\u043e\u043d - \u043b\u0443\u0447\u0448\u0438\u0439 \u044f\u0437\u044b\u043a \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f!
"""
if word_wrap:
lorem_ipsum = wrap(lorem_ipsum)
self.stc.ClearAll()
for i in range(paragraphs):
self.stc.AppendText(lorem_ipsum)
wx.CallAfter(self.OnPrintPreview, None)
def menuAdd(self, menu, name, desc, fcn, id=-1, kind=wx.ITEM_NORMAL):
if id == -1:
id = wx.NewId()
a = wx.MenuItem(menu, id, name, desc, kind)
menu.AppendItem(a)
wx.EVT_MENU(self, id, fcn)
menu.SetHelpString(id, desc)
def OnOpenFile(self, evt):
dlg = wx.FileDialog(self, "Choose a text file",
defaultDir = "",
defaultFile = "",
wildcard = "*")
if dlg.ShowModal() == wx.ID_OK:
print("Opening %s" % dlg.GetPath())
self.loadFile(dlg.GetPath())
dlg.Destroy()
def OnQuit(self, evt):
self.Close(True)
def getPrintData(self):
return self.print_data
def OnPrintPreview(self, evt):
wx.CallAfter(self.showPrintPreview)
def showPrintPreview(self):
printout = STCPrintout(self.stc, title="Testing!!!", border=True, output_point_size=10)
printout2 = STCPrintout(self.stc, title="Testing!!!", border=True, output_point_size=10)
preview = wx.PrintPreview(printout, printout2, self.getPrintData())
preview.SetZoom(100)
if preview.IsOk():
pre_frame = wx.PreviewFrame(preview, self, _("Print Preview"))
dsize = wx.GetDisplaySize()
pre_frame.SetInitialSize((self.GetSize()[0],
dsize.GetHeight() - 100))
pre_frame.Initialize()
pre_frame.Show()
else:
wx.MessageBox(_("Failed to create print preview"),
_("Print Error"),
style=wx.ICON_ERROR|wx.OK)
def OnPrint(self, evt):
wx.CallAfter(self.showPrint)
def showPrint(self):
pdd = wx.PrintDialogData(self.getPrintData())
printer = wx.Printer(pdd)
printout = STCPrintout(self.stc)
result = printer.Print(self.stc, printout)
if result:
data = printer.GetPrintDialogData()
self.print_data = wx.PrintData(data.GetPrintData())
elif printer.GetLastError() == wx.PRINTER_ERROR:
wx.MessageBox(_("There was an error when printing.\n"
"Check that your printer is properly connected."),
_("Printer Error"),
style=wx.ICON_ERROR|wx.OK)
printout.Destroy()
app = wx.App(False)
frame = Frame(None, size=(800, -1))
word_wrap = False
filename = None
if len(sys.argv) > 1:
if not sys.argv[-1].startswith("-"):
filename = sys.argv[-1]
if '-d' in sys.argv:
STCPrintout.debuglevel = 1
if '-w' in sys.argv:
word_wrap = True
if filename:
frame.loadFile(filename, word_wrap)
else:
frame.loadSample(word_wrap=word_wrap)
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
c5b358ad0899de087b7ee223a7bcd45f3ad3f642 | a333ef95f7deeb7a0a6ee4700beb022dc7649256 | /ecommerce/forms.py | e21e2d600b125de9ddfc4520c15ab34d25323cb8 | [] | no_license | gmachielsen/fullstackproject | 3884dc2b301c3aeab1eb6aa025159754e5a3b9ea | 7a4879d9fb83ec5c83ff39ea12f7986deae4cfcc | refs/heads/master | 2020-07-27T04:25:04.494803 | 2019-10-07T13:35:43 | 2019-10-07T13:35:43 | 208,867,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={
"class": "form-control",
"id": "form_full_name",
"placeholder":"Uw volledige naam"
}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
"class": "form-control",
"placeholder":"Uw e-mailadres"
}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder":"Uw bericht"
}
)
)
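    # Django calls clean_<fieldname>() automatically during is_valid();
    # raising ValidationError here attaches the message to the email field.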
def clean_email(self):
email = self.cleaned_data.get("email")
if not "gmail.com" in email:
raise forms.ValidationError("Email has to be gmail.com")
return email
| [
"[email protected]"
] | |
ccdb326548458a86ef14d736d9d50c92db7f1157 | a74b980fd95d5d810315f181449fc9d1710e6923 | /phonedata/yeahnet.py | ed6ea5f874b95e8ee37ab7dee51a5a1e02d7bdf8 | [
"Apache-2.0"
] | permissive | cbbbbbbbb/sspywork | b70f5539203b47b21eec2f0514ddca155affc2b8 | 8f05a6b91fc205960edd57f9076facec04f49a1a | refs/heads/master | 2023-03-22T19:45:13.024076 | 2021-03-08T01:24:21 | 2021-03-08T01:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | """
Fetch information from a yeah.net mailbox.
Created by swm 2018/05/29
"""
import requests
class MAIL:
def __init__(self):
self.cookie = 'starttime=; NTES_SESS=piyH0LD0YptBM8leUeE478YlP7T8.Vgig.MBVVoa5OYn_q13_H8IY.T_jau1Xgd13aK5m3qSOIgAXZoIvQ5woT3FPsROJ0QIeSIcFGHnqdecWJhKt57dXjBMcAe._UfATVVcxmFpPiC8JDSOTYF22vro.RoXSY7ZXU1AouztMtkGbQtnhxWNgts3zVK4ZyLQ7gr8A0K0FJ75ngNGBNtGzH3k0; S_INFO=1527558033|0|##2&70|[email protected]; [email protected]|1527558033|0|mailyeah|00&99|null&null&null#sic&510100#10#0#0|&0||[email protected]; mail_upx=c2bj.mail.yeah.net|c3bj.mail.yeah.net|c4bj.mail.yeah.net|c5bj.mail.yeah.net|c6bj.mail.yeah.net|c7bj.mail.yeah.net|c1bj.mail.yeah.net; mail_upx_nf=; mail_idc=""; Coremail=b2b7e21178053%IAAKzLnnpYMzneWhycnnReRLtzYetSUs%g1a6.mail.yeah.net; cm_last_info=dT1ubXNieWVhaG5ldCU0MHllYWgubmV0JmQ9aHR0cCUzQSUyRiUyRm1haWwueWVhaC5uZXQlMkZtJTJGbWFpbi5qc3AlM0ZzaWQlM0RJQUFLekxubnBZTXpuZVdoeWNublJlUkx0ellldFNVcyZzPUlBQUt6TG5ucFlNem5lV2h5Y25uUmVSTHR6WWV0U1VzJmg9aHR0cCUzQSUyRiUyRm1haWwueWVhaC5uZXQlMkZtJTJGbWFpbi5qc3AlM0ZzaWQlM0RJQUFLekxubnBZTXpuZVdoeWNublJlUkx0ellldFNVcyZ3PW1haWwueWVhaC5uZXQmbD0wJnQ9MTEmYXM9ZmFsc2U=; MAIL_SESS=piyH0LD0YptBM8leUeE478YlP7T8.Vgig.MBVVoa5OYn_q13_H8IY.T_jau1Xgd13aK5m3qSOIgAXZoIvQ5woT3FPsROJ0QIeSIcFGHnqdecWJhKt57dXjBMcAe._UfATVVcxmFpPiC8JDSOTYF22vro.RoXSY7ZXU1AouztMtkGbQtnhxWNgts3zVK4ZyLQ7gr8A0K0FJ75ngNGBNtGzH3k0; MAIL_SINFO=1527558033|0|##2&70|[email protected]; [email protected]|1527558033|0|mailyeah|00&99|null&null&null#sic&510100#10#0#0|&0||[email protected]; secu_info=1; mail_entry_sess=1dc3df717e7f1a97e095a96a74c01af63b2611c62f1c9abb487a17cdaa6d00b0aa3f1c3306c11a1296822eb70687c087ac64dc50b6ca8bded529139920f21396f6a671c5b998410f6c3888bb3cffefe6ac02700d06e3d9d57da6c31aecc736a882a5867cfa93ff6e3e091954d47df88e68264f3acdbac5bd768da7c6c8e63bbfbfc38060ebec9eb9c94094939410f0dfe89c6e79c3044387f02ab6461946ffe38b5217a2338f8a0290a4bf2f1f695104697b31f86a15d45f1bd44ad64f5244d3; JSESSIONID=4CD2E7ED787E17A580350265121691F2; locale='
self.usragent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
self.headers = {
'Cookie': self.cookie,
'User-Agent': self.usragent
}
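        # NOTE: the Coremail/NTES_SESS cookies and the sid query parameter in
        # the URLs below are session-bound; they expire and must come from a
        # fresh login.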
def getmailist(self):
        # Log in and fetch the mailbox message list
url = 'http://mail.yeah.net/m/s?sid=IAAKzLnnpYMzneWhycnnReRLtzYetSUs&func=global:sequential'
data = {
'var': '<?xml version="1.0"?><object><array name="items"><object><string name="func">mbox:getAllFolders</string><object name="var"><boolean name="stats">true</boolean></object></object><object><string name="func">mbox:listMessages</string><object name="var"><string name="order">date</string><boolean name="desc">true</boolean><int name="start">0</int><int name="limit">20</int><int name="fid">1</int></object></object></array></object>'
}
data = requests.post(url, headers=self.headers, data=data)
print(data.text)
def getmailinfo(self):
        # Fetch the content of a single message
url = 'http://mail.yeah.net/m/s?sid=IAAKzLnnpYMzneWhycnnReRLtzYetSUs&func=mbox:readMessage&l=read&action=read'
data = {
"var": '<?xml version="1.0"?><object><string name="id">14:1tbiDgAMGlH7iuVvswAAbB</string><string name="mode">both</string><boolean name="autoName">true</boolean><boolean name="supportTNEF">true</boolean><boolean name="filterStylesheets">true</boolean><boolean name="returnImageInfo">true</boolean><boolean name="markRead">true</boolean></object>'
}
data = requests.post(url, headers=self.headers, data=data)
print(data.text)
if __name__ == '__main__':
MAIL().getmailinfo() | [
"[email protected]"
] | |
03e28e10b2f413a51c84df41bcf5de6ac327d799 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/messenger/proto/bw_chat2/__init__.py | ac4f1deb4dfe87e1d77ff2abdd87fb006403ac1c | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 4,907 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/proto/bw_chat2/__init__.py
from messenger import g_settings
from messenger.m_constants import MESSENGER_SCOPE, PROTO_TYPE
from messenger.proto.bw_chat2 import chat_handlers
from messenger.proto.bw_chat2.VOIPChatProvider import VOIPChatProvider
from messenger.proto.bw_chat2.VOIPChatController import VOIPChatController
from messenger.proto.events import g_messengerEvents
from messenger.proto.interfaces import IProtoPlugin
from messenger.proto.bw_chat2.provider import BWChatProvider
from messenger.proto.bw_chat2.UsersHandler import UsersHandler
class BWProtoPlugin(IProtoPlugin):
__slots__ = ('__provider', '__adminChat', '__users', '__arenaChat', '__battleCmd', '__unitChat', '__voipProvider', '__voipCtrl', '__isConnected')
def __init__(self):
super(BWProtoPlugin, self).__init__()
self.__provider = None
self.__adminChat = None
self.__users = None
self.__arenaChat = None
self.__battleCmd = None
self.__unitChat = None
self.__voipProvider = None
self.__voipCtrl = None
self.__isConnected = False
return
@property
def arenaChat(self):
return self.__arenaChat
@property
def battleCmd(self):
return self.__battleCmd
@property
def unitChat(self):
return self.__unitChat
@property
def adminChat(self):
return self.__adminChat
@property
def provider(self):
return self.__provider
@property
def voipProvider(self):
return self.__voipProvider
@property
def voipController(self):
return self.__voipCtrl
@property
def users(self):
return self.__users
def isConnected(self):
return self.__isConnected
def connect(self, scope):
if scope != MESSENGER_SCOPE.BATTLE:
self.__arenaChat.leave()
if not self.__isConnected:
self.__isConnected = True
self.__voipCtrl.start()
g_messengerEvents.onPluginConnected(PROTO_TYPE.BW_CHAT2)
def view(self, scope):
self.__provider.setEnable(True)
self.__battleCmd.switch(scope)
def disconnect(self):
if not self.__isConnected:
return
self.__isConnected = False
self.__arenaChat.disconnect()
self.__unitChat.disconnect()
self.__voipProvider.leave()
self.__voipCtrl.stop()
self.__provider.setEnable(False)
g_messengerEvents.onPluginDisconnected(PROTO_TYPE.BW_CHAT2)
def goToReplay(self):
self.__provider.goToReplay()
self.__battleCmd.goToReplay()
def setFilters(self, msgFilterChain):
self.__provider.setFilters(msgFilterChain)
def init(self):
self.__provider = BWChatProvider()
self.__adminChat = chat_handlers.AdminChatCommandHandler(self.__provider)
self.__adminChat.registerHandlers()
self.__users = UsersHandler(self.__provider)
self.__users.registerHandlers()
self.__arenaChat = chat_handlers.ArenaChatHandler(self.__provider, self.__adminChat)
self.__arenaChat.registerHandlers()
self.__battleCmd = chat_handlers.BattleChatCommandHandler(self.__provider)
self.__battleCmd.registerHandlers()
self.__unitChat = chat_handlers.UnitChatHandler(self.__provider, self.__adminChat)
self.__unitChat.registerHandlers()
self.__voipProvider = VOIPChatProvider(self.__provider)
self.__voipProvider.registerHandlers()
self.__voipCtrl = VOIPChatController()
def clear(self):
if self.__arenaChat:
self.__arenaChat.unregisterHandlers()
self.__arenaChat.clear()
self.__arenaChat = None
if self.__battleCmd:
self.__battleCmd.unregisterHandlers()
self.__battleCmd.clear()
self.__battleCmd = None
if self.__unitChat:
self.__unitChat.unregisterHandlers()
self.__unitChat.clear()
self.__unitChat = None
if self.__adminChat:
self.__adminChat.unregisterHandlers()
self.__adminChat.clear()
self.__adminChat = None
if self.__voipProvider:
self.__voipProvider.unregisterHandlers()
self.__voipProvider.clear()
self.__voipProvider = None
if self.__voipCtrl:
self.__voipCtrl.stop()
self.__voipCtrl = None
if self.__provider:
self.__provider.clear()
self.__provider = None
if self.__users:
self.__users.clear()
self.__users = None
return
def onActionReceived(self, actionID, reqID, args):
if g_settings.server.BW_CHAT2.isEnabled():
self.__provider.onActionReceived(actionID, reqID, args)
| [
"[email protected]"
] | |
481aef95b2e0e4b7e38a5f718281cc3d4c43de04 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/containerservice/snapshot.py | 146c8fe3f5c751dc328ebf2524362453d22cb87b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,040 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
creation_data: Optional[pulumi.Input['CreationDataArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Snapshot resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['CreationDataArgs'] creation_data: CreationData to be used to specify the source agent pool resource ID to create this snapshot.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input[Union[str, 'SnapshotType']] snapshot_type: The type of a snapshot. The default is NodePool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if creation_data is not None:
pulumi.set(__self__, "creation_data", creation_data)
if location is not None:
pulumi.set(__self__, "location", location)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if snapshot_type is not None:
pulumi.set(__self__, "snapshot_type", snapshot_type)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> Optional[pulumi.Input['CreationDataArgs']]:
"""
CreationData to be used to specify the source agent pool resource ID to create this snapshot.
"""
return pulumi.get(self, "creation_data")
@creation_data.setter
def creation_data(self, value: Optional[pulumi.Input['CreationDataArgs']]):
pulumi.set(self, "creation_data", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> Optional[pulumi.Input[Union[str, 'SnapshotType']]]:
"""
The type of a snapshot. The default is NodePool.
"""
return pulumi.get(self, "snapshot_type")
@snapshot_type.setter
def snapshot_type(self, value: Optional[pulumi.Input[Union[str, 'SnapshotType']]]):
pulumi.set(self, "snapshot_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Snapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
A node pool snapshot resource.
API Version: 2021-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['CreationDataArgs']] creation_data: CreationData to be used to specify the source agent pool resource ID to create this snapshot.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
:param pulumi.Input[Union[str, 'SnapshotType']] snapshot_type: The type of a snapshot. The default is NodePool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A node pool snapshot resource.
API Version: 2021-08-01.
:param str resource_name: The name of the resource.
:param SnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["creation_data"] = creation_data
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["snapshot_type"] = snapshot_type
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice:Snapshot"), pulumi.Alias(type_="azure-native:containerservice/v20210801:Snapshot"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210801:Snapshot"), pulumi.Alias(type_="azure-native:containerservice/v20210901:Snapshot"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210901:Snapshot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Snapshot, __self__).__init__(
'azure-native:containerservice:Snapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Snapshot':
"""
Get an existing Snapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["creation_data"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["snapshot_type"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Snapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> pulumi.Output[Optional['outputs.CreationDataResponse']]:
"""
CreationData to be used to specify the source agent pool resource ID to create this snapshot.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of a snapshot. The default is NodePool.
"""
return pulumi.get(self, "snapshot_type")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this snapshot.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
620b7c3b1f1b3ae6cb598abf93fc176deb65cae5 | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /nni/nas/tensorflow/base_mutator.py | 860680f199278d3fd38910b82e7661b17d2f652e | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 2,957 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tensorflow.keras import Model
from .mutables import Mutable, MutableScope, InputChoice
from .utils import StructuredMutableTreeNode
class BaseMutator(Model):
def __init__(self, model):
super().__init__()
self.__dict__['model'] = model
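        # Written via __dict__ to bypass Model.__setattr__, so the wrapped
        # model's parameters are not registered on the mutator itself
        # (see the __setattr__ guard below).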
self._structured_mutables = self._parse_search_space(self.model)
def _parse_search_space(self, module, root=None, prefix='', memo=None, nested_detection=None):
if memo is None:
memo = set()
if root is None:
root = StructuredMutableTreeNode(None)
if module not in memo:
memo.add(module)
if isinstance(module, Mutable):
if nested_detection is not None:
raise RuntimeError('Cannot have nested search space. Error at {} in {}'
.format(module, nested_detection))
module.name = prefix
module.set_mutator(self)
root = root.add_child(module)
if not isinstance(module, MutableScope):
nested_detection = module
if isinstance(module, InputChoice):
for k in module.choose_from:
if k != InputChoice.NO_KEY and k not in [m.key for m in memo if isinstance(m, Mutable)]:
raise RuntimeError('"{}" required by "{}" not found in keys that appeared before, and is not NO_KEY.'
.format(k, module.key))
for submodule in module.layers:
if not isinstance(submodule, Model):
continue
submodule_prefix = prefix + ('.' if prefix else '') + submodule.name
self._parse_search_space(submodule, root, submodule_prefix, memo=memo, nested_detection=nested_detection)
return root
@property
def mutables(self):
return self._structured_mutables
def undedup_mutables(self):
return self._structured_mutables.traverse(deduplicate=False)
def call(self, *inputs):
raise RuntimeError('Call is undefined for mutators.')
def __setattr__(self, name, value):
if name == 'model':
raise AttributeError("Attribute `model` can be set at most once, and you shouldn't use `self.model = model` to "
"include your network, as it will include all parameters in model into the mutator.")
return super().__setattr__(name, value)
def enter_mutable_scope(self, mutable_scope):
pass
def exit_mutable_scope(self, mutable_scope):
pass
def on_forward_layer_choice(self, mutable, *inputs):
raise NotImplementedError
def on_forward_input_choice(self, mutable, tensor_list):
raise NotImplementedError
def export(self):
raise NotImplementedError
| [
"[email protected]"
] | |
108d1dfb0862d12efa5e05cbbe676147dcf2ad65 | 32daa457e295c74b96c99f74a6a3031cf03c571e | /aliyun-python-sdk-onsmqtt/aliyunsdkonsmqtt/request/v20191211/ApplyTokenRequest.py | c2f46482b8d34454284f4fd68b823522489df47f | [
"Apache-2.0"
] | permissive | BertonLan/aliyun-openapi-python-sdk | 0836057c888f7534f37b0001fe2a338c6d505e8e | fd9723c2a800b991179231c1ac4bc92dd8bb5934 | refs/heads/master | 2022-04-23T16:57:26.354904 | 2020-04-22T02:51:45 | 2020-04-22T02:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ApplyTokenRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OnsMqtt', '2019-12-11', 'ApplyToken','onsmqtt')
self.set_method('POST')
def get_ExpireTime(self):
return self.get_query_params().get('ExpireTime')
def set_ExpireTime(self,ExpireTime):
self.add_query_param('ExpireTime',ExpireTime)
def get_Resources(self):
return self.get_query_params().get('Resources')
def set_Resources(self,Resources):
self.add_query_param('Resources',Resources)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Actions(self):
return self.get_query_params().get('Actions')
def set_Actions(self,Actions):
self.add_query_param('Actions',Actions) | [
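# Minimal usage sketch (hypothetical credentials and parameter values):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ApplyTokenRequest()
#   request.set_InstanceId('post-cn-xxxx')
#   request.set_Resources('topic/xxxx')
#   request.set_Actions('R,W')
#   request.set_ExpireTime(1700000000000)
#   print(client.do_action_with_exception(request))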
"[email protected]"
] | |
69f21e60bf08fd723da51b261ecc602779004ec5 | 3eb20ec4cf54cd01fc71caa3e8561ef1ff80b893 | /revyoume_club/admin.py | 00b452e6560ea90fc25b570e531a788c63bcb318 | [] | no_license | hottredpen/laowai_panda | 38f0db4f6d848ed0b7b6449d1bc77aa952dac695 | e862bbe64b84698b7981176c7c190775edf99a67 | refs/heads/master | 2023-01-03T05:11:01.012487 | 2020-10-12T08:04:19 | 2020-10-12T08:04:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | from django.contrib import admin
from .models import *
from solo.admin import SingletonModelAdmin
from django import forms
from django.contrib import messages
# Register your models here.
class PostInline(admin.TabularInline):
model = Post
extra = 0
readonly_fields = ('text', 'type',
'media', 'channel', 'liked_by_users',)
def has_add_permission(self, request):
return False
class PostAdmin(admin.ModelAdmin):
list_filter = ('type', 'show_in', 'channel')
search_fields = ('text',)
list_display = ('text', 'type', 'show_in', 'channel', 'likes')
readonly_fields = ('liked_by_users',)
def save_model(self, request, obj, form, change):
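        # Validate that the post's media matches its declared type; only save
        # when consistent, otherwise surface the error in the admin UI below.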
error = ""
if obj.type == Post.TXT_IMAGE and obj.media == "":
error = "Please upload an image to the media."
elif obj.type == Post.TXT_VIDEO and obj.media == "" and not obj.youko_link:
error = "Please upload an video to the media or add youko url."
else:
super().save_model(request, obj, form, change)
return obj
self.message_user(request, error, messages.ERROR)
return obj
class ChannelAdmin(admin.ModelAdmin):
inlines = [PostInline, ]
list_display = ('name',)
admin.site.register(RevyoumeClubSetting, SingletonModelAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Channel, ChannelAdmin)
| [
"[email protected]"
] | |
cc4076f02f63594508a9d96b275032e977bddf42 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03003/s019657668.py | 2a8dd04d77c9270c431b31d84ff7f1a1871ddf1e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | #15:35
h,w = map(int,input().split())
a = list(map(int,input().split()))
b = list(map(int,input().split()))
mod = 10 ** 9 + 7
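# Row-by-row DP: now[j+1] counts pairs of equal subsequences drawn from
# a[:i+1] and b[:j+1], the empty pair included. When a[i] != b[j], last[j]
# is subtracted to cancel double counting (inclusion-exclusion).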
now = [1 for _ in range(w+1)]
for i in range(h):
last = now
now = [1]
for j in range(w):
if a[i] == b[j]:
now.append((last[j+1]+now[-1])%mod)
else:
now.append((last[j+1]+now[-1]-last[j])%mod)
#print(now)
print(now[-1]) | [
"[email protected]"
] | |
9778a8ca7ff30e47c535cb48b6e916ec98ae7099 | c9dc1df17ecb9e279eb4403b83358363cdbe7fee | /project/urls.py | 3c97f6855709a1835bcf8389736c05b2eba7bea8 | [] | no_license | m0nte-cr1st0/keyua | c3894a94c9bfe73409078be11cb1d3f64831054c | b964ebb7e260fbebdbc27e3a571fed6278196cac | refs/heads/master | 2022-11-25T16:03:51.882386 | 2020-01-09T12:57:54 | 2020-01-09T12:57:54 | 232,809,529 | 0 | 0 | null | 2022-11-22T02:24:49 | 2020-01-09T12:58:10 | Python | UTF-8 | Python | false | false | 1,534 | py | """IncheckSite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from project.landing.views import GoogleSiteVerificationView
urlpatterns = [
# WYSIWYG HTML editor
url(r'^tinymce/', include('tinymce.urls')),
url('keyua-admin/filebrowser/', admin.site.urls),
url('grappelli/', include('grappelli.urls')),
# API
url(r'^api/', include('project.api.urls')),
# Blog
url(r'^blog/', include('project.blog.urls')),
# admin side
url(r'^keyua-admin/', admin.site.urls),
#Other files
url(r'^googlef65a9f395670d60e.html/$', GoogleSiteVerificationView.as_view(), name='google-file'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Important: keep this catch-all include last so it does not shadow the URL patterns above
urlpatterns += [
url(r'^', include('project.landing.urls')),
]
| [
"[email protected]"
] | |
dd23642f0663cc57638424411a54c21671bcc149 | 35a88ca38bb850b5c82d8a4e4de430d1d48660b7 | /www_dytt8_net/www_dytt8_net/spiders/dytt8.py | b7eca5b30f78c900600914c17c3e5cbc47451ade | [] | no_license | gyc567/spider_world | 3798cf854efcaacc4918c82358836480e6245a11 | 4bf04e5a4b0578cd7a28c14f3c10f9a0cad63f7c | refs/heads/master | 2020-05-22T02:14:12.210582 | 2019-05-07T12:34:06 | 2019-05-07T12:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | # -*- coding: utf-8 -*-
import re
import scrapy
import sys
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader.processors import Compose
from www_dytt8_net.items import WwwDytt8NetItem
class Dytt8Spider(CrawlSpider):
__ERROR_INFO = "很抱歉,您要访问的页面已被删除或不存在。"
name = 'dytt8'
allowed_domains = ['www.dytt8.net']
start_urls = ['http://www.dytt8.net/']
rules = (
        # Follow all index (list) pages except the games section
Rule(LinkExtractor(deny=r'.*game.*', allow='.*/index\.html')),
        # Follow the "next page" pagination links
Rule(LinkExtractor(restrict_xpaths=u'//a[text()="下一页"]')),
        # Extract article pages (games filtered out), pass them to parse_item, and keep following links
Rule(LinkExtractor(allow=r'.*/\d+/\d+\.html', deny=r".*game.*"), callback='parse_item', follow=True),
)
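    # CrawlSpider applies the first rule whose extractor matches each link,
    # so the list-page and pagination rules above only follow; article pages
    # are the ones handed to parse_item.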
def parse_item(self, response):
if self.__ERROR_INFO in response.text:
return
item = WwwDytt8NetItem()
item['title'] = response.xpath('//div[@class="title_all"]/h1/font/text()').extract_first()
item['publish_time'] = response.xpath('//div[@class="co_content8"]/ul/text()').extract_first().strip().replace('发布时间:', '')
imgs_xpath = response.xpath('//div[@id="Zoom"]//img')
item['images'] = [i.xpath('./@src').extract_first() for i in imgs_xpath if i.xpath('./@src')]
item['download_links'] = re.compile('<a href="(ftp://.*?)">').findall(response.text)
item['contents'] = [i.strip().replace('\n', '').replace('\r', '') for i in response.xpath('string(//div[@id="Zoom"])').extract()]
yield item
| [
"[email protected]"
] | |
d87431582d1e6699e5bfae0c1ec66b9c87041503 | eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6 | /ccpnmr2.4/python/ccpnmr/analysis/popups/BrowseStrucGen.py | d6a57c109db13ce4d0d86d320642832b5b103990 | [] | no_license | edbrooksbank/ccpnmr2.4 | cfecb0896dcf8978d796e6327f7e05a3f233a921 | f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c | refs/heads/master | 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 | HTML | UTF-8 | Python | false | false | 15,870 | py | """
======================COPYRIGHT/LICENSE START==========================
BrowseStrucGen.py: Part of the CcpNmr Analysis program
Copyright (C) 2003-2010 Wayne Boucher and Tim Stevens (University of Cambridge)
=======================================================================
The CCPN license can be found in ../../../../license/CCPN.license.
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- email: [email protected]
- contact the authors: [email protected], [email protected]
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
import Tkinter
from memops.general import Implementation
from memops.gui.ButtonList import ButtonList, UtilityButtonList
from memops.gui.Entry import Entry
from memops.gui.LabelFrame import LabelFrame
from memops.gui.MessageReporter import showOkCancel
from memops.gui.ScrolledMatrix import ScrolledMatrix
from ccpnmr.analysis.popups.BasePopup import BasePopup
# Removed - not used in file/ 24/4/08 Rasmus Fogh
#from ccpnmr.analysis.core.StructureBasic import makeStructures, makeStructureDictFromCnsPdb, makePdbFromStructure
class BrowseStrucGenPopup(BasePopup):
def __init__(self, parent, *args, **kw):
self.guiParent = parent
self.strucGen = None
self.violList = None
self.constrList = None
self.waiting = 0
BasePopup.__init__(self, parent=parent, title="Structure Generation Runs", **kw)
#self.geometry("+150+150")
def body(self, guiFrame):
guiFrame.grid_columnconfigure(0, weight=1)
row = 0
strucGenFrame = LabelFrame(guiFrame, text='Generation Runs')
strucGenFrame.grid(row = row, column = 0, columnspan=1, sticky='nsew')
strucGenFrame.grid_columnconfigure(0, weight=1)
strucGenFrame.grid_rowconfigure(0, weight=1)
guiFrame.grid_rowconfigure(row, weight=0)
#self.editDetailsEntry = Entry(self,text='',returnCallback = self.setDetails, width=12)
#editWidgets = [None, None, None, None, None, self.editDetailsEntry]
#editGetCallbacks = [None, None, None, None, None, self.getDetails]
#editSetCallbacks = [None, None, None, None, None, self.setDetails]
colHeadings = ['#','Constraint\nLists','Violation\nLists','Structures','Fixed\nAtom Sets','Fixes\nResonance Sets','Chain\nStates','Database\nEntries','Resonance\nClouds']
editWidgets = [None, None, None, None, None, None, None, None, None]
editGetCallbacks = [None, None, None, None, None, None, None, None, None]
editSetCallbacks = [None, None, None, None, None, None, None, None, None]
self.structGenMatrix = ScrolledMatrix(strucGenFrame, editSetCallbacks=editSetCallbacks, editGetCallbacks=editGetCallbacks, editWidgets=editWidgets,initialRows=3,initialCols=6, headingList=colHeadings, callback=self.selectStructGenCell, objectList=[], textMatrix=[[],])
self.structGenMatrix.grid(row = 0, column = 0, sticky='nsew')
texts = ['View Structures','Delete']
commands = [self.viewStructures,self.deleteStrucGen]
self.structGenButtons = ButtonList(strucGenFrame,texts=texts, expands=True, commands=commands)
self.structGenButtons.grid(row=1, column=0, sticky='ew')
row += 1
constrFrame = LabelFrame(guiFrame, text='Constraint Lists')
constrFrame.grid(row=row, column=0, columnspan=1, sticky='nsew')
constrFrame.grid_columnconfigure(0, weight=1)
constrFrame.grid_rowconfigure(0, weight=1)
guiFrame.grid_rowconfigure(row, weight=1)
colHeadings = ['#','Type','Name','Constraints','Experiments','Details','Unit']
editWidgets = [None, None, None, None, None, None, None]
editGetCallbacks = [None, None, None, None, None, None, None]
editSetCallbacks = [None, None, None, None, None, None, None]
self.constrListMatrix = ScrolledMatrix(constrFrame, editSetCallbacks=editSetCallbacks, editGetCallbacks=editGetCallbacks, editWidgets=editWidgets,initialRows=10, headingList=colHeadings, callback=self.selectConstrListCell, objectList=[], textMatrix=[[],])
self.constrListMatrix.grid(row = 0, column = 0, sticky='nsew')
texts = ['View Constraints','Create List','Delete List']
commands = [self.viewConstraints,self.createConstraints,self.deleteConstraints]
self.constrListButtons = ButtonList(constrFrame,texts=texts, expands=True, commands=commands)
self.constrListButtons.grid(row=1, column=0, sticky='ew')
self.constrListButtons.buttons[1].disable()
row += 1
violFrame = LabelFrame(guiFrame, text='Violation Lists')
violFrame.grid(row=row, column=0, columnspan=1, sticky='nsew')
violFrame.grid_columnconfigure(0, weight=1)
violFrame.grid_rowconfigure(0, weight=1)
guiFrame.grid_rowconfigure(row, weight=1)
colHeadings = ['#','Violations','Structures','Details',]
editWidgets = [None, None, None, None]
editGetCallbacks = [None, None, None, None]
editSetCallbacks = [None, None, None, None]
self.violListMatrix = ScrolledMatrix(violFrame, editSetCallbacks=editSetCallbacks, editGetCallbacks=editGetCallbacks, editWidgets=editWidgets,initialRows=10, headingList=colHeadings, callback=self.selectViolListCell, objectList=[], textMatrix=[[],])
self.violListMatrix.grid(row = 0, column = 0, sticky='nsew')
texts = ['View Violations','Delete List']
commands = [self.viewViolations,self.deleteViolations]
self.violListButtons = ButtonList(violFrame,texts=texts, expands=True, commands=commands)
self.violListButtons.grid(row=1, column=0, sticky='ew')
row += 1
self.bottomButtons = UtilityButtonList(guiFrame, helpUrl=self.help_url)
self.bottomButtons.grid(row = row, column = 0, columnspan=1, sticky = 'ew')
self.update()
for func in ('__init__', 'delete','setName','setDetails','setUnit','setExperiments','addExperiment','removeExperiment'):
for clazz in ('ccp.nmr.Nmr.ChemShiftConstraintList','ccp.nmr.Nmr.DihedralConstraintList','ccp.nmr.Nmr.DistanceConstraintList',
'ccp.nmr.Nmr.HBondConstraintList','ccp.nmr.Nmr.JCouplingConstraintList','ccp.nmr.Nmr.RdcConstraintList'):
self.registerNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete',):
for clazz in ('ccp.nmr.Nmr.ChemShiftConstraint','ccp.nmr.Nmr.DihedralConstraint','ccp.nmr.Nmr.DistanceConstraint',
'ccp.nmr.Nmr.HBondConstraint','ccp.nmr.Nmr.JCouplingConstraint','ccp.nmr.Nmr.RdcConstraint'):
self.registerNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete','setChainStates','addChainState','removeChainState',
'addEntry','removeEntry','setResStructures','addResStructure','setEntries',
'removeResStructure','setStructures','addStructure','removeStructure'):
self.registerNotify(self.updateAfter, 'ccp.nmr.Nmr.StructureGeneration', func)
for func in ('__init__', 'delete','setDetails'):
for clazz in ('ccp.nmr.Nmr.ViolationList',):
self.registerNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete',):
for clazz in ('ccp.nmr.Nmr.Violation',):
self.registerNotify(self.updateAfter, clazz, func)
def open(self):
self.updateAfter()
BasePopup.open(self)
def deleteViolations(self):
if self.violList and showOkCancel('Confirm','Really delete violation list?', parent=self):
self.violList.delete()
self.violList = None
self.violListButtons.buttons[0].disable()
self.violListButtons.buttons[1].disable()
def deleteConstraints(self):
if self.constrList and showOkCancel('Confirm','Really delete constraint list?', parent=self):
self.constrList.delete()
self.constrList = None
self.constrListButtons.buttons[0].disable()
self.constrListButtons.buttons[2].disable()
def createConstraints(self):
pass
def viewViolations(self):
self.guiParent.browseViolations()
popup = self.guiParent.popups['browse_violations']
popup.structGen = self.strucGen
popup.violList = self.violList
popup.updateAfter()
def viewConstraints(self):
self.guiParent.browseConstraints()
popup = self.guiParent.popups['browse_constraints']
popup.structGen = self.strucGen
popup.constrList = self.constrList
popup.updateAfter()
def viewStructures(self):
self.guiParent.editStructures()
popup = self.guiParent.popups['edit_structures']
popup.structGen = self.strucGen
popup.updateAfter()
def deleteStrucGen(self):
if self.strucGen and showOkCancel('Confirm','Really delete structure generation run?', parent=self):
self.strucGen.delete()
self.strucGen = None
self.constrList = None
        self.violList = None
self.structGenButtons.buttons[0].disable()
self.structGenButtons.buttons[1].disable()
def selectStructGenCell(self, strucGen, row, col):
self.strucGen = strucGen
if strucGen:
self.structGenButtons.buttons[1].enable()
if len(strucGen.molStructures) > 0:
self.structGenButtons.buttons[0].enable()
if self.constrList and (self.constrList.structureGeneration is not self.strucGen):
self.constrList = None
self.updateConstrLists(self.strucGen)
def selectConstrListCell(self, constrList, row, col):
self.constrList = constrList
if constrList:
self.constrListButtons.buttons[0].enable()
self.constrListButtons.buttons[2].enable()
def selectViolListCell(self, violList, row, col):
self.violList = violList
if violList:
self.violListButtons.buttons[0].enable()
self.violListButtons.buttons[1].enable()
    def updateAfter(self, object=None):
        if self.waiting:
            return
        strucGen = None
        if object is not None:
            # object may be None (a generic refresh), so only inspect
            # className when we actually received an object.
            name = object.className
            if name == 'StructureGeneration':
                self.waiting = True
                self.after_idle(self.update)
                return
            elif name == 'ViolationList':
                strucGen = object.structureGeneration
            elif name == 'Violation':
                strucGen = object.violationList.structureGeneration
            elif name[-14:] == 'ConstraintList':
                strucGen = object.structureGeneration
            elif name[-10:] == 'Constraint':
                strucGen = object.parentList.structureGeneration
        if (object is None) or (strucGen is self.strucGen):
            self.waiting = True
            self.after_idle(self.update)
def destroy(self):
for func in ('__init__', 'delete','setName','setDetails','setUnit','setExperiments','addExperiment','removeExperiment'):
for clazz in ('ccp.nmr.Nmr.ChemShiftConstraintList','ccp.nmr.Nmr.DihedralConstraintList','ccp.nmr.Nmr.DistanceConstraintList',
'ccp.nmr.Nmr.HBondConstraintList','ccp.nmr.Nmr.JCouplingConstraintList','ccp.nmr.Nmr.RdcConstraintList'):
self.unregisterNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete',):
for clazz in ('ccp.nmr.Nmr.ChemShiftConstraint','ccp.nmr.Nmr.DihedralConstraint','ccp.nmr.Nmr.DistanceConstraint',
'ccp.nmr.Nmr.HBondConstraint','ccp.nmr.Nmr.JCouplingConstraint','ccp.nmr.Nmr.RdcConstraint'):
self.unregisterNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete','setChainStates','addChainState','removeChainState',
'addEntry','removeEntry','setResStructures','addResStructure','setEntries',
'removeResStructure','setStructures','addStructure','removeStructure'):
self.unregisterNotify(self.updateAfter, 'ccp.nmr.Nmr.StructureGeneration', func)
for func in ('__init__', 'delete','setDetails'):
for clazz in ('ccp.nmr.Nmr.ViolationList',):
self.unregisterNotify(self.updateAfter, clazz, func)
for func in ('__init__', 'delete',):
for clazz in ('ccp.nmr.Nmr.Violation',):
self.unregisterNotify(self.updateAfter, clazz, func)
BasePopup.destroy(self)
def updateConstrLists(self, *opt):
strucGen = self.strucGen
objectList = []
textMatrix = []
if strucGen:
for constraintList in strucGen.constraintLists:
objectList.append(constraintList)
else:
textMatrix.append([])
for constrList in objectList:
datum = []
expText = ''
for e in constrList.experiments:
if expText:
expText += ' '
expText += e.name
datum.append(constrList.serial)
datum.append(constrList.className[0:-14])
datum.append(constrList.name)
datum.append(len(constrList.constraints))
datum.append(expText)
datum.append(constrList.details)
datum.append(constrList.unit)
textMatrix.append(datum)
if self.constrList:
self.constrListButtons.buttons[0].enable()
self.constrListButtons.buttons[2].enable()
else:
self.constrListButtons.buttons[0].disable()
self.constrListButtons.buttons[2].disable()
self.constrListMatrix.update(objectList=objectList, textMatrix=textMatrix)
def updateViolLists(self, *opt):
strucGen = self.strucGen
objectList = []
textMatrix = []
if strucGen:
for violationList in strucGen.violationLists:
objectList.append(violationList)
else:
textMatrix.append([])
for violationList in objectList:
datum = []
datum.append(violationList.serial)
datum.append(len(violationList.violations))
datum.append(len(violationList.molStructures))
datum.append(violationList.details)
textMatrix.append(datum)
if self.violList:
self.violListButtons.buttons[0].enable()
self.violListButtons.buttons[1].enable()
else:
self.violListButtons.buttons[0].disable()
self.violListButtons.buttons[1].disable()
self.violListMatrix.update(objectList=objectList, textMatrix=textMatrix)
def update(self):
objectList = []
textMatrix = []
project = self.project
for strucGen in self.nmrProject.structureGenerations:
objectList.append(strucGen)
if not objectList:
textMatrix.append([])
for strucGen in objectList:
datum = []
datum.append(strucGen.serial)
datum.append(len(strucGen.constraintLists))
datum.append(len(strucGen.violationLists))
datum.append(len(strucGen.molStructures))
datum.append(len(strucGen.fixedAtomSets))
datum.append(len(strucGen.fixedResonanceSets))
datum.append(len(strucGen.chainStates))
datum.append(len(strucGen.entries))
datum.append(len(strucGen.resStructures))
textMatrix.append(datum)
if not self.strucGen:
self.structGenButtons.buttons[0].disable()
self.structGenButtons.buttons[1].disable()
self.structGenMatrix.update(objectList=objectList, textMatrix=textMatrix)
self.updateConstrLists()
self.updateViolLists()
self.waiting = False
| [
"[email protected]"
] | |
5cb10b21ff50792c7351d990d7a45c0414b624f8 | 8040c2be85e686df30143600ed91100b6094812a | /csv-file-operation/csvFileGen2.py | c3d7c7992c25a52c3e574525bb8070c6b63f51b7 | [] | no_license | eLtronicsVilla/Miscellaneous-Useful-code | 0b6e3d6dd4b9feca3364f98929e26ee9da3221af | cc5be1e8b8e9b0d2f49f2abcba16b2548bf4a41e | refs/heads/master | 2021-08-07T23:16:08.088315 | 2020-05-22T19:38:17 | 2020-05-22T19:38:17 | 181,491,048 | 0 | 0 | null | 2019-05-18T09:06:46 | 2019-04-15T13:19:55 | Python | UTF-8 | Python | false | false | 703 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 16:23:34 2019
@author: brijesh Gupta
"""
import csv
import os
import sys
import time
# Python 3 csv files must be opened in text mode with newline=''
with open('Test.csv', 'w', newline='') as csvfile:
    filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    filewriter.writerow(['SN', 'Date', 'objectName', 'Path', 'imageName', 'No of Objects', 'Comments'])
    todays_date = time.strftime("%Y-%m-%d %H:%M:%S")
    SN = 0
    no_of_objects = input("Enter the no of objects present in a file: ")
    comments = input("Enter the comments on the current image: ")
    filewriter.writerow([str(SN), str(todays_date), 'Eye', str(os.getcwd()), str(sys.argv[0]), no_of_objects, comments])
# The with-block closes the file automatically on exit.
| [
"[email protected]"
] | |
f09f51e942e96e258583d9deb9f7490ac54883aa | f3fdfdf714e23ef69c9ce6631c188f1ebc328546 | /setup.py | a65c551e0c1b8aae6a58f0ff2a4edfb44f1e6111 | [
"BSD-2-Clause"
] | permissive | liujie40/PSpider | bf2a134812ce81357588b260cee9e3d039c73df0 | f1162c777ec87250edfd2532882eb15b8d712e6a | refs/heads/master | 2022-02-21T18:20:41.468852 | 2022-01-19T06:55:54 | 2022-01-19T06:56:00 | 112,547,656 | 1 | 0 | null | 2017-11-30T01:17:47 | 2017-11-30T01:17:47 | null | UTF-8 | Python | false | false | 383 | py | # _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
setup(
name="spider",
version="3.0.4",
author="xianhu",
keywords=["spider", "crawler"],
packages=find_packages(exclude=("test.*",)),
package_data={
"config": ["*.conf"], # include all *.conf files
},
install_requires=[]
)
| [
"[email protected]"
] | |
4380d953b1142157b79abdcc6ac89919e7e88fc9 | 90f545733f076747bad979faa3a8cf23867f7a3a | /HS5f.py | a49ff24bdca897a53c0c4690860b5d5d81f31ae2 | [] | no_license | kunweiTAN/techgym_ai | f85dc52ce6e75f4c08213d5796171908beb9a69e | 051274bcc789a563c46ed5661301535e76ae1e18 | refs/heads/master | 2023-08-17T05:15:18.758183 | 2021-09-21T11:57:07 | 2021-09-21T11:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | #AI-TECHGYM-4-7-Q-1(AI-TECHGYM-3-20-Q-1)
# Regression and classification problems

# Imports
import pandas as pd

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

iris = load_iris()

X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, stratify=iris.target, random_state=0)

# One plausible completion of the exercise blanks: a default instance of each booster.
models = {
    'AdaBoost': AdaBoostClassifier(),
    'GradientBoost': GradientBoostingClassifier()
}

# Accuracy
scores = {}
for model_name, model in models.items():
    model.fit(X_train, y_train)
    scores[(model_name, 'train_score')] = accuracy_score(y_train, model.predict(X_train))
    scores[(model_name, 'test_score')] = accuracy_score(y_test, model.predict(X_test))

# Display (assumes a Jupyter/IPython environment providing display())
display(pd.Series(scores).unstack())
| [
"[email protected]"
] | |
1cfebbe7fd51cc5599eba80d68db0e7cd7e7fbdd | 2d694018e5f1ca0d8a60e2ecd3debc094a0ce9a2 | /venv/Scripts/autopep8-script.py | 03573f16b86b67d8a2fcfee0dfb8e380b465a0ce | [] | no_license | gajanantadas/Ecommerce_project | 9a380fd5c0c37440b3d48982a9aac742d6831d2a | 0b0251d30f8a10a79f72cc8dfb2780d99e62fe05 | refs/heads/master | 2023-03-05T03:51:02.690861 | 2021-02-15T07:43:34 | 2021-02-15T07:43:34 | 338,998,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | #!"G:\New folder\Ecommerce_project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')()
)
| [
"[email protected]"
] | |
a8b19f48de819574504f9c9c8e74496ef9571385 | 5e9dedabeeabf45d9e3e76bc884bdb6f7a76ed92 | /server/src/uds/services/OVirt/OVirtLinkedService.py | 827f4fcff5ba7f8f16546c8e3da600a8d10558fb | [] | no_license | kostyan6812/openuds | 7f98e7db8031685cc69490ab270c8e2179a983eb | 4793667435923fd7db5c92007b30eab2839aaca1 | refs/heads/master | 2021-01-22T19:30:50.017752 | 2017-03-15T09:54:06 | 2017-03-15T09:54:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,254 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
'''
from django.utils.translation import ugettext_noop as _, ugettext
from uds.core.transports import protocols
from uds.core.services import Service, types as serviceTypes
from .OVirtPublication import OVirtPublication
from .OVirtLinkedDeployment import OVirtLinkedDeployment
from .Helpers import oVirtHelpers
from uds.core.ui import gui
import logging
__updated__ = '2017-01-17'
logger = logging.getLogger(__name__)
class OVirtLinkedService(Service):
'''
    oVirt linked-clones service: creates a template from the selected VM, then deploys linked clones from that template.
'''
# : Name to show the administrator. This string will be translated BEFORE
# : sending it to administration interface, so don't forget to
# : mark it as _ (using ugettext_noop)
typeName = _('oVirt/RHEV Linked Clone')
# : Type used internally to identify this provider
typeType = 'oVirtLinkedService'
# : Description shown at administration interface for this provider
typeDescription = _('oVirt Services based on templates and COW (experimental)')
# : Icon file used as icon for this provider. This string will be translated
# : BEFORE sending it to administration interface, so don't forget to
# : mark it as _ (using ugettext_noop)
iconFile = 'service.png'
# Functional related data
# : If the service provides more than 1 "deployed user" (-1 = no limit,
# : 0 = ???? (do not use it!!!), N = max number to deploy
maxDeployed = -1
# : If we need to generate "cache" for this service, so users can access the
# : provided services faster. Is usesCache is True, you will need also
# : set publicationType, do take care about that!
usesCache = True
# : Tooltip shown to user when this item is pointed at admin interface, none
# : because we don't use it
cacheTooltip = _('Number of desired machines to keep running waiting for a user')
# : If we need to generate a "Level 2" cache for this service (i.e., L1
# : could be running machines and L2 suspended machines)
usesCache_L2 = True
# : Tooltip shown to user when this item is pointed at admin interface, None
# : also because we don't use it
cacheTooltip_L2 = _('Number of desired machines to keep suspended waiting for use')
    # : If the service needs an OS manager (managers are related to agents
    # : provided by the services themselves, i.e. virtual machines with actors)
needsManager = True
# : If true, the system can't do an automatic assignation of a deployed user
# : service from this service
mustAssignManually = False
    # : Types of publications (prepared data for deploys)
    # : In our case we do use a publication, so this is OVirtPublication
publicationType = OVirtPublication
# : Types of deploys (services in cache and/or assigned to users)
deployedType = OVirtLinkedDeployment
allowedProtocols = protocols.GENERIC + (protocols.SPICE,)
servicesTypeProvided = (serviceTypes.VDI,)
# Now the form part
cluster = gui.ChoiceField(
label=_("Cluster"),
order=100,
fills={
'callbackName': 'ovFillResourcesFromCluster',
'function': oVirtHelpers.getResources,
'parameters': ['cluster', 'ov', 'ev']
},
tooltip=_("Cluster to contain services"), required=True
)
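    # The 'fills' mapping wires this field to a dynamic server-side callback:
    # when the selected cluster changes, oVirtHelpers.getResources is invoked
    # (registered as 'ovFillResourcesFromCluster') to repopulate dependent
    # choice fields such as the datastore list.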
datastore = gui.ChoiceField(
label=_("Datastore Domain"),
rdonly=False,
order=101,
tooltip=_('Datastore domain where to publish and put incrementals'),
required=True
)
minSpaceGB = gui.NumericField(
length=3,
label=_('Reserved Space'),
defvalue='32',
order=102,
tooltip=_('Minimal free space in GB'),
required=True
)
machine = gui.ChoiceField(
label=_("Base Machine"),
order=110,
tooltip=_('Service base machine'),
tab=_('Machine'),
required=True
)
memory = gui.NumericField(
label=_("Memory (Mb)"),
length=4,
defvalue=512,
rdonly=False,
order=111,
tooltip=_('Memory assigned to machines'),
tab=_('Machine'),
required=True
)
memoryGuaranteed = gui.NumericField(
label=_("Memory Guaranteed (Mb)"),
length=4,
defvalue=256,
rdonly=False,
order=112,
tooltip=_('Physical memory guaranteed to machines'),
tab=_('Machine'),
required=True
)
usb = gui.ChoiceField(
label=_('USB'),
rdonly=False,
order=113,
tooltip=_('Enable usb redirection for SPICE'),
values=[
gui.choiceItem('disabled', 'disabled'),
gui.choiceItem('native', 'native'),
gui.choiceItem('legacy', 'legacy (deprecated)')
],
tab=_('Machine'),
defvalue='1' # Default value is the ID of the choicefield
)
display = gui.ChoiceField(
label=_('Display'),
rdonly=False,
order=114,
tooltip=_('Display type (only for administration purposes)'),
values=[
gui.choiceItem('spice', 'Spice'),
gui.choiceItem('vnc', 'Vnc')
],
tab=_('Machine'),
defvalue='1' # Default value is the ID of the choicefield
)
baseName = gui.TextField(
label=_('Machine Names'),
rdonly=False,
order=115,
        tooltip=_('Base name for clones from this machine'),
tab=_('Machine'),
required=True
)
lenName = gui.NumericField(
length=1,
label=_('Name Length'),
defvalue=5,
order=116,
tooltip=_('Size of numeric part for the names of these machines (between 3 and 6)'),
tab=_('Machine'),
required=True
)
ov = gui.HiddenField(value=None)
ev = gui.HiddenField(value=None) # We need to keep the env so we can instantiate the Provider
def initialize(self, values):
'''
        We check the form values here to see if they are valid.
        Note that we check them through FORM variables, which have already been
        initialized by the __init__ method of the base class before this is invoked.
'''
if values is not None:
length = int(self.lenName.value)
if len(self.baseName.value) + length > 15:
raise Service.ValidationException(_('The length of basename plus length must not be greater than 15'))
if self.baseName.value.isdigit():
raise Service.ValidationException(_('The machine name can\'t be only numbers'))
if int(self.memory.value) < 256 or int(self.memoryGuaranteed.value) < 256:
raise Service.ValidationException(_('The minimum allowed memory is 256 Mb'))
if int(self.memoryGuaranteed.value) > int(self.memory.value):
self.memoryGuaranteed.value = self.memory.value
def initGui(self):
'''
Loads required values inside
'''
# Here we have to use "default values", cause values aren't used at form initialization
# This is that value is always '', so if we want to change something, we have to do it
# at defValue
self.ov.defValue = self.parent().serialize()
self.ev.defValue = self.parent().env.key
machines = self.parent().getMachines()
vals = []
for m in machines:
vals.append(gui.choiceItem(m['id'], m['name']))
# This is not the same case, values is not the "value" of the field, but
# the list of values shown because this is a "ChoiceField"
self.machine.setValues(vals)
clusters = self.parent().getClusters()
vals = []
for c in clusters:
vals.append(gui.choiceItem(c['id'], c['name']))
self.cluster.setValues(vals)
def datastoreHasSpace(self):
# Get storages for that datacenter
logger.debug('Checking datastore space for {0}'.format(self.datastore.value))
info = self.parent().getStorageInfo(self.datastore.value)
logger.debug('Datastore Info: {0}'.format(info))
availableGB = info['available'] / (1024 * 1024 * 1024)
if availableGB < self.minSpaceGB.num():
            raise Exception('Not enough free space available: needs at least {0} GB, but only {1} GB is free'.format(self.minSpaceGB.num(), availableGB))
def sanitizeVmName(self, name):
'''
Ovirt only allows machine names with [a-zA-Z0-9_-]
'''
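        # Example: 'my vm.01' -> 'my_vm_01'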
import re
return re.sub("[^a-zA-Z0-9_-]", "_", name)
def makeTemplate(self, name, comments):
'''
Invokes makeTemplate from parent provider, completing params
Args:
name: Name to assign to template (must be previously "sanitized"
comments: Comments (UTF-8) to add to template
Returns:
template Id of the template created
Raises an exception if operation fails.
'''
# Checks datastore size
# Get storages for that datacenter
self.datastoreHasSpace()
return self.parent().makeTemplate(name, comments, self.machine.value, self.cluster.value, self.datastore.value, self.display.value)
def getTemplateState(self, templateId):
'''
Invokes getTemplateState from parent provider
Args:
templateId: templateId to remove
Returns nothing
Raises an exception if operation fails.
'''
return self.parent().getTemplateState(templateId)
def deployFromTemplate(self, name, comments, templateId):
'''
Deploys a virtual machine on selected cluster from selected template
Args:
name: Name (sanitized) of the machine
comments: Comments for machine
templateId: Id of the template to deploy from
        Display type, USB mode and memory settings are taken from this
        service's own configuration fields.
        Returns:
            Id of the machine being created from template
'''
logger.debug('Deploying from template {0} machine {1}'.format(templateId, name))
self.datastoreHasSpace()
return self.parent().deployFromTemplate(name, comments, templateId, self.cluster.value,
self.display.value, self.usb.value, int(self.memory.value), int(self.memoryGuaranteed.value))
def removeTemplate(self, templateId):
'''
invokes removeTemplate from parent provider
'''
return self.parent().removeTemplate(templateId)
def getMachineState(self, machineId):
'''
Invokes getMachineState from parent provider
(returns if machine is "active" or "inactive"
Args:
            machineId: Id of the machine to get state
Returns:
one of this values:
unassigned, down, up, powering_up, powered_down,
paused, migrating_from, migrating_to, unknown, not_responding,
wait_for_launch, reboot_in_progress, saving_state, restoring_state,
suspended, image_illegal, image_locked or powering_down
            Also can return 'unknown' if the machine is not known
'''
return self.parent().getMachineState(machineId)
def startMachine(self, machineId):
'''
Tries to start a machine. No check is done, it is simply requested to oVirt.
This start also "resume" suspended/paused machines
Args:
machineId: Id of the machine
Returns:
'''
return self.parent().startMachine(machineId)
def stopMachine(self, machineId):
'''
        Tries to stop a machine. No check is done, it is simply requested to oVirt
Args:
machineId: Id of the machine
Returns:
'''
return self.parent().stopMachine(machineId)
def suspendMachine(self, machineId):
'''
        Tries to suspend a machine. No check is done, it is simply requested to oVirt
Args:
machineId: Id of the machine
Returns:
'''
return self.parent().suspendMachine(machineId)
def removeMachine(self, machineId):
'''
Tries to delete a machine. No check is done, it is simply requested to oVirt
Args:
machineId: Id of the machine
Returns:
'''
return self.parent().removeMachine(machineId)
    def updateMachineMac(self, machineId, macAddress):
        '''
        Changes the mac address of the first nic of the machine to the one specified
        '''
        return self.parent().updateMachineMac(machineId, macAddress)
def getMacRange(self):
'''
        Returns the selected mac range
'''
return self.parent().getMacRange()
def getBaseName(self):
'''
Returns the base name
'''
return self.baseName.value
def getLenName(self):
'''
Returns the length of numbers part
'''
return int(self.lenName.value)
def getDisplay(self):
'''
Returns the selected display type (for created machines, for administration
'''
return self.display.value
def getConsoleConnection(self, machineId):
return self.parent().getConsoleConnection(machineId)
def desktopLogin(self, machineId, username, password, domain):
return self.parent().desktopLogin(machineId, username, password, domain)
| [
"[email protected]"
] | |
d74d978814589cb360e60156707fb640d5ac8a75 | 6b587069460046cefbb3f2d18bafbbe4ffbc00d1 | /further_study.py | 74b6ba5f552ad93ec1ae9135307eced4fe91d5bd | [] | no_license | rachel-lynch-lin/list-slicing | bd992c84ff5865d2b9010e80f9fc01648f161906 | d598df20941f34a6e993b73897cfada38f52ca88 | refs/heads/master | 2020-03-21T06:40:38.214931 | 2018-06-22T00:06:16 | 2018-06-22T00:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,103 | py | """Custom implementations of several standard Python list methods.
Implement your own versions of Python's standard list methods, as functions.
You should use only the primitive operations from Part 1 and 2 in your
implementations. For loops are also allowed, such as the following:
for element in some_list:
# Do something with element
Each function imitates a built-in list method, as described by the docstring
for each function.
Play with the built-in methods in the Python REPL to get a feel
for how they work before trying to write your custom version.
"""
from list_operations import *
def custom_len(input_list):
"""Return number of items in the list.
The function custom_len(input_list) should have
the same functionality and result as len(input_list).
For example:
>>> custom_len(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'])
8
"""
counter = 0
for i in input_list:
counter += 1
return counter
# For the next four exercises, you'll need to be clever and think about ways
# to use list slice assignment.
#
# NOTE: these are especially contrived. You wouldn't really want
# to typically append things to a list like this (you'd want to use the
# list.append() method), but we want you to practice list slicing assignment
# in different ways so it sticks in your brain.
def custom_append(input_list, value):
"""Add the value to the end of the list.
The function custom_append(input_list, value) should have the same
functionality as input_list.append(value) where value is added to the
end of the list and the function returns nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> custom_append(notes, 'Re')
>>> notes == ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do', 'Re']
True
"""
input_list[len(input_list):] = [value]
def custom_extend(input_list, second_list):
"""Append every item in second_list to input_list.
Like input_list.extend(second_list), custom_extend(input_list, second_list)
should append every item in the second list to the end of the first list
and return nothing.
For example:
>>> months = ['Jan', 'Feb', 'Mar']
>>> custom_extend(months, ['Apr', 'May'])
>>> months == ['Jan', 'Feb', 'Mar', 'Apr', 'May']
True
"""
input_list[len(input_list):] = second_list
def custom_insert(input_list, index, value):
"""Insert value at index in the list.
Like input_list.insert(index, value), should insert (not replace) the value
at the specified index of the input list and return nothing.
For example:
>>> months = ['Jan', 'Mar']
>>> custom_insert(months, 1, 'Feb')
>>> months == ['Jan', 'Feb', 'Mar']
True
"""
input_list[index:index] = [value]
def custom_remove(input_list, value):
"""Remove the first item of the value in list.
The function custom_remove(input_list, value) should have the same
functionality as input_list.remove(value) where the first item of
the value specified is removed and the function returns nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> custom_remove(notes, 'Do')
>>> notes == ['Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
True
"""
for i in range(len(input_list)):
if input_list[i] == value:
del input_list[i]
break
def custom_pop(input_list):
"""Remove the last item in the list and returns it.
The function custom_pop(input_list) should have the same functionality
and result as input_list.pop().
For example:
>>> months = ['Jan', 'Feb', 'March']
>>> custom_pop(months)
'March'
>>> months
['Jan', 'Feb']
"""
last_item = input_list[-1]
del input_list[-1]
return last_item
def custom_index(input_list, value):
"""Return the index of the first item of value found in input_list.
The function custom_index(input_list, value) should have the same
functionality and result as input_list.index(value).
For example:
>>> custom_index(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'], 'Re')
1
"""
for i in range(len(input_list)):
if input_list[i] == value:
return i
def custom_count(input_list, value):
"""Return the number of times value appears in the list.
Like input_list.count(value), custom_count(input_list, value) should
return the number of times the specified value appears in the list.
For example:
>>> custom_count(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'], 'Do')
2
"""
count = 0
for i in range(len(input_list)):
if input_list[i] == value:
count += 1
return count
def custom_reverse(input_list):
"""Reverse the elements of the input_list.
Like input_list.reverse(), custom_reverse(input_list) should reverse the
elements of the original list and return nothing (we call this reversing
"in place").
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> custom_reverse(multiples)
>>> multiples == [27, 24, 21, 18, 15, 12, 9, 6, 3, 0]
True
"""
for i in range(len(input_list)):
input_list[i:i] = [input_list[-1]]
del input_list[-1]
def custom_contains(input_list, value):
"""Return True or False if value is in the input_list.
Like (value in input_list), should return True if the list contains the
specified value and False if it does not. Remember, do not use the `if X in Y`
statement -- find another way to solve it!
For example:
>>> custom_contains([0, 3, 6, 9, 12, 15, 18, 21, 24], 23)
False
>>> custom_contains([0, 3, 6, 9, 12, 15, 18, 21, 24], 24)
True
"""
for num in input_list:
if num == value:
return True
return False
def custom_equality(some_list, another_list):
"""Return True if passed lists are identical, False otherwise.
Like (some_list == another_list), custom_equality(some_list, another_list)
should return True if both lists contain the same values in the same indexes.
For example:
>>> custom_equality(['Jan', 'Feb', 'Mar'], ['Jan', 'Feb', 'Mar'])
True
>>> custom_equality(['Jan', 'Feb', 'Mar'], ['Jan', 'Mar', 'Feb'])
False
"""
    if len(some_list) != len(another_list):
        # Lists of different lengths can never be equal.
        return False
    for i in range(len(some_list)):
        if some_list[i] != another_list[i]:
            return False
    return True
##############################################################################
# Please ask for a code review. Also, give your partner a high-five!
##############################################################################
# This is the part were we actually run the doctests.
if __name__ == "__main__":
import doctest
result = doctest.testmod()
if result.failed == 0:
print("ALL TESTS PASSED")
| [
"[email protected]"
] | |
839bbb95ff3a972b5ab6d75ef01bd4339081612a | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/inspur/sm/plugins/modules/download_auto_screenshot.py | 7c8f830ec5e4a390bc12d58743e4817959ff16e9 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 2,448 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: download_auto_screenshot
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Download auto screenshots.
description:
- Download auto screenshots on Inspur server.
options:
file_url:
description:
- Screen capture file path.
type: str
required: true
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: Screen test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Download auto screenshots"
inspur.sm.download_auto_screenshot:
file_url: "/home/wbs/screen"
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class Screen(object):
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
self.results = dict()
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=False)
def run_command(self):
self.module.params['subcommand'] = 'downscreen'
self.results = get_connection(self.module)
def show_result(self):
"""Show result"""
self.module.exit_json(**self.results)
def work(self):
"""Worker"""
self.run_command()
self.show_result()
def main():
argument_spec = dict(
file_url=dict(type='str', required=True),
)
argument_spec.update(ism_argument_spec)
screen_obj = Screen(argument_spec)
screen_obj.work()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e4bf678fe219d6b3554fa9948f51af523385ce03 | b2b79cc61101ddf54959b15cf7d0887d114fb4e5 | /web/pgadmin/tools/debugger/tests/test_restart_debugger.py | 6c30562bf91dcb925a73c5ff388af96cf835ee8a | [
"PostgreSQL"
] | permissive | 99Percent/pgadmin4 | 8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8 | 5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a | refs/heads/master | 2021-10-10T20:08:48.321551 | 2021-09-30T12:51:43 | 2021-09-30T12:51:43 | 165,702,958 | 0 | 0 | NOASSERTION | 2019-01-14T17:18:40 | 2019-01-14T17:18:39 | null | UTF-8 | Python | false | false | 3,016 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils as utils
from . import utils as debugger_utils
from unittest.mock import patch
from regression import parent_node_dict
from pgadmin.browser.server_groups.servers.databases.schemas.functions \
.tests import utils as funcs_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as db_utils
class RestartDebugger(BaseTestGenerator):
""" This class will Restart the debugger """
scenarios = utils.generate_scenarios('restart_debugger',
debugger_utils.test_cases)
def setUp(self):
super(RestartDebugger, self).setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_id = self.schema_data['schema_id']
local_self = funcs_utils.set_up(self)
func_name = "test_function_%s" % str(uuid.uuid4())[1:8]
function_info = funcs_utils.create_function(
local_self.server, local_self.db_name, local_self.schema_name,
func_name)
self.func_id = function_info[0]
if self.add_extension:
debugger_utils.add_extension(self, utils, db_utils=db_utils)
init_debugger = debugger_utils.init_debugger_function(self)
self.trans_id = json.loads(init_debugger.data)['data']['trans_id']
if self.init_target:
debugger_utils.initialize_target(self, utils)
def restart_debugger(self):
return self.tester.get(
self.url + str(self.trans_id),
content_type='application/json')
def runTest(self):
"""
This function will initialize the debugger for function and procedures.
"""
if self.is_positive_test:
response = self.restart_debugger()
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.restart_debugger()
else:
response = self.restart_debugger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function delete the server from SQLite """
debugger_utils.close_debugger(self)
debugger_utils.delete_function(self, utils)
db_utils.disconnect_database(self, self.server_id, self.db_id)
| [
"[email protected]"
] | |
efda514ddc0e46df56878a94c5569a740445e0fb | d429c131df32789e11a98e9e965e652176fcee97 | /454B - Little Pony and Sort by Shift.py | 8dcfd32741c9cd4e29c47a0c82bd29701b71a480 | [] | no_license | shan-mathi/Codeforces | a11841a1ef1a1ef78e3d506d58d9fdf4439421bd | 6f8166b79bea0eb1f575dbfc74c252ba71472c7e | refs/heads/main | 2023-06-15T08:25:41.130432 | 2021-06-24T10:36:06 | 2021-06-24T10:36:06 | 341,176,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #118706045 Jun/07/2021 14:53UTC+5.5 Shan_XD 454B - Little Pony and Sort by Shift PyPy 3 Accepted 218 ms
def one_shift(n, x):
    # Number of cyclic right-shifts that sort x, or -1 if no shift count works.
    c = sorted(x)
    if c == x:
        return 0
    for i in range(n - 1):
        if x[i + 1] < x[i]:
            # First descent found: a rotation can only start right after it.
            if x[i + 1:] + x[:i + 1] == c:
                return n - i - 1
            return -1
    return -1
n = int(input())
x = list(map(int, input().split()))
print(one_shift(n,x))
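# Example: n=4, x=[2, 3, 4, 1] -> prints 1 (one right shift sorts the list)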
| [
"[email protected]"
] | |
f0d75c548356b509d1ce973bd3524ff051486610 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004907.py | f15c185acaf516f2e92f4364b40ff14c731692d7 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher80951(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.4.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.4.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher80951._instance is None:
CommutativeMatcher80951._instance = CommutativeMatcher80951()
return CommutativeMatcher80951._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 80950
return
yield
from collections import deque | [
"[email protected]"
] | |
f79e2cae087ea96ee7bf206c8c58102f233d61a4 | aadad415f425b9f45fed14290235488a46687a4f | /2009/bastieri/cadmio/passosbagliato/wavelength.py | 54e7ed3023e3d79258a794b356218a90c4540593 | [] | no_license | enucatl-university/lab-unipd | c1fdae198ccc3af3f75ad07554e148427a9cc096 | c197bb92f479913c1183375fa22fd1619e6bbad4 | refs/heads/master | 2023-08-15T01:59:55.502505 | 2016-11-11T19:20:13 | 2016-11-11T19:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | from __future__ import division
import math
from ROOT import TGraph, TF1, TCanvas
def to_decimal(deg):
    # Convert a vernier reading written as DD.MM -- the fractional part
    # encodes arcminutes -- into decimal degrees: 30.45 -> 30 deg 45' -> 30.75.
    whole = math.floor(deg)
    minutes = 100 * (deg - whole)
    return whole + minutes / 60
class WaveLength(object):
def __init__(self, file_name):
self.output_file = file_name + '.out'
with open(file_name) as input_file:
with open(self.output_file, 'w') as output:
for line in input_file:
o, n1, n2 = [float(x) for x in line.split()]
n1, n2 = to_decimal(n1), to_decimal(n2)
angle = ((n1 - 180) + n2)*math.pi/360
sine = math.sin(angle)
out_string = str(o) + ' ' + str(sine) + '\n'
output.write(out_string)
def fit_graph(self):
self.graph = TGraph(self.output_file)
self.func = TF1('line', 'pol1', -6, 6)
self.graph.Fit('line', 'QW')
self.slope = self.func.GetParameter(1)
#canv = TCanvas('can', 'can')
self.graph.SetMarkerStyle(8)
#self.graph.Draw('AP')
def get_separation(self, wavelen):
self.separation = math.fabs(wavelen / self.slope)
return self.separation
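
# Example usage (hypothetical data file of "order reading1 reading2" rows):
#   wl = WaveLength('mercury.dat')
#   wl.fit_graph()
#   print(wl.get_separation(546.07))  # grating spacing from the Hg green line (546.07 nm)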
| [
"[email protected]"
] | |
522f43d045aead4510090ccba73165183d45dd2a | a3cf848e37683b45ea570578e398ab85f1ca4732 | /DEMO/write_excel.py | c874f2fbbb1796ab96bf428e6653507c492782df | [
"MIT"
] | permissive | AceCoooool/python-example | 7f456f702ecc59909d500bcf62e478d0a86082de | 1d0068627210f08d31f027b6a333118d9f743956 | refs/heads/master | 2020-04-18T07:27:45.200465 | 2019-02-24T11:22:26 | 2019-02-24T11:22:26 | 167,360,679 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | import xlwt
if __name__ == '__main__':
# Workbook is created
wb = xlwt.Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
sheet1.write(1, 0, 'ISBT DEHRADUN')
sheet1.write(2, 0, 'SHASTRADHARA')
sheet1.write(3, 0, 'CLEMEN TOWN')
sheet1.write(4, 0, 'RAJPUR ROAD')
sheet1.write(5, 0, 'CLOCK TOWER')
sheet1.write(0, 1, 'ISBT DEHRADUN')
sheet1.write(0, 2, 'SHASTRADHARA')
sheet1.write(0, 3, 'CLEMEN TOWN')
sheet1.write(0, 4, 'RAJPUR ROAD')
sheet1.write(0, 5, 'CLOCK TOWER')
wb.save('../data/csv/example.xls') | [
"[email protected]"
] | |
ecb2639a5a72d1cd7a09a16340dc5cfff6926757 | 55eda01bdcbda99f72cfdf0b29afb5ea36756873 | /arxiv/kdgan/mdlcompr_xw/train_kd.py | 5fac90bbad9ad11f7d44ec75643db75b3b861de5 | [] | no_license | yyht/KDGAN | 7489a0ca1a2f044b6bcb7cd8bb0d6f2dae1da5e7 | 8f1367d242d7d174bf5bb2740aa18e3846d7b521 | refs/heads/master | 2020-05-16T08:36:18.872239 | 2019-01-12T04:17:31 | 2019-01-12T04:17:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,738 | py | from kdgan import config
from kdgan import metric
from kdgan import utils
from flags import flags
from data_utils import AffineGenerator
from gen_model import GEN
from tch_model import TCH
import data_utils
from os import path
from tensorflow.contrib import slim
import math
import os
import time
import numpy as np
import tensorflow as tf
mnist = data_utils.read_data_sets(flags.dataset_dir,
one_hot=True,
train_size=flags.train_size,
valid_size=flags.valid_size,
reshape=True)
datagen = AffineGenerator(mnist)
tn_size, vd_size = mnist.train.num_examples, mnist.test.num_examples
print('tn size=%d vd size=%d' % (tn_size, vd_size))
tn_num_batch = int(flags.num_epoch * tn_size / flags.batch_size)
print('tn #batch=%d' % (tn_num_batch))
eval_interval = int(tn_size / flags.batch_size)
print('ev #interval=%d' % (eval_interval))
tn_gen = GEN(flags, mnist.train, is_training=True)
tn_tch = TCH(flags, mnist.train, is_training=True)
scope = tf.get_variable_scope()
scope.reuse_variables()
vd_gen = GEN(flags, mnist.test, is_training=False)
vd_tch = TCH(flags, mnist.test, is_training=False)
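# Knowledge-distillation wiring: the student (GEN) is trained against the
# teacher's (TCH) soft logits, and reuse_variables() lets the eval graphs
# (vd_gen, vd_tch) share weights with the training graphs.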
tf.summary.scalar(tn_gen.learning_rate.name, tn_gen.learning_rate)
tf.summary.scalar(tn_gen.kd_loss.name, tn_gen.kd_loss)
summary_op = tf.summary.merge_all()
init_op = tf.global_variables_initializer()
tot_params = 0
for variable in tf.trainable_variables():
num_params = 1
for dim in variable.shape:
num_params *= dim.value
print('%-50s (%d params)' % (variable.name, num_params))
tot_params += num_params
print('%-50s (%d params)' % (' '.join(['kd', flags.kd_model]), tot_params))
def main(_):
bst_acc, bst_epk = 0.0, 0
writer = tf.summary.FileWriter(config.logs_dir, graph=tf.get_default_graph())
with tf.train.MonitoredTrainingSession() as sess:
sess.run(init_op)
tn_gen.saver.restore(sess, flags.gen_model_ckpt)
tn_tch.saver.restore(sess, flags.tch_model_ckpt)
ini_gen = metric.eval_mdlcompr(sess, vd_gen, mnist)
ini_tch = metric.eval_mdlcompr(sess, vd_tch, mnist)
start = time.time()
# for tn_batch in range(tn_num_batch):
# tn_image_np, tn_label_np = mnist.train.next_batch(flags.batch_size)
tn_batch = -1
for epoch in range(flags.num_epoch):
for tn_image_np, tn_label_np in datagen.generate(batch_size=flags.batch_size):
tn_batch += 1
feed_dict = {vd_tch.image_ph:tn_image_np}
soft_logit_np, = sess.run([vd_tch.logits], feed_dict=feed_dict)
feed_dict = {
tn_gen.image_ph:tn_image_np,
tn_gen.hard_label_ph:tn_label_np,
tn_gen.soft_logit_ph:soft_logit_np,
}
_, summary = sess.run([tn_gen.kd_update, summary_op], feed_dict=feed_dict)
writer.add_summary(summary, tn_batch)
if (tn_batch + 1) % eval_interval != 0:
continue
feed_dict = {
vd_gen.image_ph:mnist.test.images,
vd_gen.hard_label_ph:mnist.test.labels,
}
acc = sess.run(vd_gen.accuracy, feed_dict=feed_dict)
        is_best = acc > bst_acc
        if is_best:
          bst_acc = acc
          bst_epk = epoch
        tot_time = time.time() - start
        global_step = sess.run(tn_gen.global_step)
        avg_time = (tot_time / global_step) * (tn_size / flags.batch_size)
        print('#%08d curacc=%.4f curbst=%.4f tot=%.0fs avg=%.2fs/epoch' %
            (tn_batch, acc, bst_acc, tot_time, avg_time))
        if not is_best:
          continue
        # save gen parameters if necessary (reached only on a new best accuracy)
tot_time = time.time() - start
ini_gen *= 100
bst_acc *= 100
bst_epk += 1
print('#mnist=%d %s@%d=%.2f iniacc=%.2f et=%.0fs' %
(tn_size, flags.kd_model, bst_epk, bst_acc, ini_gen, tot_time))
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
03f83b3a694301dea1d55fc6a15c0c9f2974f189 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_pep263.py | 598d980b2a67e289dfefb23de2265ee229c13b0e | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,852 | py | # -*- coding: koi8-r -*-
import unittest
from test import support
class PEP263Test(unittest.TestCase):
def test_pep263(self):
self.assertEqual(
"ðÉÔÏÎ".encode("utf-8"),
b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
)
self.assertEqual(
"\ð".encode("utf-8"),
b'\\\xd0\x9f'
)
def test_compilestring(self):
# see #1882
c = compile(b"\n# coding: utf-8\nu = '\xc3\xb3'\n", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['u'], '\xf3')
def test_issue2301(self):
try:
compile(b"# coding: cp932\nprint '\x94\x4e'", "dummy", "exec")
except SyntaxError as v:
self.assertEqual(v.text, "print '\u5e74'\n")
else:
self.fail()
def test_issue4626(self):
c = compile("# coding=latin-1\n\u00c6 = '\u00c6'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['\xc6'], '\xc6')
def test_issue3297(self):
c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['a'], d['b'])
self.assertEqual(len(d['a']), len(d['b']))
self.assertEqual(ascii(d['a']), ascii(d['b']))
def test_issue7820(self):
# Ensure that check_bom() restores all bytes in the right order if
# check_bom() fails in pydebug mode: a buffer starts with the first
# byte of a valid BOM, but next bytes are different
# one byte in common with the UTF-16-LE BOM
self.assertRaises(SyntaxError, eval, b'\xff\x20')
# two bytes in common with the UTF-8 BOM
self.assertRaises(SyntaxError, eval, b'\xef\xbb\x20')
def test_main():
support.run_unittest(PEP263Test)
if __name__=="__main__":
test_main()
| [
"[email protected]"
] | |
b73c82ceb84172586c6092a5ce99dceb1c4cfeb1 | fe06311a7de13a02ca0be37d84c542c3cece3f33 | /Chapter35/file_35_1_3b.py | ea3649be523e31c869c950e58a5182dc36a0424f | [] | no_license | mooksys/Python_Algorithms | a4a84ddabc34ec4b7cc0ac01d55019880af38514 | 375817e3dfdec94411cf245fe3f685a69d92b948 | refs/heads/master | 2020-08-24T06:35:05.791979 | 2018-07-30T01:22:24 | 2018-07-30T01:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | ELEMENTS_OF_A = 20
ELEMENTS_OF_B = 30
# Store the values read from input into lists a and b.
a = [None] * ELEMENTS_OF_A
b = [None] * ELEMENTS_OF_B
for i in range(ELEMENTS_OF_A):
a[i] = float(input())
for i in range(ELEMENTS_OF_B):
b[i] = float(input())
# Build the list new_arr.
new_arr = []
for element in a:
new_arr.append(element)
for element in b:
new_arr.append(element)
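# (An equivalent, more idiomatic one-liner would be new_arr = a + b;
# the explicit loops above mirror the exercise's element-by-element approach.)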
# Print the list new_arr.
for element in new_arr:
print(element, end = "\t") | [
"[email protected]"
] | |
7f34525db79355d8b29d94cd4d602be3e174f216 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/rafter.py | 4901806228c6a101f8104a174897153cd9ad05db | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 666 | py | ii = [('CookGHP3.py', 1), ('MarrFDI.py', 3), ('GodwWSL2.py', 3), ('KembFJ1.py', 1), ('WilkJMC3.py', 1), ('GellWPT.py', 1), ('GellWPT2.py', 2), ('WilkJMC2.py', 10), ('AinsWRR3.py', 1), ('KiddJAE.py', 3), ('CrokTPS.py', 1), ('BuckWGM.py', 1), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('LeakWTI4.py', 1), ('LeakWTI.py', 2), ('MedwTAI2.py', 1), ('BailJD3.py', 1), ('WilkJMC.py', 1), ('WestJIT.py', 1), ('FitzRNS4.py', 2), ('FitzRNS.py', 4), ('KembFJ2.py', 1), ('LewiMJW.py', 1), ('BellCHM.py', 1), ('AinsWRR2.py', 2), ('ClarGE3.py', 1), ('FitzRNS2.py', 2), ('HogaGMM2.py', 12), ('WordWYR.py', 2), ('ThomWEC.py', 1), ('ClarGE4.py', 1)] | [
"[email protected]"
] | |
f8066949fde26242a622104d46dbf942a6148195 | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2012 and earlier/superdonors-20121023/graphic_config.py | f8301f5d3df1c9e94821f63c2824fad43e9647b1 | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1o6fwDxOQI70FxDnK7_PaXad8viYcTkY7Q1a_B6uzdlE'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"[email protected]"
] | |
c1a3a789c006348284a0eb6f9830c24029f49b40 | 2940f5416082dadd9c646cd9a46d2d0a99883efb | /venv/Lib/site-packages/scipy/sparse/base.py | 75a8f1815614e98f8ce7b5394d6b3f13ca7e93b0 | [
"MIT",
"BSD-3-Clause",
"Python-2.0",
"Qhull",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause",
"GPL-3.0-or-later",
"BSD-3-Clause-Open-MPI",
"GCC-exception-3.1",
"GPL-3.0-only"
] | permissive | tpike3/SugarScape | 4813e4fefbfb0a701f5913d74f045fd0eaed1942 | 39efe4007fba2b12b75c72f7795827a1f74d640b | refs/heads/main | 2021-06-20T03:55:46.288721 | 2021-01-20T17:06:35 | 2021-01-20T17:06:35 | 168,583,530 | 11 | 3 | MIT | 2021-01-20T17:19:53 | 2019-01-31T19:29:40 | Jupyter Notebook | UTF-8 | Python | false | false | 42,103 | py | """Base class for sparse matrices"""
import numpy as np
from .sputils import (isdense, isscalarlike, isintlike,
get_sum_dtype, validateaxis, check_reshape_kwargs,
check_shape, asmatrix)
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning', 'SparseEfficiencyWarning']
class SparseWarning(Warning):
pass
class SparseFormatWarning(SparseWarning):
pass
class SparseEfficiencyWarning(SparseWarning):
pass
# The formats that we might potentially understand.
_formats = {'csc': [0, "Compressed Sparse Column"],
'csr': [1, "Compressed Sparse Row"],
'dok': [2, "Dictionary Of Keys"],
'lil': [3, "List of Lists"],
'dod': [4, "Dictionary of Dictionaries"],
'sss': [5, "Symmetric Sparse Skyline"],
'coo': [6, "COOrdinate"],
'lba': [7, "Linpack BAnded"],
'egd': [8, "Ellpack-itpack Generalized Diagonal"],
'dia': [9, "DIAgonal"],
'bsr': [10, "Block Sparse Row"],
'msr': [11, "Modified compressed Sparse Row"],
'bsc': [12, "Block Sparse Column"],
'msc': [13, "Modified compressed Sparse Column"],
'ssk': [14, "Symmetric SKyline"],
'nsk': [15, "Nonsymmetric SKyline"],
'jad': [16, "JAgged Diagonal"],
'uss': [17, "Unsymmetric Sparse Skyline"],
'vbr': [18, "Variable Block Row"],
'und': [19, "Undefined"]
}
# These univariate ufuncs preserve zeros.
_ufuncs_with_fixed_point_at_zero = frozenset([
np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
MAXPRINT = 50
class spmatrix(object):
""" This class provides a base class for all sparse matrices. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
ndim = 2
def __init__(self, maxprint=MAXPRINT):
self._shape = None
if self.__class__.__name__ == 'spmatrix':
raise ValueError("This class is not intended"
" to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self, shape):
"""See `reshape`."""
# Make sure copy is False since this is in place
# Make sure format is unchanged because we are doing a __dict__ swap
new_matrix = self.reshape(shape, copy=False).asformat(self.format)
self.__dict__ = new_matrix.__dict__
def get_shape(self):
"""Get shape of a matrix."""
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def reshape(self, *args, **kwargs):
"""reshape(self, shape, order='C', copy=False)
Gives a new shape to a sparse matrix without changing its data.
Parameters
----------
shape : length-2 tuple of ints
The new shape should be compatible with the original shape.
order : {'C', 'F'}, optional
Read the elements using this index order. 'C' means to read and
write the elements using C-like index order; e.g., read entire first
row, then second row, etc. 'F' means to read and write the elements
using Fortran-like index order; e.g., read entire first column, then
second column, etc.
copy : bool, optional
Indicates whether or not attributes of self should be copied
whenever possible. The degree to which attributes are copied varies
depending on the type of sparse matrix being used.
Returns
-------
reshaped_matrix : sparse matrix
A sparse matrix with the given `shape`, not necessarily of the same
format as the current object.
See Also
--------
numpy.matrix.reshape : NumPy's implementation of 'reshape' for
matrices
"""
# If the shape already matches, don't bother doing an actual reshape
# Otherwise, the default is to convert to COO and use its reshape
shape = check_shape(args, self.shape)
order, copy = check_reshape_kwargs(kwargs)
if shape == self.shape:
if copy:
return self.copy()
else:
return self
return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
def resize(self, shape):
"""Resize the matrix in-place to dimensions given by ``shape``
Any elements that lie within the new shape will remain at the same
indices, while non-zero elements lying outside the new shape are
removed.
Parameters
----------
shape : (int, int)
number of rows and columns in the new matrix
Notes
-----
The semantics are not identical to `numpy.ndarray.resize` or
`numpy.resize`. Here, the same data will be maintained at each index
before and after reshape, if that index is within the new bounds. In
numpy, resizing maintains contiguity of the array, moving elements
around in the logical matrix but not within a flattened representation.
We give no guarantees about whether the underlying data attributes
(arrays, etc.) will be modified in place or replaced with new objects.
"""
# As an inplace operation, this requires implementation in each format.
raise NotImplementedError(
'{}.resize is not implemented'.format(type(self).__name__))
def astype(self, dtype, casting='unsafe', copy=True):
"""Cast the matrix elements to a specified type.
Parameters
----------
dtype : string or numpy dtype
Typecode or data-type to which to cast the data.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
Defaults to 'unsafe' for backwards compatibility.
'no' means the data types should not be cast at all.
'equiv' means only byte-order changes are allowed.
'safe' means only casts which can preserve values are allowed.
'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
'unsafe' means any data conversions may be done.
copy : bool, optional
If `copy` is `False`, the result might share some memory with this
matrix. If `copy` is `True`, it is guaranteed that the result and
this matrix do not share any memory.
"""
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self.tocsr().astype(
dtype, casting=casting, copy=copy).asformat(self.format)
elif copy:
return self.copy()
else:
return self
def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
def __iter__(self):
for r in range(self.shape[0]):
yield self[r, :]
def getmaxprint(self):
"""Maximum number of elements to display when printed."""
return self.maxprint
def count_nonzero(self):
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike getnnz() and the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
"""
raise NotImplementedError("count_nonzero not implemented for %s." %
self.__class__.__name__)
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros.
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole matrix, in
each column, or in each row.
See also
--------
count_nonzero : Number of non-zero entries
"""
raise NotImplementedError("getnnz not implemented for %s." %
self.__class__.__name__)
@property
def nnz(self):
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self.getnnz()
def getformat(self):
"""Format of a matrix representation as a string."""
return getattr(self, 'format', 'und')
def __repr__(self):
_, format_name = _formats[self.getformat()]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, format_name))
def __str__(self):
maxprint = self.getmaxprint()
A = self.tocoo()
# helper function, outputs "(i,j) v"
def tostr(row, col, data):
triples = zip(list(zip(row, col)), data)
return '\n'.join([(' %s\t%s' % t) for t in triples])
if self.nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
def __bool__(self): # Simple -- other ideas?
if self.shape == (1, 1):
return self.nnz != 0
else:
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all().")
__nonzero__ = __bool__
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? But for some uses the number of
# non-zeros is more important. For now, raise an exception!
def __len__(self):
raise TypeError("sparse matrix length is ambiguous; use getnnz()"
" or shape[0]")
def asformat(self, format, copy=False):
"""Return this matrix in the passed format.
Parameters
----------
format : {str, None}
The desired matrix format ("csr", "csc", "lil", "dok", "array", ...)
or None for no conversion.
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : This matrix in the passed format.
"""
if format is None or format == self.format:
if copy:
return self.copy()
else:
return self
else:
try:
convert_method = getattr(self, 'to' + format)
except AttributeError as e:
raise ValueError('Format {} is unknown.'.format(format)) from e
# Forward the copy kwarg, if it's accepted.
try:
return convert_method(copy=copy)
except TypeError:
return convert_method()
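# Illustrative round trip (an assumed usage sketch, not part of scipy's own
# examples): for a COO matrix A, A.asformat('csr') dispatches to
# A.tocsr(copy=False), while A.asformat(None, copy=True) returns A.copy().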
###################################################################
# NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse matrix format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################
def multiply(self, other):
"""Point-wise multiplication by another matrix
"""
return self.tocsr().multiply(other)
def maximum(self, other):
"""Element-wise maximum between this and another matrix."""
return self.tocsr().maximum(other)
def minimum(self, other):
"""Element-wise minimum between this and another matrix."""
return self.tocsr().minimum(other)
def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
return self * other
def power(self, n, dtype=None):
"""Element-wise power."""
return self.tocsr().power(n, dtype=dtype)
def __eq__(self, other):
return self.tocsr().__eq__(other)
def __ne__(self, other):
return self.tocsr().__ne__(other)
def __lt__(self, other):
return self.tocsr().__lt__(other)
def __gt__(self, other):
return self.tocsr().__gt__(other)
def __le__(self, other):
return self.tocsr().__le__(other)
def __ge__(self, other):
return self.tocsr().__ge__(other)
def __abs__(self):
return abs(self.tocsr())
def __round__(self, ndigits=0):
return round(self.tocsr(), ndigits=ndigits)
def _add_sparse(self, other):
return self.tocsr()._add_sparse(other)
def _add_dense(self, other):
return self.tocoo()._add_dense(other)
def _sub_sparse(self, other):
return self.tocsr()._sub_sparse(other)
def _sub_dense(self, other):
return self.todense() - other
def _rsub_dense(self, other):
# note: this can't be replaced by other + (-self) for unsigned types
return other - self.todense()
def __add__(self, other): # self + other
if isscalarlike(other):
if other == 0:
return self.copy()
# Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._add_sparse(other)
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._add_dense(other)
else:
return NotImplemented
def __radd__(self,other): # other + self
return self.__add__(other)
def __sub__(self, other): # self - other
if isscalarlike(other):
if other == 0:
return self.copy()
raise NotImplementedError('subtracting a nonzero scalar from a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._sub_sparse(other)
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._sub_dense(other)
else:
return NotImplemented
def __rsub__(self,other): # other - self
if isscalarlike(other):
if other == 0:
return -self.copy()
raise NotImplementedError('subtracting a sparse matrix from a '
'nonzero scalar is not supported')
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._rsub_dense(other)
else:
return NotImplemented
def __mul__(self, other):
"""interpret other and call one of the following
self._mul_scalar()
self._mul_vector()
self._mul_multivector()
self._mul_sparse_matrix()
"""
M, N = self.shape
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._mul_vector(other)
elif other.shape == (N, 1):
return self._mul_vector(other.ravel()).reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._mul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[1] != other.shape[0]:
raise ValueError('dimension mismatch')
return self._mul_sparse_matrix(other)
# If it's a list or whatever, treat it like a matrix
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmul__ can kick in if that's implemented.
return NotImplemented
try:
other.shape
except AttributeError:
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N, 1):
raise ValueError('dimension mismatch')
result = self._mul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = asmatrix(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1, 1)
return result
elif other.ndim == 2:
##
# dense 2D array or matrix ("multivector")
if other.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch')
result = self._mul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = asmatrix(result)
return result
else:
raise ValueError('could not interpret dimensions')
# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
return self.tocsr()._mul_scalar(other)
def _mul_vector(self, other):
return self.tocsr()._mul_vector(other)
def _mul_multivector(self, other):
return self.tocsr()._mul_multivector(other)
def _mul_sparse_matrix(self, other):
return self.tocsr()._mul_sparse_matrix(other)
def __rmul__(self, other): # other * self
if isscalarlike(other):
return self.__mul__(other)
else:
# Don't use asarray unless we have to
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
return (self.transpose() * tr).transpose()
#######################
# matmul (@) operator #
#######################
def __matmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
####################
# Other Arithmetic #
####################
def _divide(self, other, true_divide=False, rdivide=False):
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float_):
return self.astype(np.float_)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer) and
np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
return np.true_divide(self.todense(), other)
else:
return np.divide(self.todense(), other)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif isspmatrix(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float_):
return self_csr.astype(np.float_)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
def __truediv__(self, other):
return self._divide(other, true_divide=True)
def __div__(self, other):
# Always do true division
return self._divide(other, true_divide=True)
def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __neg__(self):
return -self.tocsr()
def __iadd__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __imul__(self, other):
return NotImplemented
def __idiv__(self, other):
return self.__itruediv__(other)
def __itruediv__(self, other):
return NotImplemented
def __pow__(self, other):
if self.shape[0] != self.shape[1]:
raise TypeError('matrix is not square')
if isintlike(other):
other = int(other)
if other < 0:
raise ValueError('exponent must be >= 0')
if other == 0:
from .construct import eye
return eye(self.shape[0], dtype=self.dtype)
elif other == 1:
return self.copy()
else:
tmp = self.__pow__(other//2)
if (other % 2):
return self * tmp * tmp
else:
return tmp * tmp
elif isscalarlike(other):
raise ValueError('exponent must be an integer')
else:
return NotImplemented
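# Note (added for illustration): the integer branch above uses binary
# exponentiation, so A ** 8 performs three sparse multiplies
# (A**2, then squaring to A**4, then to A**8) rather than seven.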
def __getattr__(self, attr):
if attr == 'A':
return self.toarray()
elif attr == 'T':
return self.transpose()
elif attr == 'H':
return self.getH()
elif attr == 'real':
return self._real()
elif attr == 'imag':
return self._imag()
elif attr == 'size':
return self.getnnz()
else:
raise AttributeError(attr + " not found")
def transpose(self, axes=None, copy=False):
"""
Reverses the dimensions of the sparse matrix.
Parameters
----------
axes : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value.
copy : bool, optional
Indicates whether or not attributes of `self` should be
copied whenever possible. The degree to which attributes
are copied varies depending on the type of sparse matrix
being used.
Returns
-------
p : `self` with the dimensions reversed.
See Also
--------
numpy.matrix.transpose : NumPy's implementation of 'transpose'
for matrices
"""
return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
def conj(self, copy=True):
"""Element-wise complex conjugation.
If the matrix is of non-complex data type and `copy` is False,
this method does nothing and the data is not copied.
Parameters
----------
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : The element-wise complex conjugate.
"""
if np.issubdtype(self.dtype, np.complexfloating):
return self.tocsr(copy=copy).conj(copy=False)
elif copy:
return self.copy()
else:
return self
def conjugate(self, copy=True):
return self.conj(copy=copy)
conjugate.__doc__ = conj.__doc__
# Renamed conjtranspose() -> getH() for compatibility with dense matrices
def getH(self):
"""Return the Hermitian transpose of this matrix.
See Also
--------
numpy.matrix.getH : NumPy's implementation of `getH` for matrices
"""
return self.transpose().conj()
def _real(self):
return self.tocsr()._real()
def _imag(self):
return self.tocsr()._imag()
def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask], A.col[nz_mask])
def getcol(self, j):
"""Returns a copy of column j of the matrix, as an (m x 1) sparse
matrix (column vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Post-multiply by a (n x 1) column vector 'a' containing all zeros
# except for a_j = 1
from .csc import csc_matrix
n = self.shape[1]
if j < 0:
j += n
if j < 0 or j >= n:
raise IndexError("index out of bounds")
col_selector = csc_matrix(([1], [[j], [0]]),
shape=(n, 1), dtype=self.dtype)
return self * col_selector
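# Sketch of the selector trick above (illustrative): for j = 1 and n = 3,
# col_selector is the CSC column vector [[0], [1], [0]], so the product
# self * col_selector picks out column 1 as an (m x 1) sparse matrix.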
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n) sparse
matrix (row vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Pre-multiply by a (1 x m) row vector 'a' containing all zeros
# except for a_i = 1
from .csr import csr_matrix
m = self.shape[0]
if i < 0:
i += m
if i < 0 or i >= m:
raise IndexError("index out of bounds")
row_selector = csr_matrix(([1], [[0], [i]]),
shape=(1, m), dtype=self.dtype)
return row_selector * self
# The following dunder methods cannot be implemented.
#
# def __array__(self):
# # Sparse matrices rely on NumPy wrapping them in object arrays under
# # the hood to make unary ufuncs work on them. So we cannot raise
# # TypeError here - which would be handy to not give users object
# # arrays they probably don't want (they're looking for `.toarray()`).
# #
# # Conversion with `toarray()` would also break things because of the
# # behavior discussed above, plus we want to avoid densification by
# # accident because that can too easily blow up memory.
#
# def __array_ufunc__(self):
# # We cannot implement __array_ufunc__ due to mismatching semantics.
# # See gh-7707 and gh-7349 for details.
#
# def __array_function__(self):
# # We cannot implement __array_function__ due to mismatching semantics.
# # See gh-10362 for details.
def todense(self, order=None, out=None):
"""
Return a dense matrix representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-D, optional
If specified, uses this array (or `numpy.matrix`) as the
output buffer instead of allocating a new array to
return. The provided array must have the same shape and
dtype as the sparse matrix on which you are calling the
method.
Returns
-------
arr : numpy.matrix, 2-D
A NumPy matrix object with the same shape and containing
the same data represented by the sparse matrix, with the
requested memory order. If `out` was passed and was an
array (rather than a `numpy.matrix`), it will be filled
with the appropriate values and returned wrapped in a
`numpy.matrix` object that shares the same memory.
"""
return asmatrix(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multidimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-D, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-D
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo(copy=False).toarray(order=order, out=out)
# Any sparse matrix format deriving from spmatrix must define one of
# tocsr or tocoo. The other conversion methods may be implemented for
# efficiency, but are not required.
def tocsr(self, copy=False):
"""Convert this matrix to Compressed Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csr_matrix.
"""
return self.tocoo(copy=copy).tocsr(copy=False)
def todok(self, copy=False):
"""Convert this matrix to Dictionary Of Keys format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dok_matrix.
"""
return self.tocoo(copy=copy).todok(copy=False)
def tocoo(self, copy=False):
"""Convert this matrix to COOrdinate format.
With copy=False, the data/indices may be shared between this matrix and
the resultant coo_matrix.
"""
return self.tocsr(copy=False).tocoo(copy=copy)
def tolil(self, copy=False):
"""Convert this matrix to List of Lists format.
With copy=False, the data/indices may be shared between this matrix and
the resultant lil_matrix.
"""
return self.tocsr(copy=False).tolil(copy=copy)
def todia(self, copy=False):
"""Convert this matrix to sparse DIAgonal format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dia_matrix.
"""
return self.tocoo(copy=copy).todia(copy=False)
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix to Block Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant bsr_matrix.
When blocksize=(R, C) is provided, it will be used for construction of
the bsr_matrix.
"""
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
def tocsc(self, copy=False):
"""Convert this matrix to Compressed Sparse Column format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csc_matrix.
"""
return self.tocsr(copy=copy).tocsc(copy=False)
def copy(self):
"""Returns a copy of this matrix.
No data/indices will be shared between the returned value and current
matrix.
"""
return self.__class__(self, copy=True)
def sum(self, axis=None, dtype=None, out=None):
"""
Sum the matrix elements over a given axis.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the sum is computed. The default is to
compute the sum of all the matrix elements, returning a scalar
(i.e., `axis` = `None`).
dtype : dtype, optional
The type of the returned matrix and of the accumulator in which
the elements are summed. The dtype of `a` is used by default
unless `a` has an integer dtype of less precision than the default
platform integer. In that case, if `a` is signed then the platform
integer is used while if `a` is unsigned then an unsigned integer
of the same precision as the platform integer is used.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
sum_along_axis : np.matrix
A matrix with the same shape as `self`, with the specified
axis removed.
See Also
--------
numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
"""
validateaxis(axis)
# We use multiplication by a matrix of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
res_dtype = get_sum_dtype(self.dtype)
if axis is None:
# sum over rows and columns
return (self * asmatrix(np.ones(
(n, 1), dtype=res_dtype))).sum(
dtype=dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
# sum over columns
ret = asmatrix(np.ones(
(1, m), dtype=res_dtype)) * self
else:
# sum over rows
ret = self * asmatrix(
np.ones((n, 1), dtype=res_dtype))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
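# Worked shape example for the ones-vector trick above (illustrative):
# for an (m, n) matrix A, A.sum(axis=1) is computed as A * np.ones((n, 1)),
# an (m, 1) matrix of row sums, while axis=0 uses np.ones((1, m)) * A to
# produce the (1, n) column sums.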
def mean(self, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the matrix elements. The average is taken
over all elements in the matrix by default, otherwise over the
specified axis. `float64` intermediate and return values are used
for integer inputs.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the mean is computed. The default is to compute
the mean of all elements in the matrix (i.e., `axis` = `None`).
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
m : np.matrix
See Also
--------
numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
"""
def _is_integral(dtype):
return (np.issubdtype(dtype, np.integer) or
np.issubdtype(dtype, np.bool_))
validateaxis(axis)
res_dtype = self.dtype.type
integral = _is_integral(self.dtype)
# output dtype
if dtype is None:
if integral:
res_dtype = np.float64
else:
res_dtype = np.dtype(dtype).type
# intermediate dtype for summation
inter_dtype = np.float64 if integral else res_dtype
inter_self = self.astype(inter_dtype)
if axis is None:
return (inter_self / np.array(
self.shape[0] * self.shape[1]))\
.sum(dtype=res_dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
return (inter_self * (1.0 / self.shape[0])).sum(
axis=0, dtype=res_dtype, out=out)
else:
return (inter_self * (1.0 / self.shape[1])).sum(
axis=1, dtype=res_dtype, out=out)
def diagonal(self, k=0):
"""Returns the kth diagonal of the matrix.
Parameters
----------
k : int, optional
Which diagonal to get, corresponding to elements a[i, i+k].
Default: 0 (the main diagonal).
.. versionadded:: 1.0
See also
--------
numpy.diagonal : Equivalent numpy function.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> A.diagonal()
array([1, 0, 5])
>>> A.diagonal(k=1)
array([2, 3])
"""
return self.tocsr().diagonal(k=k)
def setdiag(self, values, k=0):
"""
Set diagonal or off-diagonal elements of the array.
Parameters
----------
values : array_like
New values of the diagonal elements.
Values may have any length. If the diagonal is longer than values,
then the remaining diagonal entries will not be set. If values are
longer than the diagonal, then the remaining values are ignored.
If a scalar value is given, all of the diagonal is set to it.
k : int, optional
Which off-diagonal to set, corresponding to elements a[i,i+k].
Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
self._setdiag(np.asarray(values), k)
def _setdiag(self, values, k):
M, N = self.shape
if k < 0:
if values.ndim == 0:
# broadcast
max_index = min(M+k, N)
for i in range(max_index):
self[i - k, i] = values
else:
max_index = min(M+k, N, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i - k, i] = v
else:
if values.ndim == 0:
# broadcast
max_index = min(M, N-k)
for i in range(max_index):
self[i, i + k] = values
else:
max_index = min(M, N-k, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i, i + k] = v
def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse matrix')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
def isspmatrix(x):
"""Is x of a sparse matrix type?
Parameters
----------
x
object to check for being a sparse matrix
Returns
-------
bool
True if x is a sparse matrix, False otherwise
Notes
-----
issparse and isspmatrix are aliases for the same function.
Examples
--------
>>> from scipy.sparse import csr_matrix, isspmatrix
>>> isspmatrix(csr_matrix([[5]]))
True
>>> from scipy.sparse import isspmatrix
>>> isspmatrix(5)
False
"""
return isinstance(x, spmatrix)
issparse = isspmatrix
| [
"[email protected]"
] | |
7a047c3eaac3fc87eeeb8acc8bd41efee40e159c | 897871d09b8b1e86c5a48599839ba9534260f2c9 | /aromawine3-new_update__with_checkout/admin_manage_notification/admin.py | d94ca7e5b51a57bff11127a6e62b3e27e9e735ab | [] | no_license | sidkushwah123/wine | 0b8b8fdf44068b4488d5f1ae5d34a24d3fff19a9 | bb29e84fb4a0709aca36e819ae6191147a9691b5 | refs/heads/main | 2023-07-27T14:03:06.814484 | 2021-09-11T15:25:39 | 2021-09-11T15:25:39 | 405,354,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from django.contrib import admin
from .models import AwNotification
from import_export.admin import ImportExportModelAdmin
# Register your models here.
class AwNotificationAdmin(ImportExportModelAdmin):
list_display = ('user', 'Send_Status', 'Created_by', 'Created_date', 'Read_Status', 'Read_date')
admin.site.register(AwNotification,AwNotificationAdmin) | [
"[email protected]"
] | |
b23ab19206208ca963896607269c13453e188470 | 0d61f90e3a7877e91d72fed71b0895c7070dc046 | /final_project/.history/project/menu_app/urls_20210103102257.py | 433c7dab24301af7a57b5a070851582c545d4fcd | [] | no_license | lienusrob/final_project | 44d7d90dc0b7efc0cf55501549a5af0110d09b3b | 4164769626813f044ec2af3e7842514b5699ef77 | refs/heads/master | 2023-02-10T16:36:33.439215 | 2021-01-05T09:34:01 | 2021-01-05T09:34:01 | 325,002,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py |
# from project.account_app.views import HomeView
from django.urls import path, re_path
from . import views
from .views import view_cart, add_to_cart, adjust_cart
urlpatterns = [
#path('', views.menu_list_view(template_name = 'menu_app/menu_list.html'), name = 'menu_list'),
#path('menu/', views.menu_list_view, name = 'menu_list'),
#path ('', views.menu_category, name = 'menu_category'),
# path ('admin_page/', views.MenuItem, name = 'menu_item'),
# path ('', views.home, name = "home"),
# path ('cart/', views.cart, name = "cart"),
# path ('<str:name>/', views.menu_details, name = 'menu_details'),
path('', views.home, name="home"),
path('cart/', views.cart, name="cart"),
path('<str:name>/', views.menu_details, name="menu_details"),
re_path(r'^$', view_cart, name='view_cart'),
re_path(r'^add/(?P<id>\d+)', add_to_cart, name='add_to_cart'),
re_path(r'^adjust/(?P<id>\d+)', adjust_cart, name='adjust_cart'),
] | [
"[email protected]"
] | |
80cecf6b14e008ae28b91dce83556e31dd9fb1a7 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_81/507.py | bdd84a38baf779ae289c6f1817d6c5003b53e93f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | fp = open('A-large.in')
T = int(fp.readline())
for t in range(T):
N = int(fp.readline())
M = []
for n in range(N):
s = fp.readline().strip()
M.append(s)
print "Case #%d:"%(t+1)
WP = []
for n in range(N):
playcount = 0
wins = 0.0
wp = 0.0
for i in range(len(M[n])):
if M[n][i] == '0' :
playcount += 1
if M[n][i] == '1' :
playcount += 1
wins += 1
if playcount != 0: wp = wins/playcount
# print playcount, wins, (wins/playcount)
WP.append(wp)
OWP = []
for n in range(N):
owp = 0.0
wpcount = 0.0
for j in range(N):
if M[j][n] != '.':
playcount = 0
wins = 0.0
wp = 0.0
for i in range(len(M[n])):
if (M[j][i] == '0') and (i != n):
playcount += 1
if (M[j][i] == '1') and (i != n):
playcount += 1
wins += 1
if playcount != 0:
wp = wins/playcount
wpcount +=1
owp += wp
OWP.append(owp/wpcount)
OOWP = []
for n in range(N):
oowp = 0.0
count = 0.0
for i in range(N):
if M[n][i] != '.':
oowp += OWP[i]
count += 1
OOWP.append(oowp/count)
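# RPI (Rating Percentage Index) combines a team's own winning percentage
# with its opponents' (OWP) and opponents'-opponents' (OOWP) percentages;
# the 0.25/0.50/0.25 weights below follow the problem statement.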
for n in range(N):
RPI = 0.25 * WP[n] + 0.50 * OWP[n] + 0.25 * OOWP[n]
print RPI
| [
"[email protected]"
] | |
d8b174c28eccb7c63f64135fafc8e975c5d0d119 | fd574c2d37afd8fddc548f64aa8befdfcf96d43a | /greedy/queueByHeight.py | bcee8e588c9ce1d64f6ecc744310778402e743e9 | [] | no_license | g10guang/LeetCode | 023f1630611b05edf0ba069adf383e86db66c3f3 | da2a75c3c4a853b4768ae03bab7725e11cf38d1a | refs/heads/master | 2021-07-05T15:29:03.007752 | 2020-07-15T14:32:10 | 2020-07-15T14:32:10 | 129,694,867 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | # https://leetcode.com/problems/queue-reconstruction-by-height/description/
class Solution:
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
length = len(people)
i = length - 1
l = sorted(people, key=lambda x: x[0] * 1000 + x[1])
while i >= 0 and l[-1][0] == l[i][0]:
i -= 1
while i >= 0:
pos = l[i][1]
k = i - 1
while k >= 0 and l[k][0] == l[i][0]:
pos -= 1
k -= 1
for j in range(pos):
t = i+j
l[t+1], l[t] = l[t], l[t+1]
i -= 1
return l
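# Minimal usage sketch (the driver below is an addition for illustration,
# not part of the original solution); input and expected output follow
# LeetCode 406's sample case.
if __name__ == '__main__':
    people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
    print(Solution().reconstructQueue(people))
    # -> [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]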
| [
"[email protected]"
] | |
747653f0003ab3203563cf84d3100573691845ae | 20b04495f17e7cb9970feffb92eb2b2fc05289a3 | /sample/ThreadPy.py | 7bf1df16327b09d783ffb6cae3f622729d8c4e3a | [] | no_license | to-yuki/pythonLab | 4a69e5f24df86005f32bda0e41ddfd23a2942335 | 270487690818faa90d5c17e6619d0b531f9a4f39 | refs/heads/master | 2021-09-09T23:59:25.938777 | 2018-03-20T09:26:18 | 2018-03-20T09:26:18 | 108,782,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # -*- coding: UTF-8 -*-
import threading
from time import sleep
# First function to run in a thread
def thread1():
# Print "T1" once per second
for i in range(5):
sleep(1)
print "T1 ",
# Second function to run in a thread
def thread2():
# Print "T2" every two seconds
for i in range(5):
sleep(2)
print "T2 ",
# Main thread function
def mainThread():
# Create the thread objects
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
# Start the created thread objects
t1.start()
t2.start()
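# Note (added): both threads are non-daemon, so the interpreter waits for
# them to finish even though mainThread() returns without calling join().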
# Invoke the main thread function
if __name__=='__main__':
mainThread() | [
"[email protected]"
] | |
d033b899c9e461cf8c7d511f8aea043dfb973638 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /grouptesting/utils.py | 8a680f40932fb818fdc48333c6f8d26210eaa058 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 2,223 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Some useful array manipulations for sampling."""
import jax
import jax.numpy as np
def unique(rng, binary_vectors):
"""Computes the number of unique binary columns."""
alpha = jax.random.normal(rng, shape=((1, binary_vectors.shape[1])))
return 1 + np.count_nonzero(
np.diff(np.sort(np.sum(binary_vectors * alpha, axis=-1))))
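# How the projection above works (explanatory note): each binary vector is
# dotted with a single Gaussian vector alpha, and distinct binary vectors
# receive distinct projections with probability one, so counting the nonzero
# gaps in the sorted projections (plus one) counts the unique vectors.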
def select_from_sizes(values, sizes):
"""Selects using indices group_sizes the relevant values for a parameter.
Given a parameter vector (or possibly constant) that describes values
for groups of size 1,2,...., k_max selects values according to vector
group_sizes. When an item in group_sizes is larger than the size of
the vector, we revert to the last element of the vector by default.
Note that the values array is 0-indexed, therefore the values corresponding
to size 1 is values[0], to size 2 values[1] and more generally, the value for
a group of size i is values[i-1].
Args:
values: a np.ndarray that can be of size 1 or more, from which to select
the values from.
sizes: np.array[int] representing the group sizes we want to extract the
values of.
Returns:
vector of parameter values, chosen at corresponding group sizes,
of the same size of group_sizes.
Raises:
ValueError when the size array is not one dimensional.
"""
dim = np.ndim(values)
if dim > 1:
raise ValueError(f"sizes argument has dimension {dim} > 1.")
# The values are 0-indexed, but sizes are strictly positives.
indices = np.minimum(sizes, np.size(values)) - 1
return np.squeeze(values[list(indices)])
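# Illustrative call (an assumed example, not from the original module):
# with per-size values for groups of size 1..3, larger sizes clamp to the
# last entry.
def _example_select_from_sizes():
    values = np.array([0.9, 0.8, 0.7])
    sizes = np.array([1, 3, 8])
    # indices become [0, 2, 2], so the result is [0.9, 0.7, 0.7]
    return select_from_sizes(values, sizes)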
| [
"[email protected]"
] | |
2377ec59602cc8f7f6606e2b2bc07593ae2982a3 | 7b6377050fba4d30f00e9fb5d56dfacb22d388e1 | /xData/Documentation/keyword.py | 047eedd677d3fb462e8a9cbd6adeadcbebc84fca | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/fudge | 0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370 | 6ba80855ae47cb32c37f635d065b228fadb03412 | refs/heads/master | 2023-08-16T21:05:31.111098 | 2023-08-01T22:09:32 | 2023-08-01T22:09:32 | 203,678,373 | 21 | 4 | NOASSERTION | 2023-06-28T20:51:02 | 2019-08-21T23:22:20 | Python | UTF-8 | Python | false | false | 1,266 | py | # <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains the GNDS documentation child nodes keyword and keywords classes.
"""
from LUPY import ancestry as ancestryModule
from .. import suite as suiteModule
from .. import text as textModule
class Keyword( textModule.Text ) :
"""A class representing a GNDS documentation/abstract node."""
moniker = 'keyword'
keyName = 'type'
def __init__( self, label, type, text ) :
textModule.Text.__init__( self, text, label = label )
self.__type = type
@property
def type( self ) :
return( self.__type )
def XML_extraAttributes( self, **kwargs ) :
if( self.__type == '' ) : return ''
return ' type="%s"' % self.__type
@classmethod
def parseNodeUsingClass(cls, node, xPath, linkData, **kwargs):
label = node.get( 'label' )
type = node.get( 'type' )
return cls(label, type, None)
class Keywords( suiteModule.Suite ) :
moniker = 'keywords'
suiteName = 'type'
def __init__( self ) :
suiteModule.Suite.__init__( self, [ Keyword ] )
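# Minimal usage sketch (assumed API shape, based only on the classes above;
# whether Suite exposes an `add` method is an assumption):
#     keywords = Keywords()
#     keywords.add(Keyword('lab', 'institution', 'LLNL'))
# The sketch only shows how a typed Keyword would be labelled and stored.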
| [
"[email protected]"
] | |
4fb633e66e24821fa918896ec72f310a2c7d7300 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/count-square-submatrices-with-all-ones.py | aedd724cf51f450b6e3698b4879fa2c9d95b2743 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 501 | py | # Time: O(m * n)
# Space: O(1)
class Solution(object):
def countSquares(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
for i in xrange(1, len(matrix)):
for j in xrange(1, len(matrix[0])):
if not matrix[i][j]:
continue
l = min(matrix[i-1][j], matrix[i][j-1])
matrix[i][j] = l+1 if matrix[i-l][j-l] else l
return sum(x for row in matrix for x in row)
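# Minimal usage sketch (hypothetical driver added for illustration). After
# the in-place DP, each cell holds the side of the largest all-ones square
# ending there, so the sum counts every square submatrix of ones.
if __name__ == '__main__':
    grid = [[0, 1, 1, 1],
            [1, 1, 1, 1],
            [0, 1, 1, 1]]
    print(Solution().countSquares(grid))  # -> 15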
| [
"[email protected]"
] | |
9b66a08d83f538492518e8d3c941d747b9fd27df | b7851ffc689990a5c394697b1d016ba34307630c | /venv/lib/python3.8/site-packages/faker/providers/ssn/uk_UA/__init__.py | 7673ee7bc606d47605ae2f9243e218267865b6d0 | [] | no_license | denokenya/django-schooling-rest-api | f38fb5cc31a6f40462f9cb1dcc6c3fd36e1301c6 | 552b98d5494344049541df615f446713cb5da1fa | refs/heads/main | 2023-06-14T12:53:11.897887 | 2021-07-10T18:02:11 | 2021-07-10T18:02:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | from datetime import date
from .. import Provider as SsnProvider
class Provider(SsnProvider):
def ssn(self):
"""
Ukrainian "Реєстраційний номер облікової картки платника податків"
also known as "Ідентифікаційний номер фізичної особи".
"""
digits = []
# Number of days between 1899-12-31 and a birth date
for digit in str((self.generator.date_object() - date(1899, 12, 31)).days):
digits.append(int(digit))
# Person's sequence number
for _ in range(4):
digits.append(self.random_int(0, 9))
checksum = (
digits[0] * -1
+ digits[1] * 5
+ digits[2] * 7
+ digits[3] * 9
+ digits[4] * 4
+ digits[5] * 6
+ digits[6] * 10
+ digits[7] * 5
+ digits[8] * 7
)
# Remainder of a checksum divided by 11 or 1 if it equals to 10
digits.append(checksum % 11 % 10)
return "".join(str(digit) for digit in digits)
| [
"[email protected]"
] | |
2a5d6a8fa6c48f12df1474865332576ad3e7cadc | 7882860350c714e6c08368288dab721288b8d9db | /구현/swea1954.py | ed62a9b2ebde3549be409ada6635d1d7bd8ba1fd | [] | no_license | park-seonju/Algorithm | 682fca984813a54b92a3f2ab174e4f05a95921a8 | 30e5bcb756e9388693624e8880e57bc92bfda969 | refs/heads/master | 2023-08-11T18:23:49.644259 | 2021-09-27T10:07:49 | 2021-09-27T10:07:49 | 388,741,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | delta = (
(0, 1),   # column increases (move right)
(1, 0),   # row increases (move down)
(0, -1),  # column decreases (move left)
(-1, 0),  # row decreases (move up)
)
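# The deltas are applied in order (right, down, left, up), so each pass of
# the inner loops walks one ring of the spiral before stepping one cell
# inward; e.g. for N = 3 the finished board is
#   1 2 3
#   8 9 4
#   7 6 5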
T = int(input())
for test_case in range(1, T + 1):
N = int(input())
board = [[0] * N for _ in range(N)]
def snail():
row = 0
col = 0
num = 1
distance = max(N - 1, 1)
while True:
for i in range(4):
for _ in range(distance):
board[row][col] = num
num += 1
if num > N ** 2:
return
dr, dc = delta[i]
row += dr
col += dc
row += 1
col += 1
distance = max(1, distance - 2)
snail()
print('#%d' % test_case)
for r in range(N):
for c in range(N):
print(board[r][c], end=' ')
print() | [
"[email protected]"
] | |
51cdbcf4d68698ce397c18e4b7206e52ed374f3e | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/speedchat/TTSCSellbotNerfMenu.py | bbd255756bc5a8d3dac7b3e4c5b633742d7a73a5 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 1,965 | py | #Embedded file name: toontown.speedchat.TTSCSellbotNerfMenu
from toontown.toonbase import ToonPythonUtil as PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from otp.otpbase import OTPLocalizer
SellbotNerfMenu = [(OTPLocalizer.SellbotNerfMenuSections[0], [30150,
30151,
30152,
30153,
30154,
30155,
30156]), (OTPLocalizer.SellbotNerfMenuSections[1], [30157,
30158,
30159,
30160,
30161,
30162,
30163,
30164]), (OTPLocalizer.SellbotNerfMenuSections[2], [30165,
30166,
30167,
30168,
30169,
30170,
30171,
30172,
30173,
30174,
30175])]
class TTSCSellbotNerfMenu(SCMenu):
def __init__(self):
SCMenu.__init__(self)
self.__messagesChanged()
def destroy(self):
SCMenu.destroy(self)
def clearMenu(self):
SCMenu.clearMenu(self)
def __messagesChanged(self):
self.clearMenu()
try:
lt = base.localAvatar
except:
return
for section in SellbotNerfMenu:
if section[0] == -1:
for phrase in section[1]:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link Sellbot Nerf phrase %s which does not seem to exist' % phrase
break
self.append(SCStaticTextTerminal(phrase))
else:
menu = SCMenu()
for phrase in section[1]:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link Sellbot Nerf phrase %s which does not seem to exist' % phrase
break
menu.append(SCStaticTextTerminal(phrase))
menuName = str(section[0])
self.append(SCMenuHolder(menuName, menu))
| [
"[email protected]"
] | |
820c683151119ae0f56135a8a28e4731af344619 | dc9f2638209a9be235a1c4acc44fe2a26256c4b4 | /venv/projects/lib/python3.8/site-packages/pip/_vendor/requests/cookies.py | 98f2d259603a6600bbc45c48d57f097417ad8087 | [] | no_license | alwinruby/RealWorld | 4f5fcaed68fdd2d9fc37f5973fec365195cb3e9e | ec446f96f3545cb847429b5e33cefdc4f00ce432 | refs/heads/main | 2023-08-13T10:28:40.528047 | 2021-10-10T14:58:23 | 2021-10-10T14:58:23 | 408,079,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,430 | py | # -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
from ._internal_utils import to_native_string
from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers['Host'], encoding='utf-8')
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
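# Illustrative call (an assumed usage sketch, not part of requests itself):
#     header = get_cookie_header(jar, prepared_request)
# returns e.g. 'sessionid=abc123; csrftoken=xyz', or None when no stored
# cookie matches the request's domain and path.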
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (
(domain is None or cookie.domain == domain) and
(path is None or cookie.path == path)
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
                        if toReturn is not None:  # multiple cookies meet the passed-in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
def get_policy(self):
"""Return the CookiePolicy instance used."""
return self._policy
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = {
'version': 0,
'name': name,
'value': value,
'port': None,
'domain': '',
'path': '/',
'secure': False,
'expires': None,
'discard': True,
'comment': None,
'comment_url': None,
'rest': {'HttpOnly': None},
'rfc2109': False,
}
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
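# Hedged sketch of the "supercookie" default described above (names are
# illustrative): with no domain/path arguments the cookie is sent on every
# request, so pin it down explicitly when that is not intended:
#
#     c = create_cookie('session', 'xyz', domain='example.com', secure=True)
#     jar = RequestsCookieJar()
#     jar.set_cookie(c)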
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
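# Hedged example of the Morsel conversion above, assuming the stdlib
# SimpleCookie (Python 3 spelling; not imported by this module):
#
#     from http.cookies import SimpleCookie
#     sc = SimpleCookie()
#     sc['sid'] = '42'
#     sc['sid']['max-age'] = '3600'
#     cookie = morsel_to_cookie(sc['sid'])   # expires ~= time.time() + 3600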
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
:rtype: CookieJar
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
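# Hedged usage sketch (hypothetical values): build a jar from a plain dict,
# then top it up without clobbering what is already there:
#
#     jar = cookiejar_from_dict({'a': '1'})
#     jar = cookiejar_from_dict({'a': '2', 'b': '3'}, cookiejar=jar,
#                               overwrite=False)   # 'a' keeps the value '1'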
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
:rtype: CookieJar
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
| [
"[email protected]"
] | |
18130661e23c4633371e6d3b4d5f35ee3b109d5a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/whitish.py | cda5fd3383760008658310fb2429a5b96a5120d6 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 221 | py | ii = [('RennJIT.py', 11), ('SeniNSP.py', 1), ('LyelCPG.py', 3), ('WestJIT2.py', 9), ('MedwTAI.py', 3), ('WestJIT.py', 19), ('FitzRNS4.py', 3), ('FitzRNS.py', 8), ('BrewDTO.py', 17), ('FitzRNS2.py', 4), ('LyelCPG3.py', 1)] | [
"[email protected]"
] | |
f0a517b00e54ed242fe81c1004b6aee54258f5a3 | e7f67295e62fc5301ab23bce06c61f2311c2eeee | /mjml/core/registry.py | e2271ec00d336fba2502dc0692184cf39c26c0c4 | [
"MIT"
] | permissive | bayesimpact/mjml-stub | 94d10588359990cd58d2085429b19a3777c51f15 | 30bab3f2e197d2f940f58439f2e8cd9fadb58d48 | refs/heads/main | 2023-05-08T11:54:19.313877 | 2021-01-25T21:30:48 | 2021-01-25T21:30:48 | 344,026,118 | 0 | 0 | MIT | 2021-03-03T06:31:49 | 2021-03-03T06:31:48 | null | UTF-8 | Python | false | false | 828 | py |
__all__ = []
def _components():
from ..elements import (MjButton, MjText, MjSection, MjColumn, MjBody,
MjGroup, MjImage, MjDivider, MjTable, MjRaw)
from ..elements.head import (MjAttributes, MjFont, MjHead, MjPreview, MjStyle,
MjTitle)
components = {
'mj-button': MjButton,
'mj-text': MjText,
'mj-divider': MjDivider,
'mj-image': MjImage,
'mj-section': MjSection,
'mj-column': MjColumn,
'mj-body': MjBody,
'mj-group' : MjGroup,
'mj-table' : MjTable,
'mj-raw' : MjRaw,
# --- head components ---
'mj-attributes': MjAttributes,
'mj-font': MjFont,
'mj-head': MjHead,
'mj-preview': MjPreview,
'mj-title': MjTitle,
'mj-style': MjStyle,
}
return components
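# Hedged lookup sketch (illustrative): the registry maps MJML tag names to
# component classes so rendering code can resolve tags dynamically:
#
#     components = _components()
#     button_cls = components['mj-button']   # -> MjButton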
| [
"[email protected]"
] | |
cead28b04e4a16444377b87a16a95113cc2f446d | a6a5ae108c06685ce86ba878906bc2c1838454ba | /manage.py | 55445425d91572e8460e0ea47c599b5fcce4ba24 | [] | no_license | williamsko/sutura | bd8e1116492cb3cd5b4df811a3386317236520f2 | 1d21b655107c44703e2259a7770cff6fc75a77ab | refs/heads/main | 2023-08-13T08:35:08.881261 | 2021-09-30T21:41:47 | 2021-09-30T21:41:47 | 355,569,208 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sutura.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0ed0dfe65d95fab0c170fd9efd0fde92b547adc1 | 8f980a143c729693ddf4e90a3efd07511158be08 | /tensorflow/python/debug/wrappers/local_cli_wrapper_test.py | e22f6e783e83bcb81a94c13e510346c3bfe3ec7f | [
"Apache-2.0"
] | permissive | alexwelcing/tensorflow | 0eb3ac0a3a37356198ba9a15ee121aa4d1ef33f5 | 3273cf4f4d18820daf83ba7dd8b4c8a4329a4f01 | refs/heads/master | 2023-02-06T02:41:49.851497 | 2017-05-04T20:55:20 | 2017-05-04T20:55:20 | 90,309,170 | 0 | 0 | Apache-2.0 | 2023-02-03T04:17:41 | 2017-05-04T21:00:19 | C++ | UTF-8 | Python | false | false | 18,101 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self,
command_args_sequence,
sess,
dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_args_sequence: (list of list of str) A list of arguments for the
"run" command.
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_args_sequence = command_args_sequence
self._response_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
command_args = self._command_args_sequence[self._response_pointer]
self._response_pointer += 1
try:
self._run_handler(command_args)
except debugger_cli_common.CommandLineExit as e:
response = e.exit_token
return response
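# Sketch of how the scripted subclass above is driven (session and fetch
# names are hypothetical): each inner list is one set of "run" command
# arguments, consumed in order as if typed into the CLI.
#
#     wrapped = LocalCLIDebuggerWrapperSessionForTest([["-t", "2"], []], sess)
#     wrapped.run(fetch)   # consumes ["-t", "2"] at the run-start CLI
#     wrapped.run(fetch)   # [] is consumed at the run-end CLI after run 2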
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.Variable(10.0, name="v")
self.w = variables.Variable(21.0, name="w")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.w_int = control_flow_ops.with_dependencies(
[self.inc_v],
math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
name="w_int_outer")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sess = session.Session()
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
# Test command sequence: run; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsWithEmptyStringDumpRootWorks(self):
# Test command sequence: run, run
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], []], self.sess, dump_root="")
# run under debug mode.
wrapped_sess.run(self.inc_v)
self.assertAllClose(11.0, self.sess.run(self.v))
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], [], []], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
# Test command sequence: run -n; run -n; run -n;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], ["-n"]], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunsUnderNonDebugThenDebugMode(self):
# Test command sequence: run -n; run -n; run; run;
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-n"], [], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
# Test command sequence: run -t 3; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"], []], self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
# Test command sequence: run -t 3;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
# Test command sequence: run -n; run -t 2; run; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-n"], ["-t", "2"], [], []], self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[], []], self.sess, dump_root=self._tmp_dir)
# Do a run that should lead to an TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRuntimeErrorBeforeGraphExecutionIsRaised(self):
# Use an impossible device name to cause an error before graph execution.
with ops.device("/gpu:1337"):
w = variables.Variable([1.0] * 10, name="w")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[[]], self.sess, dump_root=self._tmp_dir)
with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
wrapped_sess.run(w)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
# Test command sequence:
# run -f greater_than_twelve; run -f greater_than_twelve; run;
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["-f", "v_greater_than_twelve"], ["-f", "v_greater_than_twelve"], []],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsUnderDebugModeWithWatchFnFilteringNodeNames(self):
# Test command sequence:
# run --node_name_filter inc.*
# run --node_name_filter delta
# run
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["--node_name_filter", "inc.*"], ["--node_name_filter", "delta"], []],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringOpTypes(self):
# Test command sequence:
# run --node_name_filter delta
# run --op_type_filter AssignAdd
# run
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["--node_name_filter", "delta"],
["--op_type_filter", "AssignAdd"],
[]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringTensorDTypes(self):
# Test command sequence:
# run --op_type_filter Variable.*
# run --dtype_filter int32
# run
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["--op_type_filter", "Variable.*"],
["--tensor_dtype_filter", "int32"], []],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(2, dumps.size)
self.assertItemsEqual(
["v", "w"], [dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(2, dumps.size)
self.assertEqual(
["w_int_inner", "w_int_outer"],
[dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
def testRunsUnderDebugModeWithWatchFnFilteringOpTypesAndTensorDTypes(self):
# Test command sequence:
# run --op_type_filter Cast --dtype_filter int32
# run
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["--op_type_filter", "Cast", "--tensor_dtype_filter", "int32"], []],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("w_int_inner", dumps.dumped_tensor_data[0].node_name)
if __name__ == "__main__":
googletest.main()
| [
"[email protected]"
] | |
6105554d84433e416576e543c9d8029e5b038601 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_body_61.py | 83de2fb29e48fbb0b61eadc44a78bdfab46ca709 | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.body_61 import Body61
class TestBody61(unittest.TestCase):
""" Body61 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testBody61(self):
"""
Test Body61
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.body_61.Body61()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
f7754d30c7af009e5787a949e16e849192197347 | fde8c89b352076f95cc16e589b1baf18f7befb51 | /tempest/api/volume/admin/v2/test_snapshot_manage.py | 111492428e46f21cf3942f3c59602e1e69b2eb0a | [] | no_license | 571451370/devstack_mitaka | b11145256deab817bcdf60a01a67bb6b2f9ddb52 | 1bdd3f2598f91c1446b85c5b6def7784a2f6ab02 | refs/heads/master | 2020-08-26T12:53:07.482514 | 2017-04-12T01:32:55 | 2017-04-12T01:32:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class SnapshotManageAdminV2Test(base.BaseVolumeAdminTest):
"""Unmanage & manage snapshots
    This feature provides the ability to import/export volume snapshots
    from one Cinder deployment to another and to import snapshots that have
    not been managed by Cinder from a storage back end into Cinder.
"""
@decorators.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
@testtools.skipUnless(CONF.volume_feature_enabled.manage_snapshot,
"Manage snapshot tests are disabled")
def test_unmanage_manage_snapshot(self):
# Create a volume
volume = self.create_volume()
# Create a snapshot
snapshot = self.create_snapshot(volume_id=volume['id'])
# Unmanage the snapshot
# Unmanage snapshot function works almost the same as delete snapshot,
# but it does not delete the snapshot data
self.admin_snapshots_client.unmanage_snapshot(snapshot['id'])
self.admin_snapshots_client.wait_for_resource_deletion(snapshot['id'])
# Fetch snapshot ids
snapshot_list = [
snap['id'] for snap in
self.snapshots_client.list_snapshots()['snapshots']
]
# Verify snapshot does not exist in snapshot list
self.assertNotIn(snapshot['id'], snapshot_list)
# Manage the snapshot
snapshot_ref = '_snapshot-%s' % snapshot['id']
new_snapshot = self.admin_snapshot_manage_client.manage_snapshot(
volume_id=volume['id'],
ref={'source-name': snapshot_ref})['snapshot']
self.addCleanup(self.delete_snapshot,
self.admin_snapshots_client, new_snapshot['id'])
# Wait for the snapshot to be available after manage operation
waiters.wait_for_snapshot_status(self.admin_snapshots_client,
new_snapshot['id'],
'available')
# Verify the managed snapshot has the expected parent volume
self.assertEqual(new_snapshot['volume_id'], volume['id'])
| [
"[email protected]"
] | |
6231e67fb193f1342ac7b6c8ffec4b3a73693b57 | e500ab4de52039625608dbc6273ec1bb9f7b5593 | /user_details/urls.py | a90ecd452544374a39beb0e3d6faf9fb868f2a3f | [] | no_license | AdrianHavengaBennett/hackathon2020team2 | 62cd943ddfe4a7dc5300fe4a103600aa0233c637 | 94c77d3a36935f3245ba9eaa1e69d836c6392ca2 | refs/heads/master | 2021-02-24T10:17:45.441401 | 2020-03-06T14:34:26 | 2020-03-06T14:34:26 | 245,428,511 | 0 | 0 | null | 2020-03-06T13:35:38 | 2020-03-06T13:35:37 | null | UTF-8 | Python | false | false | 893 | py | """user_details URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from get_user_details import views
app_name = "get_user_details"
urlpatterns = [
url(r"^$", views.get_user_details),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
abde780147865cf4ca652871760406dd4091293a | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/scipy-0.18.0-np111py27_nomkl_0/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.py | 32fe9f8c0bebcd638557a82c1b221021da135544 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 14,532 | py | ''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal, run_module_suite)
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
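# Hedged sketch of the helper pair above (in-memory round trip, no disk I/O):
#
#     buf = BytesIO()
#     with make_simple(buf, 'w'):
#         pass
#     with netcdf_file(BytesIO(buf.getvalue())) as f:
#         check_simple(f)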
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
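# Hedged usage sketch: a masked array whose second element is masked passes,
# and a plain ndarray is treated as having an all-False mask:
#
#     a = np.ma.masked_values([1., 9999., 3.], 9999.)
#     assert_mask_matches(a, [False, True, False])
#     assert_mask_matches(np.array([1., 2.]), [False, False])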
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
            # mmap was explicitly disabled above
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
except:
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x',4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
# open again in 'a', read the att and and a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
| [
"[email protected]"
] | |
856d2373425f060429aa63fde2ae8ce926777eec | 888899f0cb3e6e7b28a9de39001a1fd1c177cd35 | /COMPLETE PYTHON-3 COURSE/Chapter-03-IF_ELSE_FOR_WHILE_LOOP/infinite_loop.py | 9e010471ab0611accc49974e87c73a684c37d8ed | [] | no_license | VivakaNand/COMPLETE_PYTHON_3 | ef162d71d3a44bf661fcc1a8aacce31e7953cd7c | b3b835afe7671fdc3d29d912650fd4ccd3bc83f6 | refs/heads/master | 2023-02-04T10:13:41.881939 | 2020-12-23T08:30:51 | 2020-12-23T08:30:51 | 323,839,528 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | # infinite loop
#i = 0
#while i<10:
# print(Hello World)
while True:
print("Hello World")
# to terminate infinite loop use command # Ctrl + c
| [
"[email protected]"
] | |
5fd0337e4783437ab44e5fcf862272a2e3c1070e | aa0270b351402e421631ebc8b51e528448302fab | /sdk/identity/azure-identity/azure/identity/_credentials/device_code.py | 688c44c12f6b04cad5a40b0980d84b109f0cfcad | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 5,815 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
import time
from typing import Dict, Optional, Callable, Any
from azure.core.exceptions import ClientAuthenticationError
from .._constants import DEVELOPER_SIGN_ON_CLIENT_ID
from .._internal import InteractiveCredential, wrap_exceptions
class DeviceCodeCredential(InteractiveCredential):
"""Authenticates users through the device code flow.
When :func:`get_token` is called, this credential acquires a verification URL and code from Azure Active Directory.
A user must browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user
authenticates successfully, the credential receives an access token.
This credential is primarily useful for authenticating a user in an environment without a web browser, such as an
SSH session. If a web browser is available, :class:`~azure.identity.InteractiveBrowserCredential` is more
convenient because it automatically opens a browser to the login page.
:param str client_id: client ID of the application users will authenticate to. When not specified users will
authenticate to an Azure development application.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword str tenant_id: an Azure Active Directory tenant ID. Defaults to the "organizations" tenant, which can
authenticate work or school accounts. **Required for single-tenant applications.**
:keyword int timeout: seconds to wait for the user to authenticate. Defaults to the validity period of the
device code as set by Azure Active Directory, which also prevails when **timeout** is longer.
:keyword prompt_callback: A callback enabling control of how authentication
instructions are presented. Must accept arguments (``verification_uri``, ``user_code``, ``expires_on``):
- ``verification_uri`` (str) the URL the user must visit
- ``user_code`` (str) the code the user must enter there
- ``expires_on`` (datetime.datetime) the UTC time at which the code will expire
If this argument isn't provided, the credential will print instructions to stdout.
:paramtype prompt_callback: Callable[str, str, ~datetime.datetime]
:keyword AuthenticationRecord authentication_record: :class:`AuthenticationRecord` returned by :func:`authenticate`
:keyword bool disable_automatic_authentication: if True, :func:`get_token` will raise
:class:`AuthenticationRequiredError` when user interaction is required to acquire a token. Defaults to False.
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
:keyword bool disable_authority_validation_and_instance_discovery: Determines whether or not instance discovery
is performed when attempting to authenticate. Setting this to true will completely disable instance discovery
and authority validation.
.. admonition:: Example:
.. literalinclude:: ../samples/credential_creation_code_snippets.py
:start-after: [START create_device_code_credential]
:end-before: [END create_device_code_credential]
:language: python
:dedent: 4
:caption: Create a DeviceCodeCredential.
"""
def __init__(
self,
client_id: str = DEVELOPER_SIGN_ON_CLIENT_ID,
*,
timeout: Optional[int] = None,
prompt_callback: Optional[Callable[[str, str, datetime], None]] = None,
**kwargs: Any
) -> None:
self._timeout = timeout
self._prompt_callback = prompt_callback
super(DeviceCodeCredential, self).__init__(client_id=client_id, **kwargs)
@wrap_exceptions
def _request_token(self, *scopes: str, **kwargs: Any) -> Dict:
# MSAL requires scopes be a list
scopes = list(scopes) # type: ignore
app = self._get_app(**kwargs)
flow = app.initiate_device_flow(scopes)
if "error" in flow:
raise ClientAuthenticationError(
message="Couldn't begin authentication: {}".format(flow.get("error_description") or flow.get("error"))
)
if self._prompt_callback:
self._prompt_callback(
flow["verification_uri"], flow["user_code"], datetime.utcfromtimestamp(flow["expires_at"])
)
else:
print(flow["message"])
if self._timeout is not None and self._timeout < flow["expires_in"]:
# user specified an effective timeout we will observe
deadline = int(time.time()) + self._timeout
result = app.acquire_token_by_device_flow(
flow, exit_condition=lambda flow: time.time() > deadline, claims_challenge=kwargs.get("claims")
)
else:
# MSAL will stop polling when the device code expires
result = app.acquire_token_by_device_flow(flow, claims_challenge=kwargs.get("claims"))
# raise for a timeout here because the error is particular to this class
if "access_token" not in result and result.get("error") == "authorization_pending":
raise ClientAuthenticationError(message="Timed out waiting for user to authenticate")
# base class will raise for other errors
return result
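# Hedged usage sketch (scope and callback are illustrative; completing the
# flow requires a real user to open the URL and enter the code):
#
#     def show(verification_uri, user_code, expires_on):
#         print("Go to %s and enter %s" % (verification_uri, user_code))
#
#     credential = DeviceCodeCredential(prompt_callback=show)
#     token = credential.get_token("https://graph.microsoft.com/.default")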
| [
"[email protected]"
] | |
35dbc8417982370fd70de25f449e08b98d2b5632 | 0ba2c3776618b5b8b76f4a23f21e9c6ad3f6e2e1 | /afterclass/homework3/6.1.py | fc73fce3ae1c98fb5f0ed793b9585b6b9b1ba543 | [] | no_license | WangDongDong1234/python_code | 6dc5ce8210b1dcad7d57320c9e1946fd4b3fe302 | 6a785306a92d328a0d1427446ca773a9803d4cc0 | refs/heads/master | 2020-04-15T12:35:03.427589 | 2019-09-16T15:38:25 | 2019-09-16T15:38:25 | 164,681,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | while 1:
try:
str=input()
array=[int(i) for i in str.strip().split(" ")]
list=array[1::]
result=[]
for i in range(array[0]):
result.append(0)
for i in range(array[0]):
for j in range(array[0]):
if list[i]>list[j]:
result[i]+=1
print_result=[]
for i in range(0,array[0]):
for j in range(0,len(result)):
if i==result[j]:
print_result.append(list[j])
for i in range(0,len(print_result)):
if i==0:
print(print_result[0],end="")
else:
print("",print_result[i],end="")
print()
except EOFError:
break
| [
"[email protected]"
] | |
8dc1e96652aaf00ffc9d44fcce57038bec5ae2c1 | 06f56c201e3988bdf4add289a84c2d6d9ceaa6bb | /build/lib/test/test_inline_response_200.py | a996719676e83d5c36552f7bd24d02faeb054478 | [] | no_license | kevinlansel/yanport-client | 8534311c7ae9fb3a78689d7b348eb56c49aab224 | 8bea6468c12e7789a6740d590db45f98d555db4a | refs/heads/master | 2021-09-24T16:09:47.126776 | 2018-10-11T15:51:35 | 2018-10-11T15:51:35 | 110,860,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | # coding: utf-8
"""
Yanport API
    ## About  Here are a few resources to help you explore our API; if you run into problems or have requests, please contact [support](mailto:[email protected]).  ## REST API  Our web services use the **HTTPS** protocol on port 443; all HTTP access on port 80 is blocked by our firewall. Data is exchanged as **JSON**.  ## Authentication  Before you start exploring our API, you must [contact us](https://www.yanport.com/contact) to obtain a [JSON Web Token](https://jwt.io) (**JWT**) that identifies you on every request.  ### JWT (header)  The preferred way to authenticate is to pass the token on every request in the header `Authorization: Bearer {{ JWT }}`, replacing `{{ JWT }}` with your token.  ### JWT (query param)  It is also possible to pass the token directly as a query param of the request `https://api.yanport.com/?token={{ JWT }}`. (_Prefer the header approach in production._)  ## Getting started  Once you have your authentication token, you can start exploring our API with the `Try it out` button on each of our web services. Beforehand, you must authenticate by clicking the `Authorize` button and filling in the `api_key` input with `Bearer {{ JWT }}`. **Example** `Bearer eyUEkiLCJh...CHCUiBfD63oxoo=`  ## Limitation  All requests to our API are logged, which is why you must respect our [CGU](https://dev.yanport.com/cgu) (terms of use) to avoid any abusive use of our API.
OpenAPI spec version: 1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.inline_response_200 import InlineResponse200
class TestInlineResponse200(unittest.TestCase):
""" InlineResponse200 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse200(self):
"""
Test InlineResponse200
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.inline_response_200.InlineResponse200()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6908c575ccde591afe9caf7d459b020ca4b7f5b9 | 56ca0c81e6f8f984737f57c43ad8d44a84f0e6cf | /src/api_v1/serializers/openaccess.py | aada052fbc6179eb4bd84c660136bfb111f92683 | [
"MIT"
] | permissive | iplweb/bpp | c40f64c78c0da9f21c1bd5cf35d56274a491f840 | a3d36a8d76733a479e6b580ba6ea57034574e14a | refs/heads/dev | 2023-08-09T22:10:49.509079 | 2023-07-25T04:55:54 | 2023-07-25T04:55:54 | 87,017,024 | 2 | 0 | NOASSERTION | 2023-03-04T04:02:36 | 2017-04-02T21:22:20 | Python | UTF-8 | Python | false | false | 288 | py | from rest_framework import serializers
from bpp.models import Czas_Udostepnienia_OpenAccess
class Czas_Udostepnienia_OpenAccess_Serializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Czas_Udostepnienia_OpenAccess
fields = ["id", "nazwa", "skrot"]
| [
"[email protected]"
] | |
5a8427de6fdcde4bce96df3cfac1ad3cec39ace3 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil1203.py | fafeb57574e59bf7a05a7e9fb1487768b3914af1 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | # qubit number=5
# total number=51
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=31
prog += CZ(1,0) # number=32
prog += H(0) # number=33
prog += H(1) # number=44
prog += CZ(0,1) # number=45
prog += H(1) # number=46
prog += X(1) # number=41
prog += H(1) # number=48
prog += CZ(0,1) # number=49
prog += H(1) # number=50
prog += X(0) # number=26
prog += CNOT(1,0) # number=27
prog += H(1) # number=37
prog += CZ(0,1) # number=38
prog += H(1) # number=39
prog += X(1) # number=35
prog += CNOT(0,1) # number=36
prog += X(2) # number=11
prog += X(3) # number=12
prog += CNOT(3,2) # number=43
prog += CNOT(3,2) # number=47
prog += X(0) # number=13
prog += CNOT(0,1) # number=22
prog += X(1) # number=23
prog += CNOT(0,1) # number=24
prog += X(2) # number=15
prog += X(1) # number=29
prog += Y(4) # number=28
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
    """Count how many times each measured bitstring occurs."""
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
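# Note: the tally above is equivalent to collections.Counter(bitstrings); the
# explicit loop is kept so the generated benchmark needs no extra imports.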
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1203.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
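# The CSV will contain a single dict literal such as {'00000': 210, '00001': 198, ...}
# (hypothetical counts) mapping each measured 5-qubit bitstring to its frequency.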
| [
"[email protected]"
] | |
11feff481cc0b103cd744e5ddaa43c27c9b9557e | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_auditlogs_request.py | a1697211afe96dd24be2581077e2977269f456d9 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,513 | py | # coding: utf-8
import pprint
import re
import six
class ListAuditlogsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'node_id': 'str',
'start_time': 'str',
'end_time': 'str',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'node_id': 'node_id',
'start_time': 'start_time',
'end_time': 'end_time',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, x_language=None, instance_id=None, node_id=None, start_time=None, end_time=None, offset=None, limit=None):
"""ListAuditlogsRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._instance_id = None
self._node_id = None
self._start_time = None
self._end_time = None
self._offset = None
self._limit = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
if node_id is not None:
self.node_id = node_id
self.start_time = start_time
self.end_time = end_time
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListAuditlogsRequest.
:return: The x_language of this ListAuditlogsRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListAuditlogsRequest.
:param x_language: The x_language of this ListAuditlogsRequest.
:type: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListAuditlogsRequest.
:return: The instance_id of this ListAuditlogsRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAuditlogsRequest.
:param instance_id: The instance_id of this ListAuditlogsRequest.
:type: str
"""
self._instance_id = instance_id
@property
def node_id(self):
"""Gets the node_id of this ListAuditlogsRequest.
:return: The node_id of this ListAuditlogsRequest.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""Sets the node_id of this ListAuditlogsRequest.
:param node_id: The node_id of this ListAuditlogsRequest.
:type: str
"""
self._node_id = node_id
@property
def start_time(self):
"""Gets the start_time of this ListAuditlogsRequest.
:return: The start_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListAuditlogsRequest.
:param start_time: The start_time of this ListAuditlogsRequest.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ListAuditlogsRequest.
:return: The end_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListAuditlogsRequest.
:param end_time: The end_time of this ListAuditlogsRequest.
:type: str
"""
self._end_time = end_time
@property
def offset(self):
"""Gets the offset of this ListAuditlogsRequest.
:return: The offset of this ListAuditlogsRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListAuditlogsRequest.
:param offset: The offset of this ListAuditlogsRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListAuditlogsRequest.
:return: The limit of this ListAuditlogsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAuditlogsRequest.
:param limit: The limit of this ListAuditlogsRequest.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAuditlogsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
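# Minimal usage sketch (not part of the generated SDK file; the instance ID and
# time window below are hypothetical placeholders in the format the DDS API expects):
# req = ListAuditlogsRequest(instance_id="your-dds-instance-id",
#                            start_time="2021-03-01T00:00:00+0800",
#                            end_time="2021-03-02T00:00:00+0800", limit=10)
# print(req.to_str())  # renders the request as a readable dict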
| [
"[email protected]"
] | |
229367ad40fc730f2e52dd2f23c9e2967bc956e6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /b8wRDMWgMZTN2nmfx_12.py | d816d604f90e8b6c23df10170ce6f72b2ee90835 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
def equal(a, b, c):
eq = 0
if a == b and a == c and b == c:
eq = 3
elif a == b or a == c or b == c:
eq = 2
return eq
| [
"[email protected]"
] | |
e5b009d8dc00d9b2eaef28128c03f56b450a9b67 | 20cea1f3ba7e5b35246b169eb156414a4cac2f98 | /exercise_motion_planning/cde-package/cde-root/usr/lib/python2.7/dist-packages/IPython/utils/decorators.py | 7225df3853c99bf123acab16222b9156ca6eb1ff | [] | no_license | jenniferdavid/summer_school_jul14 | 3a395b119aa79a2c31a1c376cfc31b2eee271850 | d1e29e41f55ed69a0e4311946381984e12eacd8a | refs/heads/master | 2021-05-04T09:43:40.169931 | 2018-01-04T14:29:26 | 2018-01-04T14:29:26 | 36,803,580 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | ../../../../../share/pyshared/IPython/utils/decorators.py | [
"[email protected]"
] | |
9af273cac076fed3d2bb3147957deeebc7485900 | c6dc8b682aea706b18b05952f791e01989db3669 | /Programiz/checkStringIsPalindrome.py | 4d9854af2259cf67461730ef2b277c103a7f238e | [] | no_license | LizaPersonal/personal_exercises | aeb9ceb2593a6d5ee1a8e9f7c0862ce638acd29b | 649dc0c116861995fbf58b4736a0c66fd75d648c | refs/heads/master | 2021-04-03T02:17:51.850676 | 2018-07-31T21:10:59 | 2018-07-31T21:10:59 | 125,123,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Program to check if a string is palindrome or not
# change this value for a different output
my_str = 'aIbohPhoBiA'
# make it suitable for caseless comparison
my_str = my_str.casefold()
# reverse the string
rev_str = reversed(my_str)
# check if the string is equal to its reverse
if list(my_str) == list(rev_str):
print("It is palindrome")
else:
print("It is not palindrome")
| [
"[email protected]"
] | |
4a8b858b417a2c65f955b1ce76dfdf44fba59bea | 8629f82f971f4e036c2b6358fe353a2c88bfd098 | /BConverters/AnnotationConverters.py | 76e538d13ae67693278507c13db362dc14a696cc | [
"MIT"
] | permissive | mahajrod/MAVR | 92828fa1c191b5f8ed08f1ba33f1684df09742cd | 8c57ff5519f130357e36e6f12868bc997e52a8a7 | refs/heads/master | 2023-08-25T01:02:24.738724 | 2023-08-22T15:13:39 | 2023-08-22T15:13:39 | 21,181,911 | 11 | 6 | null | 2017-09-18T20:25:16 | 2014-06-24T21:45:57 | Python | UTF-8 | Python | false | false | 635 | py | __author__ = 'mahajrod'
import os
from BCBio import GFF
class AnnotationConverters:
"
@staticmethod
def gff22gff3(input_file, output_file, target_lines=100000):
in_fd = open(input_file, "r")
out_fd = open(output_file, "w")
GFF.write(GFF.parse(in_fd, target_lines=target_lines), out_fd)
in_fd.close()
out_fd.close()
"
@staticmethod
def gff32gtf(input_file, output_file):
os.system("gffread %s -T -o %s" % (input_file, output_file))
@staticmethod
def gtf2gff3(input_file, output_file):
os.system("gffread %s -o %s" % (input_file, output_file))
| [
"[email protected]"
] | |
a510a15dfe373e469b66e9435f40c354e16b56cf | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res_bw/scripts/common/lib/encodings/cp1026.py | c4de9dd6738fbafdea4d05746feed1a2dabe7703 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,269 | py | # 2016.02.14 12:48:01 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/encodings/cp1026.py
""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors = 'strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors = 'strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final = False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final = False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table)
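# Usage sketch (assumes the codec is registered, as it is when this module sits
# in the standard `encodings` package): u'abc'.encode('cp1026')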
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\cp1026.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:48:01 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
1fe21bfb31d89042f71c3414c4e127daaeb2dadb | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/leccion_20210910224916.py | 890f138fa4238e95326e91bb5a1829aaf353de86 | [] | no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from datetime import date
class Calculos:
def antiguedad(self,fecha):
hoy=date.today()
if hoy<fecha:
return -1
else:
anio=fecha.year
mes=fecha.month
dia=fecha.day
            # the original loop never advanced fecha, so it ran forever;
            # compute the completed years directly instead
            aa=hoy.year-anio
            if (hoy.month,hoy.day)<(mes,dia):
                aa=aa-1
            return aa
cal = Calculos()
print(cal.antiguedad(date(1971, 6, 9)))
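# With the corrected computation above this prints the completed years since
# 1971-06-09 (e.g. 50 when run in late 2021).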
| [
"[email protected]"
] | |
aa3ae29b2d0ea43a8ec9e447b892f19baf331b19 | 258b656d1b6864726864f89d4c8dc38fc633a48f | /odoo_addons_customization/lending_import_kairos/models/lending_kairos_line.py | 319e3707c1efd5689559ff6c6ea912bea29179ab | [] | no_license | test-odoorosario/opt | c17e1c1767710ca8e13a799644fb85b07e83639b | 77921b4d965f2e4c081d523b373eb306a450a873 | refs/heads/master | 2022-12-02T04:36:04.685119 | 2019-07-11T17:17:20 | 2019-07-11T17:17:20 | 196,436,293 | 0 | 1 | null | 2022-11-22T00:30:40 | 2019-07-11T17:13:21 | Python | UTF-8 | Python | false | false | 2,130 | py | # - coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class LendingKairosLine(models.Model):
_name = 'lending.kairos.line'
lending_id = fields.Many2one(
comodel_name='lending',
string='Medicamento'
)
code = fields.Char(
related='lending_id.code',
string='Codigo'
)
name = fields.Char(
related='lending_id.name',
string='Descripcion'
)
description_drug = fields.Char(
related='lending_id.description_drug',
string='Principio activo'
)
description_laboratory = fields.Char(
related='lending_id.description_laboratory',
string='Laboratorio'
)
description_presentation = fields.Char(
related='lending_id.description_presentation',
string='Gramaje y presentación'
)
description_product = fields.Char(
related='lending_id.description_product',
string='Marca comercial'
)
value = fields.Float(
string='Valor',
digits=(12, 6)
)
date = fields.Date(
string='Fecha de vigencia'
)
value_line_ids = fields.One2many(
'lending.kairos.value.line',
'kairos_id',
string="Valores"
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
04555d417f5d237a3e8c6bf9da7c8825b5cf8789 | 2ac01e5646032561a45f1c3283297dad58d4f5f0 | /broker/libs/gdrive.py | f9c9cf2a50e0739082a8808f5ffdf2c05dfab995 | [
"MIT"
] | permissive | avatar-lavventura/ebloc-broker | 4f31c3135b7a3cd44a00f5c8a47fa38eeffe7fda | 3501ac4bb958f73c4e5612f647f33af11db0360a | refs/heads/master | 2023-07-22T11:48:03.159093 | 2023-06-18T17:53:27 | 2023-06-18T17:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,758 | py | #!/usr/bin/env python3
import json
import os
import shutil
import subprocess
from contextlib import suppress
from pathlib import Path
from broker._utils._log import br, ok
from broker._utils.tools import _remove, mkdir, read_json
from broker.config import env
from broker.lib import echo_grep_awk, run, subprocess_call
from broker.utils import byte_to_mb, compress_folder, dump_dict_to_file, is_program_valid, log, print_tb
def refresh_gdrive_token():
with open(Path.home() / ".gdrive" / "token_v2.json", "r") as f:
output = json.load(f)
log("#> Running: gdrive about --refresh-token <id>", h=False)
run(["gdrive", "about", "--refresh-token", output["refresh_token"]])
def check_gdrive():
"""Check whether `gdrive about` returns a valid output."""
is_program_valid(["gdrive", "version"])
try:
output = run(["gdrive", "about"])
    except Exception:
refresh_gdrive_token()
output = run(["gdrive", "about"]) # re-try
return output
def check_gdrive_about(given_user):
output = check_gdrive()
user = output.partition("\n")[0].split(", ")[1]
return user == given_user, user
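# Example (hypothetical CLI output): if the first line of `gdrive about` is
# "User: Jane Doe, [email protected]", then check_gdrive_about("[email protected]")
# returns (True, "[email protected]").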
def submit(_from, job):
try:
job.check_account_status(_from)
# job.Ebb.is_provider_valid(provider)
job.Ebb.is_requester_valid(_from)
except Exception as e:
raise e
folder_ids_to_share = []
data_files_json_path = f"{job.tmp_dir}/meta_data.json"
try:
if len(job.folders_to_share) > 1:
for folder_to_share in job.folders_to_share[1:]:
if not isinstance(folder_to_share, bytes):
# starting from the first data file ignoring source_folder
# attempting to share the data folder
folder_key, tar_hash, job.tar_hashes = upload_folder(folder_to_share, job.tmp_dir)
folder_ids_to_share.append(folder_key) # record keys to share at end
job.foldername_tar_hash[folder_to_share] = tar_hash
job.keys[tar_hash] = folder_key
if job.tmp_dir == "":
# print_tb("job.tmp_dir is empty")
raise Exception("'job.tmp_dir' is empty")
_dump_dict_to_file(data_files_json_path, job.keys)
data_json = read_json(data_files_json_path)
if data_json:
log("## meta_data:")
log(data_json)
with suppress(Exception):
data_json = read_json(data_files_json_path)
if job.keys == data_json:
log(f"## meta_data.json file matches with the given data keys {ok()}")
else:
log("warning: meta_data.json file does not match with the given data keys")
folder_to_share = job.folders_to_share[0]
if not isinstance(folder_to_share, bytes):
folder_key, tar_hash, job.tar_hashes = upload_folder(folder_to_share, job.tmp_dir, folder_key_flag=True)
folder_ids_to_share.append(folder_key) # record keys to share at end
job.foldername_tar_hash[folder_to_share] = tar_hash
# add an element to the beginning of the dict since Python
# 3.7. dictionaries are now ordered by insertion order.
job.keys_final[tar_hash] = folder_key
job.keys_final.update(job.keys)
job.keys = job.keys_final
return job, folder_ids_to_share
except Exception as e:
print_tb(e)
raise e
finally:
_dump_dict_to_file(data_files_json_path, job.keys)
data_json = read_json(data_files_json_path)
if data_json:
log("## meta_data:")
log(data_json)
_id = None
for *_, v in data_json.items():
_id = v
break
if _id:
log("## updating meta_data ", end="")
update_meta_data_gdrive(_id, data_files_json_path)
log(ok())
def upload_folder(folder_to_share, tmp_dir, folder_key_flag=False):
log(f"## folder_to_share={folder_to_share}")
key, *_, tar_hash, tar_hashes = upload(folder_to_share, tmp_dir, folder_key_flag)
return key, tar_hash, tar_hashes
def upload(folder_to_share, tmp_dir, is_source_code=False):
tar_hashes = {}
is_already_uploaded = False
log(f"==> is_source_code={is_source_code} | tar.gz file is inside the base folder")
dir_path = os.path.dirname(folder_to_share)
tar_hash, _ = compress_folder(folder_to_share, is_exclude_git=True)
tar_hashes[folder_to_share] = tar_hash
path_to_move = f"{dir_path}/{tar_hash}"
_from = f"{dir_path}/{tar_hash}.tar.gz"
_to = f"{path_to_move}/{tar_hash}.tar.gz"
mkdir(path_to_move)
shutil.move(_from, _to)
if is_source_code:
shutil.copyfile(f"{tmp_dir}/meta_data.json", f"{path_to_move}/meta_data.json")
is_file_exist = _list(tar_hash, is_folder=True)
if is_file_exist:
log(f"## requested folder {tar_hash} is already uploaded", "blue")
log(is_file_exist, "bg")
key = is_file_exist.partition("\n")[0].split()[0]
is_already_uploaded = True
else:
key = _upload(dir_path, tar_hash, is_folder=True)
log(f"{_list(tar_hash)}", "bg")
_remove(f"{dir_path}/{tar_hash}") # created .tar.gz file is removed
return key, is_already_uploaded, tar_hash, tar_hashes
def delete_all(_type="all"):
"""Delete all created files and folder within the gdrive."""
if _type == "dir":
for line in list_all("dir").splitlines():
try:
run(["gdrive", "delete", "--recursive", line.split()[0]])
except:
pass
else:
for line in list_all().splitlines():
if " dir " not in line: # first remove files
try:
run(["gdrive", "delete", line.split()[0]])
except Exception as e:
log(f"E: [g]{e}")
for line in list_all().splitlines():
if " dir " in line:
try:
log(f"Attempt to delete dir: {line.split()[0]} ", end="", h=False)
output = run(["/usr/local/bin/gdrive", "delete", "--recursive", line.split()[0]])
print(output)
except Exception as e:
if str(e) != "":
log(f"E: [g]{e}")
# else:
# with suppress(Exception):
# run(["gdrive", "delete", line.split()[0]])
def list_all(_type="all"):
if _type == "dir":
_lines = ""
lines = run(["gdrive", "list", "--no-header"])
for line in lines.splitlines():
if " dir " in line:
_lines += f"{line}\n"
# breakpoint() # DEBUG
return _lines[:-1]
else:
lines = run(["gdrive", "list", "--no-header"])
return lines
def _list(tar_hash, is_folder=False):
r"""Query list from gdrive.
cmd: run(['gdrive', 'list', '--query', 'name contains \'' + tar_hash + '.tar.gz' + '\'', '--no-header'])
__https://developers.google.com/drive/api/v3/reference/query-ref
"""
if is_folder:
fn = f"name='{tar_hash}'"
else:
fn = f"name='{tar_hash}.tar.gz'"
return run(
[
"gdrive",
"list",
"--query",
f"{fn} and trashed=false",
"--no-header",
]
)
def _upload(dir_path, tar_hash, is_folder=False):
if is_folder:
subprocess.run(["gdrive", "upload", "--recursive", f"{dir_path}/{tar_hash}"], check=True)
output = (
subprocess.check_output(
["gdrive", "list", "--query", f"name='{tar_hash}' and trashed=false", "--no-header"]
)
.decode("utf-8")
.strip()
)
else:
# subprocess.run(['mv', folderToShare + '.tar.gz', tar_hash + '.tar.gz'])
file_name_to_upload = f"{tar_hash}.tar.gz"
tar_file_path = f"{dir_path}/{file_name_to_upload}"
subprocess.run(["gdrive", "upload", tar_file_path], check=True)
_remove(tar_file_path)
output = (
subprocess.check_output(
[
"gdrive",
"list",
"--query",
f"name='{file_name_to_upload}' and trashed=false",
"--no-header",
]
)
.decode("utf-8")
.strip()
)
# cmd = ['gdrive', 'list', '--query', 'name contains \'' + tar_hash + '.tar.gz' + '\'', '--no-header']
# output = subprocess.check_output(cmd).decode('utf-8').strip()
return output.split(" ", maxsplit=1)[0]
def get_file_info(gdrive_info, _type):
return echo_grep_awk(gdrive_info, _type, "2")
def get_file_id(key):
return run(["gdrive", "list", "--query", f"'{key}' in parents and trashed=false"])
def get_data_key_ids(results_folder_prev) -> bool:
fn = f"{results_folder_prev}/meta_data.json"
log(f"==> meta_data_path={fn}")
try:
meta_data = read_json(fn)
except Exception as e:
print_tb(e)
return meta_data
def update_meta_data_gdrive(key, path):
output = get_file_id(key)
meta_data_key = fetch_grive_output(output, "meta_data.json")
log(f"\n\t`gdrive update {meta_data_key} {path}`", h=False, end="")
run(["gdrive", "update", meta_data_key, path])
def fetch_grive_output(output, key):
for line in output.split("\n"):
if key in line:
return line.split(" ")[0]
raise Exception(f"gdrive: given key={key} does not exist")
def parse_gdrive_info(gdrive_info):
try:
_dict = {}
for line in gdrive_info.splitlines():
line = line.replace(" ", "")
output = line.split(":")
if output[0] not in ["DownloadUrl", "ViewUrl"]:
_dict[output[0]] = output[1]
log(_dict)
except:
log(gdrive_info, "yellow")
def size(key, mime_type, folder_name, gdrive_info, results_folder_prev, code_hashes, is_cached):
source_code_key = None
size_to_download = 0
if "folder" not in mime_type:
raise Exception
try:
output = get_file_id(key)
log(f"==> data_id=[m]{key}")
log(output, "green")
data_files_id = fetch_grive_output(output, "meta_data.json")
if not data_files_id:
raise Exception
# key for the source_code elimination output*.tar.gz files
source_code_key = fetch_grive_output(output, f"{folder_name}.tar.gz")
cmd = [
"gdrive",
"download",
"--recursive",
data_files_id, # first id is meta_data
"--force",
"--path",
results_folder_prev,
]
output = subprocess_call(cmd, 10)
print(output)
cmd = [
"gdrive",
"info",
"--bytes",
source_code_key,
"-c",
env.GDRIVE_METADATA,
]
gdrive_info = subprocess_call(cmd, 10)
except Exception as e:
print_tb(e)
# TODO: gdrive list --query "sharedWithMe"
raise e
md5sum = get_file_info(gdrive_info, "Md5sum")
_source_code_hash = code_hashes[0].decode("utf-8")
if md5sum != _source_code_hash:
# checks md5sum obtained from gdrive and given by the user
raise Exception(f"md5sum does not match with the provided data {source_code_key}")
log(f":beer: folder={md5sum}", "bg")
byte_size = int(get_file_info(gdrive_info, "Size"))
log(f"## code_hashes[0] == {_source_code_hash} | size={byte_size} bytes")
if not is_cached[code_hashes[0].decode("utf-8")]:
size_to_download += byte_size
try:
meta_data = get_data_key_ids(results_folder_prev)
except Exception as e:
raise e
data_key_dict = {}
if len(meta_data.items()) > 1:
idx = 0
for k, v in meta_data.items():
if idx == 0: # first item is for the source-code itself
_key = str(v)
output = get_file_id(_key)
data_key = fetch_grive_output(output, f"{k}.tar.gz")
cmd = ["gdrive", "info", "--bytes", data_key, "-c", env.GDRIVE_METADATA]
gdrive_info = subprocess_call(cmd, 10)
log(f" * gdrive_info for [g]{k}[/g]:")
parse_gdrive_info(gdrive_info)
idx += 1
else: # should start from the first index
try:
_key = str(v)
output = get_file_id(_key)
data_key = fetch_grive_output(output, f"{k}.tar.gz")
cmd = ["gdrive", "info", "--bytes", data_key, "-c", env.GDRIVE_METADATA]
gdrive_info = subprocess_call(cmd, 10)
except Exception as e:
raise e
md5sum = get_file_info(gdrive_info, _type="Md5sum")
log(f" * gdrive_info for [g]{k}[/g]:")
parse_gdrive_info(gdrive_info)
given_code_hash = code_hashes[idx].decode("utf-8")
log(f"==> given_code_hash={given_code_hash} idx={idx}")
if md5sum != given_code_hash:
# checks md5sum obtained from gdrive and given by the user
raise Exception(
f"md5sum does not match with the provided data{br(idx)}\n"
f"md5sum={md5sum} | given={given_code_hash}"
)
data_key_dict[md5sum] = data_key
_size = int(get_file_info(gdrive_info, "Size"))
log(
f"==> code_hashes{br(idx)} == {code_hashes[idx].decode('utf-8')} size={_size} bytes ~= {byte_to_mb(_size)} MB"
)
byte_size += _size
if not is_cached[code_hashes[idx].decode("utf-8")]:
size_to_download += _size
if bool(data_key_dict):
data_link_file = f"{results_folder_prev}/meta_data_link.json"
with open(data_link_file, "w") as f:
json.dump(data_key_dict, f)
else:
raise Exception("Something is wrong; data_key_dict is empty")
output = byte_to_mb(size_to_download)
log(f"total_size={byte_size} bytes | size to download={size_to_download} bytes ~= {output} MB")
return output, data_key_dict, source_code_key
def _dump_dict_to_file(fn, folder_keys):
try:
log("==> meta_data.json file is updated in the parent folder")
dump_dict_to_file(fn, folder_keys)
except Exception as e:
print_tb(e)
raise e
| [
"[email protected]"
] | |
f90310a5c43477d44f9d152349a9a72b164b204a | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/ws66i/media_player.py | 1101c0c9fbc8c9e1a5adc29af5766489e54fae72 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 6,345 | py | """Support for interfacing with WS66i 6 zone home audio controller."""
from pyws66i import WS66i, ZoneStatus
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
MediaPlayerState,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MAX_VOL
from .coordinator import Ws66iDataUpdateCoordinator
from .models import Ws66iData
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the WS66i 6-zone amplifier platform from a config entry."""
ws66i_data: Ws66iData = hass.data[DOMAIN][config_entry.entry_id]
# Build and add the entities from the data class
async_add_entities(
Ws66iZone(
device=ws66i_data.device,
ws66i_data=ws66i_data,
entry_id=config_entry.entry_id,
zone_id=zone_id,
data_idx=idx,
coordinator=ws66i_data.coordinator,
)
for idx, zone_id in enumerate(ws66i_data.zones)
)
class Ws66iZone(CoordinatorEntity[Ws66iDataUpdateCoordinator], MediaPlayerEntity):
"""Representation of a WS66i amplifier zone."""
def __init__(
self,
device: WS66i,
ws66i_data: Ws66iData,
entry_id: str,
zone_id: int,
data_idx: int,
coordinator: Ws66iDataUpdateCoordinator,
) -> None:
"""Initialize a zone entity."""
super().__init__(coordinator)
self._ws66i: WS66i = device
self._ws66i_data: Ws66iData = ws66i_data
self._zone_id: int = zone_id
self._zone_id_idx: int = data_idx
self._status: ZoneStatus = coordinator.data[data_idx]
self._attr_source_list = ws66i_data.sources.name_list
self._attr_unique_id = f"{entry_id}_{self._zone_id}"
self._attr_name = f"Zone {self._zone_id}"
self._attr_supported_features = (
MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_SET
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, str(self.unique_id))},
name=self.name,
manufacturer="Soundavo",
model="WS66i 6-Zone Amplifier",
)
self._set_attrs_from_status()
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
# This will be called for each of the entities after the coordinator
# finishes executing _async_update_data()
# Save a reference to the zone status that this entity represents
self._status = self.coordinator.data[self._zone_id_idx]
self._set_attrs_from_status()
# Parent will notify HA of the update
super()._handle_coordinator_update()
@callback
def _set_attrs_from_status(self) -> None:
status = self._status
sources = self._ws66i_data.sources.id_name
self._attr_state = MediaPlayerState.ON if status.power else MediaPlayerState.OFF
self._attr_volume_level = status.volume / float(MAX_VOL)
self._attr_is_volume_muted = status.mute
self._attr_source = self._attr_media_title = sources[status.source]
@callback
def _async_update_attrs_write_ha_state(self) -> None:
self._set_attrs_from_status()
self.async_write_ha_state()
async def async_select_source(self, source: str) -> None:
"""Set input source."""
idx = self._ws66i_data.sources.name_id[source]
await self.hass.async_add_executor_job(
self._ws66i.set_source, self._zone_id, idx
)
self._status.source = idx
self._async_update_attrs_write_ha_state()
async def async_turn_on(self) -> None:
"""Turn the media player on."""
await self.hass.async_add_executor_job(
self._ws66i.set_power, self._zone_id, True
)
self._status.power = True
self._async_update_attrs_write_ha_state()
async def async_turn_off(self) -> None:
"""Turn the media player off."""
await self.hass.async_add_executor_job(
self._ws66i.set_power, self._zone_id, False
)
self._status.power = False
self._async_update_attrs_write_ha_state()
async def async_mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
await self.hass.async_add_executor_job(
self._ws66i.set_mute, self._zone_id, mute
)
self._status.mute = bool(mute)
self._async_update_attrs_write_ha_state()
async def async_set_volume_level(self, volume: float) -> None:
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(self._set_volume, int(volume * MAX_VOL))
self._async_update_attrs_write_ha_state()
async def async_volume_up(self) -> None:
"""Volume up the media player."""
await self.hass.async_add_executor_job(
self._set_volume, min(self._status.volume + 1, MAX_VOL)
)
self._async_update_attrs_write_ha_state()
async def async_volume_down(self) -> None:
"""Volume down media player."""
await self.hass.async_add_executor_job(
self._set_volume, max(self._status.volume - 1, 0)
)
self._async_update_attrs_write_ha_state()
def _set_volume(self, volume: int) -> None:
"""Set the volume of the media player."""
# Can't set a new volume level when this zone is muted.
# Follow behavior of keypads, where zone is unmuted when volume changes.
if self._status.mute:
self._ws66i.set_mute(self._zone_id, False)
self._status.mute = False
self._ws66i.set_volume(self._zone_id, volume)
self._status.volume = volume
| [
"[email protected]"
] | |
757b8887c2b25b7bcfb6e1b864e4c50f6c00ff28 | da29f1f5b4459fbfec968bb694bedb9586f87b14 | /new_algs/Sequence+algorithms/Selection+algorithm/version13.py | 7b3e88334e262afc2d481e41273eef3d0a0a5617 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | coolsnake/JupyterNotebook | 547806a45a663f090f313dc3e70f779ad9b213c0 | 20d8df6172906337f81583dabb841d66b8f31857 | refs/heads/master | 2023-01-13T18:55:38.615312 | 2020-11-17T22:55:12 | 2020-11-17T22:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,000 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 13:53:42 2018
@author: wmy
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter import messagebox
from PIL import Image, ImageTk
from sklearn.cluster import DBSCAN
import operator
np.random.seed(1)
# std colors
std_colors = {'DG0546':[27.818, -8.375, 8.282],
'DG0543':[27.818, -5.331, 10.061],
'DG0647':[30.4, -5.075, 5.519],
'DG0642':[30.4, -7.885, 8.605],
'DG0648':[30.4, -5.77, 11.405],
'DG0746':[32.963, -7.188, 8.464],
'DG0744':[32.963, -6.138, 12.688],
'DG0750':[32.963, -8.569, 15.102],
'MG0851':[35.538, -11.055, 12.566],
'MG0841':[35.538, -7.952, 14.866],
'MG0850':[35.538, -9.978, 17.898],
'MG1050':[38.146, -14.61, 17.02],
'MG1048':[38.146, -10.562, 18.522],
'MG1053':[38.146, -11.449, 22.008],
'MG1151':[40.703, -15.986, 20.351],
'MG1146':[40.703, -11.777, 22.669],
'MG1148':[40.703, -13.409, 25.373],
'MG1366':[43.316, -19.213, 25.02],
'MG1350':[43.316, -13.546, 27.435],
'MG1348':[43.316, -15.355, 31.017],
'BG1548':[45.88, -13.519, 27.439],
'BG1542':[45.88, -15.449, 30.991],
'BG1544':[45.88, -16.916, 34.674],
'BG1748':[48.448, -15.484, 30.905],
'BG1743':[48.448, -16.958, 34.49],
'BG1952':[50.978, -16.624, 34.363],
'BG1955':[50.978, -13.52, 35.343],
'BE0920':[35.538, 1.56, 11.939],
'BE1225':[40.751, -0.638, 15.773],
'BE1230':[40.751, 1.841, 12.319],
'BE1528':[45.88, -0.581, 19.228],
'BE1532':[45.88, 1.568, 15.512],
'BE1824':[48.958, -0.873, 19.084],
'BE1832':[48.958, 1.527, 15.373],
'BE1928':[50.978, -1.116, 18.883],
'BE1932':[50.978, 2.341, 19.65],
'BE1935':[50.978, 4.142, 13.964],
'RE1025':[38.656, 10.005, 14.867],
'RE1328':[42.808, 15.233, 18.384],
'RE1630':[46.922, 11.844, 12.337],
'RE1632':[46.922, 14.823, 17.992],
'YE1932':[50.978, 10.007, 26.137],
'YE1937':[50.978, 13.167, 22.369],
'YE2337':[55.031, 10.635, 30.011],
'YE2344':[55.031, 13.063, 22.276],
'YE2735':[59.067, 10.698, 29.749],
'YE2740':[59.067, 12.998, 22.111],
'YE3242':[63.011, 5.958, 31.797],
'YE3245':[63.011, 10.535, 29.511],
'YE3250':[63.011, 12.058, 37.976],
'YE3755':[66.976, 6.043, 38.723],
'YE3760':[66.976, 12.971, 42.748],
'SE1932':[50.978, 4.193, 17.536],
'SE2335':[55.031, 5.348, 24.6],
'SE2332':[55.031, 4.306, 17.658],
'SE2740':[59.067, 5.314, 24.639],
'SE2735':[59.067, 5.395, 16.822],
'SE3445':[64.963, 1.838, 17.919],
'SE3440':[64.963, 4.56, 13.035],
'SE3945':[68.896, 2.123, 12.069],
'SE3948':[68.896, 4.791, 17.37],
'NB0407':[25.26, 1.158, -0.449],
'NB0609':[30.40, 1.293, -0.479],
'NB0911':[35.54, 1.472, -0.515],
'NG1214':[40.751, 1.625, -0.58],
'NG1517':[45.88, 1.751, -0.646],
'NG1922':[50.978, 1.927, -0.682],
'NG2427':[56.028, 2.044, -0.742],
'NG2933':[61.045, 2.208, -0.784],
'NG3538':[66.059, 2.343, -0.84],
'NG4247':[70.871, 2.49, -0.888],
'NG4954':[75.638, 2.611, -0.941],
'NG5862':[80.516, 2.765, -0.988],
'NW6770':[85.403, 2.888, -1.046],
'NW7780':[90.229, 3.043, -1.09],
'NW8889':[95.077, 3.178, -1.137],
'WR1216':[40.80, -7.59, 8.16],
'WR2937':[61.05, -12.60, -5.57],
'WR4250':[70.87, 4.97, 13.32],
'WO0911':[35.52, -3.38, -12.29],
'WO5363':[78.05, -10.48, -7.61],
'CB1965':[50.978, -4.713, -16.964],
'CB2980':[61.045, -12.131, -37.265],
'CB4382':[71.214, -25.198, -25.499],
'CY4970':[75.638, 18.525, 31.211],
'CY6780':[85.352, 9.846, 18.865],
'CY7785':[90.229, -5.577, 25.272],
'CR1958':[50.978, 45.318, 32.692],
'CR2260':[53.492, 44.555, 18.778],
'CR2964':[61.045, 22.215, 7.098]}
data = []
label = []
for key, value in std_colors.items():
data.append(value)
label.append(key)
pass
data = np.array(data)
def colour_classify(incolour, traindata, traincolour, k):
    '''Classify a LAB colour against the trained colour set with k-NN.'''
    # shape is a numpy attribute; shape[0] is the number of trained colours
    trainsize = traindata.shape[0]
    # per-channel differences, stored in the matrix diffmat
    diffmat = np.tile(incolour, (trainsize,1)) - traindata
    # squared differences
    squarediffmat = diffmat**2
    # Euclidean distance to every trained colour
    errordistance = squarediffmat.sum(axis=1)**0.5
    # indices of the trained colours sorted by increasing distance
    sorteddistance = errordistance.argsort()
    classcount = {}
    for i in range(k):
        # let the k nearest colours vote
        selectedcolour = traincolour[sorteddistance[i]]
        classcount[selectedcolour] = classcount.get(selectedcolour,0)+1
    sortedclasscount = sorted(classcount.items(),
                              key=operator.itemgetter(1),reverse=True)
    return sortedclasscount[0][0]
def select_std_color(incolour):
return colour_classify(incolour, data, label, 1)
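# Usage sketch (hypothetical LAB triple on the same L*, a*, b* scale as
# std_colors): select_std_color([50.0, -15.0, 20.0]) returns the code of the
# nearest standard colour by 1-nearest-neighbour voting.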
# main window
window = tk.Tk()
window.title('Colour Selection Algorithm')
window.geometry('860x800')
# params init
path_1 = tk.StringVar()
path_2 = tk.StringVar()
state = tk.StringVar()
state.set('正常')
# path select functions
def selectPath1():
path = askopenfilename()
path_1.set(path)
pass
def selectPath2():
path = askopenfilename()
path_2.set(path)
pass
# UI element
l1 = tk.Label(window, text='请选择第一张图片:', \
font=('Arial', 12), width=16, height=2, justify=tk.LEFT)
l1.place(x=2+5)
l2 = tk.Label(window, text='请选择第二张图片:', \
font=('Arial', 12), width=16, height=2, justify=tk.LEFT)
l2.place(x=2+5, y=50)
e1 = tk.Entry(window, textvariable=path_1, width=72)
e1.place(x=190+5, y=15)
e2 = tk.Entry(window, textvariable=path_2, width=72)
e2.place(x=190+5, y=65)
b1 = tk.Button(window, text='选择文件', width=8, height=1, command=selectPath1)
b1.place(x=720+5, y=8)
b2 = tk.Button(window, text='选择文件', width=8, height=1, command=selectPath2)
b2.place(x=720+5, y=58)
lp1 = tk.Label(window, width=128, height=64)
lp1.place(x=7, y=208)
lp2 = tk.Label(window, width=128, height=64)
lp2.place(x=7, y=288)
lp1c = tk.Label(window, width=640, height=64)
lp1c.place(x=160, y=208)
lp2c = tk.Label(window, width=640, height=64)
lp2c.place(x=160, y=288)
l5 = tk.Label(window, text='状态:', \
font=('Arial', 12), width=8, height=2, justify=tk.LEFT)
l5.place(x=220, y=153)
lstate = tk.Label(window, textvariable=state, \
font=('Arial', 12), width=50, height=1, justify=tk.LEFT, bg='#7FFF7F')
lstate.place(x=285, y=162)
l3 = tk.Label(window, text='聚类阈值:', \
font=('Arial', 12), width=16, height=2, justify=tk.LEFT)
l3.place(x=7, y=100)
e3 = tk.Entry(window, width=5)
e3.place(x=190+5, y=115)
l4 = tk.Label(window, text='色差阈值:', \
font=('Arial', 12), width=16, height=2, justify=tk.LEFT)
l4.place(x=360, y=100)
e4 = tk.Entry(window, width=5)
e4.place(x=480+5, y=115)
scrollbar = tk.Scrollbar(window)
scrollbar.place(x=808, y=368, height=368)
listbox = tk.Listbox(window, yscrollcommand=scrollbar.set, width=108, height=20)
listbox.place(x=48, y=368)
# expand a 1-D colour theme into a wide display strip: each colour becomes a
# 32-pixel-wide block, and the row is duplicated 64 times
def make_image(theme):
output = [[]]
for pix in theme:
for i in range(32):
output[0].append(pix)
pass
pass
for i in range(64):
output.append(output[0])
pass
output = np.array(output)
return output
def main():
# get images
image_1_path = e1.get()
image_2_path = e2.get()
try:
image_1_RGB = plt.imread(image_1_path)
image_2_RGB = plt.imread(image_2_path)
color_tolerance = float(e4.get())
cluster_tolerance = float(e3.get())
pass
except:
state.set('ERROR')
lstate.config(bg='#FF7F7F')
window.update_idletasks()
messagebox.showinfo(title='ERROR', message='输入错误!')
return None
pass
# update the state
lstate.config(bg='#7FFF7F')
window.update_idletasks()
# show image
state.set('显示图片中。。。')
window.update_idletasks()
img_open = Image.open(e1.get())
img = img_open.resize((128, 64))
img = ImageTk.PhotoImage(img)
lp1.config(image=img)
lp1.image = img
window.update_idletasks()
# show image
img_open = Image.open(e2.get())
img = img_open.resize((128, 64))
img = ImageTk.PhotoImage(img)
lp2.config(image=img)
lp2.image = img
window.update_idletasks()
# resize to speed up
image_1_RGB = Image.open(image_1_path)
w_resize = 96
h_resize = int(w_resize*image_1_RGB.size[1]/image_1_RGB.size[0])
image_1_RGB = image_1_RGB.resize((w_resize, h_resize))
image_1_RGB = np.array(image_1_RGB)
# resize to speed up
image_2_RGB = Image.open(image_2_path)
w_resize = 96
h_resize = int(w_resize*image_2_RGB.size[1]/image_2_RGB.size[0])
image_2_RGB = image_2_RGB.resize((w_resize, h_resize))
image_2_RGB = np.array(image_2_RGB)
state.set('转换RGB为LAB中。。。')
window.update_idletasks()
image_1_LAB = cv2.cvtColor(image_1_RGB,cv2.COLOR_RGB2LAB)
image_2_LAB = cv2.cvtColor(image_2_RGB,cv2.COLOR_RGB2LAB)
# image 1
state.set('第一张图片聚类中。。。')
window.update_idletasks()
dbscan1 = DBSCAN(eps=cluster_tolerance)
h_1, w_1, c_1 = image_1_LAB.shape
image_1_data = image_1_LAB.reshape((h_1*w_1, c_1))
dbscan1.fit(image_1_data)
labels = dbscan1.labels_
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
# find the cluster center
theme_1 = []
for i in range(n_clusters_1):
one_cluster = image_1_data[labels == i]
km = KMeans(n_clusters=1, max_iter=600)
km.fit(one_cluster)
theme_1.append(np.squeeze(km.cluster_centers_))
pass
theme_1 = np.array(theme_1)
# show image
pic_array = cv2.cvtColor(np.uint8(theme_1.reshape(1, len(theme_1), 3)), cv2.COLOR_LAB2RGB)
pic_array = make_image(pic_array[0])
pic = Image.fromarray(pic_array.astype('uint8')).convert('RGB')
img = ImageTk.PhotoImage(pic)
lp1c.config(image=img)
lp1c.image = img
window.update_idletasks()
# image 2
state.set('第二张图片聚类中。。。')
window.update_idletasks()
dbscan2 = DBSCAN(eps=cluster_tolerance)
h_2, w_2, c_2 = image_2_LAB.shape
image_2_data = image_2_LAB.reshape((h_2*w_2, c_2))
dbscan2.fit(image_2_data)
labels = dbscan2.labels_
n_clusters_2 = len(set(labels)) - (1 if -1 in labels else 0)
# find the cluster center
theme_2 = []
for i in range(n_clusters_2):
one_cluster = image_2_data[labels == i]
km = KMeans(n_clusters=1, max_iter=600)
km.fit(one_cluster)
theme_2.append(np.squeeze(km.cluster_centers_))
pass
theme_2 = np.array(theme_2)
# show image
pic_array = cv2.cvtColor(np.uint8(theme_2.reshape(1, len(theme_2), 3)), cv2.COLOR_LAB2RGB)
pic_array = make_image(pic_array[0])
pic = Image.fromarray(pic_array.astype('uint8')).convert('RGB')
img = ImageTk.PhotoImage(pic)
lp2c.config(image=img)
lp2c.image = img
window.update_idletasks()
state.set('聚类完成')
window.update_idletasks()
def calc_chromatism(lab1, lab2):
deltaL = lab1[0] - lab2[0]
deltaA = lab1[1] - lab2[1]
deltaB = lab1[2] - lab2[2]
deltaE = (deltaL**2 + deltaA**2 + deltaB**2)**0.5
return deltaE
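    # calc_chromatism above is the CIE76 colour difference: the plain Euclidean
    # distance in L*a*b* space, compared against color_tolerance in the loops below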
# image 1 area
image1_color_area = []
state.set('计算图片一各颜色面积占比中。。。'+str(0)+'%')
window.update_idletasks()
for i in range(n_clusters_1):
num_same_pixs = 0
L1 = theme_1[i][0]*100/255
A1 = theme_1[i][1]-128
B1 = theme_1[i][2]-128
LAB1 = [L1, A1, B1]
for j in range(0, h_1*w_1):
L2 = image_1_data[j][0]*100/255
A2 = image_1_data[j][1]-128
B2 = image_1_data[j][2]-128
LAB2 = [L2, A2, B2]
deltaE = calc_chromatism(LAB1, LAB2)
if deltaE <= color_tolerance:
num_same_pixs += 1
pass
pass
area = num_same_pixs/(h_1*w_1)
image1_color_area.append(area)
state.set('计算图片一各颜色面积占比中。。。'+str(int(100*(i+1)/n_clusters_1))+'%')
window.update_idletasks()
pass
#print(image1_color_area)
# image 2 area
image2_color_area = []
state.set('计算图片二各颜色面积占比中。。。'+str(0)+'%')
window.update_idletasks()
for i in range(n_clusters_2):
num_same_pixs = 0
L1 = theme_2[i][0]*100/255
A1 = theme_2[i][1]-128
B1 = theme_2[i][2]-128
LAB1 = [L1, A1, B1]
for j in range(0, h_2*w_2):
L2 = image_2_data[j][0]*100/255
A2 = image_2_data[j][1]-128
B2 = image_2_data[j][2]-128
LAB2 = [L2, A2, B2]
deltaE = calc_chromatism(LAB1, LAB2)
if deltaE <= color_tolerance:
num_same_pixs += 1
pass
pass
area = num_same_pixs/(h_2*w_2)
image2_color_area.append(area)
state.set('计算图片二各颜色面积占比中。。。'+str(int(100*(i+1)/n_clusters_2))+'%')
window.update_idletasks()
pass
#print(image2_color_area)
state.set('面积占比计算完成')
window.update_idletasks()
state.set('共同色选取中。。。')
window.update_idletasks()
common_color = []
common_area = []
common_uint8_lab = []
common_color_A = []
common_color_B = []
for i in range(n_clusters_1):
L1 = theme_1[i][0]*100/255
A1 = theme_1[i][1]-128
B1 = theme_1[i][2]-128
LAB1 = [L1, A1, B1]
for j in range(n_clusters_2):
L2 = theme_2[j][0]*100/255
A2 = theme_2[j][1]-128
B2 = theme_2[j][2]-128
LAB2 = [L2, A2, B2]
deltaE = calc_chromatism(LAB1, LAB2)
if deltaE <= color_tolerance:
S1 = image1_color_area[i] / (image1_color_area[i] + image2_color_area[j])
S2 = image2_color_area[j] / (image1_color_area[i] + image2_color_area[j])
L3 = L1 * S1 + L2 * S2
A3 = A1 * S1 + A2 * S2
B3 = B1 * S1 + B2 * S2
L1 = round(L1, 3)
A1 = round(A1, 3)
B1 = round(B1, 3)
L2 = round(L2, 3)
A2 = round(A2, 3)
B2 = round(B2, 3)
L3 = round(L3, 3)
A3 = round(A3, 3)
B3 = round(B3, 3)
LAB1 = [L1, A1, B1]
LAB2 = [L2, A2, B2]
LAB3 = [L3, A3, B3]
common_color_A.append(LAB1)
common_color_B.append(LAB2)
common_color.append(LAB3)
common_area.append((image1_color_area[i], image2_color_area[j]))
uint8_lab3 = [L3*255/100, A3+128, B3+128]
common_uint8_lab.append(uint8_lab3)
pass
pass
pass
common_uint8_lab = np.uint8(common_uint8_lab)
#print(common_color)
#print(common_area)
state.set('共同色选取完成')
window.update_idletasks()
title = ' '*22 + 'LAB' + ' '*(48-3) + 'A' + ' '*48 + 'B' + ' '*32 + 'Std Color'
listbox.delete(0, tk.END)
listbox.insert(tk.END, title)
window.update_idletasks()
result_info = []
for i in range(len(common_color)):
#info = '{:4d}'.format(i+1) + ' '*4
info = '[{:3.3f} {:3.3f} {:3.3f}]'.format(common_color[i][0], \
common_color[i][1], common_color[i][2])
info += ' '*(36-len(info))
info += '{:3.2f}'.format(100*common_area[i][0]) + '%' + ' '*4
info += '[{:3.3f} {:3.3f} {:3.3f}]'.format(common_color_A[i][0], \
common_color_A[i][1], common_color_A[i][2])
info += ' '*(72-len(info))
info += '{:3.2f}'.format(100*common_area[i][1]) + '%' + ' '*4
info += '[{:3.3f} {:3.3f} {:3.3f}]'.format(common_color_B[i][0], \
common_color_B[i][1], common_color_B[i][2])
info += ' '*(108-len(info))
selected_std_color = select_std_color(common_color[i])
info += selected_std_color
res = (selected_std_color, info)
result_info.append(res)
pass
colors = []
dict_colors = {}
nums = []
for i in range(len(result_info)):
colors.append(result_info[i][0])
colors_set = set(colors)
pass
for color in colors_set:
num = colors.count(color)
if str(num) not in dict_colors.keys():
nums.append(num)
dict_colors[str(num)] = [color]
pass
else:
dict_colors[str(num)].append(color)
pass
pass
#print(dict_colors)
index = 0
while dict_colors != {}:
num = max(nums)
key = str(num)
for color in dict_colors[key]:
LAB1 = std_colors[color]
num_same_pixs = 0
for n in range(0, h_1*w_1):
L2 = image_1_data[n][0]*100/255
A2 = image_1_data[n][1]-128
B2 = image_1_data[n][2]-128
LAB2 = [L2, A2, B2]
deltaE = calc_chromatism(LAB1, LAB2)
if deltaE <= color_tolerance:
num_same_pixs += 1
pass
pass
area_A = num_same_pixs/(h_1*w_1)
num_same_pixs = 0
for n in range(0, h_2*w_2):
L2 = image_2_data[n][0]*100/255
A2 = image_2_data[n][1]-128
B2 = image_2_data[n][2]-128
LAB2 = [L2, A2, B2]
deltaE = calc_chromatism(LAB1, LAB2)
if deltaE <= color_tolerance:
num_same_pixs += 1
pass
pass
area_B = num_same_pixs/(h_2*w_2)
area = [round(100*area_A, 2), round(100*area_B, 2)]
for color_info in result_info:
if color_info[0] == color:
index += 1
info = '{:4d}'.format(index) + ' '*4
info += color_info[1][:]
info += ' '*4 + '[{:3.2f}% {:3.2f}%]'.format(area[0], area[1])
listbox.insert(tk.END, info)
window.update_idletasks()
pass
pass
pass
del dict_colors[key]
nums.remove(num)
pass
scrollbar.config(command=listbox.yview)
window.update_idletasks()
pass
# start button
b3 = tk.Button(window, text='开始选取', width=25, height=1, bg='#7F7FFF', command=main)
b3.place(x=7, y=158)
# window mainloop
window.mainloop()
| [
"[email protected]"
] | |
b95263fbfa97e24adf60bea5b151e16fde445b04 | 92675502d571ebe41ce38c6ba58413b1dc7af315 | /0x00-python-hello_world/102-magic_calculation.py | 7c936a7bc1c8e675de17b3fc19fee37c48eea9ee | [] | no_license | isaza00/holbertonschool-higher_level_programming | 8fc1eb0665602920bc1e86e5e17293156158c985 | f6e6cc619c8c74b131bc3a67931d3297ceec033d | refs/heads/master | 2022-12-25T09:01:01.635214 | 2020-09-30T21:14:13 | 2020-09-30T21:14:13 | 259,348,477 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | def magic_calculation(a, b):
return 98 + a**b
| [
"[email protected]"
] | |
f07cc36cc24fb93bc7abf3c6797c8a06336d73ce | 61f6a7b3226e892d897538180a41b9d65f3be9ef | /run.py | ef09f7438d369749a1b07c293e1931da000c9400 | [
"MIT"
] | permissive | tangbiondi/aantonop-subtitles | db7952dc6dd340eabb53b85813493f34ca6e45bd | 35120f247cee63b4f1c0f937d5c337374ef51ec3 | refs/heads/master | 2021-04-28T01:32:54.412736 | 2018-02-20T13:45:31 | 2018-02-20T13:45:31 | 122,281,228 | 0 | 0 | MIT | 2018-02-21T01:55:35 | 2018-02-21T01:55:34 | null | UTF-8 | Python | false | false | 1,875 | py | import re
from collections import defaultdict
from glob import glob
languages = set()
videos = {}
for filepath in glob("./subtitles/original/*"):
filename = filepath.replace("./subtitles/original/", "")
title, youtube_id, lang = re.match(r"(.*)-(.{11,13})\.(.*)\.vtt", filename).groups()
languages.add(lang)
    if youtube_id not in videos:
videos[youtube_id] = {
"title": title,
"subtitles": [{"lang": lang, "filepath": filepath}]
}
else:
videos[youtube_id]["subtitles"].append({{"lang": lang, "filepath": filepath}})
print(filename)
headers = ["No.", "Title"] + list(languages)
langs = list(languages)
print("|", end="")
for header in ["No.", "Title"] + langs:
print(" <sup><sub>{}</sub></sup> |".format(header), end="")
print("")
print("|", end="")
for i in range(len(headers)):
print("----|", end="")
print("")
def multiline_split(text, char_per_line):
    words = text.split(" ")
result = ""
line = ""
for word in words:
if len(line + word) < char_per_line:
line += " " + word
else:
result += line + "<br>"
line = word
result += line
return result
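# Example: multiline_split("The Internet of Money explained simply", 25)
# returns " The Internet of Money<br>explained simply" (each chunk keeps a
# leading space because words are joined with " ").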
lang_stat = defaultdict(int)
for i, (youtube_id, video) in enumerate(videos.items()):
print("| <sup><sub>{}</sub></sup> |".format(i+1), end="", flush=True)
print(" <sup><sub>[{title}]({youtube_link})</sub></sup> |".format(
title=multiline_split(video["title"], 25),
youtube_link="https://www.youtube.com/watch?v={}".format(youtube_id)
), end="", flush=True)
for lang in langs:
if lang in [sub["lang"] for sub in video["subtitles"]]:
print(" <sup><sub>✓</sub></sup> |", end="", flush=True)
lang_stat[lang] += 1
else:
print(" |", end="", flush=True)
print("")
print("")
| [
"[email protected]"
] | |
88f23fd76116a7637df84116fa5bd6222cf5318a | f7dd190a665a4966db33dcc1cc461dd060ca5946 | /apps/posts/migrations/0001_initial.py | 97494a6fbf0f67308adabcc659abd18daaae77c0 | [] | no_license | Darwin939/macmeharder_back | 2cc35e2e8b39a82c8ce201e63d9f6a9954a04463 | 8fc078333a746ac7f65497e155c58415252b2d33 | refs/heads/main | 2023-02-28T12:01:23.237320 | 2021-02-02T17:37:33 | 2021-02-02T17:37:33 | 328,173,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # Generated by Django 3.1.5 on 2021-01-16 09:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=300)),
('mini_title', models.CharField(blank=True, max_length=300, null=True)),
('body', models.TextField(blank=True, max_length=10000, null=True)),
],
options={
'verbose_name': 'post',
'verbose_name_plural': 'post',
'ordering': ['created'],
},
),
]
| [
"[email protected]"
] | |
0cb41a6e2298f4c7682445e5240068028f4c752b | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/__init__.py | 9346822ef6a0c05ec1827aba2513ed36c687e717 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,865 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class network(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/address-family/ipv6/ipv6-unicast/default-vrf/network. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__network_ipv6_address','__network_weight','__backdoor','__network_route_map',)
_yang_name = 'network'
_rest_name = 'network'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__backdoor = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backdoor", rest_name="backdoor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a BGP backdoor route', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
self.__network_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="network-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-map to modify the attributes', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
self.__network_weight = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..65535']}), is_leaf=True, yang_name="network-weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BGP weight for network', u'cli-full-command': None, u'alt-name': u'weight'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='absolute-decimal-number', is_config=True)
self.__network_ipv6_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="network-ipv6-address", rest_name="network-ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/M IPV6 address in dotted decimal/Mask'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv6-prefix', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'address-family', u'ipv6', u'ipv6-unicast', u'default-vrf', u'network']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'address-family', u'ipv6', u'unicast', u'network']
def _get_network_ipv6_address(self):
"""
Getter method for network_ipv6_address, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_ipv6_address (inet:ipv6-prefix)
"""
return self.__network_ipv6_address
def _set_network_ipv6_address(self, v, load=False):
"""
Setter method for network_ipv6_address, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_ipv6_address (inet:ipv6-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_network_ipv6_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network_ipv6_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="network-ipv6-address", rest_name="network-ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/M IPV6 address in dotted decimal/Mask'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv6-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network_ipv6_address must be of a type compatible with inet:ipv6-prefix""",
'defined-type': "inet:ipv6-prefix",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="network-ipv6-address", rest_name="network-ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/M IPV6 address in dotted decimal/Mask'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv6-prefix', is_config=True)""",
})
self.__network_ipv6_address = t
if hasattr(self, '_set'):
self._set()
def _unset_network_ipv6_address(self):
self.__network_ipv6_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="network-ipv6-address", rest_name="network-ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/M IPV6 address in dotted decimal/Mask'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv6-prefix', is_config=True)
def _get_network_weight(self):
"""
Getter method for network_weight, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_weight (absolute-decimal-number)
"""
return self.__network_weight
def _set_network_weight(self, v, load=False):
"""
Setter method for network_weight, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_weight (absolute-decimal-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_network_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network_weight() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..65535']}), is_leaf=True, yang_name="network-weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BGP weight for network', u'cli-full-command': None, u'alt-name': u'weight'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='absolute-decimal-number', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network_weight must be of a type compatible with absolute-decimal-number""",
'defined-type': "brocade-bgp:absolute-decimal-number",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..65535']}), is_leaf=True, yang_name="network-weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BGP weight for network', u'cli-full-command': None, u'alt-name': u'weight'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='absolute-decimal-number', is_config=True)""",
})
self.__network_weight = t
if hasattr(self, '_set'):
self._set()
def _unset_network_weight(self):
self.__network_weight = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..65535']}), is_leaf=True, yang_name="network-weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set BGP weight for network', u'cli-full-command': None, u'alt-name': u'weight'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='absolute-decimal-number', is_config=True)
def _get_backdoor(self):
"""
Getter method for backdoor, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/backdoor (empty)
"""
return self.__backdoor
def _set_backdoor(self, v, load=False):
"""
Setter method for backdoor, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/backdoor (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_backdoor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_backdoor() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="backdoor", rest_name="backdoor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a BGP backdoor route', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """backdoor must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backdoor", rest_name="backdoor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a BGP backdoor route', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__backdoor = t
if hasattr(self, '_set'):
self._set()
def _unset_backdoor(self):
self.__backdoor = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backdoor", rest_name="backdoor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a BGP backdoor route', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_network_route_map(self):
"""
Getter method for network_route_map, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_route_map (rmap-type)
"""
return self.__network_route_map
def _set_network_route_map(self, v, load=False):
"""
Setter method for network_route_map, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf/network/network_route_map (rmap-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_network_route_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network_route_map() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="network-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-map to modify the attributes', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network_route_map must be of a type compatible with rmap-type""",
'defined-type': "brocade-bgp:rmap-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="network-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-map to modify the attributes', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)""",
})
self.__network_route_map = t
if hasattr(self, '_set'):
self._set()
def _unset_network_route_map(self):
self.__network_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="network-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-map to modify the attributes', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
network_ipv6_address = __builtin__.property(_get_network_ipv6_address, _set_network_ipv6_address)
network_weight = __builtin__.property(_get_network_weight, _set_network_weight)
backdoor = __builtin__.property(_get_backdoor, _set_backdoor)
network_route_map = __builtin__.property(_get_network_route_map, _set_network_route_map)
_pyangbind_elements = {'network_ipv6_address': network_ipv6_address, 'network_weight': network_weight, 'backdoor': backdoor, 'network_route_map': network_route_map, }
| [
"[email protected]"
] | |
8aac422067e1cd8db4e302ec4ff97aacdc5f7736 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02708/s543945314.py | 6e7a98625c4186a02cb68a2ca3bd2c3ee37b3d97 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python3
def main():
N, K = map(int, input().split())
mod = 7 + 10 ** 9
res = 0
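    # Picking k of the N+1 integers 0..N, the attainable sums form the contiguous
    # range [k*(k-1)/2, k*(2*N-k+1)/2], so each k contributes max - min + 1 sums.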
for k in range(K, N + 2):
        res += (k * (2 * N - k + 1) // 2) - (k * (k - 1) // 2) + 1
        res %= mod
    print(res)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
fc6c6f943c0e49189d8ac9a2b455467b12652b7d | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /models/operator/multiscale_deformable_attention/_get_torch_build_conf.py | 06290f7aabf171664429380306b945bfc02fd19c | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import torch
from torch.utils.cpp_extension import include_paths, library_paths, CUDAExtension, COMMON_NVCC_FLAGS
def _get_torch_cuda_flags():
return COMMON_NVCC_FLAGS
def _get_torch_cuda_archs():
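    # Parse the "arch=compute_XX,code=sm_XX" fragments on the NVCC line of
    # torch.__config__.show() to recover the compute capabilities torch was built with.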
config = torch.__config__.show()
configs = config.split('\n')
archs = set()
for conf in configs:
if 'NVCC' in conf and 'arch' in conf:
ss = conf.split(';')
for s in ss:
s = s.strip()
if s.startswith('arch='):
cs = s[5:].split(',')
for c in cs:
v = c.split('_')
archs.add(int(v[1]))
return archs
def _get_torch_include_paths():
return [path.replace('\\', '/') for path in include_paths(False)]
def _get_torch_library_paths():
return [path.replace('\\', '/') for path in library_paths(False)]
def _get_torch_libraries():
return CUDAExtension('', []).libraries
| [
"[email protected]"
] | |
54af20a4223a7fce77f976c6063056318656c59a | 0fa98dbc4d6256121b9f478a13ff2254047fb543 | /12_01_typical_interview_tasks/L. Extra letter.py | 3044323e3d49139edab4073c9b2c1641c929ee7f | [] | no_license | vamotest/yandex_algorithms | 48d5b29cb6e2789ea8f7e8024c798851058f1d4c | a588da3d21ff95e2437818493769719600f3eaf7 | refs/heads/master | 2023-03-19T20:44:59.373046 | 2021-01-20T19:06:28 | 2021-01-20T19:06:28 | 330,421,669 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from itertools import zip_longest
def define_anagrams(first, second):
li = list(zip_longest(first, second))
for letter in li:
if letter[0] != letter[1]:
return letter[1]
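# Example: with sorted inputs "abcde" and "abcdef", every aligned pair matches,
# zip_longest pads the shorter word with None, and the extra letter 'f' is returned.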
if __name__ == '__main__':
    first_word = ''.join(sorted(input()))
    second_word = ''.join(sorted(input()))
result = define_anagrams(first_word, second_word)
print(result)
| [
"[email protected]"
] | |
16953af903b8aa207c099df1b969dace225c4224 | 40c3c3ad98e5d5b10af1cdaa5b5d2278472448a5 | /tests/app/tests/test_content_panes.py | 20881cc868cfd0f4365af6ba10b42a04acede327 | [] | no_license | modohash/django-hstore-flattenfields | ac96a509dde799625c01cff078c830b48d479f9d | 09626a638b9ef85d28fa5bfef1b040f9926bb95b | refs/heads/master | 2021-01-18T16:08:59.850946 | 2015-05-04T23:19:00 | 2015-05-04T23:19:00 | 54,548,872 | 0 | 0 | null | 2016-03-23T09:54:54 | 2016-03-23T09:54:53 | null | UTF-8 | Python | false | false | 3,645 | py | #!/usr/bin/env python
# encoding: utf-8
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from hstore_flattenfields.models import ContentPane, DynamicField
from tests.app.models import AuthorType, Author
from hstore_flattenfields.utils import get_ctype
class AuthorContentPaneTests(TestCase):
def setUp(self):
self.commics_authors = AuthorType.objects.create(
id=1, name="Something Group", slug="commics_authors"
)
self.dramatic_authors = AuthorType.objects.create(
id=2, name="Other Group", slug="dramatic_authors"
)
self.main_info_pane = ContentPane.objects.create(
id=1,
name='Main Info', content_type=get_ctype(Author),
)
self.commic_pane = ContentPane.objects.create(
id=2, name='Commic Information Pane',
content_type=get_ctype(Author),
group=self.commics_authors.dynamicfieldgroup_ptr
)
self.dramatic_pane = ContentPane.objects.create(
id=3, name='Drama Information Pane',
content_type=get_ctype(Author),
group=self.dramatic_authors.dynamicfieldgroup_ptr
)
self.age = DynamicField.objects.create(
id=1, refer="Author",
typo="Integer", name="author_age",
verbose_name=u"Age",
content_pane=self.main_info_pane
)
self.name = DynamicField.objects.create(
id=2, refer="Author",
name="author_name", verbose_name=u"Name",
content_pane=self.main_info_pane
)
self.information = DynamicField.objects.create(
id=3, refer="Author", name="author_information",
verbose_name=u"Information",
group=self.commics_authors.dynamicfieldgroup_ptr
)
self.dramatic_level = DynamicField.objects.create(
id=4, refer="Author", name="author_dramatic_level",
typo="Integer", verbose_name=u"Dramatic Level",
content_pane=self.main_info_pane,
group=self.dramatic_authors.dynamicfieldgroup_ptr
)
def test_assert_content_pane_fields(self):
self.assertQuerysetEqual(
self.main_info_pane.fields,
[
'<DynamicField: Dramatic Level>',
'<DynamicField: Name>',
'<DynamicField: Age>'
]
)
def test_assert_object_content_panes(self):
author = Author.objects.create(
author_age=42, author_name="some-name"
)
self.assertQuerysetEqual(
author.content_panes,
['<ContentPane: Main Info>']
)
def test_assert_groupped_content_panes(self):
author = Author.objects.create(
pk=777,
author_age=42, author_name="some-name"
)
author.author_groups.add(self.commics_authors)
author = Author.objects.get()
self.assertQuerysetEqual(
author.content_panes,
[
'<ContentPane: Commic Information Pane>',
'<ContentPane: Main Info>'
]
)
self.assertQuerysetEqual(
author.dynamic_fields,
[
'<DynamicField: Age>',
'<DynamicField: Name>',
'<DynamicField: Information>'
]
)
| [
"[email protected]"
] | |
95a73697b11de4cb430cc1ce6bf5219f0e4f562a | a6894d17fdbceb56d4364f0e279d03b16a181396 | /working-env/lib/python2.5/TurboGears-1.0.2.2-py2.5.egg/turbogears/i18n/data/no.py | 0da3995f3e22ebaf88f03183914aae17f47e17df | [] | no_license | thraxil/gtreed | c1c5a19178c1f50ff5e61887b13ff7b004da1d25 | ca228848364edb204b15a7411fd6192379781c78 | refs/heads/master | 2020-04-18T03:02:15.468044 | 2008-12-10T20:02:12 | 2008-12-10T20:02:12 | 88,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,983 | py | # Formatting configuration for locale no
languages={'vi': 'vietnamesisk', 'el': 'gresk', 'eo': 'esperanto', 'en': 'engelsk', 'zh': 'kinesisk', 'af': 'afrikaans', 'sw': 'swahili', 'ca': 'katalansk', 'am': 'amharisk', 'gu': 'gujarati', 'sv': 'svensk', 'cs': 'tsjekkisk', 'ar': 'arabisk', 'ga': 'irsk', 'eu': 'baskisk', 'et': 'estisk', 'gl': 'galicisk', 'id': 'indonesisk', 'es': 'spansk', 'ru': 'russisk', 'nl': 'nederlandsk', 'nn': 'norsk nynorsk', 'no': 'norsk', 'nb': u'norsk bokm\xe5l', 'tr': 'tyrkisk', 'lv': 'latvisk', 'lt': 'litauisk', 'th': 'thai', 'ti': 'tigrinya', 'it': 'italiensk', 'so': 'somalisk', 'he': 'hebraisk', 'is': 'islandsk', 'pl': 'polsk', 'haw': 'hawaiisk', 'be': 'hviterussisk', 'fr': 'fransk', 'bg': 'bulgarsk', 'sl': 'slovensk', 'hr': 'kroatisk', 'iu': 'inuktitut', 'bn': 'bengali', 'de': 'tysk', 'da': 'dansk', 'fa': 'persisk', 'hi': 'hindi', 'fi': 'finsk', 'hy': 'armensk', 'hu': 'ungarsk', 'ja': 'japansk', 'fo': u'f\xe6r\xf8ysk', 'om': 'oromo', 'ro': 'rumensk', 'gv': 'manx', 'pt': 'portugisisk', 'sr': 'serbisk', 'sq': 'albansk', 'ko': 'koreansk', 'kn': 'kannada', 'mk': 'makedonsk', 'kl': 'kalaallisut', 'sk': 'slovakisk', 'mt': 'maltesisk', 'sh': 'serbokroatisk', 'kw': 'kornisk', 'uk': 'ukrainsk', 'mr': 'marathi', 'ta': 'tamil'}
countries={'BD': 'Bangladesh', 'BE': 'Belgia', 'BF': 'Burkina Faso', 'BG': 'Bulgaria', 'BA': 'Bosnia og Hercegovina', 'BB': 'Barbados', 'WF': 'Wallis og Futuna', 'BM': 'Bermuda', 'BN': 'Brunei Darussalam', 'BO': 'Bolivia', 'BH': 'Bahrain', 'BI': 'Burundi', 'BJ': 'Benin', 'BT': 'Bhutan', 'JM': 'Jamaica', 'BV': u'Bouvet\xf8ya', 'BW': 'Botswana', 'WS': 'Samoa', 'BR': 'Brasil', 'BS': 'Bahamas', 'BY': 'Hviterussland', 'BZ': 'Belize', 'RU': u'Den russiske f\xf8derasjon', 'RW': 'Rwanda', 'TL': u'\xd8st-Timor', 'RE': 'Reunion', 'TM': 'Turkmenistan', 'TJ': 'Tadsjikistan', 'RO': 'Romania', 'TK': 'Tokelau', 'GW': 'Guinea-Bissau', 'GU': 'Guam', 'GT': 'Guatemala', 'GS': u'S\xf8r-Georgia og S\xf8r-Sandwich-\xf8yene', 'GR': 'Hellas', 'GQ': 'Ekvatorial-Guinea', 'GP': 'Guadeloupe', 'JP': 'Japan', 'GY': 'Guyana', 'GF': 'Fransk Guyana', 'GE': 'Georgia', 'GD': 'Grenada', 'GB': 'Storbritannia', 'GA': 'Gabon', 'SV': 'El Salvador', 'GN': 'Guinea', 'GM': 'Gambia', 'GL': u'Gr\xf8nland', 'GI': 'Gibraltar', 'GH': 'Ghana', 'OM': 'Oman', 'TN': 'Tunisia', 'JO': 'Jordan', 'SP': 'Serbia', 'HR': 'Kroatia', 'HT': 'Haiti', 'HU': 'Ungarn', 'HK': 'Hong Kong S.A.R. (Kina)', 'HN': 'Honduras', 'HM': u'Heard- og McDonalds\xf8yene', 'VE': 'Venezuela', 'PR': 'Puerto Rico', 'PS': 'Palestinsk territorium', 'PW': 'Palau', 'PT': 'Portugal', 'SJ': 'Svalbard og Jan Mayen', 'PY': 'Paraguay', 'IQ': 'Irak', 'PA': 'Panama', 'PF': 'Fransk Polynesia', 'PG': 'Papua Ny-Guinea', 'PE': 'Peru', 'PK': 'Pakistan', 'PH': 'Filippinene', 'PN': 'Pitcairn', 'PL': 'Polen', 'PM': 'St. Pierre og Miquelon', 'ZM': 'Zambia', 'EH': 'Vest-Sahara', 'EE': 'Estland', 'EG': 'Egypt', 'ZA': u'S\xf8r-Afrika', 'EC': 'Ecuador', 'IT': 'Italia', 'VN': 'Vietnam', 'SB': u'Salomon\xf8yene', 'ET': 'Etiopia', 'SO': 'Somalia', 'ZW': 'Zimbabwe', 'SA': 'Saudi Arabia', 'ES': 'Spania', 'ER': 'Eritrea', 'MD': 'Moldova', 'MG': 'Madagaskar', 'MA': 'Marokko', 'MC': 'Monaco', 'UZ': 'Usbekistan', 'MM': 'Myanmar', 'ML': 'Mali', 'MO': 'Macao S.A.R. (Kina)', 'MN': 'Mongolia', 'MH': u'Marshall\xf8yene', 'MK': 'Makedonia, Republikken', 'MU': 'Mauritius', 'MT': 'Malta', 'MW': 'Malawi', 'MV': 'Maldivene', 'MQ': 'Martinique', 'MP': 'Nord-Marianene', 'MS': 'Montserrat', 'MR': 'Mauritania', 'UG': 'Uganda', 'MY': 'Malaysia', 'MX': 'Mexico', 'IL': 'Israel', 'FR': 'Frankrike', 'IO': u'Britiske omr\xe5der i det indiske hav', 'SH': 'Saint Helena', 'FI': 'Finland', 'FJ': 'Fiji', 'FK': u'Falklands\xf8yene (Malvinas)', 'FM': u'Mikronesiaf\xf8derasjonen', 'FO': u'F\xe6r\xf8yene', 'NI': 'Nicaragua', 'NL': 'Nederland', 'NO': 'Norge', 'NA': 'Namibia', 'VU': 'Vanuatu', 'NC': 'Ny-Caledonia', 'NE': 'Niger', 'NF': u'Norfolk\xf8yene', 'NG': 'Nigeria', 'NZ': 'New Zealand', 'NP': 'Nepal', 'NR': 'Nauru', 'NU': 'Niue', 'CK': u'Cook\xf8yene', 'CI': 'Elfenbenskysten', 'CH': 'Sveits', 'CO': 'Colombia', 'CN': 'Kina', 'CM': 'Kamerun', 'CL': 'Chile', 'CC': u'Kokos\xf8yene (Keeling\xf8yene)', 'CA': 'Canada', 'CG': 'Kongo', 'CF': 'Den sentralafrikanske republikk', 'CD': 'Kongo, Den demokratiske republikken', 'CZ': 'Tsjekkia', 'CY': 'Kypros', 'CX': u'Christmas\xf8ya', 'CR': 'Costa Rica', 'Fallback': 'en', 'CV': 'Kapp Verde', 'CU': 'Cuba', 'SZ': 'Swaziland', 'SY': 'Syria', 'KG': 'Kirgisistan', 'KE': 'Kenya', 'SR': 'Surinam', 'KI': 'Kiribati', 'KH': 'Kambodsja', 'KN': 'St. Christopher og Nevis', 'KM': 'Komorene', 'ST': 'Sao Tome og Principe', 'SK': 'Slovakia', 'KR': u'Korea (S\xf8r)', 'SI': 'Slovenia', 'KP': 'Korea (Nord)', 'KW': 'Kuwait', 'SN': 'Senegal', 'SM': 'San Marino', 'SL': 'Sierra Leone', 'SC': 'Seychellene', 'KZ': 'Kasakhstan', 'KY': u'Cayman\xf8yene', 'SG': 'Singapore', 'SE': 'Sverige', 'SD': 'Sudan', 'DO': 'Den dominikanske republikk', 'DM': 'Dominica', 'DJ': 'Djibouti', 'DK': 'Danmark', 'VG': u'Jomfru\xf8yene (britisk)', 'DE': 'Tyskland', 'YE': 'Yemen', 'DZ': 'Algerie', 'US': 'Sambandsstatane', 'UY': 'Uruguay', 'YU': 'Jugoslavia', 'YT': 'Mayotte', 'UM': u'USAs mindre \xf8yer', 'LB': 'Libanon', 'LC': 'St. Lucia', 'LA': 'Laos, Den folkedemokratiske republikken', 'TV': 'Tuvalu', 'TW': 'Taiwan', 'TT': 'Trinidad og Tobago', 'TR': 'Tyrkia', 'LK': 'Sri Lanka', 'LI': 'Liechtenstein', 'LV': 'Latvia', 'TO': 'Tonga', 'LT': 'Litauen', 'LU': 'Luxembourg', 'LR': 'Liberia', 'LS': 'Lesotho', 'TH': 'Thailand', 'TF': u'Franske s\xf8romr\xe5der', 'TG': 'Togo', 'TD': 'Tchad', 'TC': u'Turks- og Caicos\xf8yene', 'LY': 'Libya', 'VA': 'Vatikanstaten', 'VC': 'St. Vincent og Grenadinene', 'AE': 'De forente arabiske emiratene', 'AD': 'Andorra', 'AG': 'Antigua og Barbuda', 'AF': 'Afghanistan', 'AI': 'Anguilla', 'VI': 'U.S. Virgin Islands', 'IS': 'Island', 'IR': 'Iran', 'AM': 'Armenia', 'AL': 'Albania', 'AO': 'Angola', 'AN': 'De nederlandske antiller', 'AQ': 'Antarktis', 'AS': 'Amerikansk Samoa', 'AR': 'Argentina', 'AU': 'Australia', 'AT': u'\xd8sterrike', 'AW': 'Aruba', 'IN': 'India', 'TZ': 'Tanzania', 'AZ': 'Aserbajdsjan', 'IE': 'Irland', 'ID': 'Indonesia', 'UA': 'Ukraina', 'QA': 'Qatar', 'MZ': 'Mosambik'}
months=['januar', 'februar', 'mars', 'april', 'mai', 'juni', 'juli', 'august', 'september', 'oktober', 'november', 'desember']
abbrMonths=['jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul', 'aug', 'sep', 'okt', 'nov', 'des']
days=['mandag', 'tirsdag', 'onsdag', 'torsdag', 'fredag', u'l\xf8rdag', u's\xf8ndag']
abbrDays=['ma', 'ti', 'on', 'to', 'fr', u'l\xf8', u's\xf8']
dateFormats={'medium': '%d. %%(abbrmonthname)s. %Y', 'full': '%%(dayname)s %d. %%(monthname)s %Y', 'long': '%d. %%(monthname)s %Y', 'short': '%d.%m.%y'}
numericSymbols={'group': u'\xa0', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'} | [
"[email protected]"
] | |
c4397cb30b808d19f39e5dc639214ad89de14d75 | 8832f83436809e8e918e60e5526d95add9fe8dbd | /books_app/migrations/0068_auto_20190930_1758.py | 4ad159bea80bec7a1af460bd8bd499d5f456283d | [] | no_license | HCDigitalScholarship/booksofduchesses | e31e56eaba253b92a1362de5918b5b005cb27f3c | 3f0e27515963c92a56714c5bada3b6a68a8665df | refs/heads/master | 2022-12-09T18:41:20.019687 | 2021-10-25T14:58:18 | 2021-10-25T14:58:18 | 190,254,161 | 0 | 3 | null | 2022-12-08T05:21:54 | 2019-06-04T18:05:08 | Python | UTF-8 | Python | false | false | 539 | py | # Generated by Django 2.2.2 on 2019-09-30 17:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("books_app", "0067_auto_20190927_1626")]
operations = [
migrations.AlterField(
model_name="dateowned",
name="book_owned",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="books_app.Book",
),
)
]
| [
"[email protected]"
] | |
cbd2c64466a8223e6f012a4117d8d62e307a34fb | f2a967dfcb768ef2e5729b1f7665740dc8f4e09c | /tti/indicators/_relative_momentum_index.py | 84f4a6dd01db9e19231be93ec64dd61f5df603f8 | [
"MIT"
] | permissive | Bill-Software-Engineer/trading-technical-indicators | 799c44a88ee73fb33c0255cb3ff5463f9d578506 | fc00008a41da54f160609343e866c72306f4962c | refs/heads/master | 2023-03-24T20:13:16.088567 | 2021-03-24T05:50:58 | 2021-03-24T05:50:58 | 349,295,934 | 0 | 1 | MIT | 2021-03-20T04:31:57 | 2021-03-19T04:05:07 | null | UTF-8 | Python | false | false | 6,267 | py | """
Trading-Technical-Indicators (tti) python library
File name: _relative_momentum_index.py
Implements the Relative Momentum Index technical indicator.
"""
import pandas as pd
from ._technical_indicator import TechnicalIndicator
from ..utils.constants import TRADE_SIGNALS
from ..utils.exceptions import NotEnoughInputData, WrongTypeForInputParameter,\
WrongValueForInputParameter
class RelativeMomentumIndex(TechnicalIndicator):
"""
Relative Momentum Index Technical Indicator class implementation.
Args:
input_data (pandas.DataFrame): The input data. Required input column
is ``close``. The index is of type ``pandas.DatetimeIndex``.
period (int, default=8): The past periods to be used for the
calculation of the indicator.
momentum_period (int, default=4): The momentum periods to be used for
the calculation of the indicator.
fill_missing_values (bool, default=True): If set to True, missing
values in the input data are being filled.
Attributes:
_input_data (pandas.DataFrame): The ``input_data`` after preprocessing.
_ti_data (pandas.DataFrame): The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``rmi``.
_properties (dict): Indicator properties.
_calling_instance (str): The name of the class.
Raises:
WrongTypeForInputParameter: Input argument has wrong type.
WrongValueForInputParameter: Unsupported value for input argument.
NotEnoughInputData: Not enough data for calculating the indicator.
TypeError: Type error occurred when validating the ``input_data``.
ValueError: Value error occurred when validating the ``input_data``.
"""
def __init__(self, input_data, period=8, momentum_period=4,
fill_missing_values=True):
# Validate and store if needed, the input parameters
if isinstance(period, int):
if period > 0:
self._period = period
else:
raise WrongValueForInputParameter(
period, 'period', '>0')
else:
raise WrongTypeForInputParameter(
type(period), 'period', 'int')
if isinstance(momentum_period, int):
if momentum_period > 0:
self._momentum_period = momentum_period
else:
raise WrongValueForInputParameter(
momentum_period, 'momentum_period', '>0')
else:
raise WrongTypeForInputParameter(
type(momentum_period), 'momentum_period', 'int')
# Control is passing to the parent class
super().__init__(calling_instance=self.__class__.__name__,
input_data=input_data,
fill_missing_values=fill_missing_values)
def _calculateTi(self):
"""
Calculates the technical indicator for the given input data. The input
data are taken from an attribute of the parent class.
Returns:
pandas.DataFrame: The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``rmi``.
Raises:
NotEnoughInputData: Not enough data for calculating the indicator.
"""
# Not enough data for the requested period
if len(self._input_data.index) < self._period + self._momentum_period:
raise NotEnoughInputData('Relative Momentum Index',
self._period + self._momentum_period,
len(self._input_data.index))
rmi = pd.DataFrame(index=self._input_data.index,
columns=['rmi', 'upc', 'dpc', 'smoothed_upc',
'smoothed_dpc'],
data=None, dtype='float64')
# Calculate price change (current close - close momentum periods ago)
close_price_change = self._input_data['close'] - self._input_data[
'close'].shift(self._momentum_period)
        # Upward price change (assign via .loc to avoid pandas chained-assignment issues)
        rmi.loc[close_price_change > 0, 'upc'] = close_price_change
        rmi.loc[close_price_change <= 0, 'upc'] = 0
        # Downward price change
        rmi.loc[close_price_change < 0, 'dpc'] = abs(close_price_change)
        rmi.loc[close_price_change >= 0, 'dpc'] = 0
# Wilder's Moving Average for upc and dpc
rmi['smoothed_upc'].iat[self._period + self._momentum_period - 1] = \
rmi['upc'].iloc[
self._momentum_period:self._period + self._momentum_period].mean()
rmi['smoothed_dpc'].iat[self._period + self._momentum_period - 1] = \
rmi['dpc'].iloc[
self._momentum_period:self._period + self._momentum_period].mean()
for i in range(self._period + self._momentum_period,
len(self._input_data.index)):
rmi['smoothed_upc'].iat[i] = rmi['smoothed_upc'].iat[i - 1] + (
rmi['upc'].iat[i] - rmi['smoothed_upc'].iat[i - 1]
) / self._period
rmi['smoothed_dpc'].iat[i] = rmi['smoothed_dpc'].iat[i - 1] + (
rmi['dpc'].iat[i] - rmi['smoothed_dpc'].iat[i - 1]
) / self._period
# Calculate indicator
rmi['rmi'] = 100 * (rmi['smoothed_upc'] / rmi['smoothed_dpc']) / (
1 + rmi['smoothed_upc'] / rmi['smoothed_dpc'])
return rmi[['rmi']].round(4)
def getTiSignal(self):
"""
Calculates and returns the trading signal for the calculated technical
indicator.
Returns:
{('hold', 0), ('buy', -1), ('sell', 1)}: The calculated trading
signal.
"""
# Not enough data for trading signal
if len(self._ti_data.index) < 2:
return TRADE_SIGNALS['hold']
# Overbought region
if self._ti_data['rmi'].iat[-2] < 70. < self._ti_data['rmi'].iat[-1]:
return TRADE_SIGNALS['sell']
# Oversold region
if self._ti_data['rmi'].iat[-2] > 30. > self._ti_data['rmi'].iat[-1]:
return TRADE_SIGNALS['buy']
return TRADE_SIGNALS['hold']
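# Minimal usage sketch (assumes `df` is a pandas DataFrame with a 'close'
# column and a pandas.DatetimeIndex, as the class docstring requires):
#
#     indicator = RelativeMomentumIndex(df, period=8, momentum_period=4)
#     signal = indicator.getTiSignal()   # one of the TRADE_SIGNALS values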
| [
"[email protected]"
] | |
f4b5f06d621a40f412a9d3abd8a0a2c8f2e1248b | 26dcf8e0457156a8bde936d56a59e1099893f8c6 | /tests/test_hmm.py | e97feff68370733dbcc69ffcce59a30cb789f25b | [
"MIT"
] | permissive | SilenceWinter/MicroTokenizer | fc4212fb9a324e93e707edbe130b518bd782d07a | 0b617f4b107743f6c7c473a9fac9408d21c56931 | refs/heads/master | 2020-03-29T04:31:23.050836 | 2018-09-18T16:40:28 | 2018-09-18T16:40:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `MicroTokenizer` package."""
import pytest
from MicroTokenizer.hmm import HMMTokenizer
def test_persist(tmpdir):
temp_path = tmpdir.mkdir("hmm")
temp_path_str = str(temp_path)
tokenizer = HMMTokenizer()
tokenizer.train_one_line(["我", "是", "中国人"])
tokenizer.train_one_line(["你", "打", "人"])
tokenizer.do_train()
tokenizer.persist_to_dir(temp_path_str)
assert len(temp_path.listdir()) == 3
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_segment(input_text):
tokenizer = HMMTokenizer()
tokenizer.load_model()
result = tokenizer.segment(input_text)
pytest.helpers.assert_token_equals(result, input_text)
| [
"[email protected]"
] | |
fc53892213fa66aece7218c6e1a0dc1a2b68968c | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_A_CI_DEP_CONTRIBUTION.py | e899aff59721d52b99cbb6e4dbf84908af6f822b | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,376 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_CI_DEP_CONTRIBUTION').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------------------------------------------------#
V_YEAR_MONTH = etl_date[0:4]+"-" + etl_date[4:6]
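# e.g. etl_date = '20160331' gives V_YEAR_MONTH = '2016-03'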
OCRM_F_CI_CON_PARM = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CON_PARM/*')
OCRM_F_CI_CON_PARM.registerTempTable("OCRM_F_CI_CON_PARM")
ACRM_F_DP_SAVE_INFO = sqlContext.read.parquet(hdfs+'/ACRM_F_DP_SAVE_INFO/*')
ACRM_F_DP_SAVE_INFO.registerTempTable("ACRM_F_DP_SAVE_INFO")
# Task [21] 001-01::
V_STEP = V_STEP + 1
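# The CONTRIBUTION columns below compute: average deposit balance *
# (internal funds-transfer price - customer deposit rate) / 12 - running cost,
# floored at zero.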
sql = """
SELECT CUST_ID as CUST_ID,
CUST_NAME as CUST_NAME,
ODS_ACCT_NO as ACCT_NO,
AA.ORG_ID as ORG_ID,
CYNO as CURR,
COALESCE(MONTH_RMB,0) as MONTH_AVG,
COALESCE(MVAL_RMB,0) as YEAR_AVG,
PRODUCT_ID as CONT_SUB_ID ,
cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)) as MONTH_RATE ,
cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) as INNER_PRICE,
RUN_COST as RUN_COST ,
CASE WHEN (COALESCE(YEAR_AVG,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6))) - COALESCE(RUN_COST,0))>0 THEN (COALESCE(YEAR_AVG,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6))) - COALESCE(RUN_COST,0)) ELSE 0 END as CONTRIBUTION,
CASE WHEN (COALESCE(MVAL_RMB,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)))- COALESCE(RUN_COST,0))>0 THEN (COALESCE(MVAL_RMB,0) * (cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)))- COALESCE(RUN_COST,0)) ELSE 0 END as CONTRIBUTION_RMB,
V_YEAR_MONTH as YEAR_MONTH,
V_DT as ODS_DATE,
COALESCE(BAL_RMB,0) as BAL_RMB,
AA.FR_ID as FR_ID,
'' as FR_NAME,
CUST_TYP as CUST_TYP
FROM
(SELECT A.CUST_ID,
A.CUST_TYP,
A.CUST_NAME,
A.ODS_ACCT_NO,
A.ORG_ID,
A.CYNO,
A.BAL_RMB,
A.MONTH_RMB,
A.MVAL_RMB,
A.MONTH_AVG,
A.YEAR_AVG,
A.PRODUCT_ID,
A.TD_IR_TP,
A.AGREENMENT_RATE,
(CASE WHEN A.ACCONT_TYPE = 'H' THEN 'H001'
WHEN A.PRODUCT_ID = '999TD000100' THEN 'D001'
WHEN A.PRODUCT_ID = '999TD110600' OR A.PRODUCT_ID = '999TD000600' THEN 'D002'
WHEN A.PRODUCT_ID = '999TD110700' OR A.PRODUCT_ID = '999TD000700' THEN 'D003'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '3' THEN 'D004'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '6' THEN 'D005'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '9' THEN 'D006'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '12' THEN 'D007'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '24' THEN 'D008'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '36' THEN 'D009'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '60' THEN 'D010'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT > '60' THEN 'D011' END
) AS SUB_ID,
A.FR_ID
FROM ACRM_F_DP_SAVE_INFO A
WHERE A.CUST_ID <> 'X9999999999999999999'
--AND A.FR_ID = V_FR_ID
) AA
LEFT JOIN OCRM_F_CI_CON_PARM B ON AA.SUB_ID = B.SUB_ID AND AA.FR_ID = B.ORG_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
sql = re.sub(r"\bV_YEAR_MONTH\b", "'"+V_YEAR_MONTH+"'", sql)
ACRM_F_CI_DEP_CONTRIBUTION = sqlContext.sql(sql)
ACRM_F_CI_DEP_CONTRIBUTION.registerTempTable("ACRM_F_CI_DEP_CONTRIBUTION")
dfn="ACRM_F_CI_DEP_CONTRIBUTION/"+V_DT+".parquet"
ACRM_F_CI_DEP_CONTRIBUTION.cache()
nrows = ACRM_F_CI_DEP_CONTRIBUTION.count()
ACRM_F_CI_DEP_CONTRIBUTION.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_F_CI_DEP_CONTRIBUTION.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_F_CI_DEP_CONTRIBUTION/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_F_CI_DEP_CONTRIBUTION lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| [
"[email protected]"
] | |
ee11e3de057d99b1e8ccfa4e205cb98f0315e99a | da3d949ee5b40981934a91852cffdad1843f497b | /pushserver/urls.py | 0b12094409d3abeb1f8bfb9c9ae2a47e09d35c2a | [
"MIT"
] | permissive | epikjjh/Stay_Hungry_Push-server | 597ecee836c4e42d55974640db6e62558226f295 | d4a3e3fcf650c078b5d6ea7cc1277ebd5f75141d | refs/heads/master | 2021-01-12T05:27:19.599515 | 2017-01-04T07:49:08 | 2017-01-04T07:49:08 | 77,929,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | """pushserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'', include('push.urls')),
]
| [
"[email protected]"
] | |
efce074729bc329b7d3d2b62f933c18cd1893b4b | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /executale_binaries/register-variants/cmovbeq_r64_r64.gen.vex.py | aac0087e2d26b58209d268bec75c7d8b5c9eaab4 | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import angr
proj = angr.Project('cmovbeq_r64_r64.exe')
print(proj.arch)
print(proj.entry)
print(proj.filename)
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"[email protected]"
] | |
76d09e968cf645ce2067c816a6f63d03efb99692 | 4920b6c12dc2427036077d38ed8fa513130418a8 | /bipad_api/test/test_inline_response20059.py | 7f7f5f5d6b53b4e708645271de5f31afefb61347 | [] | no_license | laxmitimalsina/covid_dashboard | d51a43d3ba2ad8a9754f723383f6395c1dccdda5 | ccba8a3f5dd6dbd2b28e2479bda6e581eb23805f | refs/heads/master | 2023-05-29T15:07:32.524640 | 2021-05-03T11:15:43 | 2021-05-03T11:15:43 | 273,698,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | # coding: utf-8
"""
BIPAD API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import bipad_api
from bipad_api.models.inline_response20059 import InlineResponse20059 # noqa: E501
from bipad_api.rest import ApiException
class TestInlineResponse20059(unittest.TestCase):
"""InlineResponse20059 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20059(self):
"""Test InlineResponse20059"""
# FIXME: construct object with mandatory attributes with example values
# model = bipad_api.models.inline_response20059.InlineResponse20059() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8c0e1b2b4be9161d9d7d0227d2503c37d44d22eb | 0c7d7b24a8d453fc1a9c2f27a08f3c4cfa46ec3b | /recipes/sota/2019/lm_analysis/shuffle_segments.py | f7e4b68d3a3f817fbc7488b29cbead6bad1659ba | [
"BSD-3-Clause",
"MIT"
] | permissive | piEYj/wav2letter | e6ae462eeeb6a4374f8280c8fa15d8f194c60215 | 49fbb1392e69b5194c077df9847505ec995b4e3d | refs/heads/main | 2023-09-06T01:08:48.837731 | 2021-11-12T14:13:41 | 2021-11-12T14:15:15 | 444,344,109 | 1 | 0 | NOASSERTION | 2022-01-04T08:37:19 | 2022-01-04T08:37:19 | null | UTF-8 | Python | false | false | 3,394 | py | import os
import random
import sys
from multiprocessing import Pool
import sox
align_file = sys.argv[1]
output_dir = sys.argv[2]
lines = []
with open(align_file) as fin:
lines = fin.readlines()
N_THREADS = 40
MIN_SIL_LENGTH = 0.13
TOLERANCE = 0.04
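# Each alignment token carries a begin time, a duration and a word ('$' marks
# silence). process() cuts every utterance at silences longer than
# MIN_SIL_LENGTH seconds, shuffles the resulting word chunks, and
# re-concatenates the audio with sox.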
def process(parameters):
tid, n_samples = parameters
output_list = output_dir + "dev-other.{}.lst".format(tid)
with open(output_list, "w") as fout:
for i in range(tid * n_samples, min(len(lines), n_samples * (tid + 1))):
line = lines[i]
sp = line.split("\t")
filename = sp[0]
# print(filename)
# duration = sox.file_info.duration(filename)
alignments = sp[1].strip().split("\\n")
# Parse the alignments
chunk_starts = [0]
chunk_ends = []
words = []
cur_words = []
cur_end = 0
for i, alignment in enumerate(alignments):
sp = alignment.split()
begin = float(sp[2])
length = float(sp[3])
word = sp[4]
cur_end = begin + length
if i == 0:
continue
if word == "$":
if length > MIN_SIL_LENGTH:
chunk_ends.append(cur_end - TOLERANCE)
chunk_starts.append(cur_end - TOLERANCE)
words.append(" ".join(cur_words))
cur_words = []
continue
cur_words.append(word)
if len(cur_words) > 0:
chunk_ends.append(cur_end)
words.append(" ".join(cur_words))
else:
chunk_starts.pop()
# print(duration)
# print(chunk_starts)
# print(chunk_ends)
# print(words)
# Split the audios
order = list(range(len(chunk_starts)))
random.shuffle(order)
new_target = " ".join([words[i] for i in order])
new_audio_path = output_dir + filename.split("/")[-1]
fout.write(
"{}\t{}\t{}\t{}\n".format(
new_audio_path, new_audio_path, chunk_ends[-1] * 1000, new_target
)
)
if len(chunk_starts) == 1:
os.system("cp {} {}".format(filename, output_dir))
continue
paths = []
for i in order:
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16, rate=16000
)
sox_tfm.trim(chunk_starts[i], chunk_ends[i])
new_path = "/tmp/{}_{}.flac".format(tid, i)
sox_tfm.build(filename, new_path)
paths.append(new_path)
# Combine them
sox_comb = sox.Combiner()
sox_comb.build(list(paths), new_audio_path, "concatenate")
if __name__ == "__main__":
n_sample_per_thread = len(lines) // N_THREADS + 1
print(
"Spreading {} threads with {} samples in each".format(
N_THREADS, n_sample_per_thread
)
)
pool = Pool(N_THREADS)
pool.map(process, zip(list(range(N_THREADS)), [n_sample_per_thread] * N_THREADS))
pool.close()
pool.join()
| [
"[email protected]"
] | |
49f63466b6602a419c57f27b6e2d27a713646b02 | 56ade096db1fe376ee43d38c96b43651ee07f217 | /033. Search in Rotated Sorted Array/Python/Solution.py | c1099f90447f301ab75df1e19f2814222c1d9484 | [] | no_license | xiaole0310/leetcode | c08649c3f9a9b04579635ee7e768fe3378c04900 | 7a501cf84cfa46b677d9c9fced18deacb61de0e8 | refs/heads/master | 2020-03-17T05:46:41.102580 | 2018-04-20T13:05:32 | 2018-04-20T13:05:32 | 133,328,416 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
length = len(nums)
low = 0
high = length - 1
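        # Phase 1: binary search for the index of the smallest element (the rotation pivot).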
while low < high:
mid = (low + high) // 2
if nums[mid] > nums[high]:
low = mid + 1
else:
high = mid
rotate = low
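        # Phase 2: ordinary binary search, mapping indices through the pivot offset.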
low = 0
high = length - 1
while low <= high:
mid = (low + high) // 2
real_mid = (mid + rotate) % length
if nums[real_mid] == target:
return real_mid
if nums[real_mid] < target:
low = mid + 1
else:
high = mid - 1
return -1
| [
"[email protected]"
] | |
f92e9cd13f5028cb073ece399a73ac451bd30914 | fc5fa8501e8a62291a48c82611e1b74b961ca561 | /robust_loss/adaptive_test.py | 415dcbad61aff7e3520d0fcc1882db89e5a4445b | [
"Apache-2.0"
] | permissive | hitesh-hk/google-research | fa3d3e31cce995fa6da89322dab4993bf1c1ead8 | ddc22300c4cb3223654c9a981f892dc0f6286e35 | refs/heads/master | 2021-02-17T18:57:31.267570 | 2020-01-17T14:49:25 | 2020-01-17T14:54:27 | 245,119,290 | 1 | 1 | Apache-2.0 | 2020-03-05T09:24:01 | 2020-03-05T09:24:00 | null | UTF-8 | Python | false | false | 17,982 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adaptive.py."""
from absl.testing import parameterized
import numpy as np
import scipy.stats
import tensorflow.compat.v2 as tf
from robust_loss import adaptive
from robust_loss import util
from robust_loss import wavelet
tf.enable_v2_behavior()
def generate_pixel_toy_image_data(image_width, num_samples, _):
"""Generates pixel data for _test_fitting_toy_image_data_is_correct().
Constructs a "mean" image in RGB pixel space (parametrized by `image_width`)
and draws `num_samples` samples from a normal distribution using that mean,
and returns those samples and their empirical mean as reference.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
_: Dummy argument so that this function's interface matches
generate_wavelet_toy_image_data()
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` of size
(`image_width`, `image_width`, 3).
color_space = 'RGB'
representation = 'PIXEL'
"""
color_space = 'RGB'
representation = 'PIXEL'
mu = np.random.uniform(size=(image_width, image_width, 3))
samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
reference = np.mean(samples, 0)
return samples, reference, color_space, representation
def generate_wavelet_toy_image_data(image_width, num_samples,
wavelet_num_levels):
"""Generates wavelet data for testFittingImageDataIsCorrect().
Constructs a "mean" image in the YUV wavelet domain (parametrized by
`image_width`, and `wavelet_num_levels`) and draws `num_samples` samples
from a normal distribution using that mean, and returns RGB images
corresponding to those samples and to the mean (computed in the
specified latent space) of those samples.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
wavelet_num_levels: The number of levels in the wavelet decompositions of
the generated images.
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` (computed in YUV Wavelet space
but returned as an RGB image) of size (`image_width`, `image_width`, 3).
color_space = 'YUV'
representation = 'CDF9/7'
"""
color_space = 'YUV'
representation = 'CDF9/7'
samples = []
reference = []
for level in range(wavelet_num_levels):
samples.append([])
reference.append([])
w = image_width // 2**(level + 1)
scaling = 2**level
for _ in range(3):
# Construct the ground-truth pixel band mean.
mu = scaling * np.random.uniform(size=(3, w, w))
# Draw samples from the ground-truth mean.
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
# Take the empirical mean of the samples as a reference.
band_reference = np.mean(band_samples, 0)
samples[-1].append(np.reshape(band_samples, [-1, w, w]))
reference[-1].append(band_reference)
# Handle the residual band.
mu = scaling * np.random.uniform(size=(3, w, w))
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
band_reference = np.mean(band_samples, 0)
samples.append(np.reshape(band_samples, [-1, w, w]))
reference.append(band_reference)
# Collapse and reshape wavelets to be ({_,} width, height, 3).
samples = wavelet.collapse(samples, representation)
reference = wavelet.collapse(reference, representation)
samples = tf.transpose(
tf.reshape(samples, [num_samples, 3, image_width, image_width]),
perm=[0, 2, 3, 1])
reference = tf.transpose(reference, perm=[1, 2, 0])
# Convert into RGB space.
samples = util.syuv_to_rgb(samples).numpy()
reference = util.syuv_to_rgb(reference).numpy()
return samples, reference, color_space, representation
def sample_cauchy_ppf(num_samples):
"""Draws ``num_samples'' samples from a Cauchy distribution.
Because actual sampling is expensive and requires many samples to converge,
here we sample by drawing `num_samples` evenly-spaced values in [0, 1]
and then interpolate into the inverse CDF (aka PPF) of a Cauchy
distribution. This produces "samples" where maximum-likelihood estimation
likely recovers the true distribution even if `num_samples` is small.
Args:
num_samples: The number of samples to draw.
Returns:
A numpy array containing `num_samples` evenly-spaced "samples" from a
zero-mean Cauchy distribution whose scale matches our distribution/loss
when our scale = 1.
"""
spacing = 1. / num_samples
p = np.arange(0., 1., spacing) + spacing / 2.
return scipy.stats.cauchy(0., np.sqrt(2.)).ppf(p)
def sample_normal_ppf(num_samples):
"""Draws ``num_samples'' samples from a Normal distribution.
Because actual sampling is expensive and requires many samples to converge,
here we sample by drawing `num_samples` evenly-spaced values in [0, 1]
and then interpolate into the inverse CDF (aka PPF) of a Normal
distribution. This produces "samples" where maximum-likelihood estimation
likely recovers the true distribution even if `num_samples` is small.
Args:
num_samples: The number of samples to draw.
Returns:
A numpy array containing `num_samples` evenly-spaced "samples" from a
zero-mean unit-scale Normal distribution.
"""
spacing = 1. / num_samples
p = np.arange(0., 1., spacing) + spacing / 2.
return scipy.stats.norm(0., 1.).ppf(p)
def sample_nd_mixed_data(n, m, float_dtype):
"""`n` Samples from `m` scaled+shifted Cauchy and Normal distributions."""
samples0 = sample_cauchy_ppf(n)
samples2 = sample_normal_ppf(n)
mu = np.random.normal(size=m)
alpha = (np.random.uniform(size=m) > 0.5) * 2
scale = np.exp(np.clip(np.random.normal(size=m), -3., 3.))
samples = (
np.tile(samples0[:, np.newaxis], [1, m]) *
(alpha[np.newaxis, :] == 0.) + np.tile(samples2[:, np.newaxis], [1, m]) *
(alpha[np.newaxis, :] == 2.)) * scale[np.newaxis, :] + mu[np.newaxis, :]
return [float_dtype(x) for x in [samples, mu, alpha, scale]]
class AdaptiveTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(AdaptiveTest, self).setUp()
np.random.seed(0)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
"""Tests that `alpha` and `scale` are initialized as expected."""
for i in range(8):
# Generate random ranges for alpha and scale.
alpha_lo = float_dtype(np.random.uniform())
alpha_hi = float_dtype(np.random.uniform() + 1.)
# Half of the time pick a random initialization for alpha, the other half
# use the default value.
if i % 2 == 0:
alpha_init = float_dtype(alpha_lo + np.random.uniform() *
(alpha_hi - alpha_lo))
true_alpha_init = alpha_init
else:
alpha_init = None
true_alpha_init = (alpha_lo + alpha_hi) / 2.
scale_init = float_dtype(np.random.uniform() + 0.5)
scale_lo = float_dtype(np.random.uniform() * 0.1)
adaptive_lossfun = adaptive.AdaptiveLossFunction(
10,
float_dtype,
alpha_lo=alpha_lo,
alpha_hi=alpha_hi,
alpha_init=alpha_init,
scale_lo=scale_lo,
scale_init=scale_init)
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
self.assertAllClose(alpha, true_alpha_init * np.ones_like(alpha))
self.assertAllClose(scale, scale_init * np.ones_like(alpha))
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
"""Tests that fixed alphas and scales do not change during optimization)."""
for _ in range(8):
alpha_lo = float_dtype(np.random.uniform() * 2.)
alpha_hi = alpha_lo
scale_init = float_dtype(np.random.uniform() + 0.5)
scale_lo = scale_init
samples = float_dtype(np.random.uniform(size=(10, 10)))
# We must construct some variable for TF to attempt to optimize.
mu = tf.Variable(
tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
adaptive_lossfun = adaptive.AdaptiveLossFunction(
mu.shape[0],
float_dtype,
alpha_lo=alpha_lo,
alpha_hi=alpha_hi,
scale_lo=scale_lo,
scale_init=scale_init)
trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]
optimizer = tf.keras.optimizers.SGD(learning_rate=1000)
# pylint: disable=cell-var-from-loop
optimizer.minimize(
lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
alpha_init = (alpha_lo + alpha_hi) / 2.
self.assertAllClose(alpha, alpha_init * np.ones_like(alpha))
self.assertAllClose(scale, scale_init * np.ones_like(alpha))
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFittingToyNdMixedDataIsCorrect(self, float_dtype):
"""Tests that minimizing the adaptive loss recovers the true model.
Here we generate a 2D array of samples drawn from a mix of scaled and
shifted Cauchy and Normal distributions. We then minimize our loss with
respect to the mean, scale, and shape of each distribution, and check that
after minimization the shape parameter is near-zero for the Cauchy data and
near 2 for the Normal data, and that the estimated means and scales are
accurate.
Args:
float_dtype: The type (np.float32 or np.float64) of data to test.
"""
samples, mu_true, alpha_true, scale_true = sample_nd_mixed_data(
100, 8, float_dtype)
mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
adaptive_lossfun = adaptive.AdaptiveLossFunction(mu.shape[0], float_dtype)
trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.1
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
for a, b in [(alpha, alpha_true), (scale, scale_true), (mu, mu_true)]:
self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFittingToyNdMixedDataIsCorrectStudentsT(self, float_dtype):
"""Tests that minimizing the Student's T loss recovers the true model.
Here we generate a 2D array of samples drawn from a mix of scaled and
shifted Cauchy and Normal distributions. We then minimize our loss with
respect to the mean, scale, and shape of each distribution, and check that
after minimization the log-df parameter is near-zero for the Cauchy data and
very large for the Normal data, and that the estimated means and scales are
accurate.
Args:
float_dtype: The type (np.float32 or np.float64) of data to test.
"""
samples, mu_true, alpha_true, scale_true = sample_nd_mixed_data(
100, 8, float_dtype)
mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
students_lossfun = adaptive.StudentsTLossFunction(mu.shape[0], float_dtype)
trainable_variables = list(students_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.1
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(students_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
df = students_lossfun.df()[0, :].numpy()
scale = students_lossfun.scale()[0, :].numpy()
for ldf, a_true in zip(np.log(df), alpha_true):
if a_true == 0:
self.assertAllClose(ldf, 0., rtol=0.1, atol=0.1)
elif a_true == 2:
self.assertAllGreater(ldf, 4)
scale /= np.sqrt(2. - (alpha_true / 2.))
for a, b in [(scale, scale_true), (mu, mu_true)]:
self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testLossfunPreservesDtype(self, float_dtype):
"""Checks the loss's outputs have the same precisions as its input."""
num_dims = 8
samples, _, _, _ = sample_nd_mixed_data(100, num_dims, float_dtype)
lossfun = adaptive.AdaptiveLossFunction(num_dims, float_dtype)
loss = lossfun(samples)
self.assertDTypeEqual(loss, float_dtype)
self.assertDTypeEqual(lossfun.alpha(), float_dtype)
self.assertDTypeEqual(lossfun.scale(), float_dtype)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testImageLossfunPreservesDtype(self, float_dtype):
"""Tests that the image lossfun's outputs precisions match its input."""
x = float_dtype(np.random.uniform(size=(10, 64, 64, 3)))
lossfun = adaptive.AdaptiveImageLossFunction(x.shape[1:], float_dtype)
loss = lossfun(x).numpy()
alpha = lossfun.alpha().numpy()
scale = lossfun.scale().numpy()
self.assertDTypeEqual(loss, float_dtype)
self.assertDTypeEqual(alpha, float_dtype)
self.assertDTypeEqual(scale, float_dtype)
@parameterized.named_parameters(('Wavelet', generate_wavelet_toy_image_data),
('Pixel', generate_pixel_toy_image_data))
def testFittingImageDataIsCorrect(self, image_data_callback):
"""Tests that minimizing the adaptive image loss recovers the true model.
Here we generate a stack of color images drawn from a normal distribution,
and then minimize image_lossfun() with respect to the mean and scale of each
distribution, and check that after minimization the estimated means are
close to the true means.
Args:
image_data_callback: The function used to generate the training data and
parameters used during optimization.
"""
# Generate toy data.
image_width = 4
num_samples = 10
wavelet_num_levels = 2 # Ignored by generate_pixel_toy_image_data().
(samples, reference, color_space,
representation) = image_data_callback(image_width, num_samples,
wavelet_num_levels)
# Construct the loss.
mu = tf.Variable(tf.zeros(tf.shape(reference), samples.dtype))
image_lossfun = adaptive.AdaptiveImageLossFunction(
[image_width, image_width, 3],
samples.dtype,
color_space=color_space,
representation=representation,
wavelet_num_levels=wavelet_num_levels,
alpha_lo=2,
alpha_hi=2)
trainable_variables = list(image_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.01
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(image_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
self.assertAllClose(mu, reference, rtol=0.01, atol=0.01)
def testLossfunChecksShape(self):
"""Tests that the image lossfun's checks input shapes."""
x1 = np.ones((10, 24), np.float32)
x2 = np.ones((10, 16), np.float32)
lossfun = adaptive.AdaptiveLossFunction(x1.shape[1], np.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
lossfun(x2)
def testImageLossfunChecksShape(self):
"""Tests that the image lossfun's checks input shapes."""
x1 = np.ones((10, 16, 24, 3), np.float32)
x2 = np.ones((10, 16, 16, 3), np.float32)
lossfun = adaptive.AdaptiveImageLossFunction(x1.shape[1:], np.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
lossfun(x2)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
91dcd5408203ac48530134b4c58374aad4842f14 | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /oslo_messaging/_drivers/zmq_driver/poller/green_poller.py | ab8f313f9fb8e6fd25990f75c2785310d39b15a9 | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 2,501 | py | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from oslo_messaging._drivers.zmq_driver import zmq_poller
class GreenPoller(zmq_poller.ZmqPoller):
def __init__(self):
self.incoming_queue = eventlet.queue.LightQueue()
self.green_pool = eventlet.GreenPool()
self.thread_by_socket = {}
def register(self, socket, recv_method=None):
if socket not in self.thread_by_socket:
self.thread_by_socket[socket] = self.green_pool.spawn(
self._socket_receive, socket, recv_method)
def unregister(self, socket):
thread = self.thread_by_socket.pop(socket, None)
if thread:
thread.kill()
def _socket_receive(self, socket, recv_method=None):
while True:
if recv_method:
incoming = recv_method(socket)
else:
incoming = socket.recv_multipart()
self.incoming_queue.put((incoming, socket))
eventlet.sleep()
def poll(self, timeout=None):
try:
return self.incoming_queue.get(timeout=timeout)
except eventlet.queue.Empty:
return None, None
def close(self):
for thread in self.thread_by_socket.values():
thread.kill()
self.thread_by_socket = {}
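
# Rough usage sketch (hypothetical sockets; ZMQ setup omitted). The poller
# fans every registered socket into one LightQueue via a green thread each:
#
#   poller = GreenPoller()
#   poller.register(reply_socket)                # spawns a recv_multipart() loop
#   incoming, socket = poller.poll(timeout=1.0)  # (None, None) on timeout
#   poller.close()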
class GreenExecutor(zmq_poller.Executor):
def __init__(self, method):
self._method = method
super(GreenExecutor, self).__init__(None)
self._done = threading.Event()
def _loop(self):
while not self._done.is_set():
self._method()
eventlet.sleep()
def execute(self):
self.thread = eventlet.spawn(self._loop)
def wait(self):
if self.thread is not None:
self.thread.wait()
def stop(self):
if self.thread is not None:
self.thread.kill()
def done(self):
self._done.set()
| [
"[email protected]"
] | |
5b868fc577cd8428158cebc51ec4b35b5f9e7d80 | 9970ab0ad1e805f83cc4463d008ee4654cfb668e | /tags/2.01/AStyleTest/file-py/file-extract.py | 342109f0b3e5c350d23ed0312153fc5378c9d14d | [] | no_license | svn2github/Artistic-Style | a464a7f6cc6bd11aec2a3452a9736e638630ecd8 | 6bd4db522937a182e63db96dbc095f2baae8a17a | refs/heads/master | 2020-12-08T06:01:08.497124 | 2018-04-05T22:36:21 | 2018-04-05T22:36:21 | 67,278,407 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #! /usr/bin/python
# Calls libextract to extract files in the TestArchives directory.
# Change the global variables to the desired values.
import libastyle #local directory
import libextract #local directory
import os
import sys
import time
# global variables ------------------------------------------------------------
# select one of the following from libastyle
# CODEBLOCKS
# CODELITE
# JEDIT
# KDEVELOP
# SCITE
# SHARPDEVELOP
# TESTPROJECT
project = libastyle.TESTPROJECT
# -----------------------------------------------------------------------------
def extract_project():
"""Call the library procedure to extract the requested project.
"""
starttime = time.time()
libextract.extract_project(project)
stoptime = time.time()
print_run_time(starttime, stoptime)
# -----------------------------------------------------------------------------
def print_run_time(starttime, stoptime):
"""Print run time for the test.
"""
runtime = int(stoptime - starttime + 0.5)
    mins = runtime / 60
    secs = runtime % 60
    if mins == 0:
        print "{0} seconds".format(secs)
    else:
        print "{0} min {1} seconds".format(mins, secs)
# -----------------------------------------------------------------------------
# make the module executable
if __name__ == "__main__":
libastyle.set_text_color()
extract_project()
libastyle.system_exit()
# -----------------------------------------------------------------------------
| [
"jimp03@1fe3c263-5997-42ff-936f-87a7378ef0cd"
] | jimp03@1fe3c263-5997-42ff-936f-87a7378ef0cd |
4304b728a6cae825d940ba9d9e818606ca8eb1b0 | aaf045878465b2b26ff7ea12eb72453446cbd428 | /flaskRESTful/app.py | e19033a24a3162b6c6ec4ffe77a40785473fac05 | [] | no_license | mishrakeshav/REST-APIs-with-Flask-and-Python | c35a0e61c75763459227079c524eaf1dceb078f3 | 2b9dfdcb8da8d487713cd85cee9ee0aa3e65d974 | refs/heads/master | 2022-04-24T23:06:52.675441 | 2020-04-30T07:57:54 | 2020-04-30T07:57:54 | 257,958,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | from flask import Flask,request
from flask_restful import Resource , Api, reqparse
from flask_jwt import JWT ,jwt_required
from security import authenticate,identity
app = Flask(__name__)
app.secret_key = "keshav"
api = Api(app)
jwt = JWT(app, authenticate, identity) # /auth
items = []
class Item(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
'price',
type=float,
required = True,
help = "This field cannot be left blank"
)
@jwt_required()
def get(self,name):
item = next(filter(lambda x : x['name'] == name , items),None)
return {'item':item},200 if item else 404
def post(self,name):
if next(filter(lambda x : x['name'] == name,items),None):
return {"message":"An item with name {} already exists.".format(name)},400
data = Item.parser.parse_args()
item = {'name':name, 'price':data['price']}
items.append(item)
return item,201
def delete(self,name):
global items
items = list(filter(lambda x : x['name']!= name, items))
return {"message" : "item deleted"}
def put(self,name):
data = Item.parser.parse_args()
item = next(filter(lambda x : x['name'] == name , items), None)
if item is None:
item = {'name':name,'price': data['price']}
items.append(item)
else:
item.update(data)
return item
class ItemsList(Resource):
def get(self):
return {'items':items}
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemsList, '/items')
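
# Example requests against this API (a sketch; assumes the default Flask dev
# server and credentials known to security.authenticate):
#
#   curl -X POST http://127.0.0.1:5000/auth -H "Content-Type: application/json" \
#        -d '{"username": "...", "password": "..."}'   # returns access_token
#   curl http://127.0.0.1:5000/item/chair -H "Authorization: JWT <access_token>"
#   curl -X POST http://127.0.0.1:5000/item/chair -d "price=15.99"
#   curl http://127.0.0.1:5000/items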
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
1d2033a62e150fe1e23310385b00a8eba8c7586f | 2a7acc39c637824dd6974fa154664ef9eca4383e | /app/utils/validators.py | 3734657aa887f9ffb80c401926e70d99a84f7645 | [] | no_license | pigmonchu/backfiles | e2a9415902f780708928065770486c6a8ee34e15 | d0e8248d2949710f56d62777fb2a57727de39302 | refs/heads/master | 2022-10-04T12:10:50.078244 | 2019-08-26T22:30:20 | 2019-08-26T22:30:20 | 204,572,276 | 0 | 0 | null | 2022-09-16T18:08:43 | 2019-08-26T22:27:30 | Python | UTF-8 | Python | false | false | 1,117 | py | import importlib
from werkzeug.datastructures import MultiDict
import inspect
'''
Validation and error-message handling
'''
def validate_form(blueprint_import_path, class_name, data):
m_data = MultiDict(data)
module = importlib.import_module('{}.forms'.format(blueprint_import_path))
form_class = getattr(module, class_name)
form = form_class(m_data)
resp = form.validate()
return resp, form
class ResponseJSON():
__code__ = None
def __init__(self, data, status=False):
self.status = status
self.data = data
@property
def status(self):
return 'success' if self.__status__ else 'fail'
@status.setter
def status(self, value):
self.__status__ = value
def __repr__(self):
return 'Response <{}>:{}'.format(self.status, self.data)
'''
Serialize objects to JSON (public attributes only)
'''
def public_attr_to_dict(obj):
obj_dict = {}
for key, value in inspect.getmembers(obj):
if key[:2] != '__' and not inspect.ismethod(getattr(obj, key)):
obj_dict[key] = value
return obj_dict
| [
"[email protected]"
] | |
3b8c7c687f2e9d9835d220c82d5e677e59f7cea6 | dcbe50ee6cb4dc108e71df95479d9fd6e868e4e6 | /Torch/6_nn.py | 990f6dd84670a000b7a0c511a2bad9919fc6d998 | [] | no_license | krishnakalyan3/DeepLearning-Experiments | 1854821bb630a0ce2f4dea2423350c1b303d954b | 7b7d9e9570e787b162c68e2734aa6b0c6567f257 | refs/heads/master | 2020-07-03T16:46:09.491095 | 2017-07-20T03:18:44 | 2017-07-20T03:18:44 | 74,243,657 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | #!/usr/bin/env python3
# https://github.com/PythonWorkshop/Intro-to-TensorFlow-and-PyTorch/blob/master/PyTorch%20Tutorial.ipynb
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.activation import Softmax
import torch.optim as optim
df = pd.read_csv('Data/winequality-red-cleaned.csv', sep=',')
y = pd.DataFrame([0. if item == 'Good' else 1. for item in df['category']])
X = df.drop(['quality', 'category'], axis=1)
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
learning_rate = 0.005
| [
"[email protected]"
] | |
1aa7ae250a923bee4235a9567487ed406c8f8edd | e5873fabe08bac8c298026871bc3a562e330151e | /openrasp_iast/plugin/scanner/directory_basic.py | 13fdb506795d49166c2fb0b801e70926a8e1ca03 | [
"Apache-2.0"
] | permissive | 1u0Hun/openrasp-iast | 7a93d33301fdeae8021f4742870068f2d09f62bb | 8b98e4ffda52c3e04bfaa682dde219e78c87c21a | refs/heads/master | 2020-11-30T13:57:55.088148 | 2019-12-20T06:42:08 | 2019-12-20T06:42:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Copyright 2017-2019 Baidu Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from core.components.plugin import scan_plugin_base
class ScanPlugin(scan_plugin_base.ScanPluginBase):
plugin_info = {
"name": "directory_basic",
"show_name": "目录遍历检测插件",
"description": "基础目录遍历漏洞检测插件"
}
def mutant(self, rasp_result_ins):
"""
测试向量生成
"""
if not rasp_result_ins.has_hook_type("directory"):
return
linux_payload_list = [
("../../../../../../../../../../../../../../../../../../../../etc", "/etc"),
("../../../../etc", "/etc"),
("/etc", "/etc")
]
windows_payload_list = [
("..\\..\\..\\..\\..\\..\\..\\..\\..\\windows", ":\\windows"),
("c:\\windows", "c:\\windows")
]
mac_payload_list = [
("../../../../../../../../../../../../../../../../../../../../private/etc", "/private/etc"),
("../../../private/etc", "/private/etc"),
("/private/etc", "/private/etc")
]
server_os = rasp_result_ins.get_server_info()["os"]
if server_os == "Windows":
payload_list = windows_payload_list
elif server_os == "Mac":
payload_list = mac_payload_list
else:
payload_list = linux_payload_list
        # Collect all parameters to be tested
request_data_ins = self.new_request_data(rasp_result_ins)
test_params = self.mutant_helper.get_params_list(
request_data_ins, ["get", "post", "json", "headers", "cookies"])
for param in test_params:
if not request_data_ins.is_param_concat_in_hook("directory", param["value"].rstrip("/\\")):
continue
payload_seq = self.gen_payload_seq()
for payload in payload_list:
request_data_ins = self.new_request_data(rasp_result_ins, payload_seq, payload[1])
request_data_ins.set_param(param["type"], param["name"], payload[0])
hook_filter = [{
"type": "dir",
"filter": {
"code": payload[1]
}
}]
request_data_ins.set_filter(hook_filter)
request_data_list = [request_data_ins]
yield request_data_list
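
    # Illustrative flow (hypothetical request): if a GET parameter such as
    # dir=/var/www is concatenated into a "directory" hook, each payload above
    # is substituted in turn (e.g. dir=../../../../etc on Linux) and the hook
    # filter keeps only "dir" hook events whose code field equals the
    # traversal target (e.g. /etc).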
def check(self, request_data_list):
"""
请求结果检测
"""
request_data_ins = request_data_list[0]
feature = request_data_ins.get_payload_info()["feature"]
rasp_result_ins = request_data_ins.get_rasp_result()
if rasp_result_ins is None:
return None
if self.checker.check_concat_in_hook(rasp_result_ins, "directory", feature):
return "读取的目录可被用户输入控制"
else:
return None
| [
"[email protected]"
] | |
5ab15084fd09e6973269b95c1650ba480596b272 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2700486_0/Python/Venia/prob.py | 86e3993e498a5d987a87e9293bcae14108d3f127 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,633 | py | #from functools import lru_cache
from fractions import Fraction
from copy import copy
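
# Overview comment (an assumption from the geometry below, not in the original
# source): this looks like a Google Code Jam "falling diamonds" style problem.
# sim(n) enumerates every reachable pile configuration after n diamonds land,
# tracking each configuration's probability and splitting it 50/50 whenever a
# diamond can slide to either side.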
def sim(n):
s = {(0, 0)}
for i in range(20):
s.add((2*i+1, -1))
s.add((-2*i+1,-1))
d = [(s, 1)]
for i in range(n-1):
dd = []
for pos, P in d:
i = 0
while (0, i+2) in pos:
i += 2
if (1, i+1) in pos and (-1, i+1) in pos:
cp = copy(pos)
cp.add((0, i+2))
dd.append((cp, P))
elif (1, i+1) in pos and (-1, i+1) not in pos:
cp = copy(pos)
x, y = -1, i+1
while (x-1, y-1) not in pos:
x, y = x-1, y-1
cp.add((x, y))
dd.append((cp, P))
elif (1, i+1) not in pos and (-1, i+1) in pos:
cp = copy(pos)
x, y = 1, i+1
while (x+1, y-1) not in pos:
#print(x, y)
x, y = x+1, y-1
cp.add((x, y))
dd.append((cp, P))
else:
cp1 = copy(pos)
cp2 = copy(pos)
x, y = 1, i+1
while (x+1, y-1) not in pos:
#print(x, y)
x, y = x+1, y-1
cp1.add((x, y))
dd.append((cp1, P*Fraction(1, 2)))
x, y = -1, i+1
while (x-1, y-1) not in pos:
#print(x, y)
x, y = x-1, y-1
cp2.add((x, y))
dd.append((cp2, P*Fraction(1, 2)))
d = dd
return d
ds = [None] + [sim(i) for i in range(1, 21)]
def silly_prob(x, y, n):
d = ds[n]
res = 0
for poss, P in d:
if (x, y) in poss:
res += P
return res
T = int(raw_input())
for i in range(T):
n, x, y = map(int, raw_input().split())
print("Case #{}: {}".format(i+1, float(silly_prob(x, y, n))))
#@lru_cache(maxsize = None)
def prob(x, y, n):
print(x, y, n)
if y == -1 and x%2 == 1: return 1
elif n == 1:
return 1 if (x, y) == (0, 0) else 0
elif x < 0:
return prob(-x, y, n-1)
elif x > 0:
res = prob(x, y, n-1) # it's already there
if y != 0:
            return res + (1-prob(x, y, n-1)) * prob(x+1, y-1, n-1) # right down must be there
# if RD is there, LD and R are also there
return res
else:
# x = 0
#
return prob(x, y, n-1) + (1-prob(x, y,n-1))*prob(x-1, y-1, n-1)*prob(x+1, y-1, n-1)
| [
"[email protected]"
] | |
13f1a79700d0944ac29487331e54040083749301 | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/hamcrest/core/matcher.py | 9694cdc65f3a3d50e5c962cf8660c9ebfdeb01ff | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | from hamcrest.core.description import Description
from typing import Generic, Optional, TypeVar
from .selfdescribing import SelfDescribing
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
T = TypeVar("T")
class Matcher(Generic[T], SelfDescribing):
"""A matcher over acceptable values.
A matcher is able to describe itself to give feedback when it fails.
Matcher implementations should *not* directly implement this protocol.
Instead, *extend* the :py:class:`~hamcrest.core.base_matcher.BaseMatcher`
class, which will ensure that the
:py:class:`~hamcrest.core.matcher.Matcher` API can grow to support new
features and remain compatible with all
:py:class:`~hamcrest.core.matcher.Matcher` implementations.
"""
def matches(self, item: T, mismatch_description: Optional[Description] = None) -> bool:
"""Evaluates the matcher for argument item.
If a mismatch is detected and argument ``mismatch_description`` is
provided, it will generate a description of why the matcher has not
accepted the item.
:param item: The object against which the matcher is evaluated.
:param mismatch_description:
:returns: ``True`` if ``item`` matches, otherwise ``False``.
"""
raise NotImplementedError("matches")
def describe_mismatch(self, item: T, mismatch_description: Description) -> None:
"""Generates a description of why the matcher has not accepted the
item.
The description will be part of a larger description of why a matching
failed, so it should be concise.
This method assumes that ``matches(item)`` is ``False``, but will not
check this.
:param item: The item that the
:py:class:`~hamcrest.core.matcher.Matcher` has rejected.
:param mismatch_description: The description to be built or appended
to.
"""
raise NotImplementedError("describe_mismatch")
def describe_match(self, item: T, match_description: Description) -> None:
"""Generates a description of why the matcher has accepted the item.
The description may be part of a larger description of why a matching
failed, so it should be concise.
This method assumes that ``matches(item)`` is ``True``, but will not
check this.
:param item: The item that the
:py:class:`~hamcrest.core.matcher.Matcher` has accepted.
:param match_description: The description to be built or appended to.
"""
raise NotImplementedError("describe_match")
| [
"[email protected]"
] | |
9271e738798b8de817ca13db1bf838eab731809a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/errantri.py | 2826e0ef09a4463a8b1073331ad6ffbaa22f4d4a | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('AinsWRR.py', 1), ('AinsWRR2.py', 1)] | [
"[email protected]"
] | |
09715a8d5742db87c2c7f8144885b717b9d5a4ab | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/zedshaw-librelist/allPythonContent.py | d8dd0ae1811f773aadcda994d70e4002c4d2289b | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 72,014 | py | __FILENAME__ = admin
from email.utils import parseaddr
from config.settings import relay, SPAM, CONFIRM
import logging
from lamson import view, queue
from lamson.routing import route, stateless, route_like, state_key_generator
from lamson.bounce import bounce_to
from lamson.server import SMTPError
from app.model import mailinglist, bounce, archive
from app.handlers import bounce
INVALID_LISTS = ["noreply", "unbounce"]
@state_key_generator
def module_and_to(module_name, message):
name, address = parseaddr(message['to'])
if '-' in address:
list_name = address.split('-')[0]
else:
list_name = address.split('@')[0]
return module_name + ':' + list_name
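
# Illustrative examples: for To: "mylist-subscribe@librelist.com" the key is
# "app.handlers.admin:mylist"; plain "mylist@librelist.com" yields the same
# key, so every command address of a list shares one state machine per sender.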
@route("(address)@(host)", address='.+')
def SPAMMING(message, **options):
spam = queue.Queue(SPAM['queue'])
spam.push(message)
return SPAMMING
@route('(bad_list)@(host)', bad_list='.+')
@route('(list_name)@(host)')
@route('(list_name)-subscribe@(host)')
@bounce_to(soft=bounce.BOUNCED_SOFT, hard=bounce.BOUNCED_HARD)
def START(message, list_name=None, host=None, bad_list=None):
list_name = list_name.lower() if list_name else None
bad_list = bad_list.lower() if bad_list else None
host = host.lower() if host else None
if bad_list:
if '-' in bad_list:
# probably put a '.' in it, try to find a similar list
similar_lists = mailinglist.similar_named_lists(bad_list.replace('-','.'))
else:
similar_lists = mailinglist.similar_named_lists(bad_list)
help = view.respond(locals(), "mail/bad_list_name.msg",
From="noreply@%(host)s",
To=message['from'],
Subject="That's not a valid list name.")
relay.deliver(help)
return START
elif list_name in INVALID_LISTS or message.route_from.endswith(host):
logging.debug("LOOP MESSAGE to %r from %r.", message['to'],
message.route_from)
return START
elif mailinglist.find_list(list_name):
action = "subscribe to"
CONFIRM.send(relay, list_name, message, 'mail/confirmation.msg',
locals())
return CONFIRMING_SUBSCRIBE
else:
similar_lists = mailinglist.similar_named_lists(list_name)
CONFIRM.send(relay, list_name, message, 'mail/create_confirmation.msg',
locals())
return CONFIRMING_SUBSCRIBE
@route('(list_name)-confirm-(id_number)@(host)')
def CONFIRMING_SUBSCRIBE(message, list_name=None, id_number=None, host=None):
list_name = list_name.lower() if list_name else None
host = host.lower() if host else None
original = CONFIRM.verify(list_name, message.route_from, id_number)
if original:
mailinglist.add_subscriber(message.route_from, list_name)
msg = view.respond(locals(), "mail/subscribed.msg",
From="noreply@%(host)s",
To=message['from'],
Subject="Welcome to %(list_name)s list.")
relay.deliver(msg)
CONFIRM.cancel(list_name, message.route_from, id_number)
return POSTING
else:
logging.warning("Invalid confirm from %s", message.route_from)
return CONFIRMING_SUBSCRIBE
@route('(list_name)-(action)@(host)', action='[a-z]+')
@route('(list_name)@(host)')
def POSTING(message, list_name=None, action=None, host=None):
list_name = list_name.lower() if list_name else None
action = action.lower() if action else None
host = host.lower() if host else None
if action == 'unsubscribe':
action = "unsubscribe from"
CONFIRM.send(relay, list_name, message, 'mail/confirmation.msg',
locals())
return CONFIRMING_UNSUBSCRIBE
else:
mailinglist.post_message(relay, message, list_name, host)
# archive makes sure it gets cleaned up before archival
final_msg = mailinglist.craft_response(message, list_name,
list_name + '@' + host)
archive.enqueue(list_name, final_msg)
return POSTING
@route_like(CONFIRMING_SUBSCRIBE)
def CONFIRMING_UNSUBSCRIBE(message, list_name=None, id_number=None, host=None):
list_name = list_name.lower() if list_name else None
host = host.lower() if host else None
original = CONFIRM.verify(list_name, message.route_from, id_number)
if original:
mailinglist.remove_subscriber(message.route_from, list_name)
msg = view.respond(locals(), 'mail/unsubscribed.msg',
From="noreply@%(host)s",
To=message['from'],
Subject="You are now unsubscribed from %(list_name)s.")
relay.deliver(msg)
CONFIRM.cancel(list_name, message.route_from, id_number)
return START
else:
logging.warning("Invalid unsubscribe confirm from %s",
message.route_from)
return CONFIRMING_UNSUBSCRIBE
@route("(address)@(host)", address=".+")
def BOUNCING(message, address=None, host=None):
# don't send out a message if they are bouncing
return BOUNCING
########NEW FILE########
__FILENAME__ = bounce
from config.settings import relay, CONFIRM
from lamson.routing import route, Router, route_like
from lamson.bounce import bounce_to
from app.model import mailinglist, bounce
from app import handlers
from email.utils import parseaddr
def force_to_bounce_state(message):
# set their admin module state to disabled
name, address = parseaddr(message.bounce.final_recipient)
Router.STATE_STORE.set_all(address, 'BOUNCING')
Router.STATE_STORE.set('app.handlers.bounce', address, 'BOUNCING')
mailinglist.disable_all_subscriptions(message.bounce.final_recipient)
@route(".+")
def BOUNCED_HARD(message):
if mailinglist.find_subscriptions(message.bounce.final_recipient):
force_to_bounce_state(message)
bounce.archive_bounce(message)
return handlers.admin.START
@route(".+")
def BOUNCED_SOFT(message):
if mailinglist.find_subscriptions(message.bounce.final_recipient):
force_to_bounce_state(message)
bounce.archive_bounce(message)
return handlers.admin.START
@route('unbounce@(host)')
def BOUNCING(message, host=None):
CONFIRM.send(relay, 'unbounce', message, 'mail/unbounce_confirm.msg',
locals())
return CONFIRMING_UNBOUNCE
@route('unbounce-confirm-(id_number)@(host)')
def CONFIRMING_UNBOUNCE(message, id_number=None, host=None):
original = CONFIRM.verify('unbounce', message['from'], id_number)
if original:
relay.deliver(bounce.you_are_now_unbounced(message))
name, address = parseaddr(message['from'])
Router.STATE_STORE.set_all(address, 'POSTING')
mailinglist.enable_all_subscriptions(message['from'])
return UNBOUNCED
@route('unbounce@(host)')
def UNBOUNCED(message, host=None):
# we just ignore these since they may be strays
return UNBOUNCED
########NEW FILE########
__FILENAME__ = archive
from __future__ import with_statement
from lamson import queue, view
from config import settings
from datetime import datetime
import os
import shutil
import simplejson as json
import base64
import stat
ALLOWED_HEADERS = set([
"From", "In-Reply-To", "List-Id",
"Precedence", "References", "Reply-To",
"Return-Path", "Sender",
"Subject", "To", "Message-Id",
"Date", "List-Id",
])
DIR_MOD = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
FILE_MOD = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
def day_of_year_path():
return "%d/%0.2d/%0.2d" % datetime.today().timetuple()[0:3]
def store_path(list_name, name):
datedir = os.path.join(settings.ARCHIVE_BASE, list_name, day_of_year_path())
if not os.path.exists(datedir):
os.makedirs(datedir)
return os.path.join(datedir, name)
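
# Illustrative layout (the date is an example): store_path("mylist", "json")
# resolves to "<ARCHIVE_BASE>/mylist/2009/06/15/json", creating the dated
# directory tree on first use.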
def fix_permissions(path):
os.chmod(path, DIR_MOD)
for root, dirs, files in os.walk(path):
os.chmod(root, DIR_MOD)
for f in files:
os.chmod(os.path.join(root, f), FILE_MOD)
def update_json(list_name, key, message):
jpath = store_path(list_name, 'json')
json_file = key + ".json"
json_archive = os.path.join(jpath, json_file)
if not os.path.exists(jpath):
os.makedirs(jpath)
with open(json_archive, "w") as f:
f.write(to_json(message.base))
fix_permissions(jpath)
def enqueue(list_name, message):
qpath = store_path(list_name, 'queue')
pending = queue.Queue(qpath, safe=True)
white_list_cleanse(message)
key = pending.push(message)
fix_permissions(qpath)
update_json(list_name, key, message)
return key
def white_list_cleanse(message):
for key in message.keys():
if key not in ALLOWED_HEADERS:
del message[key]
def json_encoding(base):
ctype, ctp = base.content_encoding['Content-Type']
cdisp, cdp = base.content_encoding['Content-Disposition']
ctype = ctype or "text/plain"
filename = ctp.get('name',None) or cdp.get('filename', None)
if ctype.startswith('text') or ctype.startswith('message'):
encoding = None
else:
encoding = "base64"
return {'filename': filename, 'type': ctype, 'disposition': cdisp,
'format': encoding}
def json_build(base):
data = {'headers': base.headers,
'body': base.body,
'encoding': json_encoding(base),
'parts': [json_build(p) for p in base.parts],
}
if data['encoding']['format'] and base.body:
data['body'] = base64.b64encode(base.body)
return data
def to_json(base):
return json.dumps(json_build(base), sort_keys=True, indent=4)
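
# Sketch of the JSON produced above (field names come from json_build; the
# values are illustrative):
#
#   {
#     "headers": {"Subject": "...", "From": "...", ...},
#     "body": "...",            # base64-encoded when encoding.format is set
#     "encoding": {"filename": null, "type": "text/plain",
#                  "disposition": null, "format": null},
#     "parts": [ ...same structure, recursively... ]
#   }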
def build_index():
lists = sorted(os.listdir(settings.ARCHIVE_BASE))
html = view.render(locals(), "web/list_index.html")
open(os.path.join(settings.ARCHIVE_BASE, "lists.html"), "w").write(html)
########NEW FILE########
__FILENAME__ = bounce
from lamson import view, encoding, queue
from config import settings
def mail_to_you_is_bouncing(message):
reason = message.bounce.error_for_humans()
msg = view.respond(locals(), 'mail/you_bounced.msg',
From='[email protected]',
To=message.bounce.original['to'],
Subject="Email to you is bouncing.")
if message.bounce.report:
for report in message.bounce.report:
msg.attach('bounce_report.msg', content_type='text/plain', data=encoding.to_string(report),
disposition='attachment')
if message.bounce.notification:
msg.attach('notification_report.msg', content_type='text/plain',
data=encoding.to_string(message.bounce.notification),
disposition='attachment')
return msg
def you_are_now_unbounced(message):
msg = view.respond(locals(), 'mail/you_are_unbounced.msg',
From='[email protected]',
To=message['from'],
Subject="You are now unbounced.")
return msg
def archive_bounce(message):
qu = queue.Queue(settings.BOUNCE_ARCHIVE)
qu.push(message)
########NEW FILE########
__FILENAME__ = confirmation
from webapp.librelist.models import Confirmation
class DjangoConfirmStorage():
def clear(self):
Confirmation.objects.all().delete()
def get(self, target, from_address):
confirmations = Confirmation.objects.filter(from_address=from_address,
list_name=target)
if confirmations:
return confirmations[0].expected_secret, confirmations[0].pending_message_id
else:
return None, None
def delete(self, target, from_address):
Confirmation.objects.filter(from_address=from_address,
list_name=target).delete()
def store(self, target, from_address, expected_secret, pending_message_id):
conf = Confirmation(from_address=from_address,
expected_secret = expected_secret,
pending_message_id = pending_message_id,
list_name=target)
conf.save()
########NEW FILE########
__FILENAME__ = mailinglist
from webapp.librelist.models import *
from django.db.models import Q
from email.utils import parseaddr
from lamson.mail import MailResponse
from config import settings
from lib import metaphone
import Stemmer
from app.model.archive import build_index
def stem_and_meta(list_name):
s = Stemmer.Stemmer('english')
name = " ".join(s.stemWords(list_name.split('.')))
return metaphone.dm(name)
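
# Illustrative: for "unit.testing" the dots become spaces, each word is
# stemmed ("unit test"), and the double-metaphone primary/secondary codes of
# that phrase are returned, so similar-sounding list names map together.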
def create_list(list_name):
list_name = list_name.lower()
mlist = find_list(list_name)
sim_pri, sim_sec = stem_and_meta(list_name)
if not mlist:
mlist = MailingList(archive_url = "/archives/" + list_name,
archive_queue = "/queues/" + list_name,
name=list_name,
similarity_pri = sim_pri,
similarity_sec = sim_sec)
mlist.save()
build_index()
return mlist
def delete_list(list_name):
assert list_name == list_name.lower()
MailingList.objects.filter(name = list_name).delete()
def find_list(list_name):
assert list_name == list_name.lower()
mlists = MailingList.objects.filter(name = list_name)
if mlists:
return mlists[0]
else:
return None
def add_subscriber(address, list_name):
assert list_name == list_name.lower()
mlist = create_list(list_name)
sub_name, sub_addr = parseaddr(address)
subs = find_subscriptions(address, list_name)
if not subs:
sub = Subscription(subscriber_name = sub_name,
subscriber_address = sub_addr,
mailing_list = mlist)
sub.save()
return sub
else:
return subs[0]
def remove_subscriber(address, list_name):
assert list_name == list_name.lower()
find_subscriptions(address, list_name).delete()
def remove_all_subscriptions(address):
find_subscriptions(address).delete()
def find_subscriptions(address, list_name=None):
if list_name: assert list_name == list_name.lower()
sub_name, sub_addr = parseaddr(address)
if list_name:
mlist = find_list(list_name)
else:
mlist = None
if mlist:
subs = Subscription.objects.filter(
subscriber_address=sub_addr, mailing_list = mlist
).exclude(
enabled=False)
else:
subs = Subscription.objects.filter(
subscriber_address=sub_addr
).exclude(
enabled=False)
return subs
def post_message(relay, message, list_name, host):
assert list_name == list_name.lower()
mlist = find_list(list_name)
assert mlist, "User is somehow able to post to list %s" % list_name
for sub in mlist.subscription_set.all().values('subscriber_address'):
list_addr = "%s@%s" % (list_name, host)
delivery = craft_response(message, list_name, list_addr)
subject_mod = "[%s]" % list_name
if subject_mod not in delivery['subject']:
delivery['subject'] = subject_mod + " " + delivery['subject']
relay.deliver(delivery, To=sub['subscriber_address'], From=list_addr)
def craft_response(message, list_name, list_addr):
assert list_name == list_name.lower()
response = MailResponse(To=list_addr,
From=message['from'],
Subject=message['subject'])
msg_id = message['message-id']
response.update({
"Sender": list_addr,
"Reply-To": list_addr,
"List-Id": list_addr,
"List-Unsubscribe": "<mailto:%[email protected]>" % list_name,
"List-Archive": "<http://librelist.com/archives/%s/>" % list_name,
"List-Post": "<mailto:%s>" % list_addr,
"List-Help": "<http://librelist.com/help.html>",
"List-Subscribe": "<mailto:%[email protected]>" % list_name,
"Return-Path": list_addr,
"Precedence": 'list',
})
if 'date' in message:
response['Date'] = message['date']
if 'references' in message:
response['References'] = message['References']
elif msg_id:
response['References'] = msg_id
if msg_id:
response['message-id'] = msg_id
if 'in-reply-to' not in message:
response["In-Reply-To"] = message['Message-Id']
if message.all_parts():
response.attach_all_parts(message)
else:
response.Body = message.body()
return response
def disable_all_subscriptions(address):
Subscription.objects.filter(subscriber_address=address).update(enabled=False)
def enable_all_subscriptions(address):
Subscription.objects.filter(subscriber_address=address).update(enabled=True)
def similar_named_lists(list_name):
sim_pri, sim_sec = stem_and_meta(list_name)
sim_sec = sim_sec or sim_pri
return MailingList.objects.filter(Q(similarity_pri = sim_pri) |
Q(similarity_sec =
sim_sec))
########NEW FILE########
__FILENAME__ = state_storage
from lamson.routing import StateStorage, ROUTE_FIRST_STATE
from webapp.librelist.models import UserState
class UserStateStorage(StateStorage):
def clear(self):
for state in UserState.objects.all():
state.delete()
def _find_state(self, key, sender):
sender = sender.lower()
key = key.lower()
states = UserState.objects.filter(state_key = key,
from_address = sender)
if states:
return states[0]
else:
return None
def get(self, key, sender):
sender = sender.lower()
key = key.lower()
stored_state = self._find_state(key, sender)
if stored_state:
return stored_state.state
else:
return ROUTE_FIRST_STATE
def key(self, key, sender):
raise Exception("THIS METHOD MEANS NOTHING TO DJANGO!")
def set(self, key, sender, to_state):
sender = sender.lower()
key = key.lower()
stored_state = self._find_state(key, sender)
if stored_state:
if to_state == "START":
# don't store these, they're the default when it doesn't exist
stored_state.delete()
stored_state.state = to_state
stored_state.save()
else:
# avoid storing start states
if to_state != "START":
stored_state = UserState(state_key = key, from_address = sender,
state=to_state)
stored_state.save()
def set_all(self, sender, to_state):
"""
This isn't part of normal lamson code, it's used to
control the states for all of the app.handlers.admin
lists during a bounce.
"""
sender = sender.lower()
stored_states = UserState.objects.filter(from_address = sender)
for stored in stored_states:
stored.state = to_state
stored.save()
########NEW FILE########
__FILENAME__ = boot
from config import settings
from lamson.routing import Router
from lamson.server import Relay, SMTPReceiver
from lamson import view
import logging
import logging.config
import jinja2
from app.model import state_storage
logging.config.fileConfig("config/logging.conf")
# the relay host to actually send the final message to
settings.relay = Relay(host=settings.relay_config['host'],
port=settings.relay_config['port'], debug=1)
# where to listen for incoming messages
settings.receiver = SMTPReceiver(settings.receiver_config['host'],
settings.receiver_config['port'])
Router.defaults(**settings.router_defaults)
Router.load(settings.handlers)
Router.RELOAD=True
Router.LOG_EXCEPTIONS=True
Router.STATE_STORE=state_storage.UserStateStorage()
view.LOADER = jinja2.Environment(
loader=jinja2.PackageLoader(settings.template_config['dir'],
settings.template_config['module']))
########NEW FILE########
__FILENAME__ = settings
# This file contains python variables that configure Lamson for email processing.
import logging
import os
from lamson import confirm, encoding
encoding.VALUE_IS_EMAIL_ADDRESS = lambda v: '@' in v or '-AT-' in v
os.environ['DJANGO_SETTINGS_MODULE'] = 'webapp.settings'
relay_config = {'host': 'localhost', 'port': 8825}
receiver_config = {'host': 'localhost', 'port': 8823}
handlers = ['app.handlers.bounce', 'app.handlers.admin']
router_defaults = {
'host': 'librelist\\.(com|org|net)',
'list_name': '[a-zA-Z0-9\.]+',
'id_number': '[a-z0-9]+',
}
template_config = {'dir': 'app', 'module': 'templates'}
# the config/boot.py will turn these values into variables set in settings
PENDING_QUEUE = "run/pending"
ARCHIVE_BASE = "app/data/archive"
BOUNCE_ARCHIVE = "run/bounces"
SPAM = {'db': 'run/spamdb', 'rc': 'run/spamrc', 'queue': 'run/spam'}
from app.model.confirmation import DjangoConfirmStorage
CONFIRM = confirm.ConfirmationEngine('run/pending', DjangoConfirmStorage())
########NEW FILE########
__FILENAME__ = testing
from config import settings
from lamson import view
from lamson.routing import Router
from lamson.server import Relay
import jinja2
import logging
import logging.config
import os
from app.model import state_storage
logging.config.fileConfig("config/test_logging.conf")
# the relay host to actually send the final message to (set debug=1 to see what
# the relay is saying to the log server).
settings.relay = Relay(host=settings.relay_config['host'],
port=settings.relay_config['port'], debug=0)
settings.receiver = None
Router.defaults(**settings.router_defaults)
Router.load(settings.handlers)
Router.RELOAD=True
Router.LOG_EXCEPTIONS=False
Router.STATE_STORE=state_storage.UserStateStorage()
view.LOADER = jinja2.Environment(
loader=jinja2.PackageLoader(settings.template_config['dir'],
settings.template_config['module']))
# if you have pyenchant and enchant installed then the template tests will do
# spell checking for you, but you need to tell pyenchant where to find itself
# if 'PYENCHANT_LIBRARY_PATH' not in os.environ:
# os.environ['PYENCHANT_LIBRARY_PATH'] = '/opt/local/lib/libenchant.dylib'
########NEW FILE########
__FILENAME__ = json_convert
import sys
sys.path.append(".")
from lamson.mail import MailRequest, MailResponse
from lamson.queue import Queue
import config.testing
from app.model import archive
import os
def convert_queue(arg, dirname, names):
if dirname.endswith("new"):
print dirname, names
jpath = dirname + "/../../json"
if not os.path.exists(jpath):
os.mkdir(jpath)
for key in names:
json_file = key + ".json"
json_archive = os.path.join(jpath, json_file)
fpath = os.path.join(dirname, key)
msg = MailRequest('librelist.com', None, None, open(fpath).read())
f = open(json_archive, "w")
f.write(archive.to_json(msg.base))
f.close()
os.path.walk("app/data/archive", convert_queue, None)
########NEW FILE########
__FILENAME__ = config
import os
author = 'Zed A. Shaw' # Default author name. Overridden in individual document
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
input_dir = os.path.join(THIS_DIR, 'input')
output_dir = os.path.join(THIS_DIR, 'output')
template_dir = THIS_DIR
template = os.path.join(template_dir, 'template.html')
### Optional parameters
options = { 'baseurl':"", # if not set, relative URLs will be generated
'sitename':"librelist.com",
'slogan': "No logins. No tracking. Just lists.",
'extensions':['.txt'],
'format': 'text/x-textile',
'siteurl': 'http://librelist.com',
}
########NEW FILE########
__FILENAME__ = webgen
#!/usr/bin/env python
from __future__ import with_statement
import os
import sys
import string
from string import Template
from config import *
from datetime import date
from textile import textile
from stat import *
import datetime
import PyRSS2Gen
rss = PyRSS2Gen.RSS2(
title = options["sitename"],
link = options["siteurl"],
description = options["slogan"],
lastBuildDate = datetime.datetime.now(),
items = [])
def add_rss_item(rss, title, link, description, pubDate):
item = PyRSS2Gen.RSSItem(title = title, link = link,
description = description,
guid = PyRSS2Gen.Guid(link),
pubDate = datetime.datetime.fromtimestamp(pubDate))
rss.items.append(item)
def ext(fname):
return os.path.splitext(fname)[1]
def process(fname):
with open(fname, 'r') as f:
try:
head, body = f.read().split('\n\n')
            return head, body
        except ValueError:
            print 'Invalid file format : ', fname
def parse(fname):
with open(fname, 'r') as f:
raw = f.read()
headers = {}
try:
(header_lines,body) = raw.split("\n\n", 1)
for header in header_lines.split("\n"):
(name, value) = header.split(": ", 1)
headers[name.lower()] = unicode(value.strip())
return headers, body
except:
raise TypeError, "Invalid page file format for %s" % fname
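
# Page files parsed above look like this (illustrative):
#
#   title: About
#   content-type: text/x-textile
#
#   h1. About this site
#
#   Body text in the format named by the content-type header.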
def get_template(template):
"""Takes the directory where templates are located and the template name. Returns a blob containing the template."""
template = os.path.join(template_dir, template)
return Template(open(template, 'r').read())
def source_newer(source, target):
if len(sys.argv) > 1 and sys.argv[1] == "force":
return True
if not os.path.exists(target):
return True
else:
smtime = os.stat(source)[ST_MTIME]
tmtime = os.stat(target)[ST_MTIME]
return smtime > tmtime
def is_blog(current_dir, myself, headers, files):
"""A page tagged as an entry will get the files, sort them by their dates,
and then the contents will be that directory listing instead."""
if 'content-type' in headers and headers['content-type'] == "text/blog":
# it's a listing, make it all work
without_self = files[:]
without_self.remove(os.path.split(myself)[-1])
without_self.sort(reverse=True)
listing = []
for f in without_self:
print "Doing blog", f
# load up the file and peel out the first few paragraphs
content = os.path.join(current_dir, f)
head, body = parse(content)
paras = [p for p in body.split("\n\n") if p]
if paras:
# now make a simple listing entry with it
date, ext = os.path.splitext(f)
head["link"] = os.path.join("/" + os.path.split(current_dir)[-1], date + ".html")
head["date"] = date
format = determine_format(head)
pubDate = smtime = os.stat(content)[ST_CTIME]
head["content"] = content_format(current_dir, f, head, files,
format, "\n\n".join(paras[0:2]))
template = head['item-template'] if 'item-template' in head else headers['item-template']
description = get_template(template).safe_substitute(head)
if "feed" not in headers:
add_rss_item(rss, head["title"], options["siteurl"] +
head["link"], description, pubDate)
listing.append(description)
return lambda s: "".join(listing)
else:
return lambda s: s
def content_format(current_dir, inp, headers, files, format, body):
return {
u'text/plain': lambda s: u'<pre>%s</pre>' % s,
u'text/x-textile': lambda s: u'%s' % textile(s,head_offset=0, validate=0,
sanitize=0, encoding='utf-8', output='utf-8'),
u'text/html': lambda s: s,
u'text/blog': is_blog(current_dir, inp, headers, files)
}[format](body)
def determine_format(headers):
if 'content-type' in headers:
return headers['content-type']
else:
return options['format']
def parse_directory(current_dir, files, output_dir):
files = [f for f in files if ext(f) in options['extensions']]
for f in files:
inp = os.path.join(current_dir, f)
target = os.path.join(output_dir, f)
# TODO: Allow specifying the target extension from headers
outp = os.path.splitext(target)[0] + '.html'
# always redo the indexes since they'll typically list information to
# update from the directory they are in
if not source_newer(inp, outp) and f != "index.txt":
continue
headers, body = parse(inp)
if 'template' not in headers:
blob = get_template(template)
else:
blob = get_template(headers['template'])
format = determine_format(headers)
print "Processing %s" % inp
content = content_format(current_dir, inp, headers, files, format, body)
headers['content'] = content
headers.update(options)
output = blob.safe_substitute(**headers)
outf = open(outp, 'w')
outf.write(output)
outf.close()
def a_fucking_cmp_for_time(x,y):
diff = y.pubDate - x.pubDate
return diff.days * 24 * 60 * 60 + diff.seconds
def main():
### Walks through the input dir creating finding all subdirectories.
for root, dirs, files in os.walk(input_dir):
output = root.replace(input_dir, output_dir)
### Checks if the directory exists in output and creates it if false.
if not os.path.isdir(output):
os.makedirs(output)
parse_directory(root, files, output)
x,y = rss.items[0], rss.items[-1]
diff = x.pubDate - y.pubDate
print "diff!", diff.seconds, diff.days
rss.items.sort(cmp=lambda x,y: a_fucking_cmp_for_time(x,y))
rss.write_xml(open("output/feed.xml", "w"))
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = metaphone
#!python
#coding= latin-1
# This script implements the Double Metaphone algorithm (c) 1998, 1999 by Lawrence Philips
# it was translated to Python from the C source written by Kevin Atkinson (http://aspell.net/metaphone/)
# By Andrew Collins - January 12, 2007 who claims no rights to this work
# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
# Tested with Python 2.4.3
# Updated Feb 14, 2007 - Found a typo in the 'gh' section
# Updated Dec 17, 2007 - Bugs fixed in 'S', 'Z', and 'J' sections. Thanks Chris Leong!
def dm(st) :
"""dm(string) -> (string, string or None)
returns the double metaphone codes for given string - always a tuple
there are no checks done on the input string, but it should be a single word or name."""
vowels = ['A', 'E', 'I', 'O', 'U', 'Y']
st = st.decode('ascii', 'ignore')
st = st.upper() # st is short for string. I usually prefer descriptive over short, but this var is used a lot!
is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or st.find('CZ') > -1 or st.find('WITZ') > -1)
length = len(st)
first = 2
st = '-' * first + st + '------' # so we can index beyond the begining and end of the input string
last = first + length -1
pos = first # pos is short for position
pri = sec = '' # primary and secondary metaphone codes
#skip these silent letters when at start of word
if st[first:first+2] in ["GN", "KN", "PN", "WR", "PS"] :
pos += 1
# Initial 'X' is pronounced 'Z' e.g. 'Xavier'
if st[first] == 'X' :
pri = sec = 'S' #'Z' maps to 'S'
pos += 1
# main loop through chars in st
while pos <= last :
#print str(pos) + '\t' + st[pos]
ch = st[pos] # ch is short for character
# nxt (short for next characters in metaphone code) is set to a tuple of the next characters in
# the primary and secondary codes and how many characters to move forward in the string.
# the secondary code letter is given only when it is different than the primary.
# This is just a trick to make the code easier to write and read.
nxt = (None, 1) # default action is to add nothing and move to next char
if ch in vowels :
nxt = (None, 1)
if pos == first : # all init vowels now map to 'A'
nxt = ('A', 1)
elif ch == 'B' :
#"-mb", e.g", "dumb", already skipped over... see 'M' below
if st[pos+1] == 'B' :
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'C' :
# various germanic
			if (pos > first and st[pos-2] not in vowels and st[pos-1:pos+2] == 'ACH' and \
				(st[pos+2] != 'I' and (st[pos+2] != 'E' or st[pos-2:pos+4] in ['BACHER', 'MACHER']))) :
nxt = ('K', 2)
# special case 'CAESAR'
elif pos == first and st[first:first+6] == 'CAESAR' :
nxt = ('S', 2)
elif st[pos:pos+4] == 'CHIA' : #italian 'chianti'
nxt = ('K', 2)
elif st[pos:pos+2] == 'CH' :
# find 'michael'
if pos > first and st[pos:pos+4] == 'CHAE' :
nxt = ('K', 'X', 2)
elif pos == first and (st[pos+1:pos+6] in ['HARAC', 'HARIS'] or \
st[pos+1:pos+4] in ["HOR", "HYM", "HIA", "HEM"]) and st[first:first+5] != 'CHORE' :
nxt = ('K', 2)
#germanic, greek, or otherwise 'ch' for 'kh' sound
elif st[first:first+4] in ['VAN ', 'VON '] or st[first:first+3] == 'SCH' \
or st[pos-2:pos+4] in ["ORCHES", "ARCHIT", "ORCHID"] \
or st[pos+2] in ['T', 'S'] \
or ((st[pos-1] in ["A", "O", "U", "E"] or pos == first) \
and st[pos+2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W"]) :
nxt = ('K', 1)
else :
if pos == first :
if st[first:first+2] == 'MC' :
nxt = ('K', 2)
else :
nxt = ('X', 'K', 2)
else :
nxt = ('X', 2)
#e.g, 'czerny'
elif st[pos:pos+2] == 'CZ' and st[pos-2:pos+2] != 'WICZ' :
nxt = ('S', 'X', 2)
#e.g., 'focaccia'
elif st[pos+1:pos+4] == 'CIA' :
nxt = ('X', 3)
#double 'C', but not if e.g. 'McClellan'
elif st[pos:pos+2] == 'CC' and not (pos == (first +1) and st[first] == 'M') :
#'bellocchio' but not 'bacchus'
if st[pos+2] in ["I", "E", "H"] and st[pos+2:pos+4] != 'HU' :
#'accident', 'accede' 'succeed'
if (pos == (first +1) and st[first] == 'A') or \
st[pos-1:pos+4] in ['UCCEE', 'UCCES'] :
nxt = ('KS', 3)
#'bacci', 'bertucci', other italian
else:
nxt = ('X', 3)
else :
nxt = ('K', 2)
elif st[pos:pos+2] in ["CK", "CG", "CQ"] :
nxt = ('K', 'K', 2)
elif st[pos:pos+2] in ["CI", "CE", "CY"] :
#italian vs. english
if st[pos:pos+3] in ["CIO", "CIE", "CIA"] :
nxt = ('S', 'X', 2)
else :
nxt = ('S', 2)
else :
				# name sent in 'mac caffrey', 'mac gregor'
if st[pos+1:pos+3] in [" C", " Q", " G"] :
nxt = ('K', 3)
else :
if st[pos+1] in ["C", "K", "Q"] and st[pos+1:pos+3] not in ["CE", "CI"] :
nxt = ('K', 2)
else : # default for 'C'
nxt = ('K', 1)
		elif ch == u'Ç' : # will never get here, since st.decode('ascii', 'ignore') above strips non-ASCII
nxt = ('S', 1)
elif ch == 'D' :
if st[pos:pos+2] == 'DG' :
if st[pos+2] in ['I', 'E', 'Y'] : #e.g. 'edge'
nxt = ('J', 3)
else :
nxt = ('TK', 2)
elif st[pos:pos+2] in ['DT', 'DD'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'F' :
if st[pos+1] == 'F' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'G' :
if st[pos+1] == 'H' :
if pos > first and st[pos-1] not in vowels :
nxt = ('K', 2)
elif pos < (first + 3) :
if pos == first : #'ghislane', ghiradelli
if st[pos+2] == 'I' :
nxt = ('J', 2)
else :
nxt = ('K', 2)
#Parker's rule (with some further refinements) - e.g., 'hugh'
elif (pos > (first + 1) and st[pos-2] in ['B', 'H', 'D'] ) \
or (pos > (first + 2) and st[pos-3] in ['B', 'H', 'D'] ) \
or (pos > (first + 3) and st[pos-3] in ['B', 'H'] ) :
nxt = (None, 2)
else :
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if pos > (first + 2) and st[pos-1] == 'U' \
and st[pos-3] in ["C", "G", "L", "R", "T"] :
nxt = ('F', 2)
else :
if pos > first and st[pos-1] != 'I' :
nxt = ('K', 2)
elif st[pos+1] == 'N' :
if pos == (first +1) and st[first] in vowels and not is_slavo_germanic :
nxt = ('KN', 'N', 2)
else :
# not e.g. 'cagney'
if st[pos+2:pos+4] != 'EY' and st[pos+1] != 'Y' and not is_slavo_germanic :
nxt = ('N', 'KN', 2)
else :
nxt = ('KN', 2)
# 'tagliaro'
elif st[pos+1:pos+3] == 'LI' and not is_slavo_germanic :
nxt = ('KL', 'L', 2)
# -ges-,-gep-,-gel-, -gie- at beginning
elif pos == first and (st[pos+1] == 'Y' \
or st[pos+1:pos+3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]) :
nxt = ('K', 'J', 2)
# -ger-, -gy-
			elif (st[pos+1:pos+3] == 'ER' or st[pos+1] == 'Y') \
and st[first:first+6] not in ["DANGER", "RANGER", "MANGER"] \
and st[pos-1] not in ['E', 'I'] and st[pos-1:pos+2] not in ['RGY', 'OGY'] :
nxt = ('K', 'J', 2)
# italian e.g, 'biaggi'
elif st[pos+1] in ['E', 'I', 'Y'] or st[pos-1:pos+3] in ["AGGI", "OGGI"] :
# obvious germanic
if st[first:first+4] in ['VON ', 'VAN '] or st[first:first+3] == 'SCH' \
or st[pos+1:pos+3] == 'ET' :
nxt = ('K', 2)
else :
# always soft if french ending
if st[pos+1:pos+5] == 'IER ' :
nxt = ('J', 2)
else :
nxt = ('J', 'K', 2)
elif st[pos+1] == 'G' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'H' :
# only keep if first & before vowel or btw. 2 vowels
if (pos == first or st[pos-1] in vowels) and st[pos+1] in vowels :
nxt = ('H', 2)
else : # (also takes care of 'HH')
nxt = (None, 1)
elif ch == 'J' :
# obvious spanish, 'jose', 'san jacinto'
if st[pos:pos+4] == 'JOSE' or st[first:first+4] == 'SAN ' :
if (pos == first and st[pos+4] == ' ') or st[first:first+4] == 'SAN ' :
nxt = ('H',)
else :
nxt = ('J', 'H')
elif pos == first and st[pos:pos+4] != 'JOSE' :
nxt = ('J', 'A') # Yankelovich/Jankelowicz
else :
# spanish pron. of e.g. 'bajador'
if st[pos-1] in vowels and not is_slavo_germanic \
and st[pos+1] in ['A', 'O'] :
nxt = ('J', 'H')
else :
if pos == last :
nxt = ('J', ' ')
else :
if st[pos+1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
and st[pos-1] not in ["S", "K", "L"] :
nxt = ('J',)
else :
nxt = (None, )
if st[pos+1] == 'J' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'K' :
if st[pos+1] == 'K' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'L' :
if st[pos+1] == 'L' :
# spanish e.g. 'cabrillo', 'gallegos'
if (pos == (last - 2) and st[pos-1:pos+3] in ["ILLO", "ILLA", "ALLE"]) \
or (st[last-1:last+1] in ["AS", "OS"] or st[last] in ["A", "O"] \
and st[pos-1:pos+3] == 'ALLE') :
nxt = ('L', ' ', 2)
else :
nxt = ('L', 2)
else :
nxt = ('L', 1)
elif ch == 'M' :
if st[pos+1:pos+4] == 'UMB' \
and (pos + 1 == last or st[pos+2:pos+4] == 'ER') \
or st[pos+1] == 'M' :
nxt = ('M', 2)
else :
nxt = ('M', 1)
elif ch == 'N' :
if st[pos+1] == 'N' :
nxt = ('N', 2)
else :
nxt = ('N', 1)
elif ch == u'Ñ' :
nxt = ('N', 1)
elif ch == 'P' :
if st[pos+1] == 'H' :
nxt = ('F', 2)
elif st[pos+1] in ['P', 'B'] : # also account for "campbell", "raspberry"
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'Q' :
if st[pos+1] == 'Q' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'R' :
# french e.g. 'rogier', but exclude 'hochmeier'
if pos == last and not is_slavo_germanic \
and st[pos-2:pos] == 'IE' and st[pos-4:pos-2] not in ['ME', 'MA'] :
nxt = ('', 'R')
else :
nxt = ('R',)
if st[pos+1] == 'R' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'S' :
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if st[pos-1:pos+2] in ['ISL', 'YSL'] :
nxt = (None, 1)
# special case 'sugar-'
elif pos == first and st[first:first+5] == 'SUGAR' :
nxt =('X', 'S', 1)
elif st[pos:pos+2] == 'SH' :
# germanic
if st[pos+1:pos+5] in ["HEIM", "HOEK", "HOLM", "HOLZ"] :
nxt = ('S', 2)
else :
nxt = ('X', 2)
# italian & armenian
elif st[pos:pos+3] in ["SIO", "SIA"] or st[pos:pos+4] == 'SIAN' :
if not is_slavo_germanic :
nxt = ('S', 'X', 3)
else :
nxt = ('S', 3)
# german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
			# also, -sz- in Slavic languages, although in Hungarian it is pronounced 's'
elif (pos == first and st[pos+1] in ["M", "N", "L", "W"]) or st[pos+1] == 'Z' :
nxt = ('S', 'X')
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
			elif st[pos:pos+2] == 'SC' :
# Schlesinger's rule
if st[pos+2] == 'H' :
# dutch origin, e.g. 'school', 'schooner'
if st[pos+3:pos+5] in ["OO", "ER", "EN", "UY", "ED", "EM"] :
# 'schermerhorn', 'schenker'
if st[pos+3:pos+5] in ['ER', 'EN'] :
nxt = ('X', 'SK', 3)
else :
nxt = ('SK', 3)
else :
if pos == first and st[first+3] not in vowels and st[first+3] != 'W' :
nxt = ('X', 'S', 3)
else :
nxt = ('X', 3)
elif st[pos+2] in ['I', 'E', 'Y'] :
nxt = ('S', 3)
else :
nxt = ('SK', 3)
# french e.g. 'resnais', 'artois'
elif pos == last and st[pos-2:pos] in ['AI', 'OI'] :
nxt = ('', 'S', 1)
else :
nxt = ('S',)
if st[pos+1] in ['S', 'Z'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'T' :
if st[pos:pos+4] == 'TION' :
nxt = ('X', 3)
elif st[pos:pos+3] in ['TIA', 'TCH'] :
nxt = ('X', 3)
elif st[pos:pos+2] == 'TH' or st[pos:pos+3] == 'TTH' :
# special case 'thomas', 'thames' or germanic
if st[pos+2:pos+4] in ['OM', 'AM'] or st[first:first+4] in ['VON ', 'VAN '] \
or st[first:first+3] == 'SCH' :
nxt = ('T', 2)
else :
nxt = ('0', 'T', 2)
elif st[pos+1] in ['T', 'D'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'V' :
if st[pos+1] == 'V' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'W' :
# can also be in middle of word
if st[pos:pos+2] == 'WR' :
nxt = ('R', 2)
			elif pos == first and (st[pos+1] in vowels or st[pos:pos+2] == 'WH') :
# Wasserman should match Vasserman
if st[pos+1] in vowels :
nxt = ('A', 'F', 1)
else :
nxt = ('A', 1)
# Arnow should match Arnoff
elif (pos == last and st[pos-1] in vowels) \
or st[pos-1:pos+5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
or st[first:first+3] == 'SCH' :
nxt = ('', 'F', 1)
# polish e.g. 'filipowicz'
elif st[pos:pos+4] in ["WICZ", "WITZ"] :
nxt = ('TS', 'FX', 4)
else : # default is to skip it
nxt = (None, 1)
elif ch == 'X' :
# french e.g. breaux
nxt = (None,)
if not(pos == last and (st[pos-3:pos] in ["IAU", "EAU"] \
or st[pos-2:pos] in ['AU', 'OU'])):
nxt = ('KS',)
if st[pos+1] in ['C', 'X'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'Z' :
# chinese pinyin e.g. 'zhao'
if st[pos+1] == 'H' :
nxt = ('J',)
elif st[pos+1:pos+3] in ["ZO", "ZI", "ZA"] \
or (is_slavo_germanic and pos > first and st[pos-1] != 'T') :
nxt = ('S', 'TS')
else :
nxt = ('S',)
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
# ----------------------------------
# --- end checking letters------
# ----------------------------------
#print str(nxt)
if len(nxt) == 2 :
if nxt[0] :
pri += nxt[0]
sec += nxt[0]
pos += nxt[1]
elif len(nxt) == 3 :
if nxt[0] :
pri += nxt[0]
if nxt[1] :
sec += nxt[1]
pos += nxt[2]
if pri == sec :
return (pri, None)
else :
return (pri, sec)
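# A minimal usage sketch: dm() returns a (primary, secondary) tuple, with the
# secondary code None whenever the two codes agree, e.g. for the classic pair
# of anglicised spellings this algorithm is meant to match:
#
#   >>> dm('smith')       # -> (u'SM0', u'XMT')
#   >>> dm('schmidt')     # -> (u'XMT', u'SMT')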
if __name__ == '__main__' :
names = {'maurice':'MRS','aubrey':'APR','cambrillo':'KMPR','heidi':'HT','katherine':'K0RN,KTRN',\
'catherine':'K0RN,KTRN','richard':'RXRT,RKRT','bob':'PP','eric':'ARK','geoff':'JF,KF',\
'dave':'TF','ray':'R','steven':'STFN','bryce':'PRS','randy':'RNT','bryan':'PRN',\
'brian':'PRN','otto':'AT','auto':'AT', 'maisey':'MS, None', 'zhang':'JNK, None', 'solilijs':'SLLS, None'}
for name in names.keys() :
print name + '\t-->\t' + str(dm(name)) + '\t(' +names[name] + ')'
########NEW FILE########
__FILENAME__ = admin_tests
from nose.tools import *
from lamson.testing import *
from config import settings
import time
from app.model import archive, confirmation
queue_path = archive.store_path('test.list', 'queue')
sender = "sender-%[email protected]" % time.time()
host = "librelist.com"
list_name = "test.list"
list_addr = "test.list@%s" % host
client = RouterConversation(sender, 'Admin Tests')
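# RouterConversation (from lamson.testing) plays the part of a remote SMTP
# peer: each client.say() below pushes a message through the locally
# configured router, so these tests exercise the real handler state machine.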
def setup():
clear_queue("run/posts")
clear_queue("run/spam")
def test_new_user_subscribes_with_invalid_name():
client.begin()
client.say('test-list@%s' % host, "I can't read!", 'noreply')
client.say('test=list@%s' % host, "I can't read!", 'noreply')
clear_queue()
client.say('unbounce@%s' % host, "I have two email addresses!")
assert not delivered('noreply')
assert not delivered('unbounce')
client.say('noreply@%s' % host, "Dumb dumb.")
assert not delivered('noreply')
def test_new_user_subscribes():
client.begin()
msg = client.say(list_addr, "Hey I was wondering how to fix this?",
list_name + '-confirm')
client.say(msg['Reply-To'], 'Confirmed I am.', 'noreply')
clear_queue()
def test_existing_user_unsubscribes():
test_new_user_subscribes()
msg = client.say(list_name + "-unsubscribe@%s" % host, "I would like to unsubscribe.", 'confirm')
client.say(msg['Reply-To'], 'Confirmed yes I want out.', 'noreply')
def test_existing_user_posts_message():
test_new_user_subscribes()
msg = client.say(list_addr, "Howdy folks, I was wondering what this is?",
list_addr)
# make sure it gets archived
assert delivered(list_addr, to_queue=queue(queue_path))
########NEW FILE########
__FILENAME__ = bounce_tests
from nose.tools import *
from lamson.testing import *
from lamson.mail import MailRequest
from lamson.routing import Router
from app.handlers.admin import module_and_to
from app.model import mailinglist
from handlers import admin_tests
from email.utils import parseaddr
from lamson import bounce
from config import settings
sender = admin_tests.sender
list_addr = admin_tests.list_addr
client = admin_tests.client
def setup():
clear_queue(queue_dir=settings.BOUNCE_ARCHIVE)
def create_bounce(To, From):
msg = MailRequest("fakepeer", From, To, open("tests/bounce.msg").read())
assert msg.is_bounce()
msg.bounce.final_recipient = From
msg.bounce.headers['Final-Recipient'] = From
msg.bounce.original['from'] = From
msg.bounce.original['to'] = To
msg.bounce.original.To = set([To])
msg.bounce.original.From = From
return msg
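# The helper above rewrites the bounce report so that `From` appears as the
# final recipient everywhere the handlers look, i.e. it fakes that mail sent
# through the list to `From` is bouncing back.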
def test_hard_bounce_disables_user():
# get them into a posting state
admin_tests.test_existing_user_posts_message()
assert_in_state('app.handlers.admin', list_addr, sender, 'POSTING')
clear_queue()
assert mailinglist.find_subscriptions(sender, list_addr)
# force them to HARD bounce
msg = create_bounce(list_addr, sender)
Router.deliver(msg)
assert_in_state('app.handlers.admin', list_addr, sender, 'BOUNCING')
assert_in_state('app.handlers.bounce', list_addr, sender, 'BOUNCING')
assert not delivered('unbounce'), "A HARD bounce should be silent."
assert_equal(len(queue(queue_dir=settings.BOUNCE_ARCHIVE).keys()), 1)
assert not mailinglist.find_subscriptions(sender, list_addr)
# make sure that any attempts to post return a "you're bouncing dude" message
client.say(list_addr, 'So anyway as I was saying.')
assert not delivered('unbounce')
assert_in_state('app.handlers.admin', list_addr, sender, 'BOUNCING')
# now have them try to unbounce
msg = client.say('[email protected]', "Please put me back on, I'll be good.",
'unbounce-confirm')
# handle the bounce confirmation
client.say(msg['from'], "Confirmed to unbounce.", 'noreply')
# alright they should be in the unbounce state for the global bounce handler
assert_in_state('app.handlers.bounce', list_addr, sender,
'UNBOUNCED')
# and they need to be back to POSTING for regular operations
assert_in_state('app.handlers.admin', list_addr, sender, 'POSTING')
assert mailinglist.find_subscriptions(sender, list_addr)
# and make sure that only the original bounce is in the bounce archive
assert_equal(len(queue(queue_dir=settings.BOUNCE_ARCHIVE).keys()), 1)
def test_soft_bounce_tells_them():
setup()
# get them into a posting state
admin_tests.test_existing_user_posts_message()
assert_in_state('app.handlers.admin', list_addr, sender, 'POSTING')
clear_queue()
assert mailinglist.find_subscriptions(sender, list_addr)
# force them to soft bounce
msg = create_bounce(list_addr, sender)
msg.bounce.primary_status = (3, bounce.PRIMARY_STATUS_CODES[u'3'])
assert msg.bounce.is_soft()
Router.deliver(msg)
assert_in_state('app.handlers.admin', list_addr, sender, 'BOUNCING')
assert_in_state('app.handlers.bounce', list_addr, sender, 'BOUNCING')
assert not delivered('unbounce'), "We shouldn't be sending on bounde."
assert_equal(len(queue(queue_dir=settings.BOUNCE_ARCHIVE).keys()), 1)
assert not mailinglist.find_subscriptions(sender, list_addr)
# make sure that any attempts to post return a "you're bouncing dude" message
client.say(list_addr, 'So anyway as I was saying.')
assert not delivered('unbounce')
assert_in_state('app.handlers.admin', list_addr, sender, 'BOUNCING')
# now have them try to unbounce
msg = client.say('[email protected]', "Please put me back on, I'll be good.",
'unbounce-confirm')
# handle the bounce confirmation
client.say(msg['from'], "Confirmed to unbounce.", 'noreply')
# alright they should be in the unbounce state for the global bounce handler
assert_in_state('app.handlers.bounce', list_addr, sender,
'UNBOUNCED')
# and they need to be back to POSTING for regular operations
assert_in_state('app.handlers.admin', list_addr, sender, 'POSTING')
assert mailinglist.find_subscriptions(sender, list_addr)
# and make sure that only the original bounce is in the bounce archive
assert_equal(len(queue(queue_dir=settings.BOUNCE_ARCHIVE).keys()), 1)
########NEW FILE########
__FILENAME__ = archive_tests
from nose.tools import *
from lamson.testing import *
from lamson.mail import MailRequest, MailResponse
from app.model import archive, mailinglist
import simplejson as json
import shutil
import os
from config import settings
queue_path = archive.store_path('test.list', 'queue')
json_path = archive.store_path('test.list', 'json')
def setup():
clear_queue(queue_path)
shutil.rmtree(json_path)
def teardown():
clear_queue(queue_path)
shutil.rmtree(json_path)
def test_archive_enqueue():
msg = MailResponse(From=u'"p\xf6stal Zed" <[email protected]>',
To="[email protected]",
Subject="test message", Body="This is a test.")
archive.enqueue('test.list', msg)
archived = delivered('zedshaw', to_queue=queue(queue_path))
assert archived, "Didn't get archived."
as_string = str(archived)
    assert '-AT-' not in str(archived), "Should no longer be obfuscated"
assert '<' in as_string and '"' in as_string and '>' in as_string, "Unicode email screwed up."
def test_white_list_cleanse():
msg = MailRequest('fakepeer', None, None, open('tests/lots_of_headers.msg').read())
resp = mailinglist.craft_response(msg, 'test.list', '[email protected]')
archive.white_list_cleanse(resp)
for key in resp.keys():
assert key in archive.ALLOWED_HEADERS
assert '@' in resp['from']
assert str(resp)
def test_to_json():
    msg = MailRequest('fakepeer', None, None, open("tests/bounce.msg").read())
resp = mailinglist.craft_response(msg, 'test.list', '[email protected]')
    # attach the message back, but fake it as an image so its contents are treated as garbage
resp.attach(filename="tests/bounce.msg", content_type="image/png", disposition="attachment")
resp.to_message() # prime the pump
js = archive.to_json(resp.base)
assert js
rtjs = json.loads(js)
assert rtjs
assert rtjs['parts'][-1]['encoding']['format'] == 'base64'
def test_build_index():
archive.build_index()
assert os.path.exists(settings.ARCHIVE_BASE + "/lists.html")
########NEW FILE########
__FILENAME__ = bounce_tests
from nose.tools import *
from lamson.testing import *
from lamson.mail import MailRequest
from app.model import bounce
def test_mail_to_you_is_bouncing():
msg = MailRequest("fakepeer", None, None, open("tests/bounce.msg").read())
assert msg.is_bounce()
bounce_rep = bounce.mail_to_you_is_bouncing(msg)
assert bounce_rep
assert_equal(bounce_rep['to'], msg.bounce.final_recipient)
########NEW FILE########
__FILENAME__ = mailinglist_tests
from nose.tools import *
from app.model.mailinglist import *
from email.utils import parseaddr
from webapp.librelist.models import MailingList, Subscription
from lamson.mail import MailRequest, MailResponse
from lamson.testing import *
user_full_address = '"Zed A. Shaw" <[email protected]>'
user_name, user_address = parseaddr(user_full_address)
list_name = "test.lists"
def setup():
MailingList.objects.all().delete()
Subscription.objects.all().delete()
def test_create_list():
mlist = create_list(list_name)
assert mlist
mlist_found = find_list(list_name)
assert mlist_found
assert_equal(mlist.name, mlist_found.name)
# make sure create doesn't do it more than once
create_list(list_name)
assert_equal(MailingList.objects.filter(name = list_name).count(), 1)
delete_list(list_name)
def test_delete_list():
delete_list(list_name)
mlist = find_list(list_name)
assert not mlist, "Found list: %s, should not." % mlist
def test_remove_all_subscriptions():
test_add_subscriber()
remove_all_subscriptions(user_full_address)
subs = find_subscriptions(user_full_address)
assert_equal(len(subs), 0)
def test_add_subscriber():
remove_all_subscriptions(user_full_address)
sub = add_subscriber(user_full_address, list_name)
assert sub
assert_equal(sub.subscriber_address, user_address)
assert_equal(sub.subscriber_name, user_name)
subs = find_subscriptions(user_full_address)
assert_equal(len(subs), 1)
def test_remove_subscriber():
test_add_subscriber()
remove_subscriber(user_full_address, list_name)
subs = find_subscriptions(user_full_address, list_name=list_name)
assert_equal(len(subs), 0)
def test_post_message():
for i in range(0,3):
add_subscriber(user_full_address, list_name)
sample = MailResponse(To=list_name + "@librelist.com",
From=user_full_address,
Subject="Test post message.",
Body="I am telling you guys you are wrong.")
sample['Message-Id'] = '12313123123123123'
msg = MailRequest("fakepeer", sample['from'], sample['to'], str(sample))
post_message(relay(port=8825), msg, list_name, "librelist.com")
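# relay(port=8825) assumes a lamson test relay (conventionally the logging
# server on port 8825 in lamson projects) is listening; without a server
# there, post_message has nowhere to hand the crafted responses.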
def test_disable_enable_all_subscriptions():
test_add_subscriber()
disable_all_subscriptions(user_address)
assert not find_subscriptions(user_address)
enable_all_subscriptions(user_address)
assert find_subscriptions(user_address)
def test_similarily_named_lists():
test_names = ['test.lists', 'tests.list', 'querylists', 'evil.named',
'shouldnot', 'teller.list']
for name in test_names:
create_list(name)
similar = similar_named_lists(list_name)
assert_equal(len(similar), 2)
nothing = similar_named_lists("zed.shaw")
assert not nothing
similar = similar_named_lists('teler.list')
assert_equal(len(similar), 1)
def test_craft_response_attachment():
sample = MailResponse(To=list_name + "@librelist.com",
From=user_full_address,
Subject="Test message with attachments.",
Body="The body as one attachment.")
sample.attach(filename="tests/model/mailinglist_tests.py",
content_type="text/plain",
disposition="attachment")
sample['message-id'] = '123545666'
im = sample.to_message()
assert_equal(len([x for x in im.walk()]), 3)
inmsg = MailRequest("fakepeer", None, None, str(sample))
assert_equal(len(inmsg.all_parts()), 2)
outmsg = craft_response(inmsg, list_name, list_name +
"@librelist.com")
om = outmsg.to_message()
assert_equal(len([x for x in om.walk()]),
len([x for x in im.walk()]))
assert 'message-id' in outmsg
def test_craft_response_no_attachment():
sample = MailResponse(To=list_name + "@librelist.com",
From=user_full_address,
Subject="Test message with attachments.",
Body="The body as one attachment.")
im = sample.to_message()
assert_equal(len([x for x in im.walk()]), 1)
assert_equal(im.get_payload(), sample.Body)
inmsg = MailRequest("fakepeer", None, None, str(sample))
assert_equal(len(inmsg.all_parts()), 0)
assert_equal(inmsg.body(), sample.Body)
outmsg = craft_response(inmsg, list_name, list_name +
"@librelist.com")
om = outmsg.to_message()
assert_equal(om.get_payload(), sample.Body)
assert_equal(len([x for x in om.walk()]),
len([x for x in im.walk()]))
########NEW FILE########
__FILENAME__ = admin
from webapp.librelist.models import *
from django.contrib import admin
for m in [Confirmation, UserState, MailingList, Subscription]:
admin.site.register(m)
########NEW FILE########
__FILENAME__ = 0001_initial
from south.db import db
from django.db import models
from webapp.librelist.models import *
class Migration:
def forwards(self, orm):
# Adding model 'Subscription'
db.create_table('librelist_subscription', (
('subscriber_name', models.CharField(max_length=200)),
('enabled', models.BooleanField(default=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('subscriber_address', models.EmailField()),
('id', models.AutoField(primary_key=True)),
('mailing_list', models.ForeignKey(orm.MailingList)),
))
db.send_create_signal('librelist', ['Subscription'])
# Adding model 'UserState'
db.create_table('librelist_userstate', (
('created_on', models.DateTimeField(auto_now_add=True)),
('state', models.CharField(max_length=200)),
('id', models.AutoField(primary_key=True)),
('state_key', models.CharField(max_length=512)),
('from_address', models.EmailField()),
))
db.send_create_signal('librelist', ['UserState'])
# Adding model 'Confirmation'
db.create_table('librelist_confirmation', (
('from_address', models.EmailField()),
('request_date', models.DateTimeField(auto_now_add=True)),
('expected_secret', models.CharField(max_length=50)),
('pending_message_id', models.CharField(max_length=200)),
('list_name', models.CharField(max_length=200)),
('id', models.AutoField(primary_key=True)),
))
db.send_create_signal('librelist', ['Confirmation'])
# Adding model 'MailingList'
db.create_table('librelist_mailinglist', (
('name', models.CharField(max_length=512)),
('archive_url', models.CharField(max_length=512)),
('similarity_pri', models.CharField(max_length=50)),
('archive_queue', models.CharField(max_length=512)),
('similarity_sec', models.CharField(max_length=50, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('id', models.AutoField(primary_key=True)),
))
db.send_create_signal('librelist', ['MailingList'])
def backwards(self, orm):
# Deleting model 'Subscription'
db.delete_table('librelist_subscription')
# Deleting model 'UserState'
db.delete_table('librelist_userstate')
# Deleting model 'Confirmation'
db.delete_table('librelist_confirmation')
# Deleting model 'MailingList'
db.delete_table('librelist_mailinglist')
models = {
'librelist.subscription': {
'created_on': ('models.DateTimeField', [], {'auto_now_add': 'True'}),
'enabled': ('models.BooleanField', [], {'default': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'mailing_list': ('models.ForeignKey', ['MailingList'], {}),
'subscriber_address': ('models.EmailField', [], {}),
'subscriber_name': ('models.CharField', [], {'max_length': '200'})
},
'librelist.userstate': {
'created_on': ('models.DateTimeField', [], {'auto_now_add': 'True'}),
'from_address': ('models.EmailField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'state': ('models.CharField', [], {'max_length': '200'}),
'state_key': ('models.CharField', [], {'max_length': '512'})
},
'librelist.confirmation': {
'expected_secret': ('models.CharField', [], {'max_length': '50'}),
'from_address': ('models.EmailField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'list_name': ('models.CharField', [], {'max_length': '200'}),
'pending_message_id': ('models.CharField', [], {'max_length': '200'}),
'request_date': ('models.DateTimeField', [], {'auto_now_add': 'True'})
},
'librelist.mailinglist': {
'archive_queue': ('models.CharField', [], {'max_length': '512'}),
'archive_url': ('models.CharField', [], {'max_length': '512'}),
'created_on': ('models.DateTimeField', [], {'auto_now_add': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '512'}),
'similarity_pri': ('models.CharField', [], {'max_length': '50'}),
'similarity_sec': ('models.CharField', [], {'max_length': '50', 'null': 'True'})
}
}
complete_apps = ['librelist']
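# Assuming a standard South setup, this initial migration would be applied
# with something like:
#
#   python manage.py syncdb            # creates South's own bookkeeping tables
#   python manage.py migrate librelist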
########NEW FILE########
__FILENAME__ = models
from django.db import models
from datetime import datetime
from email.utils import formataddr
# Create your models here.
class Confirmation(models.Model):
from_address = models.EmailField()
request_date = models.DateTimeField(auto_now_add=True)
expected_secret = models.CharField(max_length=50)
pending_message_id = models.CharField(max_length=200)
list_name = models.CharField(max_length=200)
def __unicode__(self):
return self.from_address
class UserState(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
state_key = models.CharField(max_length=512)
from_address = models.EmailField()
state = models.CharField(max_length=200)
def __unicode__(self):
return "%s:%s (%s)" % (self.state_key, self.from_address, self.state)
class MailingList(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
archive_url = models.CharField(max_length=512)
archive_queue = models.CharField(max_length=512)
name = models.CharField(max_length=512)
similarity_pri = models.CharField(max_length=50)
similarity_sec = models.CharField(max_length=50, null=True)
def __unicode__(self):
return self.name
class Subscription(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
subscriber_address = models.EmailField()
subscriber_name = models.CharField(max_length=200)
mailing_list = models.ForeignKey(MailingList)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return '"%s" <%s>' % (self.subscriber_name, self.subscriber_address)
def subscriber_full_address(self):
return formataddr((self.subscriber_name, self.subscriber_address))
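# A minimal sketch of exercising these models from a Django shell (the list
# name and address below are purely illustrative):
#
#   mlist = MailingList.objects.create(name='example.list', archive_url='',
#                                      archive_queue='', similarity_pri='')
#   sub = Subscription.objects.create(subscriber_address='[email protected]',
#                                     subscriber_name='User', mailing_list=mlist)
#   sub.subscriber_full_address()      # -> 'User <[email protected]>'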
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
urlpatterns = patterns('',)
########NEW FILE########
__FILENAME__ = views
# Create your views here.
########NEW FILE########
__FILENAME__ = manage
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
########NEW FILE########
__FILENAME__ = settings
# Django settings for webapp project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Zed A. Shaw', '[email protected]'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = os.path.dirname(__file__) + '/../run/data.sqlite3' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '####################'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.admindocs',
'webapp.librelist',
'south',
)
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
(r'^/', include('webapp.librelist.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
)
########NEW FILE########
| [
"[email protected]"
] | |
fe086916c88a8fb986cfa09b74afb9490397ab2f | 86df6f8f4f3c03cccc96459ad82bcdf3bf942492 | /lintcode/majority-number.py | e759f4f141d0ff3c9823992ea18f1ff927931f25 | [] | no_license | bdliyq/algorithm | 369d1fd2ae3925a559ebae3fa8f5deab233daab1 | e1c993a5d1531e1fb10cd3c8d686f533c9a5cbc8 | refs/heads/master | 2016-08-11T21:49:31.259393 | 2016-04-05T11:10:30 | 2016-04-05T11:10:30 | 44,576,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | #!/usr/bin/env python
# encoding: utf-8
# Question: http://www.lintcode.com/en/problem/majority-number/
class Solution:
"""
@param nums: A list of integers
@return: The majority number
"""
def majorityNumber(self, nums):
        # write your code here
if len(nums) == 0:
return 0
count = 1
last_num = nums[0]
for n in nums[1:]:
if n == last_num:
count += 1
else:
count -= 1
if count == 0:
last_num = n
count += 1
return last_num
if __name__ == '__main__':
s = Solution()
print s.majorityNumber([1, 1, 1, 1, 2, 2, 2])
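# Expected output: 1. The Boyer-Moore vote keeps a single candidate plus a
# counter; the four 1s push the count up to 4, the three 2s only bring it
# back down to 1, so the true majority element survives.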
| [
"[email protected]"
] |