blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
51773aa0f9023d2645abc5899ed45f0596fa157c | 473ae3b2ea92549d18adc1b33c473edda0abd6cb | /back-end/news/newsapi/serializer.py | 5192fc39bfd70dcb7d0b3c57dcac1c22199fbdd6 | [] | no_license | Stelmaszv/news-api | 0e7aabc8425f8b4105ce278261034b2ea2ad3aee | 86bb933b882fda2c14ecabf352bfa50c875cf91f | refs/heads/master | 2023-01-09T17:27:05.918639 | 2020-03-23T10:47:56 | 2020-03-23T10:47:56 | 244,368,280 | 0 | 1 | null | 2023-01-07T15:29:37 | 2020-03-02T12:41:55 | Python | UTF-8 | Python | false | false | 184 | py | from .models import news
from rest_framework.serializers import ModelSerializer

class NewsSerializer(ModelSerializer):
    class Meta:
        model = news
        fields = '__all__' | [
"[email protected]"
] | |
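With `fields = '__all__'` the serializer above exposes every field of the `news` model. A minimal sketch of wiring it into a Django REST Framework view (the view class and file name are hypothetical, not part of this repo):

# views.py -- hypothetical usage sketch for NewsSerializer
from rest_framework.generics import ListCreateAPIView
from .models import news
from .serializer import NewsSerializer

class NewsList(ListCreateAPIView):
    # GET lists all news rows; POST creates one via the serializer.
    queryset = news.objects.all()
    serializer_class = NewsSerializer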
6aa14a16c6fa5277c86e7faedb75368cd0993321 | deaf60a5ba012e68f8509c0df0d35a5228419b71 | /慧聪网系列/慧聪网抓取22/慧聪网诚信/hui_cong_gong/hui_cong_gong/pipelines.py | 01a5437673b488171028c31edfad2a8828267ed3 | [] | no_license | kokohui/con_spider | 7162d8e58725d9334db5f1da34649cd1d1ef29ea | da1181b53e5cbca546d1bb749f9efc2f48e698f8 | refs/heads/master | 2022-03-03T19:37:33.721533 | 2019-08-22T10:05:32 | 2019-08-22T10:05:32 | 193,631,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,658 | py | import pymysql
class HuiCongGongPipeline(object):
    cursor = None  # MySQL cursor object, declared at class level
    cur = None  # will hold the active cursor
    def open_spider(self, spider):
        print('Spider started >>>>')
self.conn = pymysql.Connect(host='192.168.1.210', user='root', passwd='zhangxing888', db='ktcx_buschance',
port=3306,
charset='utf8')
        self.cur = self.conn.cursor()  # acquire a cursor
def process_item(self, item, spider):
print('process_item>>>>>>>>>>>>>>>>>>>>>>>')
        # look up the current max id in the database
        res_num = 0
        try:
            sql_1 = 'select max(id) from bus_user'
            self.cur.execute(sql_1)
            res_num = int(self.cur.fetchone()[0])
            print('res.......................', res_num)
        except Exception:
            print('Query error.')
        # count how many rows already store this company; only insert when absent
        # (parameterized so quotes in a company name cannot break the SQL)
        sql_count = "select count(0) from bus_user where company_name=%s"
        self.cur.execute(sql_count, (item['com_name'],))
        result = self.cur.fetchall()
        result_count = int(result[0][0])
        print('result_count........................', result_count)
if result_count == 0:
            # next id for the new row
            res_num = res_num + 1
            # perform the inserts
            try:
                # company record
sql = 'insert into `bus_user`(' \
'`id`,`name`, `logo`, `phone`, `password`, `source`, `type`, `state`, `plate_visit_num`, `plate_visit_pnum`,' \
' `product_visit_num`, `balance`, `growth`, `status`, `company_name`, `linkman`, `mobile`, `number`, `url`, `submit_date`,' \
' `by_date`, `domain_name`, `is_del`, `create_by`, `create_date`, `province_id`, `province_name`, `city_id`, `city_name`, `county_id`,' \
' `county_name`, `address`, `sub_summary`, `summary`, `sub_scopes`, `scopes`, `minglu_img`, `company_img`, `mapx`, `mapy`,' \
' `zip_code`, `email`, `qq`, `tel`, `website`, `total_fee`, `send_num`, `refresh_num`, `supply_inquiry_num`, `purchase_inquiry_num`,' \
' `ad_price`, `openid`, `provider_id`, `provider_name`, `channel_duty_id`, `channel_open_id`, `service_id`, `keywords`, `is_cx`) ' \
'VALUE' \
'(%s,%s,%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
self.cur.execute(sql, (
res_num, item['com_name'], item['list_img'], item['mobile'], '123456', 'pc', 'supply', '1', 0, 0,
0, 0, 0, '0', item['com_name'], item["linkman"], item['mobile'], '', '', item['create_date'],
item['create_date'], '', '0', '5fc530f6b8574e03b6f13794ec64c1f8', item['create_date'], '', '', '',
'', '',
'', item['address'], item['summary'], item['summary'], item['summary'], item['scopes'], '', '', '',
'', '', '', 123456, item['mobile'], '', 0, 0, 0, 0, 0,
0, '', '75cebe2e19434dcd9c4586f4621e6f9c', '', '', '', '', item['com_keyword'], 1))
                # third table: user-industry mapping
sql_in_2 = "insert into `bus_user_industry` (`create_by`, `one_level_id`, `two_level_id`, `three_level_id`, `sort`, `is_del`) values(%s,%s,%s,%s,%s,%s)"
self.cur.execute(sql_in_2, (
res_num, item['one_level_id'], item['two_level_id'], item['three_level_id'], '1', '0'))
except Exception as e:
                self.conn.rollback()  # roll back the transaction
                print('Transaction failed, rolled back')
raise e
else:
                self.conn.commit()  # commit the transaction
                print('Data inserted successfully')
            # store the product record
try:
sql_in = "INSERT INTO `bus_product` (`create_by`, `create_date`, `is_del`, `list_img`, `price`, `title`,`way`,`one_level_id`, `two_level_id`, `three_level_id`, `custom_id`, `keywords`,`models`,`standards`, `imgs`, `sort`, `update_time`, `state`, `is_verify`, `verify_remark`,`verify_time`, `verify_by`, `detail`, `types`, `start_time`, `end_time`, `num`, `units`,`money_units`, `province_id`, `province_name`, `city_id`, `city_name`, `view_count`,`inquiry_count`,`provider_id`, `provider_name`, `is_import`, `com_name`, `linkman`,`mobile`, `add_by`,`one_class_name`, `one_class_id`, `two_class_name`, `two_class_id`, `tree_class_name`, `tree_class_id`)" \
"VALUE " \
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
data = self.cur.execute(sql_in, (
res_num, item['create_date'], '0', item['list_img'], item['price'], item['title'],
item['way'],
item['one_level_id'], item['two_level_id'], item['three_level_id'], 0, item['keywords'], '',
'', item['imgs'], '1', item['create_date'], '1', '1', 0,
item['create_date'], '', item['detail'], '0', item['create_date'], item['create_date'], 1,
item['units'],
'元', '', '', '', '', '0', '0',
'1ec40ecd3cf64908941b5f7679f19d2b', '', '0', item['com_name'], item['linkman'], item['mobile'],
'43e9737882af413095f612ef34412a8f', item['one_class_name'], '',
item['two_class_name'], '', item['tree_class_name'],
                    item['tree_class_id']))  # single-row insert
print('.......................................')
print('data', data)
                self.conn.commit()  # commit
                print('Insert succeeded')
except Exception as e:
raise e
return item
def close_spider(self, spider):
# sql_id = "SELECT id FROM bus_spider_data WHERE source='慧聪网'AND TYPE = 'gongying' AND is_del = '0' AND isuse = '0' ORDER BY create_date LIMIT 1 "
# self.cur.execute(sql_id)
# res_all_list = self.cur.fetchall()
# id = res_all_list[0][0]
# sql_insert = "UPDATE ktcx_buschance.bus_spider_data SET isuse='1' WHERE id={}".format(id)
# print(sql_insert)
# self.cur.execute(sql_insert)
# self.conn.commit()
        print('Spider finished >>>>')
self.cur.close()
self.conn.close()
| [
"[email protected]"
] | |
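The count-then-insert pattern in `process_item` above is racy if several crawler processes share the table. A sketch of an alternative that pushes deduplication into MySQL, assuming a UNIQUE index on `company_name` (no such index is shown in this repo), using the pipeline's own names:

# Assumes: ALTER TABLE bus_user ADD UNIQUE KEY uq_company (company_name);
sql = "INSERT IGNORE INTO bus_user (id, name, company_name) VALUES (%s, %s, %s)"
self.cur.execute(sql, (res_num, item['com_name'], item['com_name']))
self.conn.commit()  # a row with a duplicate company_name is silently skipped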
48874e51e27fff452749f385f2d2110852fbf097 | b1bf615bfa1ee2065e3adfe90310814c3b27c61d | /2021-3-5/linked-list-random-node.py | ca1fb15ecae5681d7901a7c15f9a0a1db9a706f2 | [] | no_license | Huajiecheng/leetcode | 73b09a88e61ea3b16ca3bf440fadd1470652ccf2 | 4becf814a2a06611ee909ec700380ab83ac8ab99 | refs/heads/main | 2023-03-19T21:54:20.952909 | 2021-03-06T03:34:52 | 2021-03-06T03:34:52 | 320,959,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
import random

class Solution:
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self.head = head
self.vals = self.return_vals(self.head)
def return_vals(self, head):
vals = list()
while head:
vals.append(head.val)
head = head.next
return vals
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
rand = random.randint(0,len(self.vals)-1)
return self.vals[rand]
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom() | [
"[email protected]"
] | |
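The solution above copies every node value into a list, which costs O(n) extra space. The usual follow-up to this problem asks for O(1) space; a reservoir-sampling sketch (k = 1) that walks the list on each call:

import random

def get_random_reservoir(head):
    # Keep the i-th value with probability 1/i; after the walk every node
    # has been selected with probability 1/n, using O(1) extra space.
    chosen, i, node = None, 0, head
    while node:
        i += 1
        if random.randrange(i) == 0:
            chosen = node.val
        node = node.next
    return chosen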
28a7c9652a39e99bb330602f434d02b8f724b3d2 | 13f9fbda48604fe1d588e03460d9cba0d6ffd376 | /venv/Lib/site-packages/bs4/builder/__init__.py | 100fa153a6a9213dcbec6918cdcb3d1e11cb84cf | [] | no_license | gogoheejun/HangHae-01-miniproject | 96e4bb128bcaecf0603bce43265872cc12090980 | c1ac7d77a2e7d23ecc4aa8a06fb7ad939fb53872 | refs/heads/master | 2023-08-17T05:10:10.408421 | 2021-09-17T13:00:56 | 2021-09-17T13:00:56 | 406,779,223 | 0 | 1 | null | 2021-09-18T04:20:28 | 2021-09-15T13:35:46 | Python | UTF-8 | Python | false | false | 20,390 | py | # Use of this source code is governed by the MIT license.
__license__ = "MIT"
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
Stylesheet,
Script,
TemplateString,
nonwhitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
"""A way of looking up TreeBuilder subclasses by their name or by desired
features.
"""
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features.
:param treebuilder_class: A subclass of Treebuilder. its .features
attribute should list its features.
"""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
"""Look up a TreeBuilder subclass with the desired features.
:param features: A list of features to look for. If none are
provided, the most recently registered TreeBuilder subclass
will be used.
:return: A TreeBuilder subclass, or None if there's no
registered subclass with all the requested features.
"""
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a textual document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
DEFAULT_CDATA_LIST_ATTRIBUTES = {}
# Whitespace should be preserved inside these tags.
DEFAULT_PRESERVE_WHITESPACE_TAGS = set()
# The textual contents of tags with these names should be
# instantiated with some class other than NavigableString.
DEFAULT_STRING_CONTAINERS = {}
USE_DEFAULT = object()
# Most parsers don't keep track of line numbers.
TRACKS_LINE_NUMBERS = False
def __init__(self, multi_valued_attributes=USE_DEFAULT,
preserve_whitespace_tags=USE_DEFAULT,
store_line_numbers=USE_DEFAULT,
string_containers=USE_DEFAULT,
):
"""Constructor.
:param multi_valued_attributes: If this is set to None, the
TreeBuilder will not turn any values for attributes like
'class' into lists. Setting this to a dictionary will
customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
for an example.
Internally, these are called "CDATA list attributes", but that
probably doesn't make sense to an end-user, so the argument name
is `multi_valued_attributes`.
:param preserve_whitespace_tags: A list of tags to treat
the way <pre> tags are treated in HTML. Tags in this list
are immune from pretty-printing; their contents will always be
output as-is.
:param string_containers: A dictionary mapping tag names to
the classes that should be instantiated to contain the textual
contents of those tags. The default is to use NavigableString
for every tag, no matter what the name. You can override the
default by changing DEFAULT_STRING_CONTAINERS.
:param store_line_numbers: If the parser keeps track of the
line numbers and positions of the original markup, that
information will, by default, be stored in each corresponding
`Tag` object. You can turn this off by passing
store_line_numbers=False. If the parser you're using doesn't
keep track of this information, then setting store_line_numbers=True
will do nothing.
"""
self.soup = None
if multi_valued_attributes is self.USE_DEFAULT:
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
self.cdata_list_attributes = multi_valued_attributes
if preserve_whitespace_tags is self.USE_DEFAULT:
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
self.preserve_whitespace_tags = preserve_whitespace_tags
if store_line_numbers == self.USE_DEFAULT:
store_line_numbers = self.TRACKS_LINE_NUMBERS
self.store_line_numbers = store_line_numbers
if string_containers == self.USE_DEFAULT:
string_containers = self.DEFAULT_STRING_CONTAINERS
self.string_containers = string_containers
def initialize_soup(self, soup):
"""The BeautifulSoup object has been initialized and is now
being associated with the TreeBuilder.
:param soup: A BeautifulSoup object.
"""
self.soup = soup
def reset(self):
"""Do any work necessary to reset the underlying parser
for a new document.
By default, this does nothing.
"""
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p/>" or "<p>".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no children.
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
be left alone.
:param tag_name: The name of a markup tag.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
"""Run some incoming markup through some parsing process,
populating the `BeautifulSoup` object in self.soup.
This method is not implemented in TreeBuilder; it must be
implemented in subclasses.
:return: None.
"""
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.
:param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding. NOTE: This argument is not used by the
calling code and can probably be removed.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.
:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.
By default, the only strategy is to parse the markup
as-is. See `LXMLTreeBuilderForXML` and
`HTMLParserTreeBuilder` for implementations that take into
account the quirks of particular parsers.
"""
yield markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
:param fragment: A string -- fragment of HTML.
:return: A string -- a full HTML document.
"""
return fragment
def set_up_substitutions(self, tag):
"""Set up any substitutions that will need to be performed on
a `Tag` when it's output as a string.
By default, this does nothing. See `HTMLTreeBuilder` for a
case where this is used.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""When an attribute value is associated with a tag that can
have multiple values for that attribute, convert the string
value to a list of strings.
Basically, replaces class="foo bar" with class=["foo", "bar"]
NOTE: This method modifies its input in place.
:param tag_name: The name of a tag.
:param attrs: A dictionary containing the tag's attributes.
Any appropriate attribute values will be modified in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in list(attrs.keys()):
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, str):
values = nonwhitespace_re.findall(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events.
This is not currently used for anything, but it demonstrates
how a simple TreeBuilder would work.
"""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print("Start %s, %r" % (name, attrs))
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print("End %s" % name)
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
# The HTML standard defines an unusual content model for these tags.
# We represent this by using a string class other than NavigableString
# inside these tags.
#
# I made this list by going through the HTML spec
# (https://html.spec.whatwg.org/#metadata-content) and looking for
# "metadata content" elements that can contain strings.
#
# TODO: Arguably <noscript> could go here but it seems
# qualitatively different from the other tags.
DEFAULT_STRING_CONTAINERS = {
'style': Stylesheet,
'script': Script,
'template': TemplateString,
}
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
DEFAULT_CDATA_LIST_ATTRIBUTES = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
def set_up_substitutions(self, tag):
"""Replace the declared encoding in a <meta> tag with a placeholder,
to be substituted when the tag is output to a string.
An HTML document may come in to Beautiful Soup as one
encoding, but exit in a different encoding, and the <meta> tag
needs to be changed to reflect this.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
this_module = sys.modules[__name__]
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
"""An Exception to be raised when the underlying parser simply
refuses to parse the given markup.
"""
def __init__(self, message_or_exception):
"""Explain why the parser rejected the given markup, either
with a textual explanation or another exception.
"""
if isinstance(message_or_exception, Exception):
e = message_or_exception
message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
super(ParserRejectedMarkup, self).__init__(message_or_exception)
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
| [
"[email protected]"
] | |
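A short usage sketch for the registry defined above (assuming beautifulsoup4 is installed, with lxml optionally present); this mirrors how BeautifulSoup resolves a features list into a parser class:

from bs4.builder import builder_registry

# Returns the most recently registered builder advertising both features,
# or None if no registered builder has them all.
builder_class = builder_registry.lookup('html', 'fast')
if builder_class is not None:
    print(builder_class.NAME)  # e.g. 'lxml' when lxml is installed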
7abe0c4b9e89b36e941c342c156fa86726cb9d19 | 50025b693f437cd43e27282daadef67d7b77c5e0 | /models/transformer_model.py | d9e945f449b61d42cfaa7e03d2b04dc6c9913a57 | [] | no_license | MatthijsBiondina/Memory-Augmented-Neural-Networks | b29e99a2b167ce94e03b1eea63afd90049ddcb47 | 510e6f6703775db9ade6b8599744623b9b58f074 | refs/heads/master | 2023-01-11T00:33:41.024739 | 2020-11-06T16:32:09 | 2020-11-06T16:32:09 | 301,348,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import torch
from torch import nn, Tensor
from torch.nn import functional as F
import utils.config as cfg
BATCH_DIM, CHANNEL_DIM, LENGTH_DIM = 0, 1, 2
class TransformerModel(nn.Module):
def __init__(self):
super(TransformerModel, self).__init__()
self.line_prep = nn.Linear(cfg.input_size, cfg.num_units)
self.transformer = nn.Transformer(d_model=cfg.num_units, nhead=10, num_encoder_layers=12)
self.line_post = nn.Linear(cfg.num_units, cfg.output_size)
def forward(self, src: Tensor, mask=None, return_sequence=False):
src = src.transpose(1, 2).transpose(0, 1)
src = self.line_prep(src)
tgt = torch.zeros_like(src)
tgt = self.transformer(src, tgt)
out = cfg.output_func(self.line_post(tgt))
out = out.transpose(0, 1).transpose(1, 2)
return out
@property
def device(self):
return self.line_prep.bias.device
| [
"[email protected]"
] | |
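A smoke-test sketch for the model above. The concrete `cfg` values are assumptions for illustration (the real ones live in `utils/config.py`, which is not shown), and `cfg.num_units` must be divisible by the 10 attention heads:

import torch

# Hypothetical config: cfg.input_size = 8, cfg.num_units = 100,
# cfg.output_size = 8, cfg.output_func = torch.sigmoid.
model = TransformerModel()
src = torch.randn(4, 8, 16)  # (batch, input_size channels, sequence length)
out = model(src)             # shape: (batch, output_size, sequence length)
print(out.shape)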
306c04c7f95a6a4e5a99170f0e7f803142db8232 | cf7d96bdd34205ede987f0985dfc9e3ab415ee06 | /ad_covering_doc/wizard/covering_doc_onshipping.py | e959848948a72a36f9ae05490d5e15731633b6aa | [] | no_license | hendrasaputra0501/btxjalan | afc93467d54a6f20ef6ac46f7359e964ad5d42a0 | d02bc085ad03efc982460d77f7af1eb5641db729 | refs/heads/master | 2020-12-30T11:02:05.416120 | 2017-07-31T01:34:08 | 2017-07-31T01:34:08 | 98,836,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class covering_doc_onshipping(osv.osv_memory):
_name = "covering.doc.onshipping"
_description = "Covering Document Onshipping"
_columns = {
'group': fields.boolean("Group by Schedule Date"),
'date': fields.date('Creation date'),
}
_defaults = {
'group' : True,
}
def open_covering_doc(self, cr, uid, ids, context=None):
if context is None:
context = {}
covering_ids = []
data_pool = self.pool.get('ir.model.data')
active_ids = context.get('active_ids', [])
do_pool=self.pool.get('stock.picking')
pickings=do_pool.browse(cr,uid,active_ids)
invoice_ids=[x.invoice_id.id for x in pickings if x.invoice_id]
        if len(invoice_ids) < len(active_ids):
            raise osv.except_osv(_('Error!'), _('One of the delivery orders does not have an invoice yet'))
applicant_ids=[x.invoice_id.partner_id.id for x in pickings if x.invoice_id.partner_id.id]
        if len(set(applicant_ids)) > 1:
            raise osv.except_osv(_('Error!'), _('The applicant must be the same on all invoices'))
res = self.create_covering_doc(cr, uid, ids, context=context)
covering_ids += [res]
action_model = False
action = {}
# if not covering_ids:
# raise osv.except_osv(_('Error!'), _('Please create Cover.'))
action_model,action_id = data_pool.get_object_reference(cr, uid, 'ad_covering_doc', "action_covering_doc")
if action_model:
action_pool = self.pool.get(action_model)
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(str,covering_ids))+"])]"
return action
def create_covering_doc(self, cr, uid, ids, context=None):
if context is None:
context = {}
covering_pool = self.pool.get('covering.doc')
active_ids = context.get('active_ids', [])
do_pool=self.pool.get('stock.picking')
invoice_ids=[]
for lines in active_ids:
do=do_pool.browse(cr, uid,lines)
invoice_id=do.invoice_id.id
invoice_ids.append(invoice_id)
consignee_id=do.invoice_id.partner_id.id
res=covering_pool.create(cr,uid,{
'date' : time.strftime("%Y-%m-%d"),
'consignee_id' : consignee_id,
'invoice_ids' : [(6,0,list(set(invoice_ids)))],
},context=context)
return res
covering_doc_onshipping() | [
"[email protected]"
] | |
71d18a339f1b635e154e3405773182b72cdbc9d5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03011/s920009528.py | a440fe2b8de7164c55132fdded3ac14222505c8d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def main():
    p, q, r = map(int, input().split())
    print(min(p + q, q + r, p + r))
main() | [
"[email protected]"
] | |
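A worked example for the snippet above: the task reduces to the cheapest pair among three costs, so for input `1 3 4` the candidate sums are 1+3=4, 3+4=7 and 1+4=5, and the program prints 4.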
07d52412d20ea4ec85aa04fd4b20a37ddf627a91 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/pendl.py | 1f55494f2d716915dafccf5f5c00b43991dd1aba | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 24 | py | ii = [('BrewDTO.py', 4)] | [
"[email protected]"
] | |
46a12423a8d6c6b20f8ab09c075dec14e3b86ed4 | b6b30fb06124883b074144c419b43d9182efcdff | /GA/knights.py | 04d7d422c42d92594d18da47964b6a7db8d1ff86 | [] | no_license | JohnnySunkel/BlueSky | da9f5107034289bfbdd3ba40458f9b9bd8d01a13 | 5a20eba9ef7509a5a7b7af86e7be848242e1a72f | refs/heads/master | 2021-07-07T09:57:37.256950 | 2020-09-02T23:06:46 | 2020-09-02T23:06:46 | 166,883,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | import random
import datetime
import unittest
import genetic
def get_fitness(genes, boardWidth, boardHeight):
attacked = set(pos
for kn in genes
for pos in get_attacks(kn, boardWidth, boardHeight))
return len(attacked)
def display(candidate, startTime, boardWidth, boardHeight):
timeDiff = datetime.datetime.now() - startTime
board = Board(candidate.Genes, boardWidth, boardHeight)
board.print()
print("{}\n\t{}\t{}".format(
' '.join(map(str, candidate.Genes)),
candidate.Fitness,
timeDiff))
def mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions):
count = 2 if random.randint(0, 10) == 0 else 1
while count > 0:
count -= 1
positionToKnightIndexes = dict((p, []) for p in allPositions)
for i, knight in enumerate(genes):
for position in get_attacks(knight, boardWidth, boardHeight):
positionToKnightIndexes[position].append(i)
knightIndexes = set(i for i in range(len(genes)))
unattacked = []
for kvp in positionToKnightIndexes.items():
if len(kvp[1]) > 1:
continue
if len(kvp[1]) == 0:
unattacked.append(kvp[0])
continue
for p in kvp[1]: # len == 1
if p in knightIndexes:
knightIndexes.remove(p)
potentialKnightPositions = \
[p for positions in
map(lambda x: get_attacks(x, boardWidth, boardHeight),
unattacked)
for p in positions if p in nonEdgePositions] \
if len(unattacked) > 0 else nonEdgePositions
geneIndex = random.randrange(0, len(genes)) \
if len(knightIndexes) == 0 \
else random.choice([i for i in knightIndexes])
position = random.choice(potentialKnightPositions)
genes[geneIndex] = position
def create(fnGetRandomPosition, expectedKnights):
genes = [fnGetRandomPosition() for _ in range(expectedKnights)]
return genes
def get_attacks(location, boardWidth, boardHeight):
return [i for i in set(
Position(x + location.X, y + location.Y)
for x in [-2, -1, 1, 2] if 0 <= x + location.X < boardWidth
for y in [-2, -1, 1, 2] if 0 <= y + location.Y < boardHeight
and abs(y) != abs(x))]
class KnightsTests(unittest.TestCase):
def test_3x4(self):
width = 4
height = 3
# 1,0 2,0 3,0
# 0,2 1,2 2,0
# 2 N N N .
# 1 . . . .
# 0 . N N N
# 0 1 2 3
self.find_knight_positions(width, height, 6)
def test_8x8(self):
width = 8
height = 8
self.find_knight_positions(width, height, 14)
def test_10x10(self):
width = 10
height = 10
self.find_knight_positions(width, height, 22)
def test_benchmark(self):
genetic.Benchmark.run(lambda: self.test_10x10())
def find_knight_positions(self, boardWidth, boardHeight,
expectedKnights):
startTime = datetime.datetime.now()
def fnDisplay(candidate):
display(candidate, startTime, boardWidth, boardHeight)
def fnGetFitness(genes):
return get_fitness(genes, boardWidth, boardHeight)
allPositions = [Position(x, y)
for y in range(boardHeight)
for x in range(boardWidth)]
if boardWidth < 6 or boardHeight < 6:
nonEdgePositions = allPositions
else:
nonEdgePositions = [i for i in allPositions
if 0 < i.X < boardWidth - 1 and
0 < i.Y < boardHeight - 1]
def fnGetRandomPosition():
return random.choice(nonEdgePositions)
def fnMutate(genes):
mutate(genes, boardWidth, boardHeight, allPositions,
nonEdgePositions)
def fnCreate():
return create(fnGetRandomPosition, expectedKnights)
optimalFitness = boardWidth * boardHeight
best = genetic.get_best(fnGetFitness, None, optimalFitness,
None, fnDisplay, fnMutate, fnCreate)
self.assertTrue(not optimalFitness > best.Fitness)
class Position:
def __init__(self, x, y):
self.X = x
self.Y = y
def __str__(self):
return "{},{}".format(self.X, self.Y)
def __eq__(self, other):
return self.X == other.X and self.Y == other.Y
def __hash__(self):
return self.X * 1000 + self.Y
class Board:
def __init__(self, positions, width, height):
board = [['.'] * width for _ in range(height)]
for index in range(len(positions)):
knightPosition = positions[index]
board[knightPosition.Y][knightPosition.X] = 'N'
self._board = board
self._width = width
self._height = height
def print(self):
# 0,0 prints in bottom left corner
for i in reversed(range(self._height)):
print(i, "\t", ' '.join(self._board[i]))
print(" \t", ' '.join(map(str, range(self._width))))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
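To make `get_attacks` above concrete: a corner square has exactly two legal knight moves. A tiny check using the classes from this file:

# From square (0,0) on an 8x8 board a knight attacks only (1,2) and (2,1).
corner_moves = get_attacks(Position(0, 0), 8, 8)
print(sorted(str(p) for p in corner_moves))  # ['1,2', '2,1']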
2ae877bd29510457c3407d98e4d00cfde6085d75 | 07e8eaeaa6f3493546ba6b499be1593252f3c773 | /tests/opytimizer/optimizers/social/test_bso.py | 1b7810493c269d228b55c0c519a4f952dd8f4efa | [
"Apache-2.0"
] | permissive | himanshuRepo/opytimizer | 91dd848fffbe85736d8074169d515e46a8b54d74 | 09e5485b9e30eca622ad404e85c22de0c42c8abd | refs/heads/master | 2023-07-20T18:16:00.565759 | 2021-09-02T19:40:43 | 2021-09-02T19:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,984 | py | import numpy as np
from opytimizer.optimizers.social import bso
from opytimizer.spaces import search
def test_bso_params():
params = {
'm': 5,
'p_replacement_cluster': 0.2,
'p_single_cluster': 0.8,
'p_single_best': 0.4,
'p_double_best': 0.5,
'k': 20
}
new_bso = bso.BSO(params=params)
assert new_bso.m == 5
assert new_bso.p_replacement_cluster == 0.2
assert new_bso.p_single_cluster == 0.8
assert new_bso.p_single_best == 0.4
assert new_bso.p_double_best == 0.5
assert new_bso.k == 20
def test_bso_params_setter():
new_bso = bso.BSO()
try:
new_bso.m = 'a'
except:
new_bso.m = 5
assert new_bso.m == 5
try:
new_bso.m = -1
except:
new_bso.m = 5
assert new_bso.m == 5
try:
new_bso.p_replacement_cluster = 'b'
except:
new_bso.p_replacement_cluster = 0.2
assert new_bso.p_replacement_cluster == 0.2
try:
new_bso.p_replacement_cluster = -1
except:
new_bso.p_replacement_cluster = 0.2
assert new_bso.p_replacement_cluster == 0.2
try:
new_bso.p_single_cluster = 'c'
except:
new_bso.p_single_cluster = 0.8
assert new_bso.p_single_cluster == 0.8
try:
new_bso.p_single_cluster = -1
except:
new_bso.p_single_cluster = 0.8
assert new_bso.p_single_cluster == 0.8
try:
new_bso.p_single_best = 'd'
except:
new_bso.p_single_best = 0.4
assert new_bso.p_single_best == 0.4
try:
new_bso.p_single_best = -1
except:
new_bso.p_single_best = 0.4
assert new_bso.p_single_best == 0.4
try:
new_bso.p_double_best = 'e'
except:
new_bso.p_double_best = 0.5
assert new_bso.p_double_best == 0.5
try:
new_bso.p_double_best = -1
except:
new_bso.p_double_best = 0.5
assert new_bso.p_double_best == 0.5
try:
new_bso.k = 'f'
except:
new_bso.k = 20
assert new_bso.k == 20
try:
new_bso.k = -1
except:
new_bso.k = 20
assert new_bso.k == 20
def test_bso_clusterize():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_bso = bso.BSO()
new_bso._clusterize(search_space.agents)
def test_bso_sigmoid():
new_bso = bso.BSO()
x = 0.5
y = new_bso._sigmoid(x)
assert y == 0.6224593312018546
def test_bso_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=50, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_bso = bso.BSO()
new_bso.evaluate(search_space, square)
new_bso.update(search_space, square, 1, 10)
new_bso.p_replacement_cluster = 1
new_bso.update(search_space, square, 1, 10)
| [
"[email protected]"
] | |
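The constant asserted in `test_bso_sigmoid` above checks out by hand: sigmoid(x) = 1 / (1 + e^(-x)), so sigmoid(0.5) = 1 / (1 + e^(-0.5)) ≈ 0.6224593312018546.

import math
print(1 / (1 + math.exp(-0.5)))  # 0.6224593312018546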
6e24ab11cd0ec830788f36d1221a31b0ac317caf | 1391218903f06d62735d973829205a6b32754d64 | /lib/kb_hisat2/util.py | 49f9a18d9f86f26a395628eccd7293ab313faff1 | [
"MIT"
] | permissive | JamesJeffryes/kb_hisat2 | 9ff28b19c9f8fcaea8291a79ed05c8a2636a9438 | 9179a9d44d8ae3b6e149d27b64052004eef17fd2 | refs/heads/master | 2020-03-28T11:12:29.078124 | 2018-08-20T18:28:20 | 2018-08-20T18:28:20 | 148,188,592 | 0 | 0 | null | 2018-09-10T16:51:25 | 2018-09-10T16:51:25 | null | UTF-8 | Python | false | false | 6,051 | py | """
Some utility functions for the HISAT2 module.
These mainly deal with manipulating files from Workspace objects.
There's also some parameter checking and munging functions.
"""
from __future__ import print_function
import re
from pprint import pprint
from Workspace.WorkspaceClient import Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
def check_hisat2_parameters(params, ws_url):
"""
Checks to ensure that the hisat2 parameter set is correct and has the right
mash of options.
Returns a list of error strings if there's a problem, or just an empty list otherwise.
"""
errors = list()
# parameter keys and rules:
# -------------------------
# ws_name - workspace name, string, required
# alignmentset_name - output object name, string, required
# string sampleset_ref - input reads object ref, string, required
# string genome_ref - input genome object ref, string, required
# num_threads - int, >= 1, optional
# quality_score - string, one of phred33 or phred64, optional (default phred33)
# skip - int, >= 0, optional
# trim3 - int, >= 0, optional
# trim5 - int, >= 0, optional
# np - int,
# minins - int,
# maxins - int,
# orientation - string, one of fr, rr, rf, ff, optional (default fr)
# min_intron_length, int, >= 0, required
# int max_intron_length - int, >= 0, required
# bool no_spliced_alignment - 0 or 1, optional (default 0)
# string tailor_alignments - string ...?
print("Checking input parameters")
pprint(params)
if "ws_name" not in params or not valid_string(params["ws_name"]):
errors.append("Parameter ws_name must be a valid workspace "
"name, not {}".format(params.get("ws_name", None)))
if "alignment_suffix" not in params or not valid_string(params["alignment_suffix"]):
errors.append("Parameter alignment_suffix must be a valid Workspace object string, "
"not {}".format(params.get("alignment_suffix", None)))
if "sampleset_ref" not in params or not valid_string(params["sampleset_ref"], is_ref=True):
errors.append("Parameter sampleset_ref must be a valid Workspace object reference, "
"not {}".format(params.get("sampleset_ref", None)))
    elif check_ref_type(params["sampleset_ref"], ["PairedEndLibrary", "SingleEndLibrary"], ws_url):
if "condition" not in params or not valid_string(params["condition"]):
errors.append("Parameter condition is required for a single "
"PairedEndLibrary or SingleEndLibrary")
if "genome_ref" not in params or not valid_string(params["genome_ref"], is_ref=True):
errors.append("Parameter genome_ref must be a valid Workspace object reference, "
"not {}".format(params.get("genome_ref", None)))
return errors
def valid_string(s, is_ref=False):
is_valid = isinstance(s, basestring) and len(s.strip()) > 0
if is_valid and is_ref:
is_valid = check_reference(s)
return is_valid
def check_reference(ref):
"""
Tests the given ref string to make sure it conforms to the expected
object reference format. Returns True if it passes, False otherwise.
"""
obj_ref_regex = re.compile("^(?P<wsid>\d+)\/(?P<objid>\d+)(\/(?P<ver>\d+))?$")
ref_path = ref.strip().split(";")
for step in ref_path:
if not obj_ref_regex.match(step):
return False
return True
def is_set(ref, ws_url):
return check_ref_type(ref, ["sampleset", "readsset"], ws_url)
def check_ref_type(ref, allowed_types, ws_url):
"""
Validates the object type of ref against the list of allowed types. If it passes, this
returns True, otherwise False.
Really, all this does is verify that at least one of the strings in allowed_types is
a substring of the ref object type name.
Ex1:
ref = 11/22/33, which is a "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "KBaseFile.Assembly"]
returns False
Ex2:
ref = 44/55/66, which is a "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "genome"]
returns True
"""
obj_type = get_object_type(ref, ws_url).lower()
for t in allowed_types:
if t.lower() in obj_type:
return True
return False
def get_object_type(ref, ws_url):
"""
Fetches and returns the typed object name of ref from the given workspace url.
If that object doesn't exist, or there's another Workspace error, this raises a
RuntimeError exception.
"""
ws = Workspace(ws_url)
info = ws.get_object_info3({"objects": [{"ref": ref}]})
obj_info = info.get("infos", [[]])[0]
if len(obj_info) == 0:
raise RuntimeError("An error occurred while fetching type info from the Workspace. "
"No information returned for reference {}".format(ref))
return obj_info[2]
def get_object_names(ref_list, ws_url):
"""
From a list of workspace references, returns a mapping from ref -> name of the object.
"""
ws = Workspace(ws_url)
obj_ids = list()
for ref in ref_list:
obj_ids.append({"ref": ref})
info = ws.get_object_info3({"objects": obj_ids})
name_map = dict()
# might be in a data palette, so we can't just use the ref.
# we already have the refs as passed previously, so use those for mapping, as they're in
# the same order as what's returned.
for i in range(len(info["infos"])):
name_map[ref_list[i]] = info["infos"][i][1]
return name_map
def package_directory(callback_url, dir_path, zip_file_name, zip_file_description):
''' Simple utility for packaging a folder and saving to shock '''
dfu = DataFileUtil(callback_url)
output = dfu.file_to_shock({'file_path': dir_path,
'make_handle': 0,
'pack': 'zip'})
return {'shock_id': output['shock_id'],
'name': zip_file_name,
'description': zip_file_description}
| [
"[email protected]"
] | |
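A few illustrative calls against the regex in `check_reference` above (the refs here are made-up values, not real workspace objects):

print(check_reference("11/22/33"))        # True: wsid/objid/version
print(check_reference("11/22"))           # True: the version part is optional
print(check_reference("11/22/33;44/55"))  # True: ';' separates reference paths
print(check_reference("not/a/ref"))       # False: each part must be digits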
88a0541b16ace96c3908ebeeb1ad3d953465b14c | fc64ba451a7a78238d28400a013ca366a96beb05 | /pic_analyzer_python/finalwork..py | 86cf3dac6af7d3b8e4fc8282bbd4a2b5ca2f8ffe | [
"MIT"
] | permissive | 610yilingliu/simi_pic_detection | e11204147d4098e488babb4a7119d867815683e7 | b523a388831e1968fadd065bb8e77099f9f80567 | refs/heads/master | 2021-02-20T09:13:20.405716 | 2020-03-09T05:45:12 | 2020-03-09T05:45:12 | 245,332,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
pic1 = Image.open('../pics/edited_colorlevel_2.jpg')
pic2 = Image.open('../pics/edited_colorlevel_1.jpg')
w = 65
h = 64
def count_dhash(image_obj, width, height):
    # Standard dHash: resize, grayscale, then set a bit when a pixel is
    # brighter than its right-hand neighbour (a brightness gradient).
    image = image_obj.resize((width, height), Image.ANTIALIAS)
    gray_img = image.convert("L")
    image_np = np.array(gray_img)  # shape is (height, width), indexed [row, col]
    showimg(image_np)
    binary = [0] * ((width - 1) * height)
    for i in range(width - 1):
        for j in range(height):
            if image_np[j, i] > image_np[j, i + 1]:
                binary[(len(binary) - 1) - (j * (width - 1) + i)] = 1
    binary_str = ''
    for i in range(len(binary)):
        binary_str += str(binary[i])
    # Encode as octal; zero-pad both hashes to the same length so they can
    # be compared position by position.
    octal = oct(int(binary_str, 2))
    final = str(octal).zfill(len(binary))
    return final
def hamming_dist(octal1, octal2):
    # Count positions where the two equal-length hash strings differ.
    diff = 0
    for i in range(len(octal1)):
        if octal1[i] != octal2[i]:
            diff += 1
    return diff
def showimg(image):
plt.imshow(image)
plt.show()
if __name__ == '__main__':
d1 = count_dhash(pic1, w, h)
d2 = count_dhash(pic2, w, h)
print(hamming_dist(d1,d2)) | [
"[email protected]"
] | |
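On reading the printed value: for difference hashes, smaller Hamming distances mean more similar images, and a distance of 0 means identical hashes. A sketch of normalizing the distance into a similarity ratio, reusing the names from the script above:

d1 = count_dhash(pic1, w, h)
d2 = count_dhash(pic2, w, h)
dist = hamming_dist(d1, d2)
similarity = 1 - dist / len(d1)  # 1.0 = identical hashes, 0.0 = all positions differ
print(similarity)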
c7e82e376d6a927d0d5020f13ec78bad68ff39ff | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/servicebus/azure-servicebus/azure/servicebus/aio/management/_management_client_async.py | da2c08ec1e5a0f6afdf0324df3f3fa45fa71687c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 53,244 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=protected-access
# pylint:disable=specify-parameter-names-in-call
# pylint:disable=too-many-lines
import functools
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Union, cast, Mapping
from xml.etree.ElementTree import ElementTree
from azure.core.async_paging import AsyncItemPaged
from azure.core.exceptions import ResourceNotFoundError
from azure.core.pipeline import AsyncPipeline
from azure.core.pipeline.policies import (
HttpLoggingPolicy,
DistributedTracingPolicy,
ContentDecodePolicy,
RequestIdPolicy,
AsyncBearerTokenCredentialPolicy,
)
from azure.core.pipeline.transport import AioHttpTransport
from ...management._generated.models import (
QueueDescriptionFeed,
TopicDescriptionEntry,
QueueDescriptionEntry,
SubscriptionDescriptionFeed,
SubscriptionDescriptionEntry,
RuleDescriptionEntry,
RuleDescriptionFeed,
NamespacePropertiesEntry,
CreateTopicBody,
CreateTopicBodyContent,
TopicDescriptionFeed,
CreateSubscriptionBody,
CreateSubscriptionBodyContent,
CreateRuleBody,
CreateRuleBodyContent,
CreateQueueBody,
CreateQueueBodyContent,
)
from ..._base_handler import _parse_conn_str
from ..._common.constants import (
JWT_TOKEN_SCOPE,
SUPPLEMENTARY_AUTHORIZATION_HEADER,
DEAD_LETTER_SUPPLEMENTARY_AUTHORIZATION_HEADER,
)
from ...aio._base_handler_async import (
ServiceBusSharedKeyCredential,
ServiceBusSASTokenCredential,
)
from ...management._generated.aio._configuration_async import (
ServiceBusManagementClientConfiguration,
)
from ...management._generated.aio._service_bus_management_client_async import (
ServiceBusManagementClient as ServiceBusManagementClientImpl,
)
from ...management import _constants as constants
from ._shared_key_policy_async import AsyncServiceBusSharedKeyCredentialPolicy
from ...management._models import (
QueueRuntimeProperties,
QueueProperties,
TopicProperties,
TopicRuntimeProperties,
SubscriptionProperties,
SubscriptionRuntimeProperties,
RuleProperties,
NamespaceProperties,
TrueRuleFilter,
)
from ...management._xml_workaround_policy import ServiceBusXMLWorkaroundPolicy
from ...management._handle_response_error import _handle_response_error
from ._utils import extract_data_template, extract_rule_data_template, get_next_template
from ...management._utils import (
deserialize_rule_key_values,
serialize_rule_key_values,
create_properties_from_dict_if_needed,
_normalize_entity_path_to_full_path_if_needed,
_validate_entity_name_type,
_validate_topic_and_subscription_types,
_validate_topic_subscription_and_rule_types,
)
if TYPE_CHECKING:
from azure.core.credentials_async import (
AsyncTokenCredential,
) # pylint:disable=ungrouped-imports
class ServiceBusAdministrationClient: # pylint:disable=too-many-public-methods
"""Use this client to create, update, list, and delete resources of a ServiceBus namespace.
:param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
:param credential: To authenticate to manage the entities of the ServiceBus namespace.
:type credential: AsyncTokenCredential
"""
def __init__(
self,
fully_qualified_namespace: str,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
self.fully_qualified_namespace = fully_qualified_namespace
self._credential = credential
self._endpoint = "https://" + fully_qualified_namespace
self._config = ServiceBusManagementClientConfiguration(self._endpoint, **kwargs)
self._pipeline = self._build_pipeline()
self._impl = ServiceBusManagementClientImpl(
endpoint=fully_qualified_namespace, pipeline=self._pipeline
)
async def __aenter__(self) -> "ServiceBusAdministrationClient":
await self._impl.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._impl.__aexit__(*exc_details)
def _build_pipeline(self, **kwargs): # pylint: disable=no-self-use
transport = kwargs.get("transport")
policies = kwargs.get("policies")
credential_policy = (
AsyncServiceBusSharedKeyCredentialPolicy(
self._endpoint, self._credential, "Authorization"
)
if isinstance(self._credential, ServiceBusSharedKeyCredential)
else AsyncBearerTokenCredentialPolicy(self._credential, JWT_TOKEN_SCOPE)
)
if policies is None: # [] is a valid policy list
policies = [
RequestIdPolicy(**kwargs),
self._config.headers_policy,
self._config.user_agent_policy,
self._config.proxy_policy,
ContentDecodePolicy(**kwargs),
ServiceBusXMLWorkaroundPolicy(),
self._config.redirect_policy,
self._config.retry_policy,
credential_policy,
self._config.logging_policy,
DistributedTracingPolicy(**kwargs),
HttpLoggingPolicy(**kwargs),
]
if not transport:
transport = AioHttpTransport(**kwargs)
return AsyncPipeline(transport, policies)
async def _get_entity_element(self, entity_name, enrich=False, **kwargs):
# type: (str, bool, Any) -> ElementTree
_validate_entity_name_type(entity_name)
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.entity.get(
entity_name,
enrich=enrich,
api_version=constants.API_VERSION,
**kwargs
),
)
return element
async def _get_subscription_element(
self, topic_name, subscription_name, enrich=False, **kwargs
):
# type: (str, str, bool, Any) -> ElementTree
_validate_topic_and_subscription_types(topic_name, subscription_name)
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.subscription.get(
topic_name,
subscription_name,
enrich=enrich,
api_version=constants.API_VERSION,
**kwargs
),
)
return element
async def _get_rule_element(
self, topic_name, subscription_name, rule_name, **kwargs
):
# type: (str, str, str, Any) -> ElementTree
_validate_topic_subscription_and_rule_types(
topic_name, subscription_name, rule_name
)
with _handle_response_error():
element = cast(
ElementTree,
await self._impl.rule.get(
topic_name,
subscription_name,
rule_name,
enrich=False,
api_version=constants.API_VERSION,
**kwargs
),
)
return element
async def _create_forward_to_header_tokens(self, entity, kwargs):
"""forward_to requires providing a bearer token in headers for the referenced entity."""
kwargs["headers"] = kwargs.get("headers", {})
async def _populate_header_within_kwargs(uri, header):
token = (await self._credential.get_token(uri)).token.decode()
if not isinstance(
self._credential,
(ServiceBusSASTokenCredential, ServiceBusSharedKeyCredential),
):
token = "Bearer {}".format(token)
kwargs["headers"][header] = token
if entity.forward_to:
await _populate_header_within_kwargs(
entity.forward_to, SUPPLEMENTARY_AUTHORIZATION_HEADER
)
if entity.forward_dead_lettered_messages_to:
await _populate_header_within_kwargs(
entity.forward_dead_lettered_messages_to,
DEAD_LETTER_SUPPLEMENTARY_AUTHORIZATION_HEADER,
)
@classmethod
def from_connection_string(
cls, conn_str: str, **kwargs: Any
) -> "ServiceBusAdministrationClient":
"""Create a client from connection string.
:param str conn_str: The connection string of the Service Bus Namespace.
:rtype: ~azure.servicebus.management.aio.ServiceBusAdministrationClient
"""
(
endpoint,
shared_access_key_name,
shared_access_key,
_,
token,
token_expiry,
) = _parse_conn_str(conn_str)
if token and token_expiry:
credential = ServiceBusSASTokenCredential(token, token_expiry)
elif shared_access_key_name and shared_access_key:
credential = ServiceBusSharedKeyCredential(shared_access_key_name, shared_access_key) # type: ignore
if "//" in endpoint:
endpoint = endpoint[endpoint.index("//") + 2 :]
return cls(endpoint, credential, **kwargs) # type: ignore
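    # Usage sketch (the connection string is a placeholder, not a real key):
    #
    #     client = ServiceBusAdministrationClient.from_connection_string(
    #         "Endpoint=sb://<namespace>.servicebus.windows.net/;"
    #         "SharedAccessKeyName=<key-name>;SharedAccessKey=<key>"
    #     )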
async def get_queue(self, queue_name: str, **kwargs) -> QueueProperties:
"""Get the properties of a queue.
:param str queue_name: The name of the queue.
:rtype: ~azure.servicebus.management.QueueProperties
"""
entry_ele = await self._get_entity_element(queue_name, **kwargs)
entry = QueueDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Queue '{}' does not exist".format(queue_name))
queue_description = QueueProperties._from_internal_entity(
queue_name, entry.content.queue_description
)
return queue_description
async def get_queue_runtime_properties(
self, queue_name: str, **kwargs
) -> QueueRuntimeProperties:
"""Get the runtime information of a queue.
:param str queue_name: The name of the queue.
:rtype: ~azure.servicebus.management.QueueRuntimeProperties
"""
entry_ele = await self._get_entity_element(queue_name, **kwargs)
entry = QueueDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Queue {} does not exist".format(queue_name))
runtime_properties = QueueRuntimeProperties._from_internal_entity(
queue_name, entry.content.queue_description
)
return runtime_properties
async def create_queue(self, queue_name: str, **kwargs) -> QueueProperties:
"""Create a queue.
:param queue_name: Name of the queue.
:type queue_name: str
:keyword authorization_rules: Authorization rules for resource.
:type authorization_rules: list[~azure.servicebus.management.AuthorizationRule]
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the queue is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type auto_delete_on_idle: Union[~datetime.timedelta, str]
:keyword dead_lettering_on_message_expiration: A value that indicates whether this queue has dead
letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type default_message_time_to_live: Union[~datetime.timedelta, str]
:keyword duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type duplicate_detection_history_time_window: Union[~datetime.timedelta, str]
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:keyword enable_partitioning: A value that indicates whether the queue is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:keyword lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type lock_duration: Union[~datetime.timedelta, str]
:keyword max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:keyword max_size_in_megabytes: The maximum size of the queue in megabytes, which is the size of
memory allocated for the queue.
:type max_size_in_megabytes: int
:keyword requires_duplicate_detection: A value indicating if this queue requires duplicate
detection.
:type requires_duplicate_detection: bool
:keyword requires_session: A value that indicates whether the queue supports the concept of
sessions.
:type requires_session: bool
:keyword forward_to: The name of the recipient entity to which all the messages sent to the queue
are forwarded to.
:type forward_to: str
        :keyword user_metadata: Custom metadata that the user can associate with the description. Max length
is 1024 chars.
:type user_metadata: str
:keyword forward_dead_lettered_messages_to: The name of the recipient entity to which all the
dead-lettered messages of this subscription are forwarded to.
:type forward_dead_lettered_messages_to: str
:rtype: ~azure.servicebus.management.QueueProperties
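        Example (an illustrative sketch; the queue name and settings are placeholders)::

            queue = await client.create_queue(
                "myqueue",
                max_delivery_count=5,
                dead_lettering_on_message_expiration=True,
            )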
"""
forward_to = _normalize_entity_path_to_full_path_if_needed(
kwargs.pop("forward_to", None), self.fully_qualified_namespace
)
forward_dead_lettered_messages_to = (
_normalize_entity_path_to_full_path_if_needed(
kwargs.pop("forward_dead_lettered_messages_to", None),
self.fully_qualified_namespace,
)
)
queue = QueueProperties(
queue_name,
authorization_rules=kwargs.pop("authorization_rules", None),
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
dead_lettering_on_message_expiration=kwargs.pop(
"dead_lettering_on_message_expiration", None
),
default_message_time_to_live=kwargs.pop(
"default_message_time_to_live", None
),
duplicate_detection_history_time_window=kwargs.pop(
"duplicate_detection_history_time_window", None
),
availability_status=None,
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
enable_express=kwargs.pop("enable_express", None),
enable_partitioning=kwargs.pop("enable_partitioning", None),
lock_duration=kwargs.pop("lock_duration", None),
max_delivery_count=kwargs.pop("max_delivery_count", None),
max_size_in_megabytes=kwargs.pop("max_size_in_megabytes", None),
requires_duplicate_detection=kwargs.pop(
"requires_duplicate_detection", None
),
requires_session=kwargs.pop("requires_session", None),
status=kwargs.pop("status", None),
forward_to=forward_to,
forward_dead_lettered_messages_to=forward_dead_lettered_messages_to,
user_metadata=kwargs.pop("user_metadata", None),
)
to_create = queue._to_internal_entity(self.fully_qualified_namespace)
create_entity_body = CreateQueueBody(
content=CreateQueueBodyContent(
queue_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
await self._create_forward_to_header_tokens(to_create, kwargs)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.entity.put(
queue_name, # type: ignore
request_body,
api_version=constants.API_VERSION,
**kwargs
),
)
entry = QueueDescriptionEntry.deserialize(entry_ele)
result = QueueProperties._from_internal_entity(
queue_name, entry.content.queue_description
)
return result
async def update_queue(
self, queue: Union[QueueProperties, Mapping[str, Any]], **kwargs
) -> None:
"""Update a queue.
Before calling this method, you should use `get_queue`, `create_queue` or `list_queues` to get a
`QueueProperties` instance, then update the properties. Only a portion of properties can
be updated. Refer to https://docs.microsoft.com/en-us/rest/api/servicebus/update-queue.
You could also pass keyword arguments for updating properties in the form of
`<property_name>=<property_value>` which will override whatever was specified in
the `QueueProperties` instance. Refer to ~azure.servicebus.management.QueueProperties for names of properties.
:param queue: The queue that is returned from `get_queue`, `create_queue` or `list_queues` and
has the updated properties.
:type queue: ~azure.servicebus.management.QueueProperties
:rtype: None
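        Example (an illustrative sketch; the queue name is a placeholder)::

            queue = await client.get_queue("myqueue")
            queue.max_delivery_count = 5
            await client.update_queue(queue)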
"""
# we should not mutate the input, making a copy first for update
queue = deepcopy(create_properties_from_dict_if_needed(queue, QueueProperties))
to_update = queue._to_internal_entity(self.fully_qualified_namespace, kwargs)
create_entity_body = CreateQueueBody(
content=CreateQueueBodyContent(
queue_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
await self._create_forward_to_header_tokens(to_update, kwargs)
with _handle_response_error():
await self._impl.entity.put(
queue.name, # type: ignore
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_queue(self, queue_name: str, **kwargs) -> None:
"""Delete a queue.
        :param str queue_name: The name of the queue to be deleted.
:rtype: None
"""
_validate_entity_name_type(queue_name)
if not queue_name:
raise ValueError("queue_name must not be None or empty")
with _handle_response_error():
await self._impl.entity.delete(
queue_name, api_version=constants.API_VERSION, **kwargs
)
def list_queues(self, **kwargs: Any) -> AsyncItemPaged[QueueProperties]:
"""List the queues of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of QueueProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.QueueProperties]
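        Example (an illustrative sketch)::

            async for queue in client.list_queues():
                print(queue.name)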
"""
def entry_to_qd(entry):
qd = QueueProperties._from_internal_entity(
entry.title, entry.content.queue_description
)
return qd
extract_data = functools.partial(
extract_data_template, QueueDescriptionFeed, entry_to_qd
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_QUEUES),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
def list_queues_runtime_properties(
self, **kwargs: Any
) -> AsyncItemPaged[QueueRuntimeProperties]:
"""List the runtime information of the queues in a ServiceBus namespace.
:returns: An iterable (auto-paging) response of QueueRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.QueueRuntimeProperties]
"""
def entry_to_qr(entry):
qd = QueueRuntimeProperties._from_internal_entity(
entry.title, entry.content.queue_description
)
return qd
extract_data = functools.partial(
extract_data_template, QueueDescriptionFeed, entry_to_qr
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_QUEUES),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
async def get_topic(self, topic_name: str, **kwargs) -> TopicProperties:
"""Get the properties of a topic.
:param str topic_name: The name of the topic.
        :rtype: ~azure.servicebus.management.TopicProperties
"""
entry_ele = await self._get_entity_element(topic_name, **kwargs)
entry = TopicDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError("Topic '{}' does not exist".format(topic_name))
topic_description = TopicProperties._from_internal_entity(
topic_name, entry.content.topic_description
)
return topic_description
async def get_topic_runtime_properties(
self, topic_name: str, **kwargs
) -> TopicRuntimeProperties:
"""Get the runtime information of a topic.
:param str topic_name: The name of the topic.
:rtype: ~azure.servicebus.management.TopicRuntimeProperties
"""
entry_ele = await self._get_entity_element(topic_name, **kwargs)
entry = TopicDescriptionEntry.deserialize(entry_ele)
if not entry.content:
            raise ResourceNotFoundError("Topic '{}' does not exist".format(topic_name))
topic_description = TopicRuntimeProperties._from_internal_entity(
topic_name, entry.content.topic_description
)
return topic_description
async def create_topic(self, topic_name: str, **kwargs) -> TopicProperties:
"""Create a topic.
:param topic_name: Name of the topic.
:type topic_name: str
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type default_message_time_to_live: Union[~datetime.timedelta, str]
:keyword max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
        :type max_size_in_megabytes: int
:keyword requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:type requires_duplicate_detection: bool
:keyword duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type duplicate_detection_history_time_window: Union[~datetime.timedelta, str]
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword size_in_bytes: The size of the topic, in bytes.
:type size_in_bytes: int
:keyword filtering_messages_before_publishing: Filter messages before publishing.
:type filtering_messages_before_publishing: bool
:keyword authorization_rules: Authorization rules for resource.
:type authorization_rules:
list[~azure.servicebus.management.AuthorizationRule]
:keyword support_ordering: A value that indicates whether the topic supports ordering.
:type support_ordering: bool
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type auto_delete_on_idle: Union[~datetime.timedelta, str]
:keyword enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:keyword enable_express: A value that indicates whether Express Entities are enabled. An express
            topic holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:keyword user_metadata: Metadata associated with the topic.
:type user_metadata: str
:rtype: ~azure.servicebus.management.TopicProperties
"""
topic = TopicProperties(
topic_name,
default_message_time_to_live=kwargs.pop(
"default_message_time_to_live", None
),
max_size_in_megabytes=kwargs.pop("max_size_in_megabytes", None),
requires_duplicate_detection=kwargs.pop(
"requires_duplicate_detection", None
),
duplicate_detection_history_time_window=kwargs.pop(
"duplicate_detection_history_time_window", None
),
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
size_in_bytes=kwargs.pop("size_in_bytes", None),
authorization_rules=kwargs.pop("authorization_rules", None),
status=kwargs.pop("status", None),
support_ordering=kwargs.pop("support_ordering", None),
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
enable_partitioning=kwargs.pop("enable_partitioning", None),
availability_status=None,
enable_express=kwargs.pop("enable_express", None),
user_metadata=kwargs.pop("user_metadata", None),
)
to_create = topic._to_internal_entity()
create_entity_body = CreateTopicBody(
content=CreateTopicBodyContent(
topic_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.entity.put(
topic_name, # type: ignore
request_body,
api_version=constants.API_VERSION,
**kwargs
),
)
entry = TopicDescriptionEntry.deserialize(entry_ele)
result = TopicProperties._from_internal_entity(
topic_name, entry.content.topic_description
)
return result
async def update_topic(
self, topic: Union[TopicProperties, Mapping[str, Any]], **kwargs
) -> None:
"""Update a topic.
Before calling this method, you should use `get_topic`, `create_topic` or `list_topics` to get a
`TopicProperties` instance, then update the properties. Only a portion of properties can be updated.
Refer to https://docs.microsoft.com/en-us/rest/api/servicebus/update-topic.
You could also pass keyword arguments for updating properties in the form of
`<property_name>=<property_value>` which will override whatever was specified in
the `TopicProperties` instance. Refer to ~azure.servicebus.management.TopicProperties for names of properties.
:param topic: The topic that is returned from `get_topic`, `create_topic`, or `list_topics`
and has the updated properties.
:type topic: ~azure.servicebus.management.TopicProperties
:rtype: None
"""
topic = deepcopy(create_properties_from_dict_if_needed(topic, TopicProperties))
to_update = topic._to_internal_entity(kwargs)
create_entity_body = CreateTopicBody(
content=CreateTopicBodyContent(
topic_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
with _handle_response_error():
await self._impl.entity.put(
topic.name, # type: ignore
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_topic(self, topic_name: str, **kwargs) -> None:
"""Delete a topic.
:param str topic_name: The topic to be deleted.
:rtype: None
"""
_validate_entity_name_type(topic_name)
await self._impl.entity.delete(
topic_name, api_version=constants.API_VERSION, **kwargs
)
def list_topics(self, **kwargs: Any) -> AsyncItemPaged[TopicProperties]:
"""List the topics of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of TopicProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.TopicProperties]
"""
def entry_to_topic(entry):
topic = TopicProperties._from_internal_entity(
entry.title, entry.content.topic_description
)
return topic
extract_data = functools.partial(
extract_data_template, TopicDescriptionFeed, entry_to_topic
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_TOPICS),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
def list_topics_runtime_properties(
self, **kwargs: Any
) -> AsyncItemPaged[TopicRuntimeProperties]:
"""List the topics runtime information of a ServiceBus namespace.
:returns: An iterable (auto-paging) response of TopicRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.TopicRuntimeProperties]
"""
def entry_to_topic(entry):
topic = TopicRuntimeProperties._from_internal_entity(
entry.title, entry.content.topic_description
)
return topic
extract_data = functools.partial(
extract_data_template, TopicDescriptionFeed, entry_to_topic
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_entities, constants.ENTITY_TYPE_TOPICS),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
async def get_subscription(
self, topic_name: str, subscription_name: str, **kwargs
) -> SubscriptionProperties:
"""Get the properties of a topic subscription.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: name of the subscription.
:rtype: ~azure.servicebus.management.SubscriptionProperties
"""
entry_ele = await self._get_subscription_element(
topic_name, subscription_name, **kwargs
)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Subscription('Topic: {}, Subscription: {}') does not exist".format(
                    topic_name, subscription_name
)
)
subscription = SubscriptionProperties._from_internal_entity(
entry.title, entry.content.subscription_description
)
return subscription
async def get_subscription_runtime_properties(
self, topic_name: str, subscription_name: str, **kwargs
) -> SubscriptionRuntimeProperties:
"""Get a topic subscription runtime info.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: name of the subscription.
:rtype: ~azure.servicebus.management.SubscriptionRuntimeProperties
"""
entry_ele = await self._get_subscription_element(
topic_name, subscription_name, **kwargs
)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Subscription('Topic: {}, Subscription: {}') does not exist".format(
                    topic_name, subscription_name
)
)
subscription = SubscriptionRuntimeProperties._from_internal_entity(
entry.title, entry.content.subscription_description
)
return subscription
async def create_subscription(
self, topic_name: str, subscription_name: str, **kwargs
) -> SubscriptionProperties:
"""Create a topic subscription.
:param str topic_name: The topic that will own the
to-be-created subscription.
:param subscription_name: Name of the subscription.
:type subscription_name: str
:keyword lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type lock_duration: Union[~datetime.timedelta, str]
        :keyword requires_session: A value that indicates whether the subscription supports the concept of
sessions.
:type requires_session: bool
:keyword default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type default_message_time_to_live: Union[~datetime.timedelta, str]
:keyword dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:keyword dead_lettering_on_filter_evaluation_exceptions: A value that indicates whether this
            subscription has dead letter support on filter evaluation exceptions.
:type dead_lettering_on_filter_evaluation_exceptions: bool
:keyword max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:keyword enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:keyword forward_to: The name of the recipient entity to which all the messages sent to the
subscription are forwarded to.
:type forward_to: str
:keyword user_metadata: Metadata associated with the subscription. Maximum number of characters
is 1024.
:type user_metadata: str
:keyword forward_dead_lettered_messages_to: The name of the recipient entity to which all the
            dead-lettered messages of this subscription are forwarded to.
:type forward_dead_lettered_messages_to: str
:keyword auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the subscription is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:type auto_delete_on_idle: Union[~datetime.timedelta, str]
:rtype: ~azure.servicebus.management.SubscriptionProperties
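        Example (an illustrative sketch; the topic and subscription names are placeholders)::

            subscription = await client.create_subscription(
                "mytopic", "mysub", requires_session=True
            )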
"""
# pylint:disable=protected-access
_validate_entity_name_type(topic_name, display_name="topic_name")
forward_to = _normalize_entity_path_to_full_path_if_needed(
kwargs.pop("forward_to", None), self.fully_qualified_namespace
)
forward_dead_lettered_messages_to = (
_normalize_entity_path_to_full_path_if_needed(
kwargs.pop("forward_dead_lettered_messages_to", None),
self.fully_qualified_namespace,
)
)
subscription = SubscriptionProperties(
subscription_name,
lock_duration=kwargs.pop("lock_duration", None),
requires_session=kwargs.pop("requires_session", None),
default_message_time_to_live=kwargs.pop(
"default_message_time_to_live", None
),
dead_lettering_on_message_expiration=kwargs.pop(
"dead_lettering_on_message_expiration", None
),
dead_lettering_on_filter_evaluation_exceptions=kwargs.pop(
"dead_lettering_on_filter_evaluation_exceptions", None
),
max_delivery_count=kwargs.pop("max_delivery_count", None),
enable_batched_operations=kwargs.pop("enable_batched_operations", None),
status=kwargs.pop("status", None),
forward_to=forward_to,
user_metadata=kwargs.pop("user_metadata", None),
forward_dead_lettered_messages_to=forward_dead_lettered_messages_to,
auto_delete_on_idle=kwargs.pop("auto_delete_on_idle", None),
availability_status=None,
)
to_create = subscription._to_internal_entity(self.fully_qualified_namespace) # type: ignore
create_entity_body = CreateSubscriptionBody(
content=CreateSubscriptionBodyContent(
subscription_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
await self._create_forward_to_header_tokens(to_create, kwargs)
with _handle_response_error():
entry_ele = cast(
ElementTree,
await self._impl.subscription.put(
topic_name,
subscription_name, # type: ignore
request_body,
api_version=constants.API_VERSION,
**kwargs
),
)
entry = SubscriptionDescriptionEntry.deserialize(entry_ele)
result = SubscriptionProperties._from_internal_entity(
subscription_name, entry.content.subscription_description
)
return result
async def update_subscription(
self,
topic_name: str,
subscription: Union[SubscriptionProperties, Mapping[str, Any]],
**kwargs
) -> None:
"""Update a subscription.
        Before calling this method, you should use `get_subscription`, `create_subscription` or `list_subscriptions`
to get a `SubscriptionProperties` instance, then update the properties.
You could also pass keyword arguments for updating properties in the form of
`<property_name>=<property_value>` which will override whatever was specified in
the `SubscriptionProperties` instance.
Refer to ~azure.servicebus.management.SubscriptionProperties for names of properties.
:param str topic_name: The topic that owns the subscription.
:param ~azure.servicebus.management.SubscriptionProperties subscription: The subscription that is returned
            from `get_subscription`, `create_subscription` or `list_subscriptions` and has the updated properties.
:rtype: None
"""
_validate_entity_name_type(topic_name, display_name="topic_name")
# we should not mutate the input, making a copy first for update
subscription = deepcopy(create_properties_from_dict_if_needed(subscription, SubscriptionProperties))
to_update = subscription._to_internal_entity(self.fully_qualified_namespace, kwargs)
create_entity_body = CreateSubscriptionBody(
content=CreateSubscriptionBodyContent(
subscription_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
await self._create_forward_to_header_tokens(to_update, kwargs)
with _handle_response_error():
await self._impl.subscription.put(
topic_name,
subscription.name,
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_subscription(
self, topic_name: str, subscription_name: str, **kwargs
) -> None:
"""Delete a topic subscription.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: The subscription
to be deleted.
:rtype: None
"""
_validate_topic_and_subscription_types(topic_name, subscription_name)
await self._impl.subscription.delete(
topic_name, subscription_name, api_version=constants.API_VERSION, **kwargs
)
def list_subscriptions(
self, topic_name: str, **kwargs: Any
) -> AsyncItemPaged[SubscriptionProperties]:
"""List the subscriptions of a ServiceBus Topic.
:param str topic_name: The topic that owns the subscription.
:returns: An iterable (auto-paging) response of SubscriptionProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.SubscriptionProperties]
"""
_validate_entity_name_type(topic_name)
def entry_to_subscription(entry):
subscription = SubscriptionProperties._from_internal_entity(
entry.title, entry.content.subscription_description
)
return subscription
extract_data = functools.partial(
extract_data_template, SubscriptionDescriptionFeed, entry_to_subscription
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_subscriptions, topic_name),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
def list_subscriptions_runtime_properties(
self, topic_name: str, **kwargs: Any
) -> AsyncItemPaged[SubscriptionRuntimeProperties]:
"""List the subscriptions runtime information of a ServiceBus.
:param str topic_name: The topic that owns the subscription.
:returns: An iterable (auto-paging) response of SubscriptionRuntimeProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.SubscriptionRuntimeProperties]
"""
_validate_entity_name_type(topic_name)
def entry_to_subscription(entry):
subscription = SubscriptionRuntimeProperties._from_internal_entity(
entry.title, entry.content.subscription_description
)
return subscription
extract_data = functools.partial(
extract_data_template, SubscriptionDescriptionFeed, entry_to_subscription
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_subscriptions, topic_name),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
async def get_rule(
self, topic_name: str, subscription_name: str, rule_name: str, **kwargs
) -> RuleProperties:
"""Get the properties of a topic subscription rule.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: The subscription that
owns the rule.
:param str rule_name: Name of the rule.
:rtype: ~azure.servicebus.management.RuleProperties
"""
entry_ele = await self._get_rule_element(
topic_name, subscription_name, rule_name, **kwargs
)
entry = RuleDescriptionEntry.deserialize(entry_ele)
if not entry.content:
raise ResourceNotFoundError(
"Rule('Topic: {}, Subscription: {}, Rule {}') does not exist".format(
                    topic_name, subscription_name, rule_name
)
)
rule_description = RuleProperties._from_internal_entity(
rule_name, entry.content.rule_description
)
deserialize_rule_key_values(
entry_ele, rule_description
) # to remove after #3535 is released.
return rule_description
async def create_rule(
self, topic_name: str, subscription_name: str, rule_name: str, **kwargs
) -> RuleProperties:
"""Create a rule for a topic subscription.
:param str topic_name: The topic that will own the
to-be-created subscription rule.
:param str subscription_name: The subscription that
will own the to-be-created rule.
:param rule_name: Name of the rule.
:type rule_name: str
:keyword filter: The filter of the rule. The default value is ~azure.servicebus.management.TrueRuleFilter
:type filter: Union[~azure.servicebus.management.CorrelationRuleFilter,
~azure.servicebus.management.SqlRuleFilter]
:keyword action: The action of the rule.
:type action: Optional[~azure.servicebus.management.SqlRuleAction]
:rtype: ~azure.servicebus.management.RuleProperties
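        Example (an illustrative sketch; names are placeholders and it assumes
        ``SqlRuleFilter`` is imported from ``azure.servicebus.management``)::

            rule = await client.create_rule(
                "mytopic", "mysub", "myrule",
                filter=SqlRuleFilter("priority > 3"),
            )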
"""
_validate_topic_and_subscription_types(topic_name, subscription_name)
rule = RuleProperties(
rule_name,
filter=kwargs.pop("filter", TrueRuleFilter()),
action=kwargs.pop("action", None),
created_at_utc=None,
)
to_create = rule._to_internal_entity()
create_entity_body = CreateRuleBody(
content=CreateRuleBodyContent(
rule_description=to_create, # type: ignore
)
)
request_body = create_entity_body.serialize(is_xml=True)
serialize_rule_key_values(request_body, rule)
with _handle_response_error():
entry_ele = await self._impl.rule.put(
topic_name,
subscription_name, # type: ignore
rule_name,
request_body,
api_version=constants.API_VERSION,
**kwargs
)
entry = RuleDescriptionEntry.deserialize(entry_ele)
result = RuleProperties._from_internal_entity(
rule_name, entry.content.rule_description
)
deserialize_rule_key_values(
entry_ele, result
) # to remove after #3535 is released.
return result
async def update_rule(
self,
topic_name: str,
subscription_name: str,
rule: Union[RuleProperties, Mapping[str, Any]],
**kwargs
) -> None:
"""Update a rule.
Before calling this method, you should use `get_rule`, `create_rule` or `list_rules` to get a `RuleProperties`
instance, then update the properties.
You could also pass keyword arguments for updating properties in the form of
`<property_name>=<property_value>` which will override whatever was specified in
the `RuleProperties` instance. Refer to ~azure.servicebus.management.RuleProperties for names of properties.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: The subscription that
owns this rule.
:param rule: The rule that is returned from `get_rule`,
`create_rule`, or `list_rules` and has the updated properties.
:type rule: ~azure.servicebus.management.RuleProperties
:rtype: None
"""
_validate_topic_and_subscription_types(topic_name, subscription_name)
# we should not mutate the input, making a copy first for update
rule = deepcopy(create_properties_from_dict_if_needed(rule, RuleProperties))
to_update = rule._to_internal_entity(kwargs)
create_entity_body = CreateRuleBody(
content=CreateRuleBodyContent(
rule_description=to_update,
)
)
request_body = create_entity_body.serialize(is_xml=True)
serialize_rule_key_values(request_body, rule)
with _handle_response_error():
await self._impl.rule.put(
topic_name,
subscription_name,
rule.name,
request_body,
api_version=constants.API_VERSION,
if_match="*",
**kwargs
)
async def delete_rule(
self, topic_name: str, subscription_name: str, rule_name: str, **kwargs
) -> None:
"""Delete a topic subscription rule.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: The subscription that
            owns the rule.
:param str rule_name: The to-be-deleted rule.
:rtype: None
"""
_validate_topic_subscription_and_rule_types(
topic_name, subscription_name, rule_name
)
await self._impl.rule.delete(
topic_name,
subscription_name,
rule_name,
api_version=constants.API_VERSION,
**kwargs
)
def list_rules(
self, topic_name: str, subscription_name: str, **kwargs: Any
) -> AsyncItemPaged[RuleProperties]:
"""List the rules of a topic subscription.
:param str topic_name: The topic that owns the subscription.
:param str subscription_name: The subscription that
owns the rules.
:returns: An iterable (auto-paging) response of RuleProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.servicebus.management.RuleProperties]
"""
_validate_topic_and_subscription_types(topic_name, subscription_name)
def entry_to_rule(ele, entry):
"""
`ele` will be removed after #3535 is released.
"""
rule = entry.content.rule_description
rule_description = RuleProperties._from_internal_entity(entry.title, rule)
deserialize_rule_key_values(
ele, rule_description
) # to remove after #3535 is released.
return rule_description
extract_data = functools.partial(
extract_rule_data_template, RuleDescriptionFeed, entry_to_rule
)
get_next = functools.partial(
get_next_template,
functools.partial(self._impl.list_rules, topic_name, subscription_name),
**kwargs
)
return AsyncItemPaged(get_next, extract_data)
async def get_namespace_properties(self, **kwargs) -> NamespaceProperties:
"""Get the namespace properties
:rtype: ~azure.servicebus.management.NamespaceProperties
"""
entry_el = await self._impl.namespace.get(
api_version=constants.API_VERSION, **kwargs
)
namespace_entry = NamespacePropertiesEntry.deserialize(entry_el)
return NamespaceProperties._from_internal_entity(
namespace_entry.title, namespace_entry.content.namespace_properties
)
async def close(self) -> None:
await self._impl.close()
# --- file: 92. Reverse Linked List II.py (repo: newfull5/LeetCode) ---
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:
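        # Copy the values into an array, reverse the [left, right] slice, then
        # rebuild the list from the tail: O(n) time, O(n) extra space.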
arr = []
while head is not None:
arr.append(head.val)
head = head.next
arr = arr[:left-1] + arr[left-1:right][::-1] + arr[right:]
answer = ListNode(arr.pop())
while arr:
answer = ListNode(arr.pop(), answer)
return answer
# --- file: test/test_subsets.py (repo: wan-catherine/Leetcode) ---
from unittest import TestCase
from problems.Subsets import Solution
class TestSolution(TestCase):
def test_subsets(self):
solution = Solution()
res = solution.subsets([1,2,3])
expected = [
[3],
[1],
[2],
[1,2,3],
[1,3],
[2,3],
[1,2],
[]
]
self.assertEqual(sorted(res), sorted(expected))
# --- file: 495.py (repo: zenmeder/leetcode) ---
#!/usr/local/bin/python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
def findPoisonedDuration(self, timeSeries, duration):
"""
:type timeSeries: List[int]
:type duration: int
:rtype: int
"""
if not timeSeries:
return 0
l = []
for time in timeSeries:
if not l:
l = [time, time + duration]
res = duration
continue
if time >= l[1]:
l = [time, time + duration]
res += duration
else:
res += time + duration - l[1]
l[1] = time + duration
return res
print(Solution().findPoisonedDuration([1, 2, 3, 4, 5], 5))
# --- file: pyalp/chosen/fields.py (repo: Mause/pyalp, MIT) ---
from django import forms
from chosen.widgets import (
ChosenSelect,
ChosenSelectMultiple,
ChosenGroupSelect
)
__all__ = [
'ChosenFieldMixin', 'ChosenChoiceField', 'ChosenMultipleChoiceField',
'ChosenModelChoiceField', 'ChosenModelMultipleChoiceField',
'ChosenGroupChoiceField',
]
class ChosenFieldMixin(object):
def __init__(self, *args, **kwargs):
widget_kwargs = (
"overlay" in kwargs and
{"overlay": kwargs.pop('overlay')} or
{}
)
kwargs['widget'] = self.widget(**widget_kwargs)
super(ChosenFieldMixin, self).__init__(*args, **kwargs)
class ChosenChoiceField(ChosenFieldMixin, forms.ChoiceField):
widget = ChosenSelect
class ChosenMultipleChoiceField(ChosenFieldMixin, forms.MultipleChoiceField):
widget = ChosenSelectMultiple
class ChosenModelChoiceField(ChosenFieldMixin, forms.ModelChoiceField):
widget = ChosenSelect
class ChosenModelMultipleChoiceField(ChosenFieldMixin,
forms.ModelMultipleChoiceField):
widget = ChosenSelectMultiple
class ChosenGroupChoiceField(ChosenFieldMixin, forms.ChoiceField):
"""
    This field generates a Single Select with Groups (optgroup support).
    To render it correctly, you need to give a choice with the group title and
    the list of (id, value) pairs for the subtitles.
A good way to do that is to add a Manager, eg::
class MyModelManager(models.Manager):
"Add get_group_choices to MyModel"
def get_group_choices(self):
'''
Will filter the model per name and return tuples (obj.id,
obj.rule)
'''
choices = []
for name in MyModel.objects.values_list("name").distinct():
name = name[0]
name_choices = tuple((obj.id, obj.rule) for obj in
MyModel.objects.filter(name=name))
choices.append((name, name_choices))
return choices
"""
widget = ChosenGroupSelect
# --- file: Python/card-flipping-game.py (repo: black-shadows/LeetCode-Topicwise-Solutions) ---
# Time: O(n)
# Space: O(n)
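# A value can be the answer only if it never appears on a card whose front and
# back are equal ("same"); the answer is the minimum of the remaining values.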
import itertools
class Solution(object):
def flipgame(self, fronts, backs):
"""
:type fronts: List[int]
:type backs: List[int]
:rtype: int
"""
same = {n for i, n in enumerate(fronts) if n == backs[i]}
result = float("inf")
for n in itertools.chain(fronts, backs):
if n not in same:
result = min(result, n)
return result if result < float("inf") else 0
# --- file: migrations/versions/d614011d4982_.py (repo: t8116189520/flasky) ---
"""empty message
Revision ID: d614011d4982
Revises:
Create Date: 2017-08-10 11:14:25.087757
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd614011d4982'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
# ### end Alembic commands ###
# --- file: pypeln/process.py (repo: Tezeo/pypeln, MIT) ---
""" The `process` module lets you create pipelines using objects from python's [multiprocessing](https://docs.python.org/3.4/library/multiprocessing.html) module according to Pypeline's general [architecture](https://cgarciae.gitbook.io/pypeln/#architecture). Use this module when you are in need of true parallelism for CPU heavy operations but be aware of its implications (continue reading).
### Example
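A minimal sketch (assuming the package is importable as `pypeln`): `map` runs
`slow_add1` on 3 worker processes while `filter` runs `slow_gt3` on 2, and
iterating the final stage pulls the results through the pipeline.

```python
from pypeln import process as pr
import time
from random import random

def slow_add1(x):
    time.sleep(random())  # simulate a slow computation
    return x + 1

def slow_gt3(x):
    time.sleep(random())
    return x > 3

stage = pr.map(slow_add1, range(10), workers = 3, maxsize = 4)
stage = pr.filter(slow_gt3, stage, workers = 2)

data = list(stage)  # order is not guaranteed, e.g. [5, 6, 9, 4, 8, 10, 7]
```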
## Stage
All functions from this module return a private `pypeln.process._Stage` object. Stages are lazy, that is, a `_Stage` objects merely contains the information needed to perform the computation of itself and the Stages it depends on. To actually execute the pipeline you can directly iterable over a `_Stage` or iterate over the generator returned by `pypeln.process.to_iterable` if you want to have more control.
## Workers
The worker type of this module is a [multiprocessing.Process](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Process). Each worker process is instantiated with `daemon = True`. Creating each process is slow and consumes a lot of memory. Since processes are technically separate programs managed by the OS they are great for doing operations in parallel and avoiding the [GIL](https://realpython.com/python-gil).
## Queue
The queue type of this module is a [multiprocessing.Queue](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue). Since processes don't share memory, all information passed between them through these queues must first be serialized (which is slow), be aware of this and try to avoid sending large objects.
## Recommendations
Creating processes and doing communication between them is expensive, therefore we recommend the following:
* Minimize the number of stages based on this module.
* If possible, don't send large objects.
* If you just need to perform a very simple task over a collection in parallel use the `pypeln.process.each` function.
"""
from __future__ import absolute_import, print_function
import functools
from collections import namedtuple
from . import utils
import sys
import traceback
#############
# imports pr
#############
from multiprocessing import Process as WORKER
from multiprocessing import Manager, Lock, Queue
from multiprocessing.queues import Full, Empty
from threading import Thread
from collections import namedtuple
from . import utils
_MANAGER = Manager()
def _get_namespace():
return _MANAGER.Namespace()
#############
# imports th
#############
# from threading import Thread as WORKER
# from threading import Thread
# from.utils import Namespace
# from six.moves.queue import Queue, Empty, Full
# from threading import Lock
# def _get_namespace():
# return Namespace()
####################
# classes
####################
class _Stage(utils.BaseStage):
def __init__(self, worker_constructor, workers, maxsize, on_start, on_done, target, args, dependencies):
self.worker_constructor = worker_constructor
self.workers = workers
self.maxsize = maxsize
self.on_start = on_start
self.on_done = on_done
self.target = target
self.args = args
self.dependencies = dependencies
def __iter__(self):
return to_iterable(self)
def __repr__(self):
return "_Stage(worker_constructor = {worker_constructor}, workers = {workers}, maxsize = {maxsize}, target = {target}, args = {args}, dependencies = {dependencies})".format(
worker_constructor = self.worker_constructor,
workers = self.workers,
maxsize = self.maxsize,
target = self.target,
args = self.args,
dependencies = len(self.dependencies),
)
class _StageParams(namedtuple("_StageParams",
[
"input_queue", "output_queues", "on_start", "on_done",
"stage_namespace", "stage_lock",
"pipeline_namespace", "pipeline_error_queue",
])):
pass
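# _InputQueue wraps a multiprocessing Queue and counts the DONE sentinels sent
# by upstream workers: the stage is exhausted once every upstream worker has
# reported DONE (namespace.remaining == 0) and the queue has been drained.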
class _InputQueue(object):
def __init__(self, maxsize, total_done, pipeline_namespace, **kwargs):
self.queue = Queue(maxsize = maxsize, **kwargs)
self.lock = Lock()
self.namespace = _get_namespace()
self.namespace.remaining = total_done
self.pipeline_namespace = pipeline_namespace
def __iter__(self):
while not self.is_done():
x = self.get()
if self.pipeline_namespace.error:
return
if not utils.is_continue(x):
yield x
def get(self):
try:
x = self.queue.get(timeout = utils.TIMEOUT)
except (Empty, Full):
return utils.CONTINUE
if not utils.is_done(x):
return x
else:
with self.lock:
self.namespace.remaining -= 1
return utils.CONTINUE
def is_done(self):
return self.namespace.remaining == 0 and self.queue.empty()
def put(self, x):
self.queue.put(x)
class _OutputQueues(list):
def put(self, x):
for queue in self:
queue.put(x)
def done(self):
for queue in self:
queue.put(utils.DONE)
def _handle_exceptions(params):
def handle_exceptions(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except BaseException as e:
params.pipeline_error_queue.put((type(e), e, "".join(traceback.format_exception(*sys.exc_info()))))
params.pipeline_namespace.error = True
return wrapper
return handle_exceptions
def _run_task(f_task, params):
args = params.on_start() if params.on_start is not None else None
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
if params.input_queue:
for x in params.input_queue:
f_task(x, args)
else:
f_task(args)
params.output_queues.done()
if params.on_done is not None:
with params.stage_lock:
params.stage_namespace.active_workers -= 1
stage_status = utils.StageStatus(
namespace = params.stage_namespace,
lock = params.stage_lock,
)
params.on_done(stage_status, *args)
###########
# map
###########
def _map(f, params):
@_handle_exceptions(params)
def f_task(x, args):
y = f(x, *args)
params.output_queues.put(y)
_run_task(f_task, params)
def map(f, stage = utils.UNDEFINED, workers = 1, maxsize = 0, on_start = None, on_done = None):
"""
"""
if utils.is_undefined(stage):
return utils.Partial(lambda stage: map(f, stage, workers=workers, maxsize=maxsize, on_start=on_start, on_done=on_done))
stage = _to_stage(stage)
return _Stage(
worker_constructor = WORKER,
workers = workers,
maxsize = maxsize,
on_start = on_start,
on_done = on_done,
target = _map,
args = (f,),
dependencies = [stage],
)
###########
# flat_map
###########
def _flat_map(f, params):
@_handle_exceptions(params)
def f_task(x, args):
for y in f(x, *args):
params.output_queues.put(y)
_run_task(f_task, params)
def flat_map(f, stage = utils.UNDEFINED, workers = 1, maxsize = 0, on_start = None, on_done = None):
"""
"""
if utils.is_undefined(stage):
return utils.Partial(lambda stage: flat_map(f, stage, workers=workers, maxsize=maxsize, on_start=on_start, on_done=on_done))
stage = _to_stage(stage)
return _Stage(
worker_constructor = WORKER,
workers = workers,
maxsize = maxsize,
on_start = on_start,
on_done = on_done,
target = _flat_map,
args = (f,),
dependencies = [stage],
)
###########
# filter
###########
def _filter(f, params):
@_handle_exceptions(params)
def f_task(x, args):
if f(x, *args):
params.output_queues.put(x)
_run_task(f_task, params)
def filter(f, stage = utils.UNDEFINED, workers = 1, maxsize = 0, on_start = None, on_done = None):
"""
"""
if utils.is_undefined(stage):
return utils.Partial(lambda stage: filter(f, stage, workers=workers, maxsize=maxsize, on_start=on_start, on_done=on_done))
stage = _to_stage(stage)
return _Stage(
worker_constructor = WORKER,
workers = workers,
maxsize = maxsize,
on_start = on_start,
on_done = on_done,
target = _filter,
args = (f,),
dependencies = [stage],
)
###########
# each
###########
def _each(f, params):
@_handle_exceptions(params)
def f_task(x, args):
f(x, *args)
_run_task(f_task, params)
def each(f, stage = utils.UNDEFINED, workers = 1, maxsize = 0, on_start = None, on_done = None, run = False):
"""
"""
if utils.is_undefined(stage):
return utils.Partial(lambda stage: each(f, stage, workers=workers, maxsize=maxsize, on_start=on_start, on_done=on_done))
stage = _to_stage(stage)
stage = _Stage(
worker_constructor = WORKER,
workers = workers,
maxsize = maxsize,
on_start = on_start,
on_done = on_done,
target = _each,
args = (f,),
dependencies = [stage],
)
if not run:
return stage
for _ in stage:
pass
###########
# concat
###########
def _concat(params):
def f_task(x, args):
params.output_queues.put(x)
_run_task(f_task, params)
def concat(stages, maxsize = 0):
stages = [ _to_stage(s) for s in stages ]
return _Stage(
worker_constructor = WORKER,
workers = 1,
maxsize = maxsize,
on_start = None,
on_done = None,
target = _concat,
args = tuple(),
dependencies = stages,
)
################
# run
################
def run(stages, maxsize = 0):
if isinstance(stages, list) and len(stages) == 0:
        raise ValueError("Expected at least 1 stage to run")
elif isinstance(stages, list):
stage = concat(stages, maxsize = maxsize)
else:
stage = stages
stage = to_iterable(stage, maxsize = maxsize)
    for _ in stage:
pass
################
# _to_stage
################
def _to_stage(obj):
if isinstance(obj, _Stage):
return obj
elif hasattr(obj, "__iter__"):
return from_iterable(obj)
else:
raise ValueError("Object {obj} is not iterable".format(obj = obj))
################
# from_iterable
################
def _from_iterable(iterable, params):
def f_task(args):
for x in iterable:
params.output_queues.put(x)
_run_task(f_task, params)
def from_iterable(iterable = utils.UNDEFINED, maxsize = None, worker_constructor = Thread):
if utils.is_undefined(iterable):
return utils.Partial(lambda iterable: from_iterable(iterable, maxsize=maxsize, worker_constructor=worker_constructor))
return _Stage(
worker_constructor = worker_constructor,
workers = 1,
maxsize = None,
on_start = None,
on_done = None,
target = _from_iterable,
args = (iterable,),
dependencies = [],
)
##############
# to_iterable
##############
def _build_queues(stage, stage_input_queue, stage_output_queues, visited, pipeline_namespace):
if stage in visited:
return stage_input_queue, stage_output_queues
else:
visited.add(stage)
if len(stage.dependencies) > 0:
total_done = sum([ s.workers for s in stage.dependencies ])
input_queue = _InputQueue(stage.maxsize, total_done, pipeline_namespace)
stage_input_queue[stage] = input_queue
for _stage in stage.dependencies:
if _stage not in stage_output_queues:
stage_output_queues[_stage] = _OutputQueues([input_queue])
else:
stage_output_queues[_stage].append(input_queue)
stage_input_queue, stage_output_queues = _build_queues(
_stage,
stage_input_queue,
stage_output_queues,
visited,
pipeline_namespace = pipeline_namespace,
)
return stage_input_queue, stage_output_queues
def _create_worker(f, args, output_queues, input_queue):
kwargs = dict(
output_queues = output_queues)
if input_queue is not None:
kwargs.update(input_queue = input_queue)
return WORKER(target = f, args = args, kwargs = kwargs)
def _to_iterable(stage, maxsize):
pipeline_namespace = _get_namespace()
pipeline_namespace.error = False
pipeline_error_queue = Queue()
input_queue = _InputQueue(maxsize, stage.workers, pipeline_namespace)
stage_input_queue, stage_output_queues = _build_queues(
stage = stage,
stage_input_queue = dict(),
stage_output_queues = dict(),
visited = set(),
pipeline_namespace = pipeline_namespace,
)
stage_output_queues[stage] = _OutputQueues([ input_queue ])
processes = []
for _stage in stage_output_queues:
if _stage.on_done is not None:
stage_lock = Lock()
stage_namespace = _get_namespace()
stage_namespace.active_workers = _stage.workers
else:
stage_lock = None
stage_namespace = None
for _ in range(_stage.workers):
stage_params = _StageParams(
output_queues = stage_output_queues[_stage],
input_queue = stage_input_queue.get(_stage, None),
on_start = _stage.on_start,
on_done = _stage.on_done,
stage_lock = stage_lock,
stage_namespace = stage_namespace,
pipeline_namespace = pipeline_namespace,
pipeline_error_queue = pipeline_error_queue,
)
process = _stage.worker_constructor(
target = _stage.target,
args = _stage.args + (stage_params,)
)
processes.append(process)
for p in processes:
p.daemon = True
p.start()
for x in input_queue:
yield x
if pipeline_namespace.error:
error_class, _, trace = pipeline_error_queue.get()
raise error_class("\n\nOriginal {trace}".format(trace = trace))
for p in processes:
p.join()
def to_iterable(stage = utils.UNDEFINED, maxsize = 0):
if utils.is_undefined(stage):
return utils.Partial(lambda stage: _to_iterable(stage, maxsize))
else:
return _to_iterable(stage, maxsize)
if __name__ == '__main__':
import time
import random
def slow_square(x):
time.sleep(random.uniform(0, 1))
return x**2
stage = range(10)
stage = flat_map(lambda x: [x, x + 1, x + 2], stage)
stage = map(slow_square, stage, workers=4)
stage = filter(lambda x: x > 9, stage)
print(stage)
# --- file: cases/synthetic/tree-big-3329.py (repo: Virtlink/ccbench-chocopy) ---
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
decbd9eefc318e9f340928501dd77dc2fd3feae5 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/GoogleChronicleBackstory/Scripts/ChronicleDBotScoreWidgetScript/ChronicleDBotScoreWidgetScript_test.py | 233fec01a3ba223ad7e138ca621f62f0ad7167e3 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 2,273 | py | from unittest.mock import patch
import demistomock as demisto
import ChronicleDBotScoreWidgetScript
DBOT_SCORE = [{'CustomFields': {'chronicledbotscore': 2}}]
def test_main_success(mocker):
"""
When main function is called, get_html_representation should be called.
"""
mocker.patch.object(demisto, 'incidents', return_value=DBOT_SCORE)
mocker.patch.object(ChronicleDBotScoreWidgetScript, 'get_html_representation',
return_value='')
ChronicleDBotScoreWidgetScript.main()
assert ChronicleDBotScoreWidgetScript.get_html_representation.called
@patch('ChronicleDBotScoreWidgetScript.return_error')
def test_main_failure(mock_return_error, capfd, mocker):
"""
When main function gets some exception then valid message should be printed.
"""
mocker.patch.object(demisto, 'incidents', return_value=DBOT_SCORE)
mocker.patch.object(ChronicleDBotScoreWidgetScript, 'get_html_representation', side_effect=Exception)
with capfd.disabled():
ChronicleDBotScoreWidgetScript.main()
mock_return_error.assert_called_once_with('Could not load widget:\n')
def test_get_html_representation_when_dbotscore_is_1(mocker):
"""
When DBotscore is 1, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(1)
assert "<div style='color:green; text-align:center;'><h1>1<br/>Good</h1></div>" == html_representation
def test_get_html_representation_when_dbotscore_is_2(mocker):
"""
When DBotscore is 2, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(2)
assert "<div style='color:orange; text-align:center;'><h1>2<br/>Suspicious</h1></div>"\
== html_representation
def test_get_html_representation_when_dbotscore_is_3(mocker):
"""
When DBotscore is 3, get_html_representation should return html representation accordingly.
"""
html_representation = ChronicleDBotScoreWidgetScript.get_html_representation(3)
assert "<div style='color:red; text-align:center;'><h1>3<br/>Bad</h1></div>" == html_representation
| [
"[email protected]"
] | |
eaadf08598bc9363904e1e138f7be007406303dd | e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb | /src/tests/parsing_test_12.py | 9cd550d8a61b41538e0f4f4749160a0deb26a210 | [] | no_license | gsakkas/seq2parse | 3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9 | 7ae0681f1139cb873868727f035c1b7a369c3eb9 | refs/heads/main | 2023-04-09T12:29:37.902066 | 2023-01-18T21:32:12 | 2023-01-18T21:32:12 | 417,597,310 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | def distributeCandies(candies):
"""Given an integer array with even length, where different numbers in this array represent different kinds of candies. Each number means one candy of the corresponding kind. You need to distribute these candies equally in number to brother and sister. Return the maximum number of kinds of candies the sister could gain."""
brother, sister = [], []
c0, c1 = 0, 1
while c1 < len(candies):
        if candies[c0] not in sister:
            sister.append(candies[c0])
            brother.append(candies[c1])
        else:
            sister.append(candies[c1])
            brother.append(candies[c0])
c0 += 2
c1 += 2
uniqueCandies, i = [], 0
while i < len(sister):
if sister[i] not in uniqueCandies:
            uniqueCandies.append(sister[i])
i += 1
return len(uniqueCandies)
distributeCandies([1,2,1,2,3,4,3,2,5,6,7,4,3,6])
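
# A shorter route to the stated goal (a sketch): the sister receives exactly
# half of the candies, and duplicate kinds add nothing, so the maximum number
# of kinds she can get is min(distinct kinds, len(candies) // 2). Note that
# the greedy pairing above can fall short of this bound for some orderings.
def distributeCandiesAlt(candies):
    return min(len(set(candies)), len(candies) // 2)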
| [
"[email protected]"
] | |
8497472c087b3e8dfcbaddcb08b0b577a736e7f8 | 76dfedcfdcc686a7a0c5604309512ee20466ae63 | /tater/core/visitors.py | 2a3fa78fc1e2758fe5d68f0efb6687aa00f4352a | [
"BSD-3-Clause"
] | permissive | pombredanne/tater | 1d485e72232b75677ce743a1a84b10a27cbdc50c | 8df0f686f295fb2fdfa1d0387268105677d70dc0 | refs/heads/master | 2021-01-24T14:45:59.917067 | 2013-11-08T22:41:27 | 2013-11-08T22:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,922 | py | from tater.base.visitor import Visitor
class Transformer(Visitor):
'''A visitor that replaces the visited node with the
output of the visitor function.
'''
def visit_nodes(self, node):
'''If the visitor function returns a new node, replace
the current node with it, then stop.
Otherwise, continue on down the tree.
'''
try:
new_node = self.visit_node(node)
except self.Continue:
# Skip visiting the child nodes.
return
if new_node is not None:
if node in node.parent.children:
node.replace(new_node)
return
visit_nodes = self.visit_nodes
for child in node.children[:]:
visit_nodes(child)
class Renderer(Visitor):
'''The visitor functions on this visitor are context managers.
They perform some action initially, then delegate to the node's
child functions all the way down the tree, then perform a final,
    closing action, like closing an html tag.
from contextlib import contextmanager
from StringIO import StringIO
    from tater.visitor import Renderer
    class MyRenderer(Renderer):
def __init__(self):
self.buf = StringIO()
@contextmanager
def visit_div(self, node):
self.buf.write('<div>')
self.buf.write(node.first_text())
yield
self.buf.write('</div>')
'''
def visit_nodes(self, node):
'''If the visitor function is a context manager, invoke it,
otherwise just run the function.
'''
method = self._methods[node]
# If no function is defined, run the generic visit function.
if method is None:
generic_visit = getattr(self, 'generic_visit', None)
if generic_visit is None:
return
method = generic_visit
self._run_visitor_method(method, node)
def _run_visitor_method(self, method, node):
# Test if the function is a context manager. If so, invoke it.
try:
with method(node):
visit_nodes = self.visit_nodes
for child in self.get_children(node):
try:
visit_nodes(child)
except self.Continue:
continue
except:
return method(node)
class _Orderer(Visitor):
def __init__(self):
self.nodes = []
def visit_node(self, node):
self.nodes.append(node)
def _sortfunc(self, node):
if node.items:
for pos, token, text in node.items:
return pos
def finalize(self):
return sorted(self.nodes, key=self._sortfunc)
class OrderedRenderer(Visitor):
    '''Chooses the order in which to visit children based on their index
    values, precomputing that ordering with the _Orderer helper class.
'''
def visit(self, node):
self.ordered = _Orderer().visit(node)
super(OrderedRenderer, self).visit(node)
def visit_nodes(self, node):
'''If the visitor function is a context manager, invoke it,
otherwise just run the function.
'''
func = self._methods[node]
# If no function is defined, run the generic visit function.
if func is None:
generic_visit = getattr(self, 'generic_visit', None)
if generic_visit is None:
return
return generic_visit(node)
# Test if the function is a context manager. If so, invoke it.
else:
with func(node):
visit_nodes = self.visit_nodes
for child in node.children[:]: # sorted(node.children, key=self.ordered.index):
visit_nodes(child)
class DiGraphVisitor(Visitor):
def __init__(self, G):
self.G = G
def get_children(self, node):
return self.G[node]
def finalize(self):
'''Final steps the visitor needs to take, plus the
return value or .visit, if any.
'''
return self
class EtreeVisitor(Visitor):
def visit(self, node):
self.node = node
self.visit_nodes(node)
return self.finalize()
def get_children(self, node):
return tuple(node)
def visit_HtmlComment(self, node):
'''Skip comments.
'''
raise self.Continue()
# ---------------------------------------------------------------------------
# Helpers for figuring out the start/end indexes of a parse tree.
# ---------------------------------------------------------------------------
class IndexVisitor(Visitor):
'''Base for visitors that aggregate information about
string indices of modeled text.
'''
def __init__(self):
self.indices = []
class StartIndexVisitor(IndexVisitor):
'''This visitor finds the starting index of the left-most string
modeled by the ast.
'''
def get_index(self):
if self.indices:
return min(self.indices)
def generic_visit(self, node):
for pos, token, text in node.items:
self.indices.append(pos)
class EndIndexVisitor(IndexVisitor):
'''This visitor finds the ending index of the right-most string
modeled by the ast.
'''
def get_index(self):
if self.indices:
return max(self.indices)
def generic_visit(self, node):
'''The end index will be the `pos` obtained from
the lexer, plus the length of the associated text.
'''
for pos, token, text in node.items:
self.indices.append(pos + len(text))
def get_start(tree):
return StartIndexVisitor().visit(tree).get_index()
def get_end(tree):
return EndIndexVisitor().visit(tree).get_index()
def get_span(tree):
return (get_start(tree), get_end(tree))
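# Usage note (a sketch): given a parse `tree` built from lexer items,
# get_span(tree) returns the (start, end) character offsets of the source
# text the tree models, derived from each item's `pos` and its text length.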
| [
"[email protected]"
] | |
fabe5682cf9ca2d1969332761e1b6359bb59f68b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/469/usersdata/304/111875/submittedfiles/Av2_Parte4.py | 83fdc5c98c3ffc1bd4ce32042c996c4e2531ca26 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | # -*- coding: utf-8 -*-
m = int(input('Rows: '))
n = int(input('Columns: '))
matriz = []
for i in range (0,m,1):
linha=[]
for j in range (0,n,1):
        linha.append(int(input('Element: ')))
matriz.append(linha)
soma1 = 0
soma2 = 0
soma3 = 0
soma4 = 0
# sum each of the first four columns (index the rows with the loop variable
# j, not the stale i left over from the input loops; assumes n >= 4)
for j in range (0,m,1):
    soma1 = soma1 + matriz[j][0]
for j in range (0,m,1):
    soma2 = soma2 + matriz[j][1]
for j in range (0,m,1):
    soma3 = soma3 + matriz[j][2]
for j in range (0,m,1):
    soma4 = soma4 + matriz[j][3]
if soma1<soma2 and soma1<soma3 and soma1<soma4:
print(soma1)
if soma2<soma1 and soma2<soma3 and soma2<soma4:
print(soma2)
if soma3<soma1 and soma3<soma2 and soma3<soma4:
    print(soma3)
if soma4<soma1 and soma4<soma2 and soma4<soma3:
print(soma4) | [
"[email protected]"
] | |
d57ca73d1db71a5c04110ccef3b6f4aa3826a8b2 | 1285703d35b5a37734e40121cd660e9c1a73b076 | /at_coder/abc/old/138/c.py | 192446c41cea29241ee36a2e3850f0be6b17c42a | [] | no_license | takin6/algorithm-practice | 21826c711f57131108168775f08e4e13d07a3b38 | f4098bea2085a77d11c29e1593b3cc3f579c24aa | refs/heads/master | 2022-11-30T09:40:58.083766 | 2020-08-07T22:07:46 | 2020-08-07T22:07:46 | 283,609,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import heapq
N = int(input())
A = [ i for i in list(map(int,input().split()))]
heapq.heapify(A)
for i in range(N-1):
a = heapq.heappop(A)
b = heapq.heappop(A)
heapq.heappush(A, ((a+b)/2))
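# Greedy note: averaging the two smallest values first pushes the larger
# values toward the later, more heavily weighted merges, which maximizes
# the final remaining value -- hence the min-heap.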
print(heapq.heappop(A)) | [
"[email protected]"
] | |
9bf6f107258718c52bb3048142ac5f1f53abf79c | a8a5772674e62beaa4f5b1f115d280103fd03749 | /boyle_coello_model.py | ee18846957fefb1f7050aebdba1dc53f9a62af0e | [] | no_license | tahentx/pv_workbook | c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7 | 08912b0ef36a5226d23fa0430216a3f277aca33b | refs/heads/master | 2022-12-12T20:39:35.688510 | 2021-03-30T03:20:54 | 2021-03-30T03:20:54 | 172,827,250 | 0 | 1 | null | 2022-12-08T16:47:39 | 2019-02-27T02:25:24 | Python | UTF-8 | Python | false | false | 1,454 | py |
def boyle_coello_model(self,time,rain = 0,rainthreshold = 0,tilt = 0,pm2_5 = 0,pm_10=,**kwargs):
"""
Use the :py:func:`boyle_coello_model` function to determine the impact of dirt accumulation on performance.
Parameters
----------
time : numeric
Importing the datetime module would be suitable here
rain : array-like
Hourly rain accumulation values of the same duration defined in the time parameter. Units in milimeters.
rainthreshold : float
A scalar for the amount of rain in an accumulation period needed to clear the modules. In periods where the accumulated rain meets oro exceeds the threshold, the panels are assumed to be cleaned immediately after the accumulation period.
tilt : int
A scalar or vector for the tilt of the PV panels.
PM2_5 : float
The concentration of particulate matter with diamter less than 2.5 microns, in g/m^3.
PM10 : float
The concentration of particulate matter with diamter less than 10 microns, in g/m^3.
ModelType : int
Optional input to determine the model type to be used in the soiling model. A value of "1"
RainAccPeriod :
optional input that specifies the period, in hours
over which to accumulate rainfall totals before checking against the
rain cleaning threshold.
Returns
-------
See pvsystem.calcparams_cec for details
"""
kwargs = _build_kwargs([''])
| [
"[email protected]"
] | |
0d0c4847d7dd3f15c7552f308fddb0e884b9708f | 0f20f3e02aa05b8e690190a96e92a524b211338f | /프로그래머스/Level2/전화번호 목록.py | ba3a8b6089d7dcfebd4b0157b6e4632aec28a767 | [] | no_license | HYEONAH-SONG/Algorithms | ec744b7e775a52ee0756cd5951185c30b09226d5 | c74ab3ef21a728dcd03459788aab2859560367e6 | refs/heads/master | 2023-07-18T14:41:48.360182 | 2021-09-03T13:41:23 | 2021-09-03T13:41:23 | 336,240,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | # ["119", "97674223", "1195524421"]
# zip 활용하기
# 이중 for 문 절대 사용 x
def solution(phone_book):
phone_book.sort()
for p1, p2 in zip(phone_book, phone_book[1:]):
if p1 in p2[:len(p1)]:
return False
return True
phone_book = ["119", "97674223", "1195524421"]
print(solution(phone_book))
#
# def solution2(phone_book):
# phone = {}
# count = 0
# for i in phone_book:
# phone[i] = i
# for i in phone:
# for j in phone_book:
# if i == j[:len(i)] :
# count +=1
# print(i)
# if count ==len(phone_book):
# return True
# return False | [
"[email protected]"
] | |
9266cc2fa80f7bd6d02259c385446d72e46a40ca | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/PaperdollSculptingGhost.py | 05f89551022a65a105bc5ddbc5ead7c726d90203 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 244 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\modules\nice\client\_nastyspace\PaperdollSculptingGhost.py
from eve.client.script.paperDoll.PaperdollSculptingGhost import PaperdollSculptingGhost
| [
"[email protected]"
] | |
914c501cc5e67519401fa1fe18f0b726b7a83dcf | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/lobby/mode_selector/items/epic_mode_selector_item.py | 5ec8fbb916cf65c72c3009f55d06c1ec08f8e4ad | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 4,683 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/mode_selector/items/epic_mode_selector_item.py
import typing
from gui.impl import backport
from gui.impl.gen import R
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_card_types import ModeSelectorCardTypes
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_epic_model import ModeSelectorEpicModel
from gui.impl.lobby.mode_selector.items import setBattlePassState
from gui.impl.lobby.mode_selector.items.base_item import ModeSelectorLegacyItem
from gui.impl.lobby.mode_selector.items.items_constants import ModeSelectorRewardID
from gui.shared.formatters import time_formatters
from gui.shared.formatters.ranges import toRomanRangeString
from helpers import dependency, time_utils
from skeletons.gui.game_control import IEpicBattleMetaGameController
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_normal_card_model import BattlePassState
if typing.TYPE_CHECKING:
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_normal_card_model import ModeSelectorNormalCardModel
class EpicModeSelectorItem(ModeSelectorLegacyItem):
__slots__ = ()
_VIEW_MODEL = ModeSelectorEpicModel
_CARD_VISUAL_TYPE = ModeSelectorCardTypes.EPIC_BATTLE
__epicController = dependency.descriptor(IEpicBattleMetaGameController)
def _getIsDisabled(self):
return not self.__epicController.isEnabled()
def _onInitializing(self):
super(EpicModeSelectorItem, self)._onInitializing()
self.__epicController.onPrimeTimeStatusUpdated += self.__onEpicUpdate
self.__epicController.onUpdated += self.__onEpicUpdate
self.__epicController.onEventEnded += self.__onEventEnded
self.__fillViewModel()
def _onDisposing(self):
self.__epicController.onPrimeTimeStatusUpdated -= self.__onEpicUpdate
self.__epicController.onUpdated -= self.__onEpicUpdate
self.__epicController.onEventEnded -= self.__onEventEnded
super(EpicModeSelectorItem, self)._onDisposing()
def __onEpicUpdate(self, *_):
self.__fillViewModel()
def __onEventEnded(self):
self.onCardChange()
def __fillViewModel(self):
with self.viewModel.transaction() as vm:
self.__resetViewModel(vm)
currentSeason = self.__epicController.getCurrentSeason()
nextSeason = self.__epicController.getNextSeason()
season = currentSeason or nextSeason
currentTime = time_utils.getCurrentLocalServerTimestamp()
vehicleLevels = self.__epicController.getValidVehicleLevels()
localeFolder = R.strings.mode_selector.mode.epicBattle
vm.setConditions(backport.text(localeFolder.conditionSingleLevel() if len(vehicleLevels) == 1 else localeFolder.condition(), levels=toRomanRangeString(vehicleLevels)))
vm.setDescription(backport.text(R.strings.mode_selector.mode.epicBattle.description()))
if season is None:
return
vm.widget.setIsEnabled(True)
if season.hasActiveCycle(currentTime):
self._addReward(ModeSelectorRewardID.CREDITS)
self._addReward(ModeSelectorRewardID.EXPERIENCE)
timeLeftStr = ''
cycleInfo = season.getCycleInfo()
if cycleInfo is not None:
timeLeftStr = time_formatters.getTillTimeByResource(cycleInfo.endDate - currentTime, R.strings.menu.Time.timeLeftShort, removeLeadingZeros=True)
vm.setTimeLeft(timeLeftStr)
currentLevel, _ = self.__epicController.getPlayerLevelInfo()
vm.widget.setLevel(currentLevel)
else:
cycleInfo = season.getNextByTimeCycle(currentTime)
if cycleInfo is not None:
if cycleInfo.announceOnly:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.cycleSoon()))
else:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.cycleNext(), date=backport.getShortDateFormat(cycleInfo.startDate)))
self.viewModel.setBattlePassState(BattlePassState.NONE)
else:
vm.setStatusNotActive(backport.text(R.strings.mode_selector.mode.epicBattle.seasonEnd()))
setBattlePassState(self.viewModel)
return
@staticmethod
def __resetViewModel(vm):
vm.setTimeLeft('')
vm.setStatusActive('')
vm.setStatusNotActive('')
vm.getRewardList().clear()
| [
"[email protected]"
] | |
d60c82a2165499d5910f67b4948ff962e472bd62 | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /tags/release-0.7.2/pyformex/examples/Clock.py | efd5affc5d3cdb0b4bf75aa8c70dd37a5f479b1f | [] | no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | #!/usr/bin/env pyformex --gui
# $Id$
##
## This file is part of pyFormex 0.7.2 Release Tue Sep 23 16:18:43 2008
## pyFormex is a Python implementation of Formex algebra
## Website: http://pyformex.berlios.de/
## Copyright (C) Benedict Verhegghe ([email protected])
##
## This program is distributed under the GNU General Public License
## version 2 or later (see file COPYING for details)
##
"""Clock
level = 'advanced'
topics = []
techniques = []
"""
from formex import * # Needed if we want to use this example as a module
from gui.draw import * # Needed if we want to use this example as a module
import simple
from datetime import datetime
from PyQt4 import QtCore
class AnalogClock(object):
"""An analog clock built from Formices"""
def __init__(self,lw=2,mm=0.75,hm=0.85,mh=0.7,hh=0.6, sh=0.9):
"""Create an analog clock."""
self.linewidth = lw
self.circle = simple.circle(a1=2.,a2=2.)
radius = Formex(pattern('2'))
self.mainmark = radius.divide([mm,1.0])
self.hourmark = radius.divide([hm,1.0])
self.mainhand = radius.divide([0.0,mh])
self.hourhand = radius.divide([0.0,hh])
if sh > 0.0:
self.secshand = radius.divide([0.0,sh])
else:
self.secshand = None
self.hands = []
self.timer = None
def draw(self):
"""Draw the clock (without hands)"""
draw(self.circle,color='black',linewidth=self.linewidth)
draw(self.mainmark.rosette(4,90),color='black',linewidth=self.linewidth)
draw(self.hourmark.rot(30).rosette(2,30).rosette(4,90),
color='black',linewidth=0.5*self.linewidth)
def drawTime(self,hrs,min,sec=None):
"""Draw the clock's hands showing the specified time.
If no seconds are specified, no seconds hand is drawn.
"""
hrot = - hrs*30. - min*0.5
mrot = - min*6.
GD.canvas.removeActors(self.hands)
MH = draw(self.mainhand.rot(mrot),bbox=None,color='red',linewidth=self.linewidth)
HH = draw(self.hourhand.rot(hrot),bbox=None,color='red',linewidth=self.linewidth)
self.hands = [MH,HH]
if self.secshand and sec:
srot = - sec*6.
SH = draw(self.secshand.rot(srot),bbox=None,color='orange',linewidth=0.5*self.linewidth)
self.hands.append(SH)
def drawNow(self):
"""Draw the hands showing the current time."""
now = datetime.now()
self.drawTime(now.hour,now.minute,now.second)
def run(self,granularity=1,runtime=100):
"""Run the clock for runtime seconds, updating every granularity."""
if granularity > 0.0:
self.timer = QtCore.QTimer()
self.timer.connect(self.timer,QtCore.SIGNAL("timeout()"),self.drawNow)
self.timer.start(1000*granularity)
if runtime > 0.0:
self.timeout = QtCore.QTimer()
self.timeout.connect(self.timeout,QtCore.SIGNAL("timeout()"),self.stop)
self.timeout.setSingleShot(True)
self.timeout.start(1000*runtime)
def stop(self):
"""Stop a running clock."""
if self.timer:
self.timer.stop()
if __name__ == "draw":
C = AnalogClock()
C.draw()
zoomAll()
C.drawNow()
if ack("Shall I start the clock?"):
C.run()
warning("Please wait until the clock stops running")
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] | bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
6d66b0273af64eacb5f11ceecc9bd3c0e6ec7cd8 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/15/43/13.py | 0d2d8218a97f9192b5a80288431e4d9cacbbae7a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | def solve():
n = int(raw_input())
text = [raw_input().split() for _ in xrange(n)]
base = [set(text[0]), set(text[1])]
base_res = len(base[0] & base[1])
if n == 2: return base_res
res = 1e9
for i in xrange(2 ** (n - 2)):
added = [set(), set()]
for j in xrange(n - 2):
lang = ((i >> j) & 1)
for word in text[j + 2]:
if not word in base[lang]:
added[lang].add(word)
r = len(added[0] & base[1]) + len(added[1] & base[0]) + len(added[0] & added[1])
res = min(res, base_res + r)
return res
for i in xrange(input()):
print "Case #%d: %s" % (i + 1, solve())
| [
"[email protected]"
] | |
85b562fb5ad9eb517991e871954a55520b0cf345 | 6d162c19c9f1dc1d03f330cad63d0dcde1df082d | /util/test/tests/Vulkan/VK_Synchronization_2.py | b4ce34cc6a9e18b8c49152887914de8d9b4884b1 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"CC-BY-3.0",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | baldurk/renderdoc | 24efbb84446a9d443bb9350013f3bfab9e9c5923 | a214ffcaf38bf5319b2b23d3d014cf3772cda3c6 | refs/heads/v1.x | 2023-08-16T21:20:43.886587 | 2023-07-28T22:34:10 | 2023-08-15T09:09:40 | 17,253,131 | 7,729 | 1,358 | MIT | 2023-09-13T09:36:53 | 2014-02-27T15:16:30 | C++ | UTF-8 | Python | false | false | 4,360 | py | import rdtest
import renderdoc as rd
class VK_Synchronization_2(rdtest.TestCase):
demos_test_name = 'VK_Synchronization_2'
def get_capture_options(self):
opts = rd.CaptureOptions()
# Ref all resources to pull in the image with unbound data
opts.refAllResources = True
return opts
def check_capture(self):
self.controller.SetFrameEvent(0, False)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
# Check that the layout is reported correctly at the start of the frame
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PREINITIALIZED":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_UNDEFINED":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Swapchain":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR":
raise rdtest.TestFailureException("Swapchain image is in {} layout".format(img.layouts[0].name))
action = self.find_action("Before Transition")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
pre_init = rd.ResourceId()
undef_img = rd.ResourceId()
# Check that the layout is reported correctly before transitions still
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PREINITIALIZED":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
pre_init = img.resourceId
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_UNDEFINED":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
undef_img = img.resourceId
elif res.name == "Image:Swapchain":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR":
raise rdtest.TestFailureException("Swapchain image is in {} layout".format(img.layouts[0].name))
action = self.find_action("vkCmdDraw")
self.check(action is not None)
self.controller.SetFrameEvent(action.eventId, False)
# Check that the backbuffer didn't get discarded
self.check_triangle(out=action.outputs[0])
col = [float(0x40) / 255.0] * 4
# The pre-initialised image should have the correct data still also
self.check_triangle(out=pre_init, back=col, fore=col)
# we copied its contents into the undefined image so it should also have the right colour
self.check_triangle(out=undef_img, back=col, fore=col)
pipe: rd.VKState = self.controller.GetVulkanPipelineState()
# Check that after transitions, the images are in the right state
for img in pipe.images:
img: rd.VKImageData
res = self.get_resource(img.resourceId)
if res.name == "Image:Preinitialised":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL":
raise rdtest.TestFailureException("Pre-initialised image is in {} layout".format(img.layouts[0].name))
elif res.name == "Image:Undefined":
if img.layouts[0].name != "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL":
raise rdtest.TestFailureException("Undefined image is in {} layout".format(img.layouts[0].name))
elif img.resourceId == pipe.currentPass.framebuffer.attachments[0].imageResourceId:
if img.layouts[0].name != "VK_IMAGE_LAYOUT_GENERAL":
raise rdtest.TestFailureException("Rendered swapchain image is in {} layout".format(img.layouts[0].name))
| [
"[email protected]"
] | |
7d65874d66433649931e827dc23a00315d029607 | 49cc32d5859e9002cb4b94ade25d72f5f4fe1612 | /CLASE4_PYTHON/CODIGOS/serial_python.py | 43e9522e950f183e31d94dd1531efffa2455861a | [] | no_license | jorgepdsML/DIGITAL-IMAGE-PROCESSING-PYTHON | c8441215b4cf9e912dad1885a82058c1b0bbb872 | 781c8c6d583aebda6381a301cdc33ad4d09f20c5 | refs/heads/master | 2021-06-26T00:06:44.344201 | 2021-01-21T17:41:36 | 2021-01-21T17:41:36 | 194,336,928 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #importar modulo serial
import serial
# use the Serial class: create a Serial object
objeto_serial = serial.Serial()
# configure the serial object's communication speed and port
objeto_serial.baudrate = 9600
objeto_serial.port = "COM16"
objeto_serial.open()
# pyserial's write() expects bytes on Python 3, hence the b"..." prefix
objeto_serial.write(b"sfadg")
dato=objeto_serial.read()
objeto_serial.close()
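# A fuller read example (a sketch; port name and timeout are assumptions):
#   ser = serial.Serial(port="COM16", baudrate=9600, timeout=1)
#   line = ser.readline().decode("ascii", errors="replace")
#   ser.close()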
| [
"[email protected]"
] | |
b3f94c94e05413730db58e91ad49543f9373f995 | f259a50a663a035c7c79bffbe4f7e08fc1ca5ce2 | /pepysdiary/annotations/__init__.py | 2e85a18199d3ff74ff2e7cbb8681c9b6532c3a40 | [] | no_license | eskadah/pepysdiary | a22aa5d4c82c9a92410940e5e9562eb232873258 | 7fd706fc862e216216d8d80238516328404f5786 | refs/heads/master | 2022-09-13T12:21:19.237047 | 2020-06-03T10:34:54 | 2020-06-03T10:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | default_app_config = "pepysdiary.annotations.apps.AnnotationsConfig"
def get_model():
from pepysdiary.annotations.models import Annotation
return Annotation
def get_form():
from pepysdiary.annotations.forms import AnnotationForm
return AnnotationForm
| [
"[email protected]"
] | |
d33581d9970083928f2bd10daedf11d919f2aeee | 8d29fd856250e746f19e086975e83d2dea2cf6a3 | /ResourceStatusSystem/Agent/test/Test_SSInspectorAgent/fixtures.py | 5e164fec36b40673dd1bb2d867db9da3fd471a7f | [] | no_license | hanyl/DIRAC | 048c749154192e3940e17b24396afe0e667444b2 | 82eb56888fc039f94ba1033ea4b6d3ad503bf96e | refs/heads/master | 2021-01-16T00:23:00.344192 | 2013-01-19T00:01:05 | 2013-01-19T00:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | import unittest, sys
from DIRAC.ResourceStatusSystem.PolicySystem.mock.PEP import PEP
from DIRAC.ResourceStatusSystem.Client.mock.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.mock import CheckingFreqs
from DIRAC.ResourceStatusSystem.Utilities.mock import CS
from DIRAC.ResourceStatusSystem.Agent.mock.AgentModule import AgentModule
from DIRAC.ResourceStatusSystem.Command.mock import knownAPIs
class UnitFixture( unittest.TestCase ):
def setUp( self ):
import DIRAC.ResourceStatusSystem.Agent.SSInspectorAgent as mockedModule
mockedModule.PEP = PEP
mockedModule.ResourceStatusClient = ResourceStatusClient
mockedModule.CheckingFreqs = CheckingFreqs
mockedModule.CS = CS
mockedModule.knownAPIs = knownAPIs
mockedModule.SSInspectorAgent.__bases__ = ( AgentModule, )
self.agent = mockedModule.SSInspectorAgent( '', '')
def tearDown( self ):
#sys.modules = self._modulesBkup
del sys.modules[ 'DIRAC.ResourceStatusSystem.Agent.SSInspectorAgent' ]
| [
"[email protected]"
] | |
3c53bda6c1dda03b00707e591ebe42019e901d8d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /K5277r6RmsJRSz27t_1.py | 8f51eb7e5f7e14c77fa0f9009fe9c23f6d01a7d5 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | """
The challenge is to recreate the functionality of the `title()` method into a
function called `emphasise()`. The `title()` method capitalises the first
letter of _every word_ and lowercases all of the other letters in the word.
### Examples
emphasise("hello world") ➞ "Hello World"
emphasise("GOOD MORNING") ➞ "Good Morning"
emphasise("99 red balloons!") ➞ "99 Red Balloons!"
### Notes
* You won't run into any issues when dealing with numbers in strings.
* Please don't use the `title()` method directly :(
"""
def emphasise(txt):
return ' '.join(w.capitalize() for w in txt.split())
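# Quick check against the docstring examples:
# emphasise("hello world")      -> "Hello World"
# emphasise("GOOD MORNING")     -> "Good Morning"
# emphasise("99 red balloons!") -> "99 Red Balloons!"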
| [
"[email protected]"
] | |
929885ae6922340d428fa9858beeb3cfdd43d863 | e7b312b4cc3355f4ca98313ef2ac9f3b0d81f245 | /abc/100/d/d.py | 6802afb2d5e0137ddc0db2a12ffb1d70809ab5bc | [] | no_license | minus9d/programming_contest_archive | 75466ab820e45ee0fcd829e6fac8ebc2accbbcff | 0cb9e709f40460305635ae4d46c8ddec1e86455e | refs/heads/master | 2023-02-16T18:08:42.579335 | 2023-02-11T14:10:49 | 2023-02-11T14:10:49 | 21,788,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
N, M = map(int, input().split())
XYZs = []
for n in range(N):
x, y, z = map(int, input().split())
XYZs.append((x,y,z))
ans = 0
for i in (1,-1):
for j in (1,-1):
for k in (1,-1):
scores = []
for x, y, z in XYZs:
val = x * i + y * j + z * k
scores.append(val)
scores.sort()
scores.reverse()
ans = max(ans, sum(scores[:M]))
print(ans)
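# Why 8 sign patterns suffice (a brief note): for the optimal choice of M
# items, each coordinate total X, Y, Z has a definite sign, so
# |X| + |Y| + |Z| equals X*i + Y*j + Z*k for one of the 8 uniform (i, j, k)
# sign choices; taking the top-M scores under every pattern and keeping the
# maximum therefore covers the optimum.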
| [
"[email protected]"
] | |
3fbb25875d5ed9d4a1c9f86aeb665817922af9a8 | 262311e60529868e38c2c57ee3db573f8e11c458 | /mysite/books/models.py | d197441088c1c45f016b482eeb1d336bedac587b | [] | no_license | huileizhan227/untitled | 1c5604736d9ffcce6f7cb7e308cdc0ebd07e116a | 07df74c89291b1664a28e3c8dcba51a917f1835f | refs/heads/master | 2023-01-27T11:51:37.609210 | 2020-04-16T11:49:59 | 2020-04-16T11:49:59 | 150,606,504 | 1 | 0 | null | 2023-01-09T12:00:12 | 2018-09-27T15:12:18 | HTML | UTF-8 | Python | false | false | 1,468 | py | from django.db import models
# Create your models here.
class Publisher(models.Model):
'''
    Publisher - name, address, city, state/province, country, website
'''
name = models.CharField(max_length=30)
address = models.CharField(max_length=100)
city = models.CharField(max_length=60)
state_province = models.CharField(max_length=30)
country = models.CharField(max_length=50)
website = models.URLField()
def __unicode__(self):
        # the __unicode__() method tells Python how to display the object as a unicode string
return self.name
class Author(models.Model):
'''
    Author - last name, first name, email address
'''
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
def __unicode__(self):
return u"{} {}".format(self.first_name, self.last_name)
class Book(models.Model):
'''
    Book - title, authors (one or more; a many-to-many relationship with
    Author), publisher (a one-to-many relationship with Publisher, also
    called a foreign key), publication date
'''
title = models.CharField(max_length=100)
author = models.ManyToManyField(Author)
    publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)  # ForeignKey requires on_delete (e.g. models.CASCADE) since Django 2.0
publication_date = models.DateField()
def __unicode__(self):
return self.title
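# Example usage (a sketch, e.g. inside `python manage.py shell`):
#   p = Publisher.objects.create(name="...", address="...", city="...",
#                                state_province="...", country="...",
#                                website="https://example.com")
#   b = Book.objects.create(title="...", publisher=p,
#                           publication_date=date(2018, 1, 1))
#   b.author.add(a1, a2)  # many-to-many links are attached after save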
| [
"[email protected]"
] | |
689b6003d2c9cf04e858dc49e68af06d8e75d941 | 500daf0ee45c943293920e0ce3fd9b539f05fcb6 | /tensorloaders/redisdataset.py | 6fafa8c00b901549697cbeec03254ad91eb51bb2 | [
"BSD-3-Clause"
] | permissive | deeptechlabs/tensorloaders | 6a8cd5cd97d217d0f6a352a99b68da3729085257 | f3db29828d5e0fdc28c2089726420fa5fbdf1ff2 | refs/heads/master | 2020-04-23T23:48:28.338670 | 2019-03-12T11:41:28 | 2019-03-12T11:41:28 | 171,546,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | """
Shows how to store and load data from redis using a PyTorch
Dataset and DataLoader (with multiple workers).
@author: ptrblck
"""
import redis
import torch
from torch.utils.data import Dataset
import numpy as np
# Create RedisDataset
class RedisDataset(Dataset):
def __init__(self,
redis_host='localhost',
redis_port=6379,
redis_db=0,
length=0,
transform=None):
self.db = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
self.length = length
self.transform = transform
def __getitem__(self, index):
data = self.db.get(index)
        data = np.frombuffer(data, dtype=np.int64)  # np.long was removed from NumPy; int64 is assumed to match the writer's layout
x = data[:-1].reshape(3, 24, 24).astype(np.uint8)
y = torch.tensor(data[-1]).long()
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return self.length
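
# Example usage (a sketch; assumes a local Redis instance already populated
# with `length` samples, each stored under its integer index as the raw
# bytes of a flat int64 array: 3*24*24 pixel values followed by the label):
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    dataset = RedisDataset(length=1000)
    loader = DataLoader(dataset, batch_size=32, num_workers=4, shuffle=True)
    for x, y in loader:
        print(x.shape, y.shape)
        break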
| [
"[email protected]"
] | |
04c7fcf52789924a483aa1830f9cda339da0de7b | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-2274.py | e14b1b28785336353e0de986324748db63bda4f2 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,006 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
    sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
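# Round-trip note: each iteration converts i to a string and back, so
# int_to_str and str_to_int must be inverses for the loop to step cleanly
# from -n*c to n*c in increments of c.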
| [
"[email protected]"
] | |
8adc6546de6c9c690bd02fbceb11bb4e0269169d | 1b47e0fb58651a224ca7269c5365c174f9e6197b | /django2/cve/venv/bin/static | ac9f5aae41fb626d4856b268b1d74de823547d47 | [] | no_license | ggrecco/python | a3f0f6f6f99e6aeb49cacabf11c2e986eada4479 | d5895a1b8067358e4336e6273b1b493010dab2ce | refs/heads/master | 2022-12-13T19:53:51.706179 | 2020-02-20T19:32:21 | 2020-02-20T19:32:21 | 107,701,916 | 0 | 0 | null | 2022-12-08T02:05:15 | 2017-10-20T16:35:16 | Python | UTF-8 | Python | false | false | 344 | #!/home/ggrecco/Documentos/python/django2/cve/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'static3==0.7.0','console_scripts','static'
__requires__ = 'static3==0.7.0'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('static3==0.7.0', 'console_scripts', 'static')()
)
| [
"[email protected]"
] | ||
d1ae8ef5423ff53c51eb1efd77f1100c2598de28 | 8d7262650584eb6d66b5874d2e4651963918ad1d | /sp/migrations/0004_auto_20200401_1328.py | 40a4fd3e76205868fb5342f378d749ed3612589d | [
"BSD-3-Clause"
] | permissive | imsweb/django-saml-sp | dee5757ac9807526849a7382550815c02057d1ae | d8c3bdb91e4a5988e282690d79a8069af77a0c7a | refs/heads/main | 2023-03-07T08:41:28.291208 | 2023-02-24T16:32:45 | 2023-02-24T16:32:45 | 220,322,548 | 14 | 11 | BSD-3-Clause | 2023-02-24T14:16:00 | 2019-11-07T20:23:22 | Python | UTF-8 | Python | false | false | 930 | py | # Generated by Django 3.0.4 on 2020-04-01 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sp", "0003_auto_20200331_1934"),
]
operations = [
migrations.AlterField(
model_name="idp",
name="authenticate_method",
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name="idp",
name="base_url",
field=models.CharField(
help_text=(
"Root URL for the site, including http/https, no trailing slash."
),
max_length=200,
verbose_name="Base URL",
),
),
migrations.AlterField(
model_name="idp",
name="login_method",
field=models.CharField(blank=True, max_length=200),
),
]
| [
"[email protected]"
] | |
b43c00902b34c537eae4a4a8cb803bbe7ca2daec | 102a1e4885a39c9af588ca4215bcf625f9dce312 | /deploy/deploy12.py | f9de7a832c6e399e18e51b127aa742e24617ae8f | [] | no_license | lingxiao/good-great-ngrams | 9c51136b5e7af5d522193d159e8e5e74596140a2 | ac189b638a34b410a2f7fe84406ad4baf111792c | refs/heads/master | 2021-01-11T21:15:29.648113 | 2017-02-21T01:42:54 | 2017-02-21T01:42:54 | 79,279,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | ############################################################
# Module : Applicaton Main
# Date : November 14th
# Author : Xiao Ling
############################################################
from app import *
from prelude import *
from utils import *
from server import *
from client import *
############################################################
# Initialize application
############################################################
# root = '/home1/l/lingxiao/xiao/good-great-ngrams/'
root = "/Users/lingxiao/Documents/research/code/good-great-ngrams"
data = os.path.join(root, 'ngrams/')
a12 = App(root
,data
,'outputs-2'
,'one-sided-patterns'
,'two-sided-patterns'
,'moh-graph/testset-12')
a12.refresh(2)
| [
"[email protected]"
] | |
58bd14d240242ed58dcff35fe91cebeae4899478 | 18136ff686211c8a1c1938c369c2bacd03f10133 | /leet/matrix/numberOfIslands.py | 01201be8ad186c6f9d51c14c688c155234eacb23 | [
"Apache-2.0"
] | permissive | KshitijSrivastava/python-cp-cheatsheet | e13ef9f7260ce18b77a1171c8c8d13e74d8292c5 | a5514b08816959de1198156f7764c54a7a585f20 | refs/heads/master | 2023-07-08T10:24:28.165854 | 2021-08-09T15:33:46 | 2021-08-09T15:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | """
time: X * Y
space: worst case X * Y
"""
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
Y = len(grid)
X = len(grid[0])
def dfs(y, x):
if y < 0 or x < 0 or y > Y-1 or x > X-1:
return
if grid[y][x] == "1":
grid[y][x] = "0"
dfs(y, x-1)
dfs(y, x+1)
dfs(y-1, x)
dfs(y+1, x)
ans = 0
for y in range(Y):
for x in range(X):
if grid[y][x] == "1":
dfs(y, x)
ans += 1
return ans
def numIslands(self, grid : List[List[str]]) -> int:
R = len(grid)
C = len(grid[0])
def dfs(r, c):
if r < 0 or c < 0 or r >= R or c >= C:
return
if grid[r][c] == '1':
grid[r][c] = '0'
dfs(r-1,c)
dfs(r+1,c)
dfs(r,c-1)
dfs(r,c+1)
rtn = 0
for r in range(R):
for c in range(C):
if grid[r][c] == '1':
rtn += 1
dfs(r,c)
return rtn
| [
"[email protected]"
] | |
53a4492d522dec93f874e933bc11a1b32ad07ede | 1b2f4a5528a366da4d586f14328c28f234bb3e86 | /docs/conf.py | ed44bcac90899e2aa6e564cf571b156b9411fd61 | [
"BSD-3-Clause"
] | permissive | molssi-seamm/loop_step | 51cdbb171131a51507f301d50daaa86dba1986c2 | aa44a410c1e613a2c262d374c4cadeacf1a83adb | refs/heads/main | 2023-08-17T02:38:17.194247 | 2023-04-24T18:59:28 | 2023-04-24T18:59:28 | 165,525,662 | 0 | 1 | BSD-3-Clause | 2023-04-24T18:51:20 | 2019-01-13T16:07:18 | Python | UTF-8 | Python | false | false | 9,510 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# loop_step documentation build configuration file,
# created by the SEAMM step cookiecutter.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import loop_step # noqa: E402
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_design',
'sphinx_copybutton',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SEAMM Loop Plug-in'
copyright = u"2019-2023, Molecular Sciences Software Institute (MolSSI)"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = loop_step.__version__
# The full version, including alpha/beta/rc tags.
release = loop_step.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
html_theme_options = {
"github_url": "https://github.com/molssi-seamm/loop_step",
"twitter_url": "https://twitter.com/MolSSI_NSF",
"logo": {
"image_light": "SEAMM logo.png",
"image_dark": "SEAMM Inverted 288x181.png",
"text": "Loop Step",
"molssi_light": "molssi_main_logo.png",
"molssi_dark": "molssi_main_logo_inverted_white.png",
},
"show_toc_level": 2,
"header_links_before_dropdown": 4,
"external_links": [
{"name": "SEAMM Documentation", "url": "https://molssi-seamm.github.io"},
{"name": "MolSSI", "url": "https://molssi.org"}
],
"secondary_sidebar_items": ["page-toc", "sourcelink"],
"footer_items": [ "molssi_footer" ],
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
'css/custom.css',
]
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'loop_stepdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'loop_step.tex',
u'Loop Step Documentation',
u'Paul Saxe', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'loop_step',
u'Loop Step Documentation',
[u'Paul Saxe'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'loop_step',
u'Loop Step Documentation',
u'Paul Saxe',
'loop_step',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
b83707b7b3c7b8d985fb452b2237fc9ca45e35ce | feef30d93f43b9f5e08c050f0c97237b4fb4ba9c | /Login/UserRegistration/admin.py | 5fa7543a9d186355023c55b6b4fdb2d7ee13fb95 | [] | no_license | cmrajib/django_resale_market_place | 3926d051a7a47b6a77fe0a06a570bd8d4fddc98c | a8b6557fb50c34ce91a96f2e795837851b3aaf2c | refs/heads/main | 2023-02-11T22:54:41.022680 | 2021-01-10T02:28:08 | 2021-01-10T02:28:08 | 328,287,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from django.contrib import admin
from django.utils.html import format_html
# Register your models here.
from UserRegistration.models import User, Profile
class CarAdmin(admin.ModelAdmin):
def thumbnail(self, object):
return format_html('<img src="{}" width="40" style="border-radius: 50px;"/>'.format(object.image.url))
thumbnail.short_description = 'Photo'
list_display = ('id', 'thumbnail','full_name', 'phone','city', 'zipcode', 'country')
list_display_links = ('id','thumbnail', 'full_name')
list_filter = ('full_name','city')
# list_editable = ('is_featured',)
search_fields =('full_name', 'city','phone')
list_per_page = 10
admin.site.register(User)
admin.site.register(Profile, CarAdmin)
| [
"[email protected]"
] | |
1276f3aadbfeca2b05571f2c36083d8ce4f88638 | b0f1acbe5cd30c2ade801465924c12403ab7e585 | /Corda_Api_Library/test/test_net_corda_core_context_auth_service_id.py | 62b0ee61a3b07529252bc9c118598e42cffe31e6 | [] | no_license | TanzimAzadNishan/Blockchain-Based-Online-Ticketing-Platform | 94ea0f06a7761f9033f7a1dc61548ade6f6ff499 | d04a2696cab4c41743c7c5999c623002d0e57f80 | refs/heads/main | 2023-03-09T14:34:27.148340 | 2021-02-24T11:49:26 | 2021-02-24T11:49:26 | 338,845,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | """
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.net_corda_core_context_auth_service_id import NetCordaCoreContextAuthServiceId
class TestNetCordaCoreContextAuthServiceId(unittest.TestCase):
"""NetCordaCoreContextAuthServiceId unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNetCordaCoreContextAuthServiceId(self):
"""Test NetCordaCoreContextAuthServiceId"""
# FIXME: construct object with mandatory attributes with example values
# model = NetCordaCoreContextAuthServiceId() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1ba5eb3665cadc23d5d6eb759ee9015d804c800d | b35cf6b82290ef5f3f6d2e3d12d3321aeaba099f | /backend/home/migrations/0002_load_initial_data.py | 80b25ac73b80bc50b0c43a86ef6d30f4aee74408 | [] | no_license | crowdbotics-apps/check-plannss-dev-1905 | 26339d7cb82bcc623a7006a2a25b8159dd557e3c | cb1355e7a58f58671dd71c93d47e9858be60d041 | refs/heads/master | 2022-04-08T01:23:31.526664 | 2020-03-10T16:26:04 | 2020-03-10T16:26:04 | 246,351,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "check plannss"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">check plannss</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "check-plannss-dev-1905.botics.co"
site_params = {
"name": "check plannss",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
70c7c0082ae0fd69750b6dffa4ecbc4c018fc7b9 | 85de8e224f9bf8202b25a0aee939f4949f69471c | /day_9/Rotting_Oranges.py | 2f48c21c4b8f2aee0e2c08bcc52ae57ad88e79fd | [] | no_license | ahmedmeshref/August-LeetCoding-Challenge | 78d17ad3bbcaa240ebe53841832ff1f835cae729 | 68b69d30e9d0a32cf6cd1a5f8c05cb2eb572946b | refs/heads/master | 2022-12-05T08:19:55.659127 | 2020-09-01T02:06:56 | 2020-09-01T02:06:56 | 284,369,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | from collections import deque
class Solution:
def orangesRotting(self, grid) -> int:
rotten_q = deque()
self.tot_fresh = 0
def rotten(i, j):
grid[i][j] = 2
self.tot_fresh -= 1
self.rotten = True
return [i, j]
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 2:
rotten_q.append([i, j])
elif grid[i][j] == 1:
self.tot_fresh += 1
min_mins = 0
while rotten_q:
ln = len(rotten_q)
self.rotten = False
for _ in range(ln):
i, j = rotten_q.popleft()
if i - 1 >= 0 and grid[i - 1][j] == 1:
rotten_q.append(rotten(i - 1, j))
if i + 1 < len(grid) and grid[i + 1][j] == 1:
rotten_q.append(rotten(i + 1, j))
if j - 1 >= 0 and grid[i][j - 1] == 1:
rotten_q.append(rotten(i, j - 1))
if j + 1 < len(grid[0]) and grid[i][j + 1] == 1:
rotten_q.append(rotten(i, j + 1))
if self.rotten:
min_mins += 1
return min_mins if not self.tot_fresh else -1 | [
"[email protected]"
] | |
adee69a1c6986002dedb3deed09e40a4833e5728 | bd17e9fc0e5978cb664037bffdcf618a893e0523 | /python/models/rnnseq2seq.py | d3b4a2b94c3abe396f4232f79e71aef412e17af1 | [] | no_license | kedz/ntg | 598513fb2c6e910ad11f40f031675a587eb7ec79 | 34f13b23a6850eb0c8a727a51e7aa49fd6aec098 | refs/heads/master | 2020-12-07T15:29:10.305416 | 2017-11-07T03:07:52 | 2017-11-07T03:07:52 | 95,521,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,552 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
from models.sequence_predictor import SequencePredictor
from encoder import RNNEncoder
from decoder import RNNDecoder
import bridge as be
from parallel_module import ParallelModule
class RNNSeq2Seq(SequencePredictor):
@classmethod
def from_args(cls, args, encoder_input_modules, decoder_input_modules,
dropout=None, rnn_type=None, target_vocab_size=None,
attention_type=None, bidirectional=None, learn_init=None,
bridge_type=None):
if learn_init is None:
learn_init = bool(args.learn_init)
if bridge_type is None:
bridge_type = args.bridge_type
encoder = RNNEncoder.from_args(
args, encoder_input_size=encoder_input_modules.embedding_size,
dropout=dropout, rnn_type=rnn_type, bidirectional=bidirectional)
if args.rnn_type == "lstm":
bridge1 = be.from_args(
args, bridge_type=bridge_type,
bidirectional=bidirectional)
bridge2 = be.from_args(
args, bridge_type=bridge_type,
bidirectional=bidirectional)
bridge = ParallelModule([bridge1, bridge2])
else:
bridge = be.from_args(
args, bridge_type=bridge_type,
bidirectional=bidirectional)
decoder = RNNDecoder.from_args(
args, decoder_input_size=decoder_input_modules.embedding_size,
dropout=dropout, rnn_type=rnn_type,
target_vocab_size=target_vocab_size,
attention_type=attention_type)
return cls(encoder_input_modules, decoder_input_modules,
encoder, bridge, decoder, learn_init=learn_init)
def __init__(self, encoder_input_modules, decoder_input_modules,
encoder, bridge, decoder, learn_init=False):
super(RNNSeq2Seq, self).__init__(decoder_input_modules, decoder)
self.encoder_input_modules_ = encoder_input_modules
self.encoder_ = encoder
self.bridge_ = bridge
if learn_init:
init_state = [nn.Parameter(torch.FloatTensor(*dims).fill_(0))
for dims in encoder.rnn_state_dims]
self.init_state = nn.ParameterList(init_state)
else:
self.init_state = nn.ParameterList()
@property
def encoder_input_modules(self):
return self.encoder_input_modules_
@property
def encoder(self):
return self.encoder_
@property
def bridge(self):
return self.bridge_
# TODO make this a mixin, also for rnnlm model
def get_init_state(self, batch_size):
if len(self.init_state) == 0:
return None
elif len(self.init_state) == 1:
state = self.init_state[0].repeat(1, batch_size, 1)
return state
else:
return tuple([state.repeat(1, batch_size, 1)
for state in self.init_state])
def forward(self, batch, mask_output=True):
# todo add a size parameter to batch
batch_size = batch.encoder_inputs[0].size(0)
encoder_max_steps = batch.encoder_inputs[0].size(1)
decoder_max_steps = batch.decoder_inputs[0].size(1)
init_state = self.get_init_state(batch_size)
encoder_inputs = self.encoder_input_modules.forward_sequence(
batch.encoder_inputs, encoder_max_steps)
context, encoder_state = self.encoder(
encoder_inputs, batch.encoder_length, prev_state=init_state)
decoder_state = self.bridge(encoder_state)
logits, _ = super(RNNSeq2Seq, self).forward(
batch.decoder_inputs + batch.decoder_features,
decoder_max_steps,
prev_state=decoder_state,
context=context)
if mask_output:
mask = batch.decoder_inputs[0].data.t().eq(0)
mask3d = mask.view(decoder_max_steps, batch_size, 1).expand(
decoder_max_steps, batch_size, logits.data.size(2))
logits.data.masked_fill_(mask3d, 0)
return logits
def init_beam_rnn_state(self, state, beam_size):
if isinstance(state, type(None)):
return None
elif isinstance(state, (tuple, list)):
return (self.init_beam_rnn_state(state_i, beam_size)
for state_i in state)
else:
num_layers = state.size(0)
batch_size = state.size(1)
hidden_size = state.size(2)
return state.contiguous().repeat(1, 1, beam_size).view(
num_layers, batch_size * beam_size, hidden_size)
def init_beam_context(self, context, beam_size):
steps = context.size(0)
batch_size = context.size(1)
hidden_size = context.size(2)
beam_context = context.repeat(1, 1, beam_size).view(
steps, batch_size * beam_size, hidden_size)
return beam_context
def update_beam_state(self, state, source):
if isinstance(state, type(None)):
return None
elif isinstance(state, (tuple, list)):
return (self.update_beam_state(state_i, source)
for state_i in state)
else:
next_state = state.data.new(state.size())
i = 0
for batch in range(source.size(0)):
for beam in range(source.size(1)):
loc = batch * source.size(1) + source[batch, beam]
next_state[:,i,:].copy_(state.data[:, loc])
i += 1
return Variable(next_state)
def complete_sequence(self, encoder_inputs, encoder_length,
decoder_inputs, decoder_features,
max_steps=100, beam_size=8):
batch_size = encoder_inputs[0].size(0)
encoder_max_steps = encoder_inputs[0].size(1)
prefix_size = decoder_inputs[0].size(1) - 1
init_state = self.get_init_state(batch_size)
encoder_inputs = self.encoder_input_modules.forward_sequence(
encoder_inputs, encoder_max_steps)
context, encoder_state = self.encoder(
encoder_inputs, encoder_length, prev_state=init_state)
decoder_state = self.bridge(encoder_state)
if prefix_size > 0:
prefix_inputs = []
prev_outputs = []
for input in decoder_inputs + decoder_features:
if input.dim() == 1:
prefix_inputs.append(input)
prev_outputs.append(input)
elif input.dim() == 2:
prefix_inputs.append(input[:,:-1])
prev_outputs.append(input[:,-1])
else:
raise Exception(
"I don't know what to do with " \
"input with dims = {}".format(input.dim()))
prefix_logits, decoder_state = super(RNNSeq2Seq, self).forward(
prefix_inputs, prefix_inputs[0].size(1),
prev_state=decoder_state,
context=context)
else:
prev_outputs = []
for input in decoder_inputs + decoder_features:
if input.dim() == 1:
prev_outputs.append(input)
elif input.dim() == 2:
prev_outputs.append(input[:,-1])
else:
raise Exception(
"I don't know what to do with " \
"input with dims = {}".format(input.dim()))
# This getter is a BAD idea.
Warning("Fix stop getter you dumb dumb.")
stop_index = self.get_meta("target_reader").vocab.index("_DSTOP_")
beam_prev_outputs = self.init_beam_outputs(prev_outputs, beam_size)
beam_rnn_state = self.init_beam_rnn_state(decoder_state, beam_size)
beam_context = self.init_beam_context(context, beam_size)
beam_scores = decoder_inputs[0].data.new(
batch_size, beam_size).float()
beam_scores.fill_(float("-inf"))
for batch in range(batch_size):
beam_scores[batch, 0] = 0
return self.beam_search(
beam_prev_outputs, prev_state=beam_rnn_state, scores=beam_scores,
max_steps=max_steps, beam_size=beam_size, batch_size=batch_size,
stop_index=stop_index, context=beam_context)
| [
"[email protected]"
] | |
d7d8d16a859496673b797120662e9289bd77dcbd | cb13037cdbd3e0ab6108670108e9497cc1e2a5a7 | /0.leetcode/3.刷题/1.数据结构系列/1.线性结构/3.栈/1.单调栈/2.最小栈/456.mid_132模式.py | d52e63ba018f3ae19666a84483eb475d55592f48 | [] | no_license | GMwang550146647/network | 390fe0d1c72dcaca8b6d6dd1307adca0d56b55ce | 576de9b993f7763789d25a995702b40c9bc6fa57 | refs/heads/master | 2023-06-15T04:42:54.306077 | 2021-07-12T06:06:02 | 2021-07-12T06:06:02 | 315,488,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from fundamentals.test_time import test_time
from math import ceil
class Solution():
def __init__(self):
pass
# @test_time
# def find132pattern_ans(self, nums):
# if len(nums) < 3:
# return False
#
# pre_mins = [nums[0]]
# for i in range(1, len(nums)):
# pre_mins.append(min(pre_mins[-1], nums[i]))
# stack_k = []
# for j in range(len(nums) - 1, -1, -1):
# if nums[j] > pre_mins[j]:
# while stack_k and pre_mins[j] >= stack_k[-1]:
# stack_k.pop()
#
# if stack_k and stack_k[-1] < nums[j]:
# return True
#
# stack_k.append(nums[j])
#
# return False
@test_time
def find132pattern(self, nums):
if len(nums) < 3:
return False
pre_mins = [nums[0]]
for i in range(1, len(nums)):
pre_mins.append(min(pre_mins[-1], nums[i]))
stack = []
for i in range(len(nums) - 1, -1, -1):
if nums[i] > pre_mins[i]:
while stack and pre_mins[i] >= stack[-1]:
stack.pop(-1)
if stack and stack[-1] < nums[i]:
return True
stack.append(nums[i])
return False
def main(self):
nums = [3, 5, 0, 3, 4]
nums = [1, 2, 3, 4, 5]
nums = [-1, 3, 2, 0]
# nums = [1, 0, 1, -4, -3]
nums = [-2, 1, 2, -2, 1, 2]
# nums = [3,1,4,2]
# nums = [-1,3,2,0]
print(self.find132pattern(nums))
print(self.find132pattern_ans(nums))
if __name__ == '__main__':
SL = Solution()
SL.main()
| [
"[email protected]"
] | |
ecc18e3af497e0ca2abb1553abd591c27b215a95 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from start to practice/Chapters/Chatper11_20200408_test_class/employee.py | f1860aa47e1c95ce3745cf45e926609001f81631 | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | class Employee():
'''存储雇员的信息'''
def __init__(self, first, last, wage):
'''
定义雇员的基本信息,包括姓,名,年薪
'''
self.first = first
self.last = last
self.wage = wage
def give_raise(self, number=5000):
'''给员工增加年薪,默认增加5000,也可以增加其他值'''
self.wage = self.wage + number
| [
"[email protected]"
] | |
51e9cb3c0299e3467570be9639804c4196211c3b | 740af83394e19b1e68c08dc0fdbec8c44ac6216e | /jesse/indicators/atr.py | 8116f5d70f6fa5ed96f1da2f0f44026824644b07 | [
"MIT"
] | permissive | dinet/jesse | 9a6b60c48b4f51a09e7092bf66df446763dd7b21 | e6f59fba3f56b01b12bf6c1adf9829afe8dcd2df | refs/heads/master | 2022-06-22T21:06:42.348993 | 2020-05-09T19:45:35 | 2020-05-09T19:45:35 | 262,654,825 | 1 | 0 | MIT | 2020-05-09T20:38:11 | 2020-05-09T20:38:10 | null | UTF-8 | Python | false | false | 604 | py | import numpy as np
import talib
from typing import Union
def atr(candles: np.ndarray, period=14, sequential=False) -> Union[float, np.ndarray]:
"""
ATR - Average True Range
:param candles: np.ndarray
:param period: int - default=14
:param sequential: bool - default=False
:return: float | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
res = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=period)
if sequential:
return res
else:
return None if np.isnan(res[-1]) else res[-1]
| [
"[email protected]"
] | |
2999798603d8b2c64207d963cda92df262135cb4 | 3fad7381b03607e908dc06a7f91ae60f10e5be16 | /01_tests/05_andrei_repository/2017.07.01_SgdComparison/sgd_src/solvers.py | 0c38dacb4fe3477439eb6608953a24c820401493 | [] | no_license | Cloudifier/CLOUDIFIER_WORK | ea5efe0f8e75315313db5ee145f4cc8092b542fa | e8ce18fad97b1207545e933ed0947347ed09c536 | refs/heads/master | 2021-12-23T16:41:03.149554 | 2021-12-13T13:16:51 | 2021-12-13T13:16:51 | 108,911,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,381 | py | from abc import ABC, abstractmethod
import numpy as np
import time
class SgdSolver(ABC):
def __init__(self, logger, epsilon):
self.logger = logger
self.epsilon = epsilon
self.model = None
self.cost_history = []
self.epochs_to_convergence = 0
np.set_printoptions(precision = 6)
def sigmoid(self, z):
return 1.0 / (1 + np.exp(-z))
def save_results(self, theta, cost_history, epochs):
self.model = theta
self.cost_history = cost_history
self.epochs_to_convergence = epochs
def print_statistics(self, epoch, current_cost, diff, gradient, theta):
self.logger.log("cost_eph#{:>2} = {:.4f}; abs diff between current and last eph = {:.4f}"
.format(epoch, current_cost, abs(diff)), tabs = 1, verbosity_level = 2)
self.logger.log("eph#{:>2}, gradient[380:385] = {}"
.format(epoch, gradient[380:385]), tabs = 1, verbosity_level = 1)
def logreg_cross_entropy_cost(self, X, y, theta, beta):
m = y.size
h = self.sigmoid(np.dot(X, theta))
return (1 / m) * (-y.T.dot(np.log(h)) - (1 - y).T.dot(np.log(1 - h))) + \
(beta / (2 * m) * np.sum(theta[1:] ** 2))
def logreg_mse_cost(self, X, y, theta, beta):
m = y.size
h = self.sigmoid(np.dot(X, theta))
return (1 / (2 * m)) * np.sum((h-y)**2) + (beta / (2 * m)) * (np.sum(theta[1:]) ** 2)
def linreg_mse_cost(self, X, y, theta, beta):
m = y.size
h = np.dot(X, theta)
return 1 / (2 * m) * np.sum( (h - y) ** 2 ) + (beta / (2 * m) * np.sum(theta[1:] ** 2))
@abstractmethod
def train(self, train_X, train_y, validation_X, validation_y, initial_theta,
epochs = 0, alpha = 0, batch_size = 0, beta = 0, speed = 0, n_boost = 0,
is_linear_reg = False, cost_function='MSE'):
pass
class SimpleSgdSolver(SgdSolver):
def __init__(self, logger, epsilon):
super().__init__(logger, epsilon)
self.logger.log("Initialize simple gradient descendent logisitic regression solver")
def train(self, train_X, train_y, validation_X, validation_y, initial_theta,
epochs = 0, alpha = 0, batch_size = 0, beta = 0, speed = 0, n_boost = 0,
is_linear_reg = False, cost_function='MSE'):
start = time.time()
reg_str = "simple without reg"
if beta != 0:
reg_str = "simple with reg"
self.logger.log("Start {} training: alpha={}, batchSz={}, beta={}"
.format(reg_str, alpha, batch_size, beta), verbosity_level = 2)
theta = np.array(initial_theta)
cost_history = []
gradient = None
last_theta = None
for epoch in range(epochs):
for i in np.arange(0, train_X.shape[0], batch_size):
current_X = train_X[i : i + batch_size, :]
current_y = train_y[i : i + batch_size]
yhat = np.dot(current_X, theta)
if not is_linear_reg:
predicted = self.sigmoid(yhat)
else:
predicted = yhat
residual = predicted - current_y
if (is_linear_reg) or ((not is_linear_reg) and (cost_function is 'CrossEntropy')):
gradient = current_X.T.dot(residual) / current_X.shape[0]
gradient[1:] += beta * theta[1:] / current_X.shape[0]
else:
factor = np.exp(yhat) / ((np.exp(yhat) + 1) ** 2)
gradient = (current_X.T * factor).dot(residual) / current_X.shape[0]
gradient[1:] += beta * theta[1:] / current_X.shape[0]
#gradient = current_X.T.dot(residual) / current_X.shape[0]
#gradient[1:] += beta * theta[1:] / current_X.shape[0]
theta -= alpha * gradient
if not is_linear_reg:
if cost_function is 'CrossEntropy':
cost_history.append(self.logreg_cross_entropy_cost(train_X, train_y, theta, beta))
elif cost_function is 'MSE':
cost_history.append(self.logreg_mse_cost(train_X, train_y, theta, beta))
else:
cost_history.append(self.linreg_mse_cost(train_X, train_y, theta, beta))
last_theta = theta
if epoch == 0:
continue
diff = cost_history[-2] - cost_history[-1]
if diff < 0:
alpha /= 2
theta = last_theta
self.logger.log("eph#{}, cost increased by {:.4f} ==> decrease alpha to {:.4f}"
.format(epoch, abs(diff), alpha), tabs = 1, verbosity_level = 1)
else:
alpha += alpha * 0.05
self.logger.log("eph#{}, cost decreased by {:.4f} ==> increasing alpha to {:.4f}"
.format(epoch, abs(diff), alpha), tabs = 1, verbosity_level = 1)
if abs(diff) < self.epsilon:
self.logger.log("eph#{}, delta(cost) < epsilon ==> early stopping"
.format(epoch, abs(diff)), tabs = 1, verbosity_level = 2)
self.save_results(theta, cost_history, epoch + 1)
break
if epoch % 3 == 0:
self.print_statistics(epoch, cost_history[-1], diff, gradient, theta)
self.save_results(theta, cost_history, epoch + 1)
stop = time.time()
self.logger.log("Time for {} training = {:.3f}s".format(reg_str, stop - start))
return stop - start
class MomentunSgdSolver(SgdSolver):
def __init__(self, logger, epsilon):
super().__init__(logger, epsilon)
self.logger.log("Initialize momentun gradient descendent logisitic regression solver")
def train(self, train_X, train_y, validation_X, validation_y, initial_theta,
epochs = 0, alpha = 0, batch_size = 0, beta = 0, speed = 0, n_boost = 0,
is_linear_reg = False, cost_function='MSE'):
start = time.time()
reg_str = "momentum without reg"
if beta != 0:
reg_str = "momentum with reg"
self.logger.log("Start {} training: alpha={}, batchSz={}, beta={}, momentum={}"
.format(reg_str, alpha, batch_size, beta, speed), verbosity_level = 2)
theta = np.array(initial_theta)
cost_history = []
gradient = None
last_momentum = None
last_theta = None
for epoch in range(epochs):
for i in np.arange(0, train_X.shape[0], batch_size):
current_X = train_X[i : i + batch_size, :]
current_y = train_y[i : i + batch_size]
yhat = np.dot(current_X, theta)
if not is_linear_reg:
predicted = self.sigmoid(yhat)
else:
predicted = yhat
residual = predicted - current_y
if (is_linear_reg) or ((not is_linear_reg) and (cost_function is 'CrossEntropy')):
gradient = current_X.T.dot(residual) / current_X.shape[0]
gradient[1:] += beta * theta[1:] / current_X.shape[0]
else:
factor = np.exp(yhat) / ((np.exp(yhat) + 1) ** 2)
gradient = (current_X.T * factor).dot(residual) / current_X.shape[0]
gradient[1:] += beta * theta[1:] / current_X.shape[0]
if last_momentum is not None:
momentun = speed * last_momentum + alpha * gradient
else:
momentun = alpha * gradient
theta -= momentun
last_momentum = momentun
if not is_linear_reg:
if cost_function is 'CrossEntropy':
cost_history.append(self.logreg_cross_entropy_cost(train_X, train_y, theta, beta))
elif cost_function is 'MSE':
cost_history.append(self.logreg_mse_cost(train_X, train_y, theta, beta))
else:
cost_history.append(self.linreg_mse_cost(train_X, train_y, theta, beta))
last_theta = theta
if epoch == 0:
continue
diff = cost_history[-2] - cost_history[-1]
if diff < 0:
#alpha /= 2
theta = last_theta
self.logger.log("eph#{}, cost increased by {:.4f} ==> decrease alpha to {:.4f}"
.format(epoch, abs(diff), alpha), tabs = 1, verbosity_level = 1)
else:
#alpha += alpha * 0.05
self.logger.log("eph#{}, cost decreased by {:.4f} ==> increasing alpha to {:.4f}"
.format(epoch, abs(diff), alpha), tabs = 1, verbosity_level = 1)
if abs(diff) < self.epsilon:
self.logger.log("eph#{}, delta(cost) < epsilon ==> early stopping"
.format(epoch), tabs = 1, verbosity_level = 2)
self.save_results(theta, cost_history, epoch + 1)
break
if epoch % 3 == 0:
self.print_statistics(epoch, cost_history[-1], diff, gradient, theta)
self.save_results(theta, cost_history, epoch + 1)
stop = time.time()
self.logger.log("Time for {} training = {:.3f}s".format(reg_str, stop - start))
return stop - start
class BoostingSgdSolver(SgdSolver):
def __init__(self, logger, epsilon, solver):
super().__init__(logger, epsilon)
self.solver = solver
def train(self, train_X, train_y, validation_X, validation_y, initial_theta,
epochs = 0, alpha = 0, batch_size = 0, beta = 0, speed = 0, n_boost = 0,
is_linear_reg = False):
start = time.time()
reg_str = "boosting without reg"
if beta != 0:
reg_str = "boosting with reg"
self.logger.log("Start {} training: alpha={}, batchSz={}, beta={}, n_boost={}"
.format(reg_str, alpha, batch_size, beta, n_boost), verbosity_level = 2)
theta = np.array(initial_theta)
current_theta = np.array(initial_theta)
current_y = train_y
self.cost_history = []
self.epochs_to_convergence = 0
gradient = None
for it in range(n_boost):
self.logger.log("Boost #{}".format(it+1), verbosity_level = 2)
self.solver.train(train_X, current_y, validation_X, validation_y, initial_theta, epochs, alpha, batch_size, beta, speed, 0, True)
current_theta = self.solver.model
predicted = np.dot(train_X, current_theta)
residual = predicted - current_y
residual = np.array([i if abs(i) > 5 else 0 for i in residual])
print("Predicted {}".format(predicted[:5]))
print("Residual {}".format(residual[:5]))
self.cost_history.append(self.solver.cost_history)
self.epochs_to_convergence += self.solver.epochs_to_convergence
current_y = residual
if it == 0:
theta = current_theta
#self.cost_history = self.solver.cost_history
#self.epochs_to_convergence = self.solver.epochs_to_convergence
else:
theta -= current_theta
self.model = theta
stop = time.time()
self.logger.log("Time for {} training = {:.3f}s".format(reg_str, stop - start))
self.cost_history = np.hstack(self.cost_history).tolist()
return stop - start
if __name__=='__main__':
print("Library module. No main function")
| [
"[email protected]"
] | |
8b63183a6ac2ad40af1c8b51e501977274c5f4b4 | d110546d747d7e3865ce5742d5fca09f404623c0 | /tests/pytests/unit/test_crypt.py | e3c98ab63666cc6a76c83babef91ec87ead7bdfa | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | saltstack/salt | 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d | 1ef90cbdc7203f97775edb7666db86a41eb9fc15 | refs/heads/master | 2023-07-19T20:56:20.210556 | 2023-06-29T23:12:28 | 2023-07-19T11:47:47 | 1,390,248 | 11,026 | 6,296 | Apache-2.0 | 2023-09-14T20:45:37 | 2011-02-20T20:16:56 | Python | UTF-8 | Python | false | false | 7,298 | py | """
tests.pytests.unit.test_crypt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for salt's crypt module
"""
import uuid
import pytest
import salt.crypt
import salt.master
import salt.utils.files
PRIV_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAoAsMPt+4kuIG6vKyw9r3+OuZrVBee/2vDdVetW+Js5dTlgrJ
aghWWn3doGmKlEjqh7E4UTa+t2Jd6w8RSLnyHNJ/HpVhMG0M07MF6FMfILtDrrt8
ZX7eDVt8sx5gCEpYI+XG8Y07Ga9i3Hiczt+fu6HYwu96HggmG2pqkOrn3iGfqBvV
YVFJzSZYe7e4c1PeEs0xYcrA4k+apyGsMtpef8vRUrNicRLc7dAcvfhtgt2DXEZ2
d72t/CR4ygtUvPXzisaTPW0G7OWAheCloqvTIIPQIjR8htFxGTz02STVXfnhnJ0Z
k8KhqKF2v1SQvIYxsZU7jaDgl5i3zpeh58cYOwIDAQABAoIBABZUJEO7Y91+UnfC
H6XKrZEZkcnH7j6/UIaOD9YhdyVKxhsnax1zh1S9vceNIgv5NltzIsfV6vrb6v2K
Dx/F7Z0O0zR5o+MlO8ZncjoNKskex10gBEWG00Uqz/WPlddiQ/TSMJTv3uCBAzp+
S2Zjdb4wYPUlgzSgb2ygxrhsRahMcSMG9PoX6klxMXFKMD1JxiY8QfAHahPzQXy9
F7COZ0fCVo6BE+MqNuQ8tZeIxu8mOULQCCkLFwXmkz1FpfK/kNRmhIyhxwvCS+z4
JuErW3uXfE64RLERiLp1bSxlDdpvRO2R41HAoNELTsKXJOEt4JANRHm/CeyA5wsh
NpscufUCgYEAxhgPfcMDy2v3nL6KtkgYjdcOyRvsAF50QRbEa8ldO+87IoMDD/Oe
osFERJ5hhyyEO78QnaLVegnykiw5DWEF02RKMhD/4XU+1UYVhY0wJjKQIBadsufB
2dnaKjvwzUhPh5BrBqNHl/FXwNCRDiYqXa79eWCPC9OFbZcUWWq70s8CgYEAztOI
61zRfmXJ7f70GgYbHg+GA7IrsAcsGRITsFR82Ho0lqdFFCxz7oK8QfL6bwMCGKyk
nzk+twh6hhj5UNp18KN8wktlo02zTgzgemHwaLa2cd6xKgmAyuPiTgcgnzt5LVNG
FOjIWkLwSlpkDTl7ZzY2QSy7t+mq5d750fpIrtUCgYBWXZUbcpPL88WgDB7z/Bjg
dlvW6JqLSqMK4b8/cyp4AARbNp12LfQC55o5BIhm48y/M70tzRmfvIiKnEc/gwaE
NJx4mZrGFFURrR2i/Xx5mt/lbZbRsmN89JM+iKWjCpzJ8PgIi9Wh9DIbOZOUhKVB
9RJEAgo70LvCnPTdS0CaVwKBgDJW3BllAvw/rBFIH4OB/vGnF5gosmdqp3oGo1Ik
jipmPAx6895AH4tquIVYrUl9svHsezjhxvjnkGK5C115foEuWXw0u60uiTiy+6Pt
2IS0C93VNMulenpnUrppE7CN2iWFAiaura0CY9fE/lsVpYpucHAWgi32Kok+ZxGL
WEttAoGAN9Ehsz4LeQxEj3x8wVeEMHF6OsznpwYsI2oVh6VxpS4AjgKYqeLVcnNi
TlZFsuQcqgod8OgzA91tdB+Rp86NygmWD5WzeKXpCOg9uA+y/YL+0sgZZHsuvbK6
PllUgXdYxqClk/hdBFB7v9AQoaj7K9Ga22v32msftYDQRJ94xOI=
-----END RSA PRIVATE KEY-----
"""
PUB_KEY = """
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoAsMPt+4kuIG6vKyw9r3
+OuZrVBee/2vDdVetW+Js5dTlgrJaghWWn3doGmKlEjqh7E4UTa+t2Jd6w8RSLny
HNJ/HpVhMG0M07MF6FMfILtDrrt8ZX7eDVt8sx5gCEpYI+XG8Y07Ga9i3Hiczt+f
u6HYwu96HggmG2pqkOrn3iGfqBvVYVFJzSZYe7e4c1PeEs0xYcrA4k+apyGsMtpe
f8vRUrNicRLc7dAcvfhtgt2DXEZ2d72t/CR4ygtUvPXzisaTPW0G7OWAheCloqvT
IIPQIjR8htFxGTz02STVXfnhnJ0Zk8KhqKF2v1SQvIYxsZU7jaDgl5i3zpeh58cY
OwIDAQAB
-----END PUBLIC KEY-----
"""
PRIV_KEY2 = """
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAp+8cTxguO6Vg+YO92VfHgNld3Zy8aM3JbZvpJcjTnis+YFJ7
Zlkcc647yPRRwY9nYBNywahnt5kIeuT1rTvTsMBZWvmUoEVUj1Xg8XXQkBvb9Ozy
Gqy/G/p8KDDpzMP/U+XCnUeHiXTZrgnqgBIc2cKeCVvWFqDi0GRFGzyaXLaX3PPm
M7DJ0MIPL1qgmcDq6+7Ze0gJ9SrDYFAeLmbuT1OqDfufXWQl/82JXeiwU2cOpqWq
7n5fvPOWim7l1tzQ+dSiMRRm0xa6uNexCJww3oJSwvMbAmgzvOhqqhlqv+K7u0u7
FrFFojESsL36Gq4GBrISnvu2tk7u4GGNTYYQbQIDAQABAoIBAADrqWDQnd5DVZEA
lR+WINiWuHJAy/KaIC7K4kAMBgbxrz2ZbiY9Ok/zBk5fcnxIZDVtXd1sZicmPlro
GuWodIxdPZAnWpZ3UtOXUayZK/vCP1YsH1agmEqXuKsCu6Fc+K8VzReOHxLUkmXn
FYM+tixGahXcjEOi/aNNTWitEB6OemRM1UeLJFzRcfyXiqzHpHCIZwBpTUAsmzcG
QiVDkMTKubwo/m+PVXburX2CGibUydctgbrYIc7EJvyx/cpRiPZXo1PhHQWdu4Y1
SOaC66WLsP/wqvtHo58JQ6EN/gjSsbAgGGVkZ1xMo66nR+pLpR27coS7o03xCks6
DY/0mukCgYEAuLIGgBnqoh7YsOBLd/Bc1UTfDMxJhNseo+hZemtkSXz2Jn51322F
Zw/FVN4ArXgluH+XsOhvG/MFFpojwZSrb0Qq5b1MRdo9qycq8lGqNtlN1WHqosDQ
zW29kpL0tlRrSDpww3wRESsN9rH5XIrJ1b3ZXuO7asR+KBVQMy/+NcUCgYEA6MSC
c+fywltKPgmPl5j0DPoDe5SXE/6JQy7w/vVGrGfWGf/zEJmhzS2R+CcfTTEqaT0T
Yw8+XbFgKAqsxwtE9MUXLTVLI3sSUyE4g7blCYscOqhZ8ItCUKDXWkSpt++rG0Um
1+cEJP/0oCazG6MWqvBC4NpQ1nzh46QpjWqMwokCgYAKDLXJ1p8rvx3vUeUJW6zR
dfPlEGCXuAyMwqHLxXgpf4EtSwhC5gSyPOtx2LqUtcrnpRmt6JfTH4ARYMW9TMef
QEhNQ+WYj213mKP/l235mg1gJPnNbUxvQR9lkFV8bk+AGJ32JRQQqRUTbU+yN2MQ
HEptnVqfTp3GtJIultfwOQKBgG+RyYmu8wBP650izg33BXu21raEeYne5oIqXN+I
R5DZ0JjzwtkBGroTDrVoYyuH1nFNEh7YLqeQHqvyufBKKYo9cid8NQDTu+vWr5UK
tGvHnwdKrJmM1oN5JOAiq0r7+QMAOWchVy449VNSWWV03aeftB685iR5BXkstbIQ
EVopAoGAfcGBTAhmceK/4Q83H/FXBWy0PAa1kZGg/q8+Z0KY76AqyxOVl0/CU/rB
3tO3sKhaMTHPME/MiQjQQGoaK1JgPY6JHYvly2KomrJ8QTugqNGyMzdVJkXAK2AM
GAwC8ivAkHf8CHrHa1W7l8t2IqBjW1aRt7mOW92nfG88Hck0Mbo=
-----END RSA PRIVATE KEY-----
"""
PUB_KEY2 = """
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+8cTxguO6Vg+YO92VfH
gNld3Zy8aM3JbZvpJcjTnis+YFJ7Zlkcc647yPRRwY9nYBNywahnt5kIeuT1rTvT
sMBZWvmUoEVUj1Xg8XXQkBvb9OzyGqy/G/p8KDDpzMP/U+XCnUeHiXTZrgnqgBIc
2cKeCVvWFqDi0GRFGzyaXLaX3PPmM7DJ0MIPL1qgmcDq6+7Ze0gJ9SrDYFAeLmbu
T1OqDfufXWQl/82JXeiwU2cOpqWq7n5fvPOWim7l1tzQ+dSiMRRm0xa6uNexCJww
3oJSwvMbAmgzvOhqqhlqv+K7u0u7FrFFojESsL36Gq4GBrISnvu2tk7u4GGNTYYQ
bQIDAQAB
-----END PUBLIC KEY-----
"""
def test_get_rsa_pub_key_bad_key(tmp_path):
"""
get_rsa_pub_key raises InvalidKeyError when encoutering a bad key
"""
key_path = str(tmp_path / "key")
with salt.utils.files.fopen(key_path, "w") as fp:
fp.write("")
with pytest.raises(salt.crypt.InvalidKeyError):
salt.crypt.get_rsa_pub_key(key_path)
def test_cryptical_dumps_no_nonce():
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data)
# Validate message structure
assert isinstance(ret, bytes)
une = master_crypt.decrypt(ret)
une.startswith(master_crypt.PICKLE_PAD)
assert salt.payload.loads(une[len(master_crypt.PICKLE_PAD) :]) == data
# Validate load back to orig data
assert master_crypt.loads(ret) == data
def test_cryptical_dumps_valid_nonce():
nonce = uuid.uuid4().hex
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data, nonce=nonce)
assert isinstance(ret, bytes)
une = master_crypt.decrypt(ret)
une.startswith(master_crypt.PICKLE_PAD)
nonce_and_data = une[len(master_crypt.PICKLE_PAD) :]
assert nonce_and_data.startswith(nonce.encode())
assert salt.payload.loads(nonce_and_data[len(nonce) :]) == data
assert master_crypt.loads(ret, nonce=nonce) == data
def test_cryptical_dumps_invalid_nonce():
nonce = uuid.uuid4().hex
master_crypt = salt.crypt.Crypticle({}, salt.crypt.Crypticle.generate_key_string())
data = {"foo": "bar"}
ret = master_crypt.dumps(data, nonce=nonce)
assert isinstance(ret, bytes)
with pytest.raises(salt.crypt.SaltClientError, match="Nonce verification error"):
assert master_crypt.loads(ret, nonce="abcde")
def test_verify_signature(tmp_path):
tmp_path.joinpath("foo.pem").write_text(PRIV_KEY.strip())
tmp_path.joinpath("foo.pub").write_text(PUB_KEY.strip())
tmp_path.joinpath("bar.pem").write_text(PRIV_KEY2.strip())
tmp_path.joinpath("bar.pub").write_text(PUB_KEY2.strip())
msg = b"foo bar"
sig = salt.crypt.sign_message(str(tmp_path.joinpath("foo.pem")), msg)
assert salt.crypt.verify_signature(str(tmp_path.joinpath("foo.pub")), msg, sig)
def test_verify_signature_bad_sig(tmp_path):
tmp_path.joinpath("foo.pem").write_text(PRIV_KEY.strip())
tmp_path.joinpath("foo.pub").write_text(PUB_KEY.strip())
tmp_path.joinpath("bar.pem").write_text(PRIV_KEY2.strip())
tmp_path.joinpath("bar.pub").write_text(PUB_KEY2.strip())
msg = b"foo bar"
sig = salt.crypt.sign_message(str(tmp_path.joinpath("foo.pem")), msg)
assert not salt.crypt.verify_signature(str(tmp_path.joinpath("bar.pub")), msg, sig)
| [
"[email protected]"
] | |
59272245968fd3f11a85901ea07d6c1fedcbbda5 | 0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a | /tests/components/sleepiq/test_init.py | e468734e06321cc5513d41f33c58d9775346af86 | [
"Apache-2.0"
] | permissive | robert-alfaro/home-assistant | e9bb08ad22a167ed226fb3de8f5b36acfc393548 | 4a53121b58b77a318f08c64ad2c5372a16b800e0 | refs/heads/dev | 2023-02-28T06:46:23.217246 | 2022-04-26T17:30:08 | 2022-04-26T17:30:08 | 115,894,662 | 4 | 0 | Apache-2.0 | 2023-02-22T06:21:08 | 2018-01-01T02:00:35 | Python | UTF-8 | Python | false | false | 4,870 | py | """Tests for the SleepIQ integration."""
from asyncsleepiq import (
SleepIQAPIException,
SleepIQLoginException,
SleepIQTimeoutException,
)
from homeassistant.components.sleepiq.const import (
DOMAIN,
IS_IN_BED,
PRESSURE,
SLEEP_NUMBER,
)
from homeassistant.components.sleepiq.coordinator import UPDATE_INTERVAL
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import MockConfigEntry, async_fire_time_changed, mock_registry
from tests.components.sleepiq.conftest import (
BED_ID,
SLEEPER_L_ID,
SLEEPER_L_NAME,
SLEEPER_L_NAME_LOWER,
SLEEPIQ_CONFIG,
setup_platform,
)
ENTITY_IS_IN_BED = f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{IS_IN_BED}"
ENTITY_PRESSURE = f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{PRESSURE}"
ENTITY_SLEEP_NUMBER = (
f"sensor.sleepnumber_{BED_ID}_{SLEEPER_L_NAME_LOWER}_{SLEEP_NUMBER}"
)
async def test_unload_entry(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test unloading the SleepIQ entry."""
entry = await setup_platform(hass, "sensor")
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state is ConfigEntryState.NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_entry_setup_login_error(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client is unable to login."""
mock_asyncsleepiq.login.side_effect = SleepIQLoginException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_entry_setup_timeout_error(
hass: HomeAssistant, mock_asyncsleepiq
) -> None:
"""Test when sleepiq client timeout."""
mock_asyncsleepiq.login.side_effect = SleepIQTimeoutException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_update_interval(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test update interval."""
await setup_platform(hass, "sensor")
assert mock_asyncsleepiq.fetch_bed_statuses.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
await hass.async_block_till_done()
assert mock_asyncsleepiq.fetch_bed_statuses.call_count == 2
async def test_api_error(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client is unable to login."""
mock_asyncsleepiq.init_beds.side_effect = SleepIQAPIException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_api_timeout(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test when sleepiq client timeout."""
mock_asyncsleepiq.init_beds.side_effect = SleepIQTimeoutException
entry = await setup_platform(hass, None)
assert not await hass.config_entries.async_setup(entry.entry_id)
async def test_unique_id_migration(hass: HomeAssistant, mock_asyncsleepiq) -> None:
"""Test migration of sensor unique IDs."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data=SLEEPIQ_CONFIG,
unique_id=SLEEPIQ_CONFIG[CONF_USERNAME].lower(),
)
mock_entry.add_to_hass(hass)
mock_registry(
hass,
{
ENTITY_IS_IN_BED: er.RegistryEntry(
entity_id=ENTITY_IS_IN_BED,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{IS_IN_BED}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
ENTITY_PRESSURE: er.RegistryEntry(
entity_id=ENTITY_PRESSURE,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{PRESSURE}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
ENTITY_SLEEP_NUMBER: er.RegistryEntry(
entity_id=ENTITY_SLEEP_NUMBER,
unique_id=f"{BED_ID}_{SLEEPER_L_NAME}_{SLEEP_NUMBER}",
platform=DOMAIN,
config_entry_id=mock_entry.entry_id,
),
},
)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
ent_reg = er.async_get(hass)
sensor_is_in_bed = ent_reg.async_get(ENTITY_IS_IN_BED)
assert sensor_is_in_bed.unique_id == f"{SLEEPER_L_ID}_{IS_IN_BED}"
sensor_pressure = ent_reg.async_get(ENTITY_PRESSURE)
assert sensor_pressure.unique_id == f"{SLEEPER_L_ID}_{PRESSURE}"
sensor_sleep_number = ent_reg.async_get(ENTITY_SLEEP_NUMBER)
assert sensor_sleep_number.unique_id == f"{SLEEPER_L_ID}_{SLEEP_NUMBER}"
| [
"[email protected]"
] | |
91beaa721a902e1b82c439cbefb62196165a74e9 | 2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df | /contributions/Acksl/python/Data Structures/2016-09-18.py | 8f3b1ae44bc5ba3c68d9c6718bc90507547e0675 | [] | no_license | 0x8801/commit | 18f25a9449f162ee92945b42b93700e12fd4fd77 | e7692808585bc7e9726f61f7f6baf43dc83e28ac | refs/heads/master | 2021-10-13T08:04:48.200662 | 2016-12-20T01:59:47 | 2016-12-20T01:59:47 | 76,935,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | Following PEP 8 styling guideline.
`bytes` type
Double ended queues with `deque`
There is more to copying
`bytearray` objects | [
"[email protected]"
] | |
70ec520b8f46fc9cf8721e28e623a8fcfb530cd0 | 345f9b0788d76820486b06a704ac77a59f506acf | /ACIS_hist/acis_hist_plot_trend_interactive.py | 813a6244d76563814f19a5b8aee632689ef213c4 | [] | no_license | tisobe/MTA_old | df7525d46073fbdd58d1cebdec5f2bc9e703001a | c0eb23514684b1ff91955c84eeade7436880b5e4 | refs/heads/master | 2021-09-14T17:24:33.131131 | 2018-05-16T16:18:19 | 2018-05-16T16:18:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,893 | py | #!/usr/bin/env /proj/sot/ska/bin/python
#########################################################################################################################
# #
#       acis_hist_plot_trend_interactive.py: plot acis histogram peak, width, and count rate trends (interactive version)      #
# #
# author: t. isobe ([email protected]) #
# #
# last update: May 10, 2016 #
# #
#########################################################################################################################
import sys
import os
import string
import re
import getpass
import fnmatch
import numpy
import getopt
import os.path
import time
import mpld3
from mpld3 import plugins, utils
#
#--- pylab plotting routine related modules
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
#
#--- read argv
#
try:
option, remainder = getopt.getopt(sys.argv[1:],'t',['test'])
except getopt.GetoptError as err:
print str(err)
sys.exit(2)
pass_test = ''
for opt, arg in option:
if opt in ('-t', '--test'):
pass_test = 'test'
#
#--- reading directory list
#
if pass_test == 'test':
path = '/data/mta/Script/ACIS/Acis_hist2/house_keeping/dir_list_test'
else:
path = '/data/mta/Script/ACIS/Acis_hist/house_keeping/dir_list_py'
f = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec "%s = %s" %(var, line)
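#
#--- each line of the dir_list file is expected in the form (the path below is illustrative):
#---        '/data/mta/Script/ACIS/Acis_hist/Data/'         : data_dir
#--- i.e., a quoted path, a colon, and a variable name; the exec above binds
#--- each variable name to its quoted path
#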
#
#--- append path to a private folder
#
mta_dir = '/data/mta/Script/Python_script2.7' #---- temporary until everything is moved to 2.7
sys.path.append(mta_dir)
sys.path.append(bin_dir)
#
#--- convertTimeFormat contains MTA time conversion routines
#
import convertTimeFormat as tcnv
#
#--- mta common functions
#
import mta_common_functions as mtac
#
#--- least sq fitting routine (see http://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html)
#
from kapteyn import kmpfit
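#
#--- minimal kmpfit usage sketch (illustrative only; the names below are made up):
#
#       def residuals(p, data):                     #--- residual function for a straight line fit
#           a, b = p
#           x, y = data
#           return y - (a + b * x)
#
#       fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y))
#       fitobj.fit(params0=[0.0, 1.0])
#       a, b   = fitobj.params                      #--- best fit intercept and slope
#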
#
#-- peak position plotting range in the y direction (full range: 2 x pdelta)
#
pdelta = 70
#
#-- peak width plotting range in the y direction (full range: 2 x wdelta)
#
wdelta = 70
#
#--- count rate y direction plotting range
#
count_ymin = 1.0e-4
count_ymax = 0.07
#
#--- each frame takes 3.2412 sec
#
sec_per_frame = 3.2412
#--------------------------------------------------------------------------------------------------------
#-- acis_hist_plot_trend: main function to plot trends of acis histogram data and create interactive html pages
#--------------------------------------------------------------------------------------------------------
def acis_hist_plot_trend():
'''
    main function to plot trends of acis histogram data and create interactive html pages
input: none, this will read data from data_dir
output: interactive web pages in <web_dir>/Html_pages/
'''
#
#--- go through given ccds, nodes, and data collection regions
#
    for ccd in (1, 2, 3, 6, 7):
        for loc in ('low', 'high'):
#
#--- read data; the same data sets are used for all three plot types
#
            node0_data = readdata(ccd, 0, loc)
            node1_data = readdata(ccd, 1, loc)
            node2_data = readdata(ccd, 2, loc)
            node3_data = readdata(ccd, 3, loc)

            for dtype in ('pos', 'cnt', 'width'):
#
#--- create the interactive web pages
#
                plot_interactive_trend(node0_data, node1_data, node2_data, node3_data, ccd, loc, dtype)
#
#--- update the main web page
#
update_web_page()
#--------------------------------------------------------------------------------------------------------
#--- readdata: read trend data from the database                                                        ---
#--------------------------------------------------------------------------------------------------------
def readdata(ccd, node, loc):
'''
    read trend data from the database
    input:  ccd     --- ccd #
            node    --- node # (0 - 3)
            loc     --- location; low or high
    output: a list of 22 lists:
            [time, pos1, cnt1, width1, pos2, cnt2, width2, pos3, cnt3, width3,
             perr1, cerr1, werr1, perr2, cerr2, werr2, perr3, cerr3, werr3,
             y_list, m_list, interv]
'''
file = data_dir + 'ccd' + str(ccd) + '_node' + str(node) + '_' + loc
time = []
pos1 = []
cnt1 = []
width1 = []
pos2 = []
cnt2 = []
width2 = []
pos3 = []
cnt3 = []
width3 = []
perr1 = []
cerr1 = []
werr1 = []
perr2 = []
cerr2 = []
werr2 = []
perr3 = []
cerr3 = []
werr3 = []
interv = []
y_list = []
m_list = []
#
#-- read the data file; note that not all CCDs have the histogram data
#
f = open(file, 'r')
data = [line.strip() for line in f.readlines()]
for ent in data:
        atemp = re.split('\s+|\t+', ent)
btemp = re.split(':', atemp[0])
        ytime = float(btemp[0]) + (float(btemp[1])+0.5) / 12.0   #--- add half a month to place the point at mid-month
duration = float(atemp[20])
interv.append(duration)
atemp[2] = float(atemp[2])
atemp[3] = float(atemp[3])
atemp[4] = float(atemp[4])
atemp[5] = float(atemp[5])
        atemp[6]  = float(atemp[6])  * 2.354    #--- convert sigma to FWHM (2.354 ~ 2*sqrt(2*ln2))
atemp[7] = float(atemp[7])
atemp[8] = float(atemp[8])
atemp[9] = float(atemp[9])
atemp[10] = float(atemp[10])
atemp[11] = float(atemp[11])
atemp[12] = float(atemp[12]) * 2.354
atemp[13] = float(atemp[13])
atemp[14] = float(atemp[14])
atemp[15] = float(atemp[15])
atemp[16] = float(atemp[16])
atemp[17] = float(atemp[17])
atemp[18] = float(atemp[18]) * 2.354
atemp[19] = float(atemp[19])
chk = 0
for i in range(2,20):
if atemp[i] <= 0:
chk = 1
continue
if atemp[i] > 1e5:
chk = 1
continue
if chk == 1:
continue
time.append(ytime)
y_list.append(btemp[0])
m_list.append(btemp[1])
pos1.append(atemp[2])
perr1.append(atemp[3])
cnt1.append(log10(atemp[4] / duration))
cerr1.append(atemp[5] / duration)
width1.append(atemp[6])
werr1.append(atemp[7])
pos2.append(atemp[8])
perr2.append(atemp[9])
cnt2.append(log10(atemp[10] / duration))
cerr2.append(atemp[11] / duration)
width2.append(atemp[12])
werr2.append(atemp[13])
pos3.append(atemp[14])
perr3.append(atemp[15])
cnt3.append(log10(atemp[16] / duration))
cerr3.append(atemp[17] / duration)
width3.append(atemp[18])
werr3.append(atemp[19])
out1 = [time, pos1, cnt1, width1, pos2, cnt2, width2, pos3, cnt3, width3]
out2 = [perr1, cerr1, werr1, perr2, cerr2, werr2, perr3, cerr3, werr3, y_list, m_list, interv]
out = out1 + out2
return out
#--------------------------------------------------------------------------------------------------------
#-- convert_ytime: change time format from seconds since 1.1.1998 to time in units of fractional year   --
#--------------------------------------------------------------------------------------------------------
def convert_ytime(time):
'''
    change time format from seconds since 1.1.1998 to time in units of fractional year
    input:  time in seconds from 1.1.1998
    output: time in fractional year, e.g., 2011.1213
'''
atime = tcnv.convertCtimeToYdate(time)
btemp = re.split(':', atime)
year = float(btemp[0])
ydate = float(btemp[1])
hour = float(btemp[2])
mins = float(btemp[3])
sec = float(btemp[4])
chk = 4.0 * int(0.25 * year)
if chk == year:
base = 366.0
else:
base = 365.0
ydate = ydate + (hour/24.0 + mins/1440.0 + sec/86400.0)
frac = ydate/base
ytime = year + frac
return ytime
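#
#--- usage sketch (exact decimals depend on tcnv.convertCtimeToYdate):
#---     convert_ytime(63072000.0)    #--- ~2 years (730 days) after 1.1.1998 --> roughly 2000.0
#--- note: the leap year test above uses the divisible-by-4 rule only
#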
#-----------------------------------------------------------------------------------------------------------------------------
#-- save_results: print out line fitted result in a file ---
#-----------------------------------------------------------------------------------------------------------------------------
def save_results(ccd, loc, dtype, int_list, slp_list):
"""
print out line fitted result in a file
input: ccd --- ccd #
loc --- location; low, high
dtype --- data type: pos, cnt, width
            int_list --- a list of lists of parameter 1: [[node0-al, node0-ti, node0-mn], [node1-al, ...], ...]
            slp_list --- a list of lists of parameter 2: [[node0-al, node0-ti, node0-mn], [node1-al, ...], ...]
output: a file --- ccd<ccd#>_<dtype>_<loc>_fitting_results
"""
ofile = web_dir + 'Fittings/ccd' + str(ccd) + '_' + dtype + '_' + loc + '_fitting_results'
fout = open(ofile , 'w')
if dtype == 'cnt':
dline = ': Log Count Rate '
        equation = 'y = const1 * exp(-1 * const2 * (Time - 2000))   (Time in year)\n'
elif dtype == 'width':
dline = ': Peak Width '
        equation = 'y = const1 + const2 * (Time - 2000)   (Time in year)\n'
else:
dline = ': Peak Position '
        equation = 'y = const1 + const2 * (Time - 2000)   (Time in year)\n'
fout.write('\n')
line = 'CCD' + str(ccd) + dline + 'Al K-alpha (set Year 2000 to 0)\n'
fout.write(line)
fout.write('#\n')
fout.write(equation)
fout.write('#\n')
fout.write('node rows element const1 const2\n')
fout.write('#---------------------------------------------------------------------\n')
print_out_fit_result(fout, loc, 'al', 0, int_list[0][0], slp_list[0][0])
print_out_fit_result(fout, loc, 'al', 1, int_list[1][0], slp_list[1][0])
print_out_fit_result(fout, loc, 'al', 2, int_list[2][0], slp_list[2][0])
print_out_fit_result(fout, loc, 'al', 3, int_list[3][0], slp_list[3][0])
fout.write('\n\n')
line = 'CCD' + str(ccd) + dline + 'Ti K-alpha (set Year 2000 to 0) \n'
fout.write(line)
fout.write('#\n')
fout.write(equation)
fout.write('#\n')
fout.write('node rows element const1 const2\n')
fout.write('#---------------------------------------------------------------------\n')
print_out_fit_result(fout, loc, 'ti', 0, int_list[0][1], slp_list[0][1])
print_out_fit_result(fout, loc, 'ti', 1, int_list[1][1], slp_list[1][1])
print_out_fit_result(fout, loc, 'ti', 2, int_list[2][1], slp_list[2][1])
print_out_fit_result(fout, loc, 'ti', 3, int_list[3][1], slp_list[3][1])
fout.write('\n\n')
line = 'CCD' + str(ccd) + dline + 'Mn K-alpha (set Year 2000 to 0) \n'
fout.write(line)
fout.write('#\n')
fout.write(equation)
fout.write('#\n')
fout.write('node rows element const1 const2\n')
fout.write('#---------------------------------------------------------------------\n')
print_out_fit_result(fout, loc, 'mn', 0, int_list[0][2], slp_list[0][2])
print_out_fit_result(fout, loc, 'mn', 1, int_list[1][2], slp_list[1][2])
print_out_fit_result(fout, loc, 'mn', 2, int_list[2][2], slp_list[2][2])
print_out_fit_result(fout, loc, 'mn', 3, int_list[3][2], slp_list[3][2])
fout.write('\n\n')
fout.close()
#--------------------------------------------------------------------------------------------------------
#-- print_out_fit_result: create an output line for the result ---
#--------------------------------------------------------------------------------------------------------
def print_out_fit_result(fout, loc, elm, node, a1, a2):
"""
create an output line for the result
input: fout --- print out device
            loc     --- location of data collection; low, high
            elm     --- elements: al, ti, mn
            node    --- node #
            a1      --- parameter 1
            a2      --- parameter 2
    output: prints the result line to fout
"""
if loc == 'low':
lpos = '21- 221'
else:
lpos = '801 - 1001'
b1 = '%.4f' % float(a1)
b2 = '%.4f' % float(a2)
line = str(node) + '\t\t' + lpos + '\t\t\t' + elm + '\t\t' + str(b1) + '\t' + str(b2) + '\n'
fout.write(line)
#--------------------------------------------------------------------------------------------------------
#----fit_line: kmpfit calling function to fit the lines on data ---
#--------------------------------------------------------------------------------------------------------
def fit_line(paramsinit, x, y, err, type):
'''
kmpfit calling function to fit the lines on data
input: paramsinit: initial guess for the parameters
x, y: data
type: linear or exp:
output: two parameters (a, b) are returned
'''
sx = []
sy = []
se = []
avg = mean(y)
stdp = std(y)
bot = avg - 3.0 * stdp
top = avg + 3.0 * stdp
i = 0
for val in y:
if (val >= bot) and (val <= top):
sx.append(x[i])
sy.append(y[i])
se.append(err[i])
i += 1
#
#--- make sure that the arrays are numpyed
#
d = numpy.array(sx)
v = numpy.array(sy)
e = numpy.array(se)
if type == 'linear':
#
#--- linear fit
#
fitobj = kmpfit.Fitter(residuals=linear_res, data=(d, v, e))
fitobj.fit(params0 = paramsinit)
else:
#
#--- exp fit
#
fitobj = kmpfit.Fitter(residuals=exp_res, data=(d, v, e))
fitobj.fit(params0 = paramsinit)
return fitobj.params
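#
#--- usage sketch mirroring the call in plot_interactive_trend below
#--- (shifted_times is a hypothetical name for times shifted so year 2000 -> 0):
#---     (a, b) = fit_line([360, -0.3], shifted_times, ydata, [0] * len(ydata), 'linear')
#--- note: points more than 3 sigma from the mean of y are dropped before fitting
#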
#--------------------------------------------------------------------------------------------------------
#---linear_fit: linear model fit ---
#--------------------------------------------------------------------------------------------------------
def linear_fit(param, x):
'''
linear model fit
input: param: (a,b)
x independent val
ouptput: estimate
'''
a, b = param
return (a + b * x)
#--------------------------------------------------------------------------------------------------------
#-- linear_res: linear model residual                                                    ---
#--------------------------------------------------------------------------------------------------------
def linear_res(param, data):
'''
    linear model residual
input: param (a, b)
data (x, y)
output: residual
'''
a, b = param
x, y, e = data
res = y - (a + b * x)
return res
#--------------------------------------------------------------------------------------------------------
#-- exp_fit: exponential model ---
#--------------------------------------------------------------------------------------------------------
def exp_fit(param, x):
'''
exponential model
input: param (a, b)
x independent variable
output: estimate
'''
a, b = param
return (a * exp(-1.0 * b * x))
#--------------------------------------------------------------------------------------------------------
#-- exp_res: exponential model residual --
#--------------------------------------------------------------------------------------------------------
def exp_res(param, data):
'''
exponential model residual
input param(a, b)
data (x, y)
output: residual
'''
a, b = param
x, y, e = data
res = y - (a * exp(-1.0 * b * x))
return res
#--------------------------------------------------------------------------------------------------------
#-- update_web_page: update "update date" on the main page --
#--------------------------------------------------------------------------------------------------------
def update_web_page():
"""
update "update date" on the main page
input: none but read <house_keeping>/acis_hist_main.html
output: <web_page>/acis_hist_main.html
"""
ldate = tcnv.currentTime('Display')
path = house_keeping + 'acis_hist_main.html'
data = open(path, 'r').read()
data = data.replace("#DATE#", ldate)
outfile = web_dir + 'acis_hist_main.html'
fout = open(outfile, 'w')
fout.write(data)
    fout.close()
#--------------------------------------------------------------------------------------------------------
#-- plot_interactive_trend: create an interactive web page ---
#--------------------------------------------------------------------------------------------------------
def plot_interactive_trend(node_0_data, node_1_data, node_2_data, node_3_data, ccd, loc, dtype):
"""
create an interactive web page
input: node_0_data --- data set for node 0 of a given ccd, loc, and dtype
node_1_data --- data set for node 1 of a given ccd, loc, and dtype
node_2_data --- data set for node 2 of a given ccd, loc, and dtype
node_3_data --- data set for node 3 of a given ccd, loc, and dtype
ccd --- ccd #
loc --- location of the data collection either "low" or "high"
dtype --- data type, cnt, width, or pos
    output: interactive html page: acis_hist_cccd<ccd#>_<loc>_<dtype>.html (e.g., acis_hist_cccd1_low_cnt.html)
line fitted result page: ccd<ccd#>_<dtype>_<loc>_fitting_results (e.g., ccd1_cnt_low_fitting_results)
"""
#
#--- set several values used in the plots
#
color_table = ['red', 'blue', 'green', 'lime']
marker_table = ['s', '*', '^', 'o']
marker_size = [50, 80, 70, 50]
#
#--- this css is used for the pop up page
#
css = """
body{
width:600px;
height:300px;
}
p{
text-align:center;
}
"""
#
#--- open the data packs
#
[n0_time, n0_pos3, n0_cnt3, n0_width3, n0_pos1, n0_cnt1, n0_width1, n0_pos2, n0_cnt2, n0_width2] = node_0_data[:10]
[n1_time, n1_pos3, n1_cnt3, n1_width3, n1_pos1, n1_cnt1, n1_width1, n1_pos2, n1_cnt2, n1_width2] = node_1_data[:10]
[n2_time, n2_pos3, n2_cnt3, n2_width3, n2_pos1, n2_cnt1, n2_width1, n2_pos2, n2_cnt2, n2_width2] = node_2_data[:10]
[n3_time, n3_pos3, n3_cnt3, n3_width3, n3_pos1, n3_cnt1, n3_width1, n3_pos2, n3_cnt2, n3_width2] = node_3_data[:10]
[y_list0, m_list0, duration0] = node_0_data[-3:]
[y_list1, m_list1, duration1] = node_1_data[-3:]
[y_list2, m_list2, duration2] = node_2_data[-3:]
[y_list3, m_list3, duration3] = node_3_data[-3:]
xmin = 2000
xmax = int(max(n0_time)) + 1
xdiff = xmax - xmin
xtext = xmin + 0.05 * xdiff
#
#--- set plotting page
#
plt.close('all')
fig, ax = plt.subplots(3, figsize=(8,6), sharex='col')
mpl.rcParams['font.size'] = 12
props = font_manager.FontProperties(size=12)
plt.subplots_adjust(hspace=0.08)
#
#---- trending plots
#
int_list = []
slp_list = []
t_list0 = []
t_list1 = []
t_list2 = []
for node in range(0, 4):
a_list = []
b_list = []
for k in range(0, 3):
exec 'xtime = n%s_time' % (str(node))
exec 'ydata = n%s_%s%s' % (str(node), dtype, str(k+1))
exec 'y_list = y_list%s' % (str(node))
exec 'm_list = m_list%s' % (str(node))
exec 't_list%s = t_list%s + ydata' % (str(k), str(k))
nlabel = 'node' + str(node)
#
#--- the main plots are created here
#
points = ax[k].scatter(xtime, ydata, color=color_table[node], marker=marker_table[node], s=marker_size[node] ,lw=0)
#
#--- pop up plots are created here
#
labels = create_label_html(ccd, node, loc, y_list, m_list)
#
#--- link the two pages
#
plugins.connect(fig, mpld3.plugins.PointHTMLTooltip(points, labels, css=css, hoffset=-140))
#
#--- prepare for line fittings then fit a linear line
#
shifted = []
for m in range(0, len(xtime)):
shifted.append(xtime[m] - 2000)
paramsinitial = set_initial_guess(dtype, k)
ferr = [0] * len(xtime)
(a, b) = fit_line(paramsinitial, shifted, ydata, ferr, 'linear')
start = a + b * ((xmin-2000) - 0.5)
stop = a + b * ((xmax-2000) + 0.5)
ax[k].plot([xmin, xmax], [start, stop], color=color_table[node], marker=marker_table[node], lw=2, label=nlabel)
a_list.append(a)
b_list.append(b)
int_list.append(a_list)
slp_list.append(b_list)
#
#--- setting plotting range using all data and with fitted lines
#
[ymin_list, ymax_list, ylab] = set_plot_range(xmin, xmax, int_list, slp_list, t_list0, t_list1, t_list2, dtype, loc)
#
#--- printing out fitted results
#
save_results(ccd, loc, dtype, int_list, slp_list)
#
#--- set plotting frames
#
ymin = ymin_list[0]
ymax = ymax_list[0]
ax[0].set_xlim(xmin, xmax)
ax[0].set_ylim(ymin, ymax)
ax[0].text(xtext, y_text(ymin, ymax), 'Al K-alpha',fontsize=16)
ymin = ymin_list[1]
ymax = ymax_list[1]
ax[1].set_xlim(xmin, xmax)
ax[1].set_ylim(ymin, ymax)
ax[1].text(xtext, y_text(ymin, ymax), 'Ti K-alpha', fontsize=16)
ymin = ymin_list[2]
ymax = ymax_list[2]
ax[2].set_xlim(xmin, xmax)
ax[2].set_ylim(ymin, ymax)
ax[2].text(xtext, y_text(ymin, ymax), 'Mn K-alpha', fontsize=16)
#
#
ax[0].legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=4, fancybox=True, shadow=True)
#
#--- shift y label so that it is easily seen
#
ax[1].set_ylabel(ylab)
ax[1].yaxis.set_label_coords(-0.10, 0.5)
#
#---- set x tick labels
#
    xt_list = []
    xn_list = []
    for val in range(xmin, xmax+1, 2):
        xt_list.append(val)
        xn_list.append(str(val))
    for az in ax.flatten():
        az.set_xticks(xt_list)
        az.set_xticklabels(xn_list)
#
#--- add x label on the bottom panel
#
ax[2].set_xlabel('Time (year)')
#
#--- set the size of plot
#
fig.set_size_inches(10.0, 15.0)
fig.tight_layout()
plt.close('all')
#
#
#------------ start making the web page ---------------------------------
#
#
#--- read javascript file
#
jfile = house_keeping + 'java_script_deposit'
f = open(jfile, 'r')
jscript = f.read()
f.close()
#
#--- set title description
#
if dtype == 'cnt':
description = 'Normalized Count Rates '
elif dtype == 'width':
description = 'Peak Width '
else:
description = 'Peak Position '
if loc == 'low':
description = description + '<br /> Rows: 21 - 221'
else:
description = description + '<br /> Rows: 801 - 1001'
#
#--- the file name which keeps the fitted results
#
file_name = 'ccd' + str(ccd) + '_' + dtype + '_' + loc + '_fitting_results'
#
#--- start creating html page
#
out = '<!DOCTYPE html>\n<html>\n<head>\n\t<title>ACIS Histogram Plots</title>\n'
out = out + jscript + '\n'
    out = out + '</head>\n<body style="width:95%;margin-left:10px; margin-right:10px">\n\n'
out = out + '<a href="https://cxc.cfa.harvard.edu/mta_days/mta_acis_hist/acis_hist_main.html" '
out = out + 'style="float:right;padding-right:50px;font-size:120%"><b>Back to Top</b></a>'
out = out + '<h2>CCD ' + str(ccd) + ' ' + description
out = out + ' (<a href="javascript:WindowOpener(\'' + file_name + '\')" style="text-align:right">Fitted Results </a>)'
out = out + '</h2>\n\n'
out = out + '<p style="text-align:left; ">'
    out = out + 'If you move the mouse over one of the data points, the distribution plot will pop up. '
    out = out + 'If you would like to magnify the plot, click the <b>magnifier icon</b> at the bottom of the plot'
    out = out + ' or the <b>cross icon</b> to move the plot around. You can go back to the full view by clicking'
    out = out + ' the <b>house icon</b>.</p>'
#
#--- convert mat plot figure to html page format
#
out = out + mpld3.fig_to_html(fig)
    out = out.replace('None', '')     #--- remove the unwanted "None" that otherwise appears on the web page
out = out + '<div style="padding-top:30px"></div>'
out = out + '<hr /><p style="text-align:left; padding-top:10px;padding-bottom:20px">'
out = out + 'If you have any questions, please contact <a href="mailto:[email protected]">[email protected]</a>.'
out = out + '\n\n\n</body>\n</html>\n'
#
#--- write out the html data
#
name = web_dir + 'Html_pages/acis_hist_cccd' + str(ccd) + '_' + loc + '_' + dtype + '.html'
fo = open(name, 'w')
fo.write(out)
fo.close()
#--------------------------------------------------------------------------------------------------------
#-- set_plot_range: set plotting range parameters                                        --
#--------------------------------------------------------------------------------------------------------
def set_plot_range(xmin, xmax, int_list, slp_list, tlist0, tlist1, tlist2, dtype, loc):
"""
set plotting range parameters
input: xmin --- min of x
xmax --- max of x
            int_list --- a list of intercepts: [[node0-al, node0-ti, node0-mn], [node1-al, ...], ...]
            slp_list --- a list of slopes in the same layout
            tlist0  --- a list of al data from all nodes [node0, node1, node2, node3]
            tlist1  --- a list of ti data from all nodes [node0, node1, node2, node3]
            tlist2  --- a list of mn data from all nodes [node0, node1, node2, node3]
            dtype --- data type: cnt, width, pos
            loc  --- location of data, either low or high
    output: ymin_list --- a list of ymin for al, ti, mn
            ymax_list --- a list of ymax for al, ti, mn
            ylab      --- y axis label string
"""
if dtype == 'cnt':
ymin_list = [-5, -5, -5]
ymax_list = [-1, -1, -1]
ylab = 'Log(Counts/Sec)'
else:
prange = find_range(xmin, xmax, int_list, slp_list)
ymin_list = []
ymax_list = []
for k in range(0, 3):
exec "ydata = tlist%s" %(str(k))
[ymin, ymax] = set_range(ydata, prange)
ymin_list.append(ymin)
ymax_list.append(ymax)
ylab = 'ADU'
return [ymin_list, ymax_list, ylab]
#--------------------------------------------------------------------------------------------------------
#-- find_range: setting the size of plotting range ----
#--------------------------------------------------------------------------------------------------------
def find_range(xmin, xmax, int_list, slp_list):
"""
setting the size of plotting range
input: xmin --- min of x value
xmax --- max of x value
            int_list --- a list of intercepts: [[node0-al, node0-ti, node0-mn], [node1-al, ...], ...]
            slp_list --- a list of slopes in the same layout
    output: prange  --- plotting width
"""
prange = 1
for n in range(0, 3):
pmin = 1.0e6
pmax = -1.0e6
#
#--- find min and max among all node data
#
for m in range(0, 4):
intc = int_list[m][n]
slope = slp_list[m][n]
tval1 = intc + slope * (xmin -2000)
tval2 = intc + slope * (xmax -2000)
if tval1 < pmin:
pmin = tval1
if tval2 < pmin:
pmin = tval2
if tval1 > pmax:
pmax = tval1
if tval2 > pmax:
pmax = tval2
#
#--- find the largest interval among the elements
#
trange = pmax - pmin
if trange > prange:
prange = trange
prange *= 1.8
return int(prange)
#--------------------------------------------------------------------------------------------------------
#-- set_range: set plotting range ---
#--------------------------------------------------------------------------------------------------------
def set_range(ydata, prange):
"""
set plotting range
    input:  ydata   --- the data for which the plotting range is to be set
prange --- the width of the plotting range
output: [ymin, ymax] --- min and max of the range
"""
med = numpy.median(ydata)
ymin = int(med - 0.5 * prange)
ymax = ymin + prange
return [ymin, ymax]
#----------------------------------------------------------------------------------
#-- create_label_html: creating a list of html links to the distribution plots ---
#----------------------------------------------------------------------------------
def create_label_html(ccd, node, loc, y_list, m_list):
"""
creating a list of html links to the distribution plots
input: ccd --- ccd #
node --- node #
            loc     --- location: either "low" or "high"
y_list --- a list of year
m_list --- a list of month
output: hlist --- a list of html links
"""
#
#--- set directory path and html top link to the plot
#
fdir = web_dir + 'Plot_indivisual/'
html_plot = 'https://cxc.cfa.harvard.edu/mta_days/mta_acis_hist/Plot_indivisual/';
hlink = '<p> <img src="' + html_plot
#
#--- read data for the given ccd/node/loc combination
#
dfile = data_dir + 'ccd' + str(ccd) + '_node' + str(node) + '_' + loc
f = open(dfile, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
hlist = []
for k in range(0, len(y_list)):
lyear = y_list[k]
mon = m_list[k]
        lmon  = mon.zfill(2)        #--- zero-pad the month to two digits
hfile = 'CCD' + str(ccd) + '/node' + str(node) + '/' + loc + '/hist_'
hfile = hfile + lyear + '_' + lmon + '.png'
title = '<b style="background-color:yellow">CCD' + str(ccd)
title = title + ' Node: ' + str(node) + ' (Year: ' + lyear +' Month: ' + lmon +')</b>'
ulink = title + hlink
#
#--- check whether the plot exists
#
chk = fdir + hfile
if os.path.isfile(chk):
#
#--- add a plot
#
alink = ulink + hfile + '" width=600px> </p>'
#
#--- add a data table
#
line = create_table(data, lyear, mon)
alink = alink + line
else:
alink = '<p><span style="font-size:250%"><b>No Plot</b></span> </p>'
line = create_table(data, lyear, mon)
            if line != '<p>No Data</p>':
alink = title + alink + line
else:
alink = title + alink
hlist.append(alink)
return hlist
#---------------------------------------------------------------------
#-- create_table: create data table for the html page --
#---------------------------------------------------------------------
def create_table(data, year, mon):
"""
create data table for the html page
input: data --- a list of data
year --- a year of the data needed
mon --- a month of the data needed
    output: line    --- an html table of data; if there is no data,
                        returns "<p>No Data</p>"
"""
tag = str(year) + ':' + str(mon)
for ent in data:
atemp = re.split('\s+', ent)
if atemp[0] == tag:
duration = float(atemp[-1])
mn_p = str(round(float(atemp[2]), 2))
al_p = str(round(float(atemp[8]), 2))
ti_p = str(round(float(atemp[14]), 2))
#
            #--- multiply by 2.354 to convert sigma to FWHM width
#
mn_w = str(round(float(atemp[6]) * 2.354, 2))
al_w = str(round(float(atemp[12]) * 2.354, 2))
ti_w = str(round(float(atemp[18]) * 2.354, 2))
#
            #--- count rates are rather small; when a rate drops below 1e-4, use exponential notation, e.g. 1.0e-5
#
mn_c = float(atemp[4]) /duration
if mn_c < 1e-4:
mn_c = "%1.2e" % (mn_c)
else:
mn_c = str(round(mn_c, 4))
al_c = float(atemp[10])/duration
if al_c < 1e-4:
al_c = "%1.2e" % (al_c)
else:
al_c = str(round(al_c, 4))
ti_c = float(atemp[16])/duration
if ti_c < 1e-4:
ti_c = "%1.2e" % (ti_c)
else:
ti_c = str(round(ti_c, 4))
            line = '<table border=1 cellpadding=2 style="text-align:center;background-color:yellow">'
line = line + '<tr><th> </th>'
line = line + '<th>Peak Position</th>'
line = line + '<th>Peak Width</th>'
            line = line + '<th>Count Rate<br />(cnts/sec)</th></tr>'
line = line + '<tr><th>Al k-alpha</th>'
line = line + '<th>' + al_p + '</th>'
line = line + '<th>' + al_w + '</th>'
line = line + '<th>' + al_c + '</th>'
line = line + '</tr>'
line = line + '<tr><th>Ti k-alpha</th>'
line = line + '<th>' + ti_p + '</th>'
line = line + '<th>' + ti_w + '</th>'
line = line + '<th>' + ti_c + '</th>'
line = line + '</tr>'
line = line + '<tr><th>Mn k-alpha</th>'
line = line + '<th>' + mn_p + '</th>'
line = line + '<th>' + mn_w + '</th>'
line = line + '<th>' + mn_c + '</th>'
line = line + '</tr></table>'
return line
return '<p>No Data</p>'
#---------------------------------------------------------------------
#-- set_initial_guess: provide initial guess for kmpfit procedure --
#---------------------------------------------------------------------
def set_initial_guess(dtype, k):
"""
provide initial guess for kmpfit procedure
input: dtype --- data type: pos, width, cnt
k --- line # (al, ti, mn in order)
    output: [const1, const2]
"""
if dtype == 'pos':
glist = [360, 1120, 1300]
elif dtype == 'width':
glist = [20, 30, 40]
else:
glist = [-3, -3, -3]
return [glist[k], -0.3]
#---------------------------------------------------------------------
#-- y_text: set y position of text ---
#---------------------------------------------------------------------
def y_text(ymin, ymax):
"""
set y position of text
input: ymin --- min of y
ymax --- max of y
output ytext --- y position of the text
"""
ydiff = ymax - ymin
ytext = ymin + 0.10 * ydiff
return ytext
#--------------------------------------------------------------------------------------------------------
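#
#--- these late imports supply log10, mean, std, and exp used in the functions
#--- above; the names are resolved at call time, after the module has loaded
#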
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
if __name__ == '__main__':
acis_hist_plot_trend()
| [
"[email protected]"
] | |
831ed980bfb919797195d6eb39cdb4209814b00e | 26fc334777ce27d241c67d97adc1761e9d23bdba | /tests/django_tests/tests/validators/tests.py | da3db594d96ffff67788fdd68b416de6c0cc67f4 | [
"BSD-3-Clause"
] | permissive | alihoseiny/djongo | 1434c9e78c77025d7e0b3330c3a40e9ea0029877 | e2edf099e398573faa90e5b28a32c3d7f1c5f1e9 | refs/heads/master | 2020-03-27T23:27:02.530397 | 2018-08-30T14:44:37 | 2018-08-30T14:44:37 | 147,317,771 | 2 | 1 | BSD-3-Clause | 2018-09-04T09:00:53 | 2018-09-04T09:00:53 | null | UTF-8 | Python | false | false | 22,568 | py | import os
import re
import types
from datetime import datetime, timedelta
from decimal import Decimal
from unittest import TestCase, skipUnless
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.validators import (
BaseValidator, DecimalValidator, EmailValidator, FileExtensionValidator,
MaxLengthValidator, MaxValueValidator, MinLengthValidator,
MinValueValidator, ProhibitNullCharactersValidator, RegexValidator,
URLValidator, int_list_validator, validate_comma_separated_integer_list,
validate_email, validate_image_file_extension, validate_integer,
validate_ipv4_address, validate_ipv6_address, validate_ipv46_address,
validate_slug, validate_unicode_slug,
)
from django.test import SimpleTestCase
try:
from PIL import Image # noqa
except ImportError:
PILLOW_IS_INSTALLED = False
else:
PILLOW_IS_INSTALLED = True
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file', 'git+ssh']
TEST_DATA = [
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, ValidationError),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_integer, '\n42', ValidationError),
(validate_integer, '42\n', ValidationError),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected].उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, 'example@atm.%s' % ('a' * 63), None),
(validate_email, 'example@%s.atm' % ('a' * 63), None),
(validate_email, 'example@%s.%s.atm' % ('a' * 63, 'b' * 10), None),
(validate_email, 'example@atm.%s' % ('a' * 64), ValidationError),
(validate_email, 'example@%s.atm.%s' % ('b' * 64, 'a' * 63), ValidationError),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, '[email protected].', ValidationError),
# Max length of domain name labels is 63 characters per RFC 1034.
(validate_email, 'a@%s.us' % ('a' * 63), None),
(validate_email, 'a@%s.us' % ('a' * 64), ValidationError),
# Trailing newlines in username or domain not allowed
(validate_email, '[email protected]\n', ValidationError),
(validate_email, 'a\[email protected]', ValidationError),
(validate_email, '"test@test"\[email protected]', ValidationError),
(validate_email, 'a@[127.0.0.1]\n', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, 'a', None),
(validate_slug, '1', None),
(validate_slug, 'a1', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, '[email protected]', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '你 好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_slug, 'trailing-newline\n', ValidationError),
(validate_unicode_slug, 'slug-ok', None),
(validate_unicode_slug, 'longer-slug-still-ok', None),
(validate_unicode_slug, '--------', None),
(validate_unicode_slug, 'nohyphensoranything', None),
(validate_unicode_slug, 'a', None),
(validate_unicode_slug, '1', None),
(validate_unicode_slug, 'a1', None),
(validate_unicode_slug, '你好', None),
(validate_unicode_slug, '', ValidationError),
(validate_unicode_slug, ' text ', ValidationError),
(validate_unicode_slug, ' ', ValidationError),
(validate_unicode_slug, '[email protected]', ValidationError),
(validate_unicode_slug, '\n', ValidationError),
(validate_unicode_slug, '你 好', ValidationError),
(validate_unicode_slug, 'trailing-newline\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
(validate_ipv4_address, '1.1.1.1\n', ValidationError),
(validate_ipv4_address, '٧.2٥.3٣.243', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '12', None),
(validate_comma_separated_integer_list, '1,2', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '10,32', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(validate_comma_separated_integer_list, ',', ValidationError),
(validate_comma_separated_integer_list, '1,2,3,', ValidationError),
(validate_comma_separated_integer_list, '1,2,', ValidationError),
(validate_comma_separated_integer_list, ',1', ValidationError),
(validate_comma_separated_integer_list, '1,,2', ValidationError),
(int_list_validator(sep='.'), '1.2.3', None),
(int_list_validator(sep='.', allow_negative=True), '1.2.3', None),
(int_list_validator(allow_negative=True), '-1,-2,3', None),
(int_list_validator(allow_negative=True), '1,-2,-12', None),
(int_list_validator(), '-1,2,3', ValidationError),
(int_list_validator(sep='.'), '1,2,3', ValidationError),
(int_list_validator(sep='.'), '1.2.3\n', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'git+ssh://[email protected]/example/hg-git.git', None),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
# Trailing newlines not accepted
(URLValidator(), 'http://www.djangoproject.com/\n', ValidationError),
(URLValidator(), 'http://[::ffff:192.9.5.5]\n', ValidationError),
# Trailing junk does not take forever to reject
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br ', ValidationError),
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br z', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
(FileExtensionValidator(['txt']), ContentFile('contents', name='fileWithUnsupportedExt.jpg'), ValidationError),
(FileExtensionValidator(['txt']), ContentFile('contents', name='fileWithUnsupportedExt.JPG'), ValidationError),
(FileExtensionValidator(['txt']), ContentFile('contents', name='fileWithNoExtension'), ValidationError),
(FileExtensionValidator(['']), ContentFile('contents', name='fileWithAnExtension.txt'), ValidationError),
(FileExtensionValidator([]), ContentFile('contents', name='file.txt'), ValidationError),
(FileExtensionValidator(['']), ContentFile('contents', name='fileWithNoExtension'), None),
(FileExtensionValidator(['txt']), ContentFile('contents', name='file.txt'), None),
(FileExtensionValidator(['txt']), ContentFile('contents', name='file.TXT'), None),
(FileExtensionValidator(['TXT']), ContentFile('contents', name='file.txt'), None),
(FileExtensionValidator(), ContentFile('contents', name='file.jpg'), None),
(DecimalValidator(max_digits=2, decimal_places=2), Decimal('0.99'), None),
(DecimalValidator(max_digits=2, decimal_places=1), Decimal('0.99'), ValidationError),
(DecimalValidator(max_digits=3, decimal_places=1), Decimal('999'), ValidationError),
(DecimalValidator(max_digits=4, decimal_places=1), Decimal('999'), None),
(DecimalValidator(max_digits=20, decimal_places=2), Decimal('742403889818000000'), None),
(DecimalValidator(20, 2), Decimal('7.42403889818E+17'), None),
(DecimalValidator(max_digits=20, decimal_places=2), Decimal('7424742403889818000000'), ValidationError),
(DecimalValidator(max_digits=5, decimal_places=2), Decimal('7304E-1'), None),
(DecimalValidator(max_digits=5, decimal_places=2), Decimal('7304E-3'), ValidationError),
(DecimalValidator(max_digits=5, decimal_places=5), Decimal('70E-5'), None),
(DecimalValidator(max_digits=5, decimal_places=5), Decimal('70E-6'), ValidationError),
# 'Enter a number.' errors
*[
(DecimalValidator(decimal_places=2, max_digits=10), Decimal(value), ValidationError)
for value in (
'NaN', '-NaN', '+NaN', 'sNaN', '-sNaN', '+sNaN',
            'Inf', '-Inf', '+Inf', 'Infinity', '-Infinity', '+Infinity',
)
],
(validate_image_file_extension, ContentFile('contents', name='file.jpg'), None),
(validate_image_file_extension, ContentFile('contents', name='file.png'), None),
(validate_image_file_extension, ContentFile('contents', name='file.PNG'), None),
(validate_image_file_extension, ContentFile('contents', name='file.txt'), ValidationError),
(validate_image_file_extension, ContentFile('contents', name='file'), ValidationError),
(ProhibitNullCharactersValidator(), '\x00something', ValidationError),
(ProhibitNullCharactersValidator(), 'something', None),
(ProhibitNullCharactersValidator(), None, None),
]
def create_path(filename):
return os.path.abspath(os.path.join(os.path.dirname(__file__), filename))
# Add valid and invalid URL tests.
# This only tests the validator without extended schemes.
with open(create_path('valid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), None))
with open(create_path('invalid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), ValidationError))
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
if validator is validate_image_file_extension:
SKIP_MSG = "Pillow is required to test validate_image_file_extension"
test_func = skipUnless(PILLOW_IS_INSTALLED, SKIP_MSG)(test_func)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), "['Not Valid']")
self.assertEqual(repr(v), "ValidationError(['Not Valid'])")
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), "['First Problem', 'Second Problem']")
self.assertEqual(repr(v), "ValidationError(['First Problem', 'Second Problem'])")
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), "{'first': ['First Problem']}")
self.assertEqual(repr(v), "ValidationError({'first': ['First Problem']})")
def test_regex_validator_flags(self):
msg = 'If the flags are set, regex must be a regular expression string.'
with self.assertRaisesMessage(TypeError, msg):
RegexValidator(re.compile('a'), flags=re.IGNORECASE)
def test_max_length_validator_message(self):
v = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
v('djangoproject.com')
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
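# e.g. the loop above generates methods such as test_validate_integer_0 and
# test_validate_integer_raises_error_3 (names follow test_mask in
# create_simple_test_method)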
class TestValidatorEquality(TestCase):
"""
Validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[0-9\.\-]*)://'),
)
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator('', flags=re.IGNORECASE),
RegexValidator(''),
)
self.assertNotEqual(
RegexValidator(''),
RegexValidator('', inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r'^(?:[a-z0-9\.\-]*)://'
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
def test_decimal_equality(self):
self.assertEqual(
DecimalValidator(1, 2),
DecimalValidator(1, 2),
)
self.assertNotEqual(
DecimalValidator(1, 2),
DecimalValidator(1, 1),
)
self.assertNotEqual(
DecimalValidator(1, 2),
DecimalValidator(2, 2),
)
self.assertNotEqual(
DecimalValidator(1, 2),
MinValueValidator(11),
)
def test_file_extension_equality(self):
self.assertEqual(
FileExtensionValidator(),
FileExtensionValidator()
)
self.assertEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['txt'])
)
self.assertEqual(
FileExtensionValidator(['TXT']),
FileExtensionValidator(['txt'])
)
self.assertEqual(
FileExtensionValidator(['TXT', 'png']),
FileExtensionValidator(['txt', 'png'])
)
self.assertEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['txt'], code='invalid_extension')
)
self.assertNotEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['png'])
)
self.assertNotEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['png', 'jpg'])
)
self.assertNotEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['txt'], code='custom_code')
)
self.assertNotEqual(
FileExtensionValidator(['txt']),
FileExtensionValidator(['txt'], message='custom error message')
)
def test_prohibit_null_characters_validator_equality(self):
self.assertEqual(
ProhibitNullCharactersValidator(message='message', code='code'),
ProhibitNullCharactersValidator(message='message', code='code')
)
self.assertEqual(
ProhibitNullCharactersValidator(),
ProhibitNullCharactersValidator()
)
self.assertNotEqual(
ProhibitNullCharactersValidator(message='message1', code='code'),
ProhibitNullCharactersValidator(message='message2', code='code')
)
self.assertNotEqual(
ProhibitNullCharactersValidator(message='message', code='code1'),
ProhibitNullCharactersValidator(message='message', code='code2')
)
| [
"[email protected]"
] | |
51896015a14c6a4d8b309e161612842c2ac88de5 | 89680cfcf0108e32d0c39905e85e1b7c06b5e65c | /lfs/voucher/tests.py | 3dc26fc282cb84cd9a6d1b9934f5651ff6c74618 | [] | no_license | cocox/django-lfs | b87d7bbc7c4e775340ea392ba31afed9359b62b7 | 1de6876f27036c03f34c0cfb739bd134a1c46ae1 | refs/heads/master | 2021-01-16T20:56:53.988027 | 2012-03-02T18:50:11 | 2012-03-30T08:30:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,358 | py | # python imports
import datetime
# django imports
from django.contrib.auth.models import User
from django.contrib.sessions.backends.file import SessionStore
from django.test import TestCase
# lfs imports
import lfs.voucher.utils
from lfs.cart.models import Cart
from lfs.cart.models import CartItem
from lfs.catalog.models import Product
from lfs.tests.utils import RequestFactory
from lfs.tax.models import Tax
from lfs.voucher.models import Voucher
from lfs.voucher.models import VoucherGroup
from lfs.voucher.models import VoucherOptions
from lfs.voucher.settings import ABSOLUTE
from lfs.voucher.settings import PERCENTAGE
class VoucherUtilsTestCase(TestCase):
"""
"""
def test_create_vouchers_1(self):
"""Tests the default voucher options
"""
number = lfs.voucher.utils.create_voucher_number()
self.failUnless(len(number) == 5)
        letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
for letter in number:
self.failIf(letter not in letters)
def test_create_vouchers_2(self):
"""Tests the custom options.
"""
VoucherOptions.objects.create(
number_prefix="DH",
number_suffix="XM",
number_length=4,
number_letters="abcdefghijklmnopqrstuvwxyz",
)
number = lfs.voucher.utils.create_voucher_number()
self.failUnless(len(number) == 8)
letters = "abcdefghijklmnopqrstuvwxyz"
for letter in number[2:-2]:
self.failIf(letter not in letters)
class VoucherTestCase(TestCase):
"""
"""
fixtures = ['lfs_shop.xml']
def setUp(self):
"""
"""
self.request = RequestFactory().get("/")
self.request.session = SessionStore()
self.request.user = User(id=1)
self.vg = VoucherGroup.objects.create(
name="xmas",
creator=self.request.user,
)
self.v1 = Voucher.objects.create(
number="AAAA",
group=self.vg,
creator=self.request.user,
start_date=datetime.date(2009, 12, 1),
end_date=datetime.date(2009, 12, 31),
effective_from=0,
kind_of=ABSOLUTE,
value=10.0,
limit=2,
)
self.p1 = Product.objects.create(name="Product 1", slug="product-1", price=10.0)
self.p2 = Product.objects.create(name="Product 2", slug="product-2", price=100.0)
self.cart = Cart.objects.create()
CartItem.objects.create(cart=self.cart, product=self.p1, amount=1)
CartItem.objects.create(cart=self.cart, product=self.p2, amount=1)
def test_defaults(self):
"""
"""
self.assertEqual(self.v1.number, "AAAA")
self.assertEqual(self.v1.group, self.vg)
self.assertEqual(self.v1.creator, self.request.user)
self.assertEqual(self.v1.start_date, datetime.date(2009, 12, 1),)
self.assertEqual(self.v1.end_date, datetime.date(2009, 12, 31),)
self.assertEqual(self.v1.effective_from, 0.0)
self.assertEqual(self.v1.kind_of, ABSOLUTE)
self.assertEqual(self.v1.active, True)
self.assertEqual(self.v1.used_amount, 0)
self.assertEqual(self.v1.last_used_date, None)
self.assertEqual(self.v1.value, 10.0)
self.assertEqual(self.v1.tax, None)
def test_prices_absolute(self):
"""
"""
# No tax
price_net = self.v1.get_price_net(self.request)
self.assertEqual(price_net, 10)
price_gross = self.v1.get_price_gross(self.request)
self.assertEqual(price_gross, 10)
tax = self.v1.get_tax(self.request)
self.assertEqual(tax, 0.0)
# With tax
self.v1.tax = Tax.objects.create(rate=19.0)
self.v1.save()
price_net = self.v1.get_price_net(self.request)
self.assertEqual("%.2f" % price_net, "%.2f" % 8.4)
price_gross = self.v1.get_price_gross(self.request)
self.assertEqual(price_gross, 10)
tax = self.v1.get_tax(self.request)
self.assertEqual("%.2f" % tax, "%.2f" % 1.6)
def test_prices_percentage(self):
"""
"""
# 10% discount
self.v1.kind_of = PERCENTAGE
self.v1.value = 10.0
self.v1.save()
# No tax
price_gross = self.v1.get_price_gross(self.request, self.cart)
self.assertEqual(price_gross, 11.0)
price_net = self.v1.get_price_net(self.request, self.cart)
self.assertEqual(price_net, 11.0)
tax = self.v1.get_tax(self.request, self.cart)
self.assertEqual(tax, 0.0)
# With tax
# Note: If the voucher is pecentage the tax is taken from the several
# products not from the voucher itself.
tax = Tax.objects.create(rate=19.0)
self.p1.tax = tax
self.p1.save()
self.p2.tax = tax
self.p2.save()
price_gross = self.v1.get_price_gross(self.request, self.cart)
self.assertEqual(price_gross, 11.0)
price_net = self.v1.get_price_net(self.request, self.cart)
self.assertEqual("%.2f" % price_net, "%.2f" % 9.24)
tax = self.v1.get_tax(self.request, self.cart)
self.assertEqual("%.2f" % tax, "%.2f" % 1.76)
def test_kind_of(self):
"""
"""
self.assertEqual(self.v1.kind_of, ABSOLUTE)
self.assertEqual(self.v1.is_absolute(), True)
self.assertEqual(self.v1.is_percentage(), False)
self.v1.kind_of = PERCENTAGE
self.v1.save()
self.assertEqual(self.v1.kind_of, PERCENTAGE)
self.assertEqual(self.v1.is_absolute(), False)
self.assertEqual(self.v1.is_percentage(), True)
def test_mark_as_used(self):
"""
"""
self.assertEqual(self.v1.used_amount, 0)
self.assertEqual(self.v1.last_used_date, None)
self.v1.mark_as_used()
self.assertEqual(self.v1.used_amount, 1)
self.failIf(self.v1.last_used_date is None)
def test_is_effective(self):
"""
"""
current_year = datetime.datetime.now().year
# True
self.v1.start_date = datetime.date(2000, 1, 1)
self.v1.end_date = datetime.date(2999, 12, 31)
self.v1.active = True
self.v1.used_amount = 1
self.v1.effective_from = 0
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], True)
# start / end
self.v1.start_date = datetime.date(current_year, 12, 31)
self.v1.end_date = datetime.date(current_year, 12, 31)
self.v1.active = True
self.v1.used_amount = 1
self.v1.effective_from = 0
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], False)
# effective from
self.v1.start_date = datetime.date(current_year, 1, 1)
self.v1.end_date = datetime.date(current_year, 12, 31)
self.v1.active = True
self.v1.used_amount = 1
self.v1.effective_from = 1000
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], False)
# Used
self.v1.start_date = datetime.date(current_year, 1, 1)
self.v1.end_date = datetime.date(current_year, 12, 31)
self.v1.active = True
self.v1.used_amount = 1
self.v1.effective_from = 0
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], True)
self.v1.mark_as_used()
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], False)
# unlimited amount
self.v1.limit = 0
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], True)
# Not active
self.v1.start_date = datetime.date(current_year, 1, 1)
self.v1.end_date = datetime.date(current_year, 12, 31)
self.v1.active = False
self.v1.used_amount = 1
self.v1.effective_from = 0
self.assertEqual(self.v1.is_effective(self.request, self.cart)[0], False)
class VoucherOptionsCase(TestCase):
"""
"""
def tests_default_values(self):
"""
"""
vo = VoucherOptions.objects.create()
self.assertEqual(vo.number_prefix, u"")
self.assertEqual(vo.number_suffix, u"")
self.assertEqual(vo.number_length, 5)
self.assertEqual(vo.number_letters, u"ABCDEFGHIJKLMNOPQRSTUVWXYZ")
| [
"[email protected]"
] | |
cf418654128f2d45f3d354cb2e1916f862126d88 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dlf/huaweicloudsdkdlf/v1/model/update_connection_response.py | 47bbe563821d6e9a6e1416271cd977115452295f | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 2,447 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateConnectionResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""UpdateConnectionResponse
The model defined in huaweicloud sdk
"""
super(UpdateConnectionResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateConnectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
505ffc46bb8c74cd11dac8ac80d0d705f1fd86f2 | 8cbeff7328c5e315733ca4f76307be407045f178 | /backend/comments_app/forms.py | 825467daedaee00183729d306f0a37e73ab3ff85 | [] | no_license | eflipe/Blog-App-Adv | 2586ce2817d61e8c16492e8d03bef720f55b8a58 | b398cb96bf93757af084fc33fb6055380be2029e | refs/heads/master | 2022-11-20T07:40:21.985089 | 2020-07-17T21:34:24 | 2020-07-17T21:34:24 | 277,913,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django import forms
class CommentForm(forms.Form):
content_type = forms.CharField(widget=forms.HiddenInput)
object_id = forms.IntegerField(widget=forms.HiddenInput)
# parent_id = forms.IntegerField(widget=forms.HiddenInput, required=False)
content = forms.CharField(widget=forms.Textarea)
| [
"[email protected]"
] | |
95a80a4842d0bca8dc17195fdb00391489b67b5b | 3fda3ff2e9334433554b6cf923506f428d9e9366 | /hipeac/api/views/awards.py | 6e5e44adc2596d98b2f46e8d59e69f82d93a5a05 | [
"MIT"
] | permissive | CreativeOthman/hipeac | 12adb61099886a6719dfccfa5ce26fdec8951bf9 | 2ce98da17cac2c6a87ec88df1b7676db4c200607 | refs/heads/master | 2022-07-20T10:06:58.771811 | 2020-05-07T11:39:13 | 2020-05-07T11:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | from django.views.decorators.cache import never_cache
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.viewsets import GenericViewSet
from hipeac.models import Publication, PublicationConference, TechTransferCall, TechTransferApplication
from ..serializers import (
PublicationListSerializer,
PublicationConferenceListSerializer,
TechTransferCallSerializer,
TechTransferApplicationSerializer,
)
class PublicationConferenceViewSet(ListModelMixin, GenericViewSet):
queryset = PublicationConference.objects.all()
pagination_class = None
serializer_class = PublicationConferenceListSerializer
class PaperAwardViewSet(ListModelMixin, GenericViewSet):
pagination_class = None
serializer_class = PublicationListSerializer
def list(self, request, *args, **kwargs):
year = request.query_params.get("year", False)
if not year:
raise PermissionDenied("Please include a `year` query parameter in your request.")
self.queryset = Publication.objects.awarded(year=int(year))
return super().list(request, *args, **kwargs)
class TechTransferViewSet(RetrieveModelMixin, ListModelMixin, GenericViewSet):
pagination_class = None
serializer_class = TechTransferApplicationSerializer
def get_object(self):
return TechTransferCall.objects.active()
@action(detail=False, serializer_class=TechTransferCallSerializer)
@never_cache
def call(self, request, *args, **kwargs):
return RetrieveModelMixin.retrieve(self, request, *args, **kwargs)
def list(self, request, *args, **kwargs):
self.queryset = TechTransferApplication.objects.filter(awarded=True).prefetch_related("call")
return super().list(request, *args, **kwargs)
| [
"[email protected]"
] | |
0f2723868f232a4c15d651578491256f668f1061 | 87d5b21265c381104de8f45aa67842a4adc880eb | /402.2.py | 54dab3ccdb05d73e26403a3f64631462212ab2f8 | [] | no_license | MYMSSENDOG/leetcodes | ac047fe0d951e0946740cb75103fc94aae967166 | 8a52a417a903a0742034161471a084bc1e494d68 | refs/heads/master | 2020-09-23T16:55:08.579319 | 2020-09-03T19:44:26 | 2020-09-03T19:44:26 | 225,543,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Solution:
def removeKdigits(self, num: str, k: int) -> str:
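        # Greedy sketch: repeatedly drop the first digit that is larger than
        # its successor; the "0" branch is a shortcut that, when enough
        # removals remain, jumps to a zero and strips the leading prefix
        # (plus the zeros themselves) in one step.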
while k:
z = num.find("0")
if z != -1 and z + 1 < k:
k -= z
num = num[z:]
num = num.lstrip("0")
else:
for i in range(len(num) - 1):
if num[i] > num[i + 1]:
num = (num[:i] + num[i+1:]).lstrip("0")
break
else:
num = num[:-1]
k-=1
if not num:
return "0"
return num
sol = Solution()
num = "1020304050"
k = 3
print(sol.removeKdigits(num,k)) | [
"[email protected]"
] | |
75e5080dc01075d32c626fad54cd5ce394d32f88 | 7e8c72c099b231078a763ea7da6bba4bd6bac77b | /python/misc_ipata/src/ipa4django/views/json.py | 033b9896dd602912ef01717ff11f20a7a689ccbe | [] | no_license | github188/demodemo | fd910a340d5c5fbf4c8755580db8ab871759290b | 96ed049eb398c4c188a688e9c1bc2fe8cd2dc80b | refs/heads/master | 2021-01-12T17:16:36.199708 | 2012-08-15T14:20:51 | 2012-08-15T14:20:51 | 71,537,068 | 1 | 2 | null | 2016-10-21T06:38:22 | 2016-10-21T06:38:22 | null | UTF-8 | Python | false | false | 5,005 | py |
from django.core.serializers.json import Serializer as BuitlInJsonSerializer
from django.utils.encoding import smart_unicode
import simplejson
from django.db.models.query import QuerySet
from django.db import models
from StringIO import StringIO
import types
from ipa4django.db.raw_sql import SQLRow
class DjangoSerializer(BuitlInJsonSerializer):
def end_object( self, obj ):
if(self.selected_fields ):
for field_name in self.selected_fields:
if self._current.has_key(field_name):continue
try:
o = obj
for attr in field_name.split("."):
o = getattr(o, attr)
if callable(o): o = o()
field_name = field_name.replace(".", "_")
if type(o) not in [types.ListType, types.DictType]:
self._current[field_name] = smart_unicode(o, strings_only=True)
else:
self._current[field_name] = o
except:
field_name = field_name.replace(".", "_")
self._current[field_name] = None
BuitlInJsonSerializer.end_object(self, obj)
def end_serialization(self):
pass
def getvalue(self):
return self.objects
class MixedSerializer(object):
#set django base.Serializer
internal_use_only = False
def __init__(self):
pass
def serialize(self, object, **options):
self.stream = options.get("stream", StringIO())
self.selected_fields = options.get("fields")
obj = self.object_to_serializable(object)
from django.core.serializers.json import DjangoJSONEncoder
simplejson.dump(obj, self.stream, cls=DjangoJSONEncoder, **options)
return self.getvalue()
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def dict_to_serializable(self, o):
for k, v in o.items():
o[k] = self.object_to_serializable(v)
return o
def list_to_serializable(self, obj):
r = []
for o in obj:
r.append(self.object_to_serializable(o))
return r
def sql_row_to_serializable(self, obj):
o = {}
if not hasattr(obj, '__json__'):
for attr in obj.field_names:
o[attr] = getattr(obj, attr)
else:
o = obj.__json__()
return o
def object_to_serializable(self, o):
if isinstance(o, types.DictType):
return self.dict_to_serializable(o)
elif isinstance(o, types.TupleType):
if len(o) <= 0:
return []
elif isinstance(o[0], QuerySet):
return self.queryset_to_serializable(*o)
elif isinstance(o[0], models.Model):
return self.django_model_to_serializable(*o)
else:
return self.list_to_serializable(o)
elif isinstance(o, QuerySet):
return self.queryset_to_serializable(o)
elif isinstance(o, models.Model):
return self.django_model_to_serializable(o)
elif isinstance(o, types.ListType):
return self.list_to_serializable(o)
elif isinstance(o, SQLRow):
return self.sql_row_to_serializable(o)
elif hasattr(o, '__json__') and callable(o.__json__):
return o.__json__()
return o
def queryset_to_serializable(self, o, args={}, *dummy):
def pre(r, param):
if param.has_key("pre"):
for i in r:
for p in param["pre"]:
o = getattr(i, p)
if callable(o): o()
del param["pre"]
return param
def merge_ext_fields(r, param):
ext_fields = []
if param.has_key("ext_fields"):
ext_fields = param['ext_fields']
del param['ext_fields']
if len(o) <= 0: return param
r = o[0]
fields = not param.has_key("fields") and \
[ f.attname for f in r._meta.local_fields ] or \
list(param['fields'])
fields.extend(ext_fields)
param['fields'] = fields
return param
django_ser = DjangoSerializer()
if args: args = pre(o, args)
if args: args = merge_ext_fields(o, args)
return django_ser.serialize(o, **args)
def django_model_to_serializable(self, o, args={}, *dummy):
r = self.queryset_to_serializable([o, ], args)
return r[0]
Serializer = MixedSerializer
| [
"DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb"
] | DeonWu@b18a5524-d64a-0410-9f42-ad3cd61580fb |
52b0ba351f610b254e933851391b0bb6f48a7e7e | 94b603a26fd0942181d4a3da2f9f830034c74ce0 | /adventofcode/2021/11/2.py | c5b6f2302004f3bb44d647548985c08755f1f847 | [
"Unlicense"
] | permissive | jan25/code_sorted | 55336e10bb9ee74610ce1ba62617db979ddf8e26 | 2a5af4f8c95a66ccca3223a261362a17be05728f | refs/heads/master | 2023-01-25T00:50:56.846461 | 2022-12-21T17:40:30 | 2022-12-21T17:40:30 | 34,155,896 | 3 | 0 | Unlicense | 2019-02-07T23:07:10 | 2015-04-18T06:54:30 | C++ | UTF-8 | Python | false | false | 1,095 | py | import fileinput
from collections import deque
grid = [list(map(int, l.strip())) for l in fileinput.input()]
n, m = len(grid), len(grid[0])
def neighs(i, j):
for di in range(-1, 2):
for dj in range(-1, 2):
if di == 0 and dj == 0:
continue
ni, nj = i + di, j + dj
if ni >= 0 and nj >= 0 and ni < n and nj < m:
yield ni, nj
def fill(i, j):
q = deque()
q.append((i, j))
grid[i][j] = 0
while q:
i, j = q.popleft()
for ni, nj in neighs(i, j):
if grid[ni][nj] == 0:
continue
grid[ni][nj] += 1
if grid[ni][nj] > 9:
grid[ni][nj] = 0
q.append((ni, nj))
def step():
for row in grid:
for i in range(len(row)):
row[i] += 1
for i, row in enumerate(grid):
for j, val in enumerate(row):
if val > 9:
fill(i, j)
return sum(row.count(0) for row in grid) == n * m
for s in range(1000):
if step():
print(s + 1)
exit(0)
| [
"[email protected]"
] | |
5a1709cca4ed8430186d843c77dd3b355da9a2e5 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/lllcho_CAPTCHA-breaking/CAPTCHA-breaking-master/test_type4.py | cdfa7e2c847d4dde613763837b52778d127047c8 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,087 | py | # coding:utf-8
# __author__ = 'lllcho'
# __date__ = '2015/8/4'
import cv2
import cPickle
import numpy as np
import codecs
import h5py
import theano
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
def word_simialr_score(s1, s2):
score = 0
for j in range(len(s1)):
if s1[j] == s2[j]:
score += 1
return score
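# e.g. word_simialr_score("abcd", "abed") == 3 (positions 0, 1 and 3 match)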
def words_simmilar_score(word, words):
word_score = {}
for Word in words:
ws = word_simialr_score(word, Word)
if ws not in word_score.keys():
word_score[ws] = [Word]
else:
word_score[ws].append(Word)
return word_score
np.random.seed(123)
model_path = './model/type4_model.d5'
chars = cPickle.load(open('model/chars_type4.pkl', 'rb'))
words = cPickle.load(open('model/words_type4.pkl', 'rb'))
chars.append('A')
f = h5py.File('./model/type4_train_mean_std.h5', 'r')
x_mean = f['x_mean'][:]
x_std = f['x_std'][:][0]
f.close()
model = Sequential()
model.add(Convolution2D(32, 3, 4, 4, border_mode='full', activation='relu'))
model.add(Convolution2D(32, 32, 4, 4, activation='relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 4, 4, border_mode='full', activation='relu'))
model.add(Convolution2D(64, 64, 4, 4, activation='relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64 * 8 * 8, 512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, 1250, activation='softmax'))
model.load_weights(model_path)
model.compile(loss='categorical_crossentropy', optimizer='adagrad')
get_predict_score = theano.function([model.layers[0].input],
model.layers[-1].get_output(train=False),
allow_input_downcast=True)
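# get_predict_score maps a batch of images directly to the softmax layer's
# test-time output, so per-character probabilities are available alongside
# the hard labels returned by predict_classes.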
comp = 'type4_test1'
img_dir = './image/' + comp + '/'
fcsv = codecs.open("result/" + comp + '.csv', 'w', 'utf-8')
# for nb_img in range(1, 20001):
# name=comp+'_'+str(nb_img)+'.png'
import os
names = os.listdir(img_dir)
for name in names:
print name
imgname = img_dir + name
img = cv2.imread(imgname, cv2.IMREAD_COLOR)
im = 255 - img[4:-4, :, :]
t = im.shape[1] / 4.0
dd = 5
bb = np.zeros((im.shape[0], dd, 3), dtype=np.uint8)
    im1 = im[:, 0:int(np.floor(t)) + dd]
    im2 = im[:, int(np.floor(t)) - dd:int(np.floor(2 * t)) + dd]
    im3 = im[:, int(np.floor(2 * t)) - dd:int(np.floor(3 * t)) + dd]
    im4 = im[:, int(np.floor(3 * t)) - dd:]  # float slice indices fail on modern numpy
imgs = np.zeros((4, 3, 32, 32))
imgs[0, :] = cv2.resize(np.concatenate((bb, im1), axis=1), (32, 32)).transpose()
imgs[1, :] = cv2.resize(im2, (32, 32)).transpose()
imgs[2, :] = cv2.resize(im3, (32, 32)).transpose()
imgs[3, :] = cv2.resize(np.concatenate((im4, bb), axis=1), (32, 32)).transpose()
imgs.astype(np.float32)
imgs = imgs - x_mean
imgs = imgs / x_std
classes = model.predict_classes(imgs, verbose=0)
model_predict_score = get_predict_score(imgs)
result = []
for c in classes:
result.append(chars[c])
word = ''.join(result)
old_word = word
if word not in words:
word_score = words_simmilar_score(word, words)
max_score = max(word_score.keys())
if max_score > 0:
candidate_words = word_score[max_score]
predict_similar_score = {}
for candidate_word in candidate_words:
diff_chars = {}
for j in range(len(candidate_word)):
if old_word[j] != candidate_word[j]:
diff_chars[j] = candidate_word[j]
diff_chars_similar_score = 0
for key, iterm in diff_chars.items():
diff_chars_similar_score += model_predict_score[key, chars.index(iterm)]
predict_similar_score[candidate_word] = diff_chars_similar_score
word = max(predict_similar_score.items(), key=lambda x: x[1])[0]
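            # Among the dictionary words tied on character matches, keep the
            # one whose differing characters earn the highest summed softmax
            # probability from the network.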
print word
fcsv.write(name + ',' + word + '\n')
fcsv.close()
| [
"[email protected]"
] | |
1d0c897dba3bb6a7161f8c54cbce22b16c320822 | 8b6cd902deb20812fba07f1bd51a4460d22adc03 | /back-end/.history/djreact/djreact/settings_20191221114946.py | d73de7bc2359fc8e027509c0dad39f0a615e9d53 | [] | no_license | vishaldenzil/Django-react- | f3a49d141e0b6882685b7eaa4dc43c84857f335a | 35b6d41f6dacb3bddcf7858aa4dc0d2fe039ff98 | refs/heads/master | 2022-11-08T09:27:02.938053 | 2020-05-29T04:53:52 | 2020-05-29T04:53:52 | 267,768,028 | 0 | 1 | null | 2022-10-15T14:08:30 | 2020-05-29T04:52:20 | Python | UTF-8 | Python | false | false | 3,999 | py | """
Django settings for djreact project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n=0@g2ye2)0n0igcw(-h$b^+4g5_bby2s1q!%dnyr*y*r7@5aq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'corsheaders',
'rest_auth.registration',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'articles',
'users'
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'djreact.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CORS_ORIGIN_ALLOW_ALL = True
WSGI_APPLICATION = 'djreact.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': "django.db.backends.postgresql_psycopg2",
'NAME': "testData",
'USER': "postgres",
'PASSWORD': "root",
'HOST': "localhost",
}
}
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_REQUIRED = False
AUTH_USER_MODEL = 'users.User'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.AllowAny'
]
}
| [
"[email protected]"
] | |
cfb4bb724fb35de375d1710630dfc8212de919b6 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/fc168073da2740468bd7ae97f4ffe797.py | 35f45f8bdaeee0be8d73f3dfcb12f58b7ce0e6b3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 127 | py | from string import maketrans
translation = maketrans('GCTA', 'CGAU')
def to_rna(dna):
return dna.translate(translation)
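# e.g. to_rna("ACGT") == "UGCA"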
| [
"[email protected]"
] | |
077af677258eb500890b365e35a67a1aac4d0416 | 6e964d46b8fab9bccbd199ea7ade41297282b0a7 | /test/PySrc/tests/test_code_tracer_loops.py | b466d5a43d643b812be0eb2fb5fa47d94acf4e24 | [
"MIT"
] | permissive | donkirkby/live-py-plugin | 1a4cb87a796983245094d7c97c3e72f3cea0c540 | 165b447cc1288c94f24f1e660e0c45a6ef476826 | refs/heads/master | 2023-08-29T15:14:37.585327 | 2023-07-23T21:12:19 | 2023-07-23T21:12:19 | 4,332,096 | 257 | 59 | MIT | 2023-09-09T18:18:40 | 2012-05-15T04:41:18 | Python | UTF-8 | Python | false | false | 4,499 | py | from space_tracer.main import TraceRunner
def test_loop():
code = """\
i = 1
for j in range(3):
i += j
"""
expected_report = """\
i = 1
j = 0 | j = 1 | j = 2
i = 1 | i = 2 | i = 4"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_loop_target_list():
code = """\
for a,b in [(1,2)]:
c = a + b
"""
expected_report = """\
a = 1 | b = 2
c = 3"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_starred_target_list():
code = """\
words = ['foo', 'bar']
for (head, *tail) in words:
print(head, tail)
"""
expected_report = """\
words = ['foo', 'bar']
head = 'f' | tail = ['o', 'o'] | head = 'b' | tail = ['a', 'r']
print("f ['o', 'o']") | print("b ['a', 'r']")"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_target_list_attribute():
code = """\
class Foo:
def do_foo(self, x):
for self.i in range(x):
print(self.i)
foo = Foo()
foo.do_foo(3)
"""
expected_report = """\
x = 3
self.i = 0 | self.i = 1 | self.i = 2
print('0') | print('1') | print('2')
"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_loop_target_list_attribute_complex():
code = """\
class Foo:
def do_foo(self, x):
self.state = [None]
for self.state[0] in range(x):
print(self.state)
foo = Foo()
foo.do_foo(3)
"""
expected_report = """\
x = 3
self.state = [None]
| |
print('[0]') | print('[1]') | print('[2]')
"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_nested_loop():
code = """\
n = 0
for i in range(2):
n += i
for j in range(3):
n += j
"""
expected_report = """\
n = 0
i = 0 | i = 1
n = 0 | n = 4
j = 0 | j = 1 | j = 2 | j = 0 | j = 1 | j = 2
n = 0 | n = 1 | n = 3 | n = 4 | n = 5 | n = 7"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_for_else():
code = """\
i = 1
for j in range(3):
i += j
else:
i *= 10
"""
expected_report = """\
i = 1
j = 0 | j = 1 | j = 2
i = 1 | i = 2 | i = 4
i = 40"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_while_else():
code = """\
i = 0
while i < 2:
i += 1
else:
i *= 10
"""
expected_report = """\
i = 0
|
i = 1 | i = 2
i = 20"""
report = TraceRunner().trace_code(code)
assert report == expected_report
def test_loop_conditional():
code = """\
for i in range(3):
if i == 1:
c = 5
c = 2
"""
expected_report = """\
i = 0 | i = 1 | i = 2
| |
| c = 5 |
c = 2"""
tracer = TraceRunner()
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_by_count():
code = """\
n = 0
while True:
n += 1
"""
expected_report = """\
n = 0
| |
n = 1 | n = 2 | RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 8
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_by_width():
code = """\
n = 0
while True:
n += 1
"""
expected_report = """\
n = 0
| |
n = 1 | n = 2 | RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.max_width = 20
report = tracer.trace_code(code)
assert report == expected_report
def test_infinite_loop_pass():
code = """\
while True:
pass
"""
expected_report = """\
RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 3
report = tracer.trace_code(code)
assert report in (expected_report + '\n', '\n' + expected_report)
def test_infinite_loop_pass_in_function():
code = """\
def foo():
while True:
pass
foo()
"""
expected_report1 = """\
RuntimeError: live coding message limit exceeded
RuntimeError: live coding message limit exceeded"""
expected_report2 = """\
RuntimeError: live coding message limit exceeded
RuntimeError: live coding message limit exceeded"""
tracer = TraceRunner()
tracer.message_limit = 3
report = tracer.trace_code(code)
assert report in (expected_report1, expected_report2)
| [
"[email protected]"
] | |
ef3c7f577c74802ed75c1fe43a00148d5752b617 | 27acd9eeb0d2b9b6326cc0477e7dbb84341e265c | /test/vraag4/src/isbn/211.py | f12f4415f953aa0120695d76f38edd57b5552c69 | [] | no_license | VerstraeteBert/algos-ds | e0fe35bc3c5b7d8276c07250f56d3719ecc617de | d9215f11cdfa1a12a3b19ade3b95fa73848a636c | refs/heads/master | 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | def som_reeks(reeks):
cijfers = [int(teken) for teken in reeks]
return sum(cijfers)
def isISBN_13(isbn):
if not type(isbn) is str:
return False
if len(isbn) != 13:
return False
if not isbn.isdigit():
return False
if isbn.find('978') != 0 and isbn.find('979') != 0:
return False
som_oneven = som_reeks(isbn[:12:2])
som_even = som_reeks(isbn[1::2])
controle = (10 - (som_oneven + 3 * som_even)%10) %10
return int(isbn[12]) == controle
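    # Worked example: for "9780306406157" the odd positions sum to 27 and the
    # even positions to 22, so 27 + 3*22 = 93 and the check digit is
    # (10 - 93 % 10) % 10 == 7, matching the final digit.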
def overzicht(codes):
land_codes = {"0" : "Engelstalige landen", "1" : "Engelstalige landen", "2" : "Franstalige landen", \
"3": "Duitstalige landen", "4": "Japan", "5": "Russischtalige landen", "7": "China", "6": "Overige landen", \
"8": "Overige landen", "9": "Overige landen"}
foutief = "Fouten"
overzicht = {}
for landtype in land_codes.values():
overzicht[landtype] = 0
overzicht[foutief] = 0
for code in codes:
if isISBN_13(code):
overzicht[land_codes[code[3]]] += 1
else:
overzicht[foutief] += 1
for landtype, aantal in overzicht.items():
print("{}: {}".format(landtype, aantal))
| [
"[email protected]"
] | |
c636f690bc9e4514b26c33b7ed2bd921baee0ad3 | aa28417be8935d6fa369fcb526174f9e1e30479a | /爬虫实战/动态渲染/14.选项卡.py | f4109af394e44c7c95bd96a034c9f7244d153be4 | [] | no_license | YYN117/Demo | d6fca95ed8a1a433ef06f1f3fc2e768414e863cb | 40690040a7422fd5d8f03a0d68f20f1be5d4a836 | refs/heads/master | 2020-04-14T12:29:30.129709 | 2019-01-02T13:31:10 | 2019-01-02T13:31:10 | 163,841,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from selenium import webdriver
import time
browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.execute_script('window.open()')
print(browser.window_handles)
browser.switch_to_window(browser.window_handles[1])
browser.get('https://www.taobao.com')
time.sleep(2)
browser.switch_to_window(browser.window_handles[0])
browser.get('https://python.org')
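# Note: switch_to_window() is deprecated in newer Selenium releases;
# browser.switch_to.window(handle) is the current equivalent.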
| [
"[email protected]"
] | |
5d4c1a6a2946425a8bb1b574e9a067700ddd6bac | b8ec95cba7c239d8a72e0ce8a88ddb37bb938770 | /learning/MB_lambda_IAMB_Lner.py | 0b4858fda13a0da8311cd1324519eba9af101a04 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | artiste-qb-net/quantum-fog | a7b95a5177594138aaf909c23b69053bc685672a | 5b4a3055ea14c2ee9c80c339f759fe2b9c8c51e2 | refs/heads/master | 2023-02-19T04:41:35.908393 | 2023-02-13T21:37:05 | 2023-02-13T21:37:05 | 47,056,346 | 95 | 34 | NOASSERTION | 2021-03-10T00:52:36 | 2015-11-29T08:49:47 | Jupyter Notebook | UTF-8 | Python | false | false | 5,542 | py | from learning.MB_BasedLner import *
class MB_lambda_IAMB_Lner(MB_BasedLner):
"""
The MB_lambda_IAMB_Lner (lambda Incremental Association Markov Blanket
Learner) is a subclass of MB_BasedLner. See docstring for MB_BasedLner
for more info about this type of algo.
lambda refers to the fact tha it uses an extra parameter lambda between
zero and one.
See Ref. 1 below for pseudo code on which this class is based.
References
----------
1. An Improved IAMB Algorithm for Markov Blanket Discovery, by Yishi
Zhang, Zigang Zhang, Kaijun Liu, and Gangyi Qian (JCP 2010 Vol.5(11))
Attributes
----------
lam : float
extra parameter between 0 and 1. The closer it is to 1, the fewer
elements are added to MB
vtx_to_MB : dict[str, list[str]]
A dictionary mapping each vertex to a list of the vertices in its
Markov Blanket. (The MB of a node consists of its parents, children
and children's parents, aka spouses).
"""
def __init__(self, states_df, alpha, verbose=False,
vtx_to_states=None, lam=.5, learn_later=False):
"""
Constructor
Parameters
----------
states_df : pandas.DataFrame
alpha : float
verbose : bool
vtx_to_states : dict[str, list[str]]
A dictionary mapping each node name to a list of its state names.
This information will be stored in self.bnet. If
vtx_to_states=None, constructor will learn vtx_to_states
from states_df
lam : float
learn_later : bool
False if you want to call the function learn_struc() inside the
constructor. True if not.
Returns
-------
"""
self.lam = lam
MB_BasedLner.__init__(self, states_df, alpha, verbose,
vtx_to_states, learn_later)
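    # The pair test in the growing phase adds a second candidate y2 only when
    # H(t|MB,y2) < (1-lam)*H(t|MB) + lam*H(t|MB,y1), so a lam closer to 1
    # makes the double-add stricter and fewer variables enter the blanket.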
def find_MB(self, vtx=None):
"""
This function finds the MB of vtx and stores it inside vtx_to_MB[
vtx]. If vtx=None, then it will find the MB of all the vertices of
the graph.
Parameters
----------
vtx : str
Returns
-------
None
"""
if self.verbose:
print('alpha=', self.alpha)
print('lambda=', self.lam)
vertices = self.states_df.columns
if vtx is None:
tar_list = vertices
else:
tar_list = [vtx]
self.vtx_to_MB = {}
def ci__(a, b): # H(a | b)
return DataEntropy.cond_info(self.states_df, a, b)
def MB(a):
return self.vtx_to_MB[a]
for tar in tar_list:
self.vtx_to_MB[tar] = []
# growing phase
if self.verbose:
print('\n****begin growing phase')
growing = True
while growing:
growing = False
ht_y1_min, hty1_min, y1_min = None, None, None
ht_y2_min, hty2_min, y2_min = None, None, None
ht = ci__([tar], MB(tar)) # H(tar | MB(tar))
y1_set = (set(vertices) - {tar}) - set(MB(tar))
for y1 in y1_set:
# H(tar | MB(tar), y1)
ht_y1 = ci__([tar], list(set(MB(tar)) | {y1}))
# H( tar: y1 |MB(tar))
hty1 = ht - ht_y1
if ht_y1_min is None or ht_y1 < ht_y1_min:
ht_y1_min = ht_y1
hty1_min = hty1
y1_min = y1
y2_set = y1_set - {y1_min}
for y2 in y2_set:
# H(tar | MB(tar), y2)
ht_y2 = ci__([tar], list(set(MB(tar)) | {y2}))
# H( tar: y2 |MB(tar))
hty2 = ht - ht_y2
if ht_y2_min is None or ht_y2 < ht_y2_min:
ht_y2_min = ht_y2
hty2_min = hty2
y2_min = y2
if y1_min is not None and hty1_min > self.alpha:
if y2_min is not None and hty2_min > self.alpha and\
ht_y2_min - self.lam*ht_y1_min < (1-self.lam)*ht:
self.vtx_to_MB[tar].append(y1_min)
self.vtx_to_MB[tar].append(y2_min)
growing = True
elif y1_min is not None:
self.vtx_to_MB[tar].append(y1_min)
growing = True
if self.verbose:
print('target, MB(tar) aft-growing, bef-shrinking:')
print(tar, self.vtx_to_MB[tar])
print('end growing phase')
print('****begin shrinking phase')
# shrinking phase
shrinking = True
while shrinking:
shrinking = False
for y in MB(tar):
cmi = DataEntropy.cond_mut_info(self.states_df,
[y], [tar], list(set(MB(tar)) - {y}))
if cmi < self.alpha:
self.vtx_to_MB[tar].remove(y)
shrinking = True
if self.verbose:
print('target, MB(tar) aft-shrinking:')
print(tar, self.vtx_to_MB[tar])
print('end shrinking phase')
if __name__ == "__main__":
def main():
MB_BasedLner.MB_lner_test(MB_lambda_IAMB_Lner, verbose=True)
main()
| [
"[email protected]"
] | |
33858cfb2091be346894cf7ae45113989b2a2dd4 | da29f1f5b4459fbfec968bb694bedb9586f87b14 | /new_algs/Sequence+algorithms/Selection+algorithm/pull_results2.py | 1e464fe8c9b22f81a07ab0908c52d802e137226d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | coolsnake/JupyterNotebook | 547806a45a663f090f313dc3e70f779ad9b213c0 | 20d8df6172906337f81583dabb841d66b8f31857 | refs/heads/master | 2023-01-13T18:55:38.615312 | 2020-11-17T22:55:12 | 2020-11-17T22:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,501 | py | """
Pull results for genomic selection
"""
import os, sys
import pandas as pd
import numpy as np
import re
wkdir = os.path.abspath(sys.argv[0])[:-16]
print(wkdir)
use = ['02_PC', '03_rrBLUP', '04_BayesB','05_BayesA','06_BRR',"07_BL"]
index = []
accuracy = []
stdev = []
sterr = []
notes = []
for j in os.listdir(wkdir):
if j.startswith("."):
pass
#elif re.search('[a-z].*_.*_.*', j): ## Format of GS dataset directories
elif re.search('rice_.*_.*', j):
print("Pulling scores for %s" % j)
for i in os.listdir(wkdir + '/' + j):
if i in use:
for k in os.listdir(wkdir + '/' + j +'/' + i):
if k.startswith('trait_'):
wkdir2 = wkdir + j +'/' + i + '/' + k + '/'
method = i[3:]
# Make 3 level index: ID, Trait, Method
index.append((j,k[6:],method))
yhat_all = pd.read_csv(wkdir2 + 'output/cv_1.csv', header=0, names = ['y', 'cv_1'])
for m in os.listdir(wkdir2 + 'output/'):
if m == 'cv_1.csv':
pass
elif m.startswith('cv_'):
number = m.split('.')[0][3:]
temp = pd.read_csv(wkdir2 + 'output/' + m, header=0, names = ['y', 'cv_' + number])
yhat_all = pd.concat([yhat_all, temp['cv_' + number]], axis = 1)
print(yhat_all)
yhat_all['yhat_mean'] = (yhat_all.filter(like='cv_')).mean(axis=1)
yhat_all['yhat_sd'] = (yhat_all.filter(like='cv_')).std(axis=1)
yhat_all.to_csv(wkdir2 + 'output/yhat_all.csv', sep=',', index=False)
quit()
elif i == '08_ML':
wkdir2 = wkdir + j +'/' + i + '/'
for l in open(wkdir2 + 'RESULTS_reg.txt').readlines():
if l.startswith('DateTime'):
pass
else:
line = l.strip().split('\t')
k = line[3]
method = line[4]
# Make 3 level index: ID, Trait, Method
index.append((j,k,method))
# Calculate acc and stdev from each run (100 cv mixes)
accuracy = np.append(accuracy, line[18])
stdev = np.append(stdev, line[19])
sterr = np.append(sterr, line[20])
notes = np.append(notes, 'na')
elif i == '09_MLP':
method = 'MLP'
wkdir3 = wkdir + j +'/' + i + '/'
mlp = pd.read_table(wkdir3 + 'RESULTS.txt', sep='\t', header=0)
mlp_mean = mlp.groupby(['Trait','Archit','ActFun','LearnRate','Beta']).agg({'Accuracy': ['mean','std']}).reset_index()
mlp_mean.columns = list(map(''.join, mlp_mean.columns.values))
mlp_mean = mlp_mean.sort_values('Accuracymean', ascending=False).drop_duplicates(['Trait'])
for i, row in mlp_mean.iterrows():
index.append((j,row['Trait'],method))
accuracy = np.append(accuracy, row['Accuracymean'])
stdev = np.append(stdev, row['Accuracystd'])
sterr = np.append(sterr, 'na')
notes = np.append(notes, row['ActFun'] + '_' + row['Archit'] + '_' + str(row['LearnRate']) + '_' + str(row['Beta']))
pd_index = pd.MultiIndex.from_tuples(index, names = ['ID','Trait','Method'])
data_array = np.column_stack((np.array(accuracy), np.array(stdev), np.array(sterr), np.array(notes)))
df_acc = pd.DataFrame(data_array, index = pd_index, columns = ('Ac_mean', 'Ac_sd', 'Ac_se', 'Notes'))
print(df_acc.head(20))
df_acc.to_csv('RESULTS.csv', sep=',')
| [
"[email protected]"
] | |
b3d7e17241a103dbbfcbecd1e99ab102a74ebe22 | 3001520af0cb70fb658b081d87181ad62a8354c6 | /crawler/dedupdata.py | 940356bed8c2639e25b146b45fe048a7d7a24f9f | [] | no_license | praveen97uma/MalwareWebMap | 508a5c7a09052e74b79b4e7668cade869131c6ef | d0e00b52e8f71e23efd4bc262b1065ca0460c922 | refs/heads/master | 2020-05-17T00:13:12.186252 | 2014-01-27T14:04:38 | 2014-01-27T14:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | import utils
import db_utils
EXPORT_EDGES_FILE = "graph.edges.csv"
EXPORT_NODES_FILE = "graph.nodes.csv"
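# Both exports are semicolon-delimited CSVs (nodes: "Id;Label", edges:
# "Source;Target"), a layout that graph tools such as Gephi import directly.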
def export_graph_data():
data_file = open(EXPORT_EDGES_FILE, "w")
db = db_utils.getDBInstance()
nodes = set([])
data_file.write("Source;Target\n")
edges = set([])
pages = db.webpages.find()
for page in pages:
ilinks = page["incoming_links"]
for link in ilinks:
if link.startswith("javascript"):
continue
if page["url"].startswith("javascript"):
continue
link = link.replace(",", "")
nodes.add(utils.domainOf(link))
page["url"] = page["url"].replace(",", "")
nodes.add(utils.domainOf(page["url"]))
edges.add((utils.domainOf(link), utils.domainOf(page["url"])))
#data_file.write(("%s\t%s\n"%(link, page["url"])))
nodes_file = open(EXPORT_NODES_FILE, "w")
nodes_file.write("Id;Label\n")
for node in nodes:
nodes_file.write("%s;%s\n"%(node, utils.domainOf(node)))
nodes_file.close()
for edge in edges:
data_file.write("%s;%s\n"%(edge[0], edge[1]))
data_file.close()
if __name__ == '__main__':
export_graph_data()
| [
"[email protected]"
] | |
d2e01a8372d6b5b3369a05a5af23b34c574bac70 | b96ed10d6247e22d4fa1d28bc3314bc319d3109c | /LessonSample/mysite/polls/views.py | 8520924cec34b22ae4e3758c28bd575f78e22b7f | [] | no_license | 13555785106/PythonPPT-01 | ac1b22b9b1851f2b3ea6e4ab0a100e5f6896ee8c | 40e5883f248cb342f3a7fc7ad12ba02ebde4c619 | refs/heads/master | 2020-04-26T16:49:59.675964 | 2019-03-04T07:16:21 | 2019-03-04T07:16:21 | 157,095,747 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | # -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "你还没有做出一个选择。",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"[email protected]"
] | |
0e4fa25e30758201ddcc318774962240fd3ed97a | 84abce44bd0278fa99e9556168290675f399834c | /EcalAlCaRecoProducers/config/reRecoTags/Cal_Nov2015_newGT_0T_v5.py | 92aba913315372ce6b1b26f8e76f775b6c3957e6 | [] | no_license | ECALELFS/ECALELF | 7c304c6b544b0f22a4b62cf942f47fa8b58abef0 | 62a046cdf59badfcb6281a72923a0f38fd55e183 | refs/heads/master | 2021-01-23T13:36:31.574985 | 2017-06-22T12:26:28 | 2017-06-22T12:26:28 | 10,385,620 | 1 | 9 | null | 2017-06-30T12:59:05 | 2013-05-30T15:18:55 | C++ | UTF-8 | Python | false | false | 1,551 | py | import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBSetup_cfi import *
#candidate used for 0T rereco without updating ECAL conditions
# + scaled IC with Bon/Boff corrections and scaled ADCtoGeV
RerecoGlobalTag = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
globaltag = cms.string('74X_dataRun2_2015EOY_Candidate_2016_01_17_21_50_54'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("EcalIntercalibConstantsRcd"),
tag = cms.string("EcalIntercalibConstants_Run1_Run2_V03_offline"),
#connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/RunII-IC/Cal_Nov2015/combinations/tags/db/EcalIntercalibConstants_2015_Boff_EEonly.db"),
),
cms.PSet(record = cms.string("EcalADCToGeVConstantRcd"),
tag = cms.string("EcalADCToGeVConstant_Run1_Run2_V03_offline"),
#connect = cms.untracked.string("sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_ECALCALIB/RunII-IC/Cal_Nov2015/ADCtoGeV/tags/db/EcalADCToGeVConstant_2015_Boff_EEonly.db"),
),
)
)
| [
"[email protected]"
] | |
59adf9bfccc9f07873dac411201e44a2bf369e63 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190120130744.py | 09a1a974e1b1fe1446c8124560c24895dabfee93 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,298 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc, frame)  # tuple form works under both Python 2 and 3
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
def __init__(self,MainWindow):
super(Cam_Item_Layout,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
self.Cam_Item_Num = 0
self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
self.Scroll_Offset = 0
self.Attr = {}
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Name"] = ""
        # Note: action buttons
self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)
self.Batch_Position_BTN.clicked.connect(self.Batch_Position_Fn)
self.Batch_Constraint_BTN.clicked.connect(self.Batch_Constraint_Fn)
        # Note: splitter setup
splitter = QSplitter(Qt.Vertical)
splitter.setHandleWidth(5)
splitter.addWidget(self.Cam_Item_Scroll)
splitter.addWidget(self.Button_Layout)
num = len(self.VBox_Widget.children())
        self.VBox_Widget.layout().insertWidget(num, splitter)  # insertWidget needs an index
def Batch_Constraint_Fn(self):
Cam_Grp = self.Attr["Add_CamGrp_LE"]
Loc = self.Attr["Add_Loc_LE"]
if not cmds.objExists(Cam_Grp): return
if not cmds.objExists(Loc): return
cmds.select(cl=1)
cmds.select(Loc,add=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Cam_Loc = child.Attr["Add_Loc_LE"]
if not cmds.objExists(Cam_Loc): continue
cmds.select(Cam_Loc,add=1)
child.Cam_Con_CB.setEnabled(True)
cmds.select(Cam_Grp,add=1)
orCns = cmds.orientConstraint(Loc,Cam_Grp,mo=0)[0]
pnCns = cmds.pointConstraint(mo=0)[0]
Attr_List = cmds.listAttr(pnCns,k=1,string="*W*")
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),1)
for i,child in enumerate(ChildrenList):
if i != 0:
cmds.setAttr("%s.%s" % (pnCns,Attr_List[i+1]),0)
try :
child.Cam_Con_CB.stateChanged.disconnect()
except:
pass
child.Cam_Con_CB.stateChanged.connect(partial(self.Cam_Con_CB_Fn,child,pnCns,Attr_List,i))
self.Con_Keyframe_BTN.setEnabled(True)
self.Con_Keyframe_BTN.clicked.connect(partial(self.Con_Keyframe_Fn,pnCns,Attr_List))
def Cam_Con_CB_Fn(self,CB,pnCns,Attr_List,num,state):
"""
Cam_Con_CB_Fn - CheckBox Signal
        # Note: checkbox slot keeping the per-camera checkboxes exclusive
        Arguments:
            CB {Cam_Item} -- the camera item widget owning the checkbox
            pnCns {pointConstraint} -- the point constraint node being driven
            Attr_List {Attribute List} -- the constraint's weight attributes
            num {number} -- index of this item among the camera items
            state {CheckBox state} -- new state of the checkbox
"""
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if child != CB:
child.Cam_Con_CB.blockSignals(True)
child.Cam_Con_CB.setChecked(False)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[i+1]),0)
if state == 2:
CB.Cam_Con_CB.setChecked(True)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[num+1]),1)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),0)
else:
CB.Cam_Con_CB.setChecked(False)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[num+1]),0)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),1)
for i,child in enumerate(ChildrenList):
if i != 0:
if child != CB:
child.Cam_Con_CB.blockSignals(False)
def Con_Keyframe_Fn(self,pnCns,Attr_List):
for i,Attr in enumerate(Attr_List):
if i != 0:
cmds.setKeyframe ("%s.%s" % (pnCns,Attr))
def Batch_Position_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Base_Curve = self.Attr["Add_Crv_LE"]
CamGrp = child.Attr["Add_CamGrp_LE"]
if not cmds.objExists(Base_Curve): continue
if not cmds.objExists(CamGrp): continue
cmds.setAttr("%s.tx" % CamGrp,0)
cmds.setAttr("%s.ty" % CamGrp,0)
cmds.setAttr("%s.tz" % CamGrp,0)
cmds.setAttr("%s.rx" % CamGrp,0)
cmds.setAttr("%s.ry" % CamGrp,0)
cmds.setAttr("%s.rz" % CamGrp,0)
cmds.xform( CamGrp,cp=1 )
cmds.delete(cmds.parentConstraint( Base_Curve,CamGrp ))
Target_Curve = child.Attr["Add_Crv_LE"]
if not cmds.objExists(Target_Curve): continue
cmds.xform( Target_Curve,cp=1 )
                # Note: unlock the curve's translate/rotate channels
cmds.setAttr("%s.tx" % Target_Curve,lock=False)
cmds.setAttr("%s.ty" % Target_Curve,lock=False)
cmds.setAttr("%s.tz" % Target_Curve,lock=False)
cmds.setAttr("%s.rx" % Target_Curve,lock=False)
cmds.setAttr("%s.ry" % Target_Curve,lock=False)
cmds.setAttr("%s.rz" % Target_Curve,lock=False)
cmds.delete(cmds.parentConstraint( Base_Curve,Target_Curve ))
cmds.headsUpMessage(u"位置匹配完成")
def Batch_Keyframe_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Path = child.Attr["Add_Motion_Path_LE"]
if cmds.objExists(Path):
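                    # Shift every key on the motion path so its first key
                    # lands on frame 0 (offset = time of the first key).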
offset = cmds.keyframe(Path,q=1)[0]
cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)
def Select_Path_Fn(self):
cmds.select(cl=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)
def Item_Add_Fn(self):
self.Cam_Item_Num += 1
return Cam_Item(self,self.MainWindow)
def Item_Clear_Fn(self):
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Name"] = ""
for i,child in enumerate(self.Item_Layout.children()):
if i != 0:
child.deleteLater()
def Scroll_Fn(self):
self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
def __init__(self,parent,MainWindow):
super(Cam_Item,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
# self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
        # Note: initialise the creation parameters
TotalCount = len(parent.Item_Layout.children())
parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
self.setObjectName("Cam_Item_%s" % TotalCount)
self.Num = TotalCount
self.Attr = {}
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = 0
self.Attr["End_Time_SB"] = 0
self.MainWindow.Save_Json_Fun()
def Cam_Del_BTN_Fn(self):
self.deleteLater()
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if i > self.Num:
                    # Note: fix up the child's index after a deletion
child.Num -= 1
child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
child.setObjectName("Cam_Item_%s" % (i-1))
else:
child.Cam_Num_Label.setText(u"镜头%s" % i)
child.setObjectName("Cam_Item_%s" % i)
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = ""
self.Attr["End_Time_SB"] = ""
self.MainWindow.Save_Json_Fun()
| [
"[email protected]"
] | |
8f36eab7e9317370fbed4d199e089a935148128b | 6fdf0ad8a70cfe666ab1cae331ddf751178b0f34 | /Python/Arrays/problem_1051. Height Checker.py | 679788265b355852106eb65beff9440a8fd1ef24 | [] | no_license | vigneshthiagarajan/Leetcode_prep | 3aa46f90af084d6100cd61af28767e811c848d4e | 1f087564e9b68f85d9974c3643538b8370ba82e3 | refs/heads/main | 2023-06-19T06:47:00.388621 | 2021-07-12T04:54:32 | 2021-07-12T04:54:32 | 356,921,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | class Solution:
def heightChecker(self, heights: List[int]) -> int:
num_indices_mismatch = 0
expected_heights = sorted(heights)
for i in range(len(heights)):
if heights[i] != expected_heights[i]:
num_indices_mismatch += 1
return num_indices_mismatch
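# Example: heights=[1,1,4,2,1,3] -> sorted [1,1,1,2,3,4]; indices 2, 4 and 5
# differ, so the answer is 3.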
| [
"[email protected]"
] | |
48ab5771787957eeb1c9c69b7eb86c08eda031ee | 9378f00e13fa41b41cad1bb4dc733bcb46f259ef | /python/scripts/classifiers/SVM.py | 05f35e4215ce796d9c424c42db6bcd0505515469 | [] | no_license | ghpaetzold/iconic-internship | 0ac55c6e5289b569af2eb627a9e7025e0b9e9914 | 463f562dd1f3bdcb85954604ef2364ee5dcbef8e | refs/heads/master | 2021-01-10T19:12:19.713265 | 2015-07-21T15:46:02 | 2015-07-21T15:46:02 | 37,980,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from sklearn import svm
import sys, numpy, pickle
from sklearn.preprocessing import normalize
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
def readXY(xf, yf):
X = []
for line in open(xf):
values = [float(v) for v in line.strip().split('\t')]
X.append(values)
Y = numpy.array([float(l.strip()) for l in open(yf)])
return X, Y
def writeLabels(labels, file):
c = -1
for l in labels:
c += 1
file.write(str(l) + '\n')
file.close()
def writeModel(classifier, model_file):
pickle.dump(classifier, open(model_file, "wb"))
C = float(sys.argv[1])
kernel = sys.argv[2]
degree = int(sys.argv[3])
gamma = float(sys.argv[4])
coef0 = float(sys.argv[5])
Xtr, Ytr = readXY(sys.argv[6], sys.argv[7])
Xte, Yte = readXY(sys.argv[8], sys.argv[9])
Xtr = normalize(Xtr, axis=0)
Xte = normalize(Xte, axis=0)
k = sys.argv[10]
if k!='all':
k = int(k)
selector = SelectKBest(f_classif, k=k).fit(Xtr, Ytr)
Xtr = selector.transform(Xtr)
Xte = selector.transform(Xte)
o = open(sys.argv[11], 'w')
model_file = sys.argv[12]
classifier = svm.SVC(C=C, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0)
classifier.fit(Xtr, Ytr)
labels = classifier.predict(Xte)
writeLabels(labels, o)
writeModel(classifier, model_file)
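# Hypothetical invocation (argv order: C kernel degree gamma coef0
# Xtrain Ytrain Xtest Ytest k predictions_out model_out):
#   python SVM.py 1.0 rbf 3 0.01 0.0 Xtr.txt Ytr.txt Xte.txt Yte.txt all out.txt model.pkl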
| [
"[email protected]"
] | |
8be72ee78cc9d7e1cf6795076617cef99ea05d82 | 7cebfa2066e679e19993a5507e59d1979df3d4a8 | /1_Basics/DataScienceAssignments/basicSearchEngine.py | eb134571d3ebac8f6c093cf7fa05c155bc04fe29 | [
"Apache-2.0"
] | permissive | Arunken/PythonScripts | 833e9e43ccb29234a206027f1cda1d978718d5eb | 702d0a3af7a9be3311f9da0afc5285d453f15484 | refs/heads/master | 2022-12-24T18:50:43.672779 | 2021-05-13T11:31:51 | 2021-05-13T11:31:51 | 237,631,027 | 0 | 0 | Apache-2.0 | 2022-12-08T00:47:45 | 2020-02-01T15:01:20 | Python | UTF-8 | Python | false | false | 3,737 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 28 13:17:19 2018
@author: SilverDoe
"""
import urllib
import collections
def get_page(url):
#return page html from url
try:
return [url,urllib.urlopen(url).read()]
except:
return [url,""]
def get_next_url(page):
#goes through page html from a starting position and finds the next url
start_link = page[1].find('<a href="http')
if start_link == -1:
return None, 0
end_link = page[1].find('"', start_link + len('<a href="'))
url = page[1][start_link + len('<a href="'): end_link]
return url, end_link
def get_all_links(page):
#returns all urls from a page
links = []
while True:
url,end_link = get_next_url(page)
if url:
links.append(url)
page[1] = page[1][end_link:]
else:
return links
def crawl_web(seed, to_crawl, crawled):
#calls get_all_links to crawl webpage, updates crawled and to_crawl.
    to_crawl.discard(seed)  # discard(): the url may already have been removed
    new_links = set()  # stays empty when the seed was already crawled
    if seed not in crawled:
new_links = set(link for link in get_all_links(get_page(seed)))
to_crawl = to_crawl.union(new_links)
crawled.add(seed)
return crawled, to_crawl, new_links
def track_depth(url, maxdepth):
#sets depth of webcrawl, feeds seed url to webcrawler
depth = 0
tier = [[url]]
to_crawl = set([url])
crawled = set()
while depth < maxdepth:
next_tier = []
for next_url in tier[depth]:
crawled, to_crawl, new_links = crawl_web(next_url, to_crawl,
crawled)
next_tier += list(new_links)
tier.append(next_tier)
depth += 1
return tier, crawled, to_crawl
def get_next_string(page):
#finds string in html of page using paragraph markers
start_string = page[1].find('<p>')
if start_string == -1:
return None, 0
end_string = page[1].find('</p>', start_string + len('<p>'))
string = page[1][start_string + len('<p>'): end_string]
return string, end_string
def get_page_words(page):
#gets all strings on page and converts to word list
page_string = ''
to_remove = '#$%^&*._,1234567890+=<>/\()":;!?'
while True:
string, end_string = get_next_string(page)
if string:
page_string += " " + string
page[1] = page[1][end_string:]
else:
for i in to_remove:
page_string = page_string.replace(i, '').lower()
page_words = page_string.split()
return page_words
def word_bank(crawled):
#creates word index mapping url values to word keys
crawled = list(crawled)
word_count = {}
for url in crawled:
for word in get_page_words(get_page(url)):
if word in word_count:
if url in word_count[word]:
word_count[word][url] += 1
else:
word_count[word][url] = 1
elif len(word) < 15:
word_count[word] = {url: 1}
return word_count
def search_engine(target_string, word_count):
#searches word_bank for words in string, returns urls words are found at
targets = list(set(target_string.split()))
result =[]
for word in targets:
if word in word_count:
result += word_count[word].keys()
ans = collections.Counter(result).most_common()
return ans[0][0], ans[1][0], ans[2][0]
crawled = track_depth("http://www.wccftech.com/", 2)[1]
print("crawling done")
word_count = word_bank(crawled)
print("word_count done")
#print word_count
print(search_engine('starting blogs about', word_count)) | [
"[email protected]"
] | |
23d6e34bbac69811382efe75a0eaa6ccf8ac50e0 | 635c9f0501039f5a099849a3108e19de76092aea | /algorithm/ssafy_190325/부분집합합.py | a2aa237ea48bc4c554fc9fbb279759c67273b26f | [] | no_license | Hansung-Lee/SSAFY | 87ebea0808bb40381678d678e1035dc5fa2c2eb0 | cdb7ae1bba0e98e733eed703da2c62217c319462 | refs/heads/master | 2020-04-14T20:03:05.975040 | 2019-05-16T08:57:21 | 2019-05-16T08:57:21 | 164,080,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | def subset(li):
for i in range(1<<len(li)):
temp = []
for j in range(len(li)):
if i & (1<<j):
temp.append(li[j])
if temp and sum(temp) == 0:
print(temp)
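# Bit j of mask i selects li[j], so sweeping i over 0..2**len(li)-1 visits
# every subset; only non-empty subsets that sum to zero are printed.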
# def subset2(li,n,r):
# global temp
# if r == 0:
# print(temp)
# temp = []
# elif n < r:
# return
# else:
# temp.append(li[n-1])
# subset2(li, n-1, r-1)
# subset2(li, n-1, r)
li = [-1,3,-9,6,7,-6,1,5,4,-2]
subset(li)
# subset2(li,10,9) | [
"[email protected]"
] | |
8489ac70eb0eaae6b07f33f76b0ef93c44e29136 | 5bb0b1ee382d2ffce5cf59cd9b90e75d66396335 | /P12_视频网站视频真实上传日期_日期爬取_requests_re_匹配分组/001_Vixen_网站视频日期爬取_re查找_re分组匹配_日期匹配分组_交换顺序.py | e66d2cbf3eac4ada296d2b8a83f37194de16e510 | [] | no_license | FelixZFB/TZKT_Study_Note | d94ae82299fcce060da76cd4339b4182ab44f8c5 | 65f1089621d5236e3cddf37e6a3040556b4d0851 | refs/heads/master | 2021-06-26T09:16:17.051870 | 2020-12-01T02:14:39 | 2020-12-01T02:14:39 | 181,028,395 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,899 | py | # -*- coding:utf-8 -*-
# project_xxx\venv\Scripts python
'''
Author: Felix
Email: [email protected]
Blog: https://blog.csdn.net/u011318077
Date: 2019/9/16 21:22
Desc:
'''
# The date shown under each video on the site is the upload date; the real
# shooting date is usually one day earlier. The date in the page source is
# exactly that earlier real date, so it can be extracted directly.
import requests
import re
import time
def get_date(url_list):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3730.400 QQBrowser/10.5.3805.400",
}
        # Loop over the url list
for url in url_list:
        # Wait 3 seconds between page requests
response = requests.get(url=url, headers=headers)
time.sleep(3)
        # Grab the page source
html = response.content.decode()
        # Find every real video date (the page source repeats each date three
        # times, but only one copy sits alone inside a tag), so anchoring the
        # pattern with ">" and "<" extracts each date exactly once,
        # in a form like ">November 11, 2018<"
date_list = re.findall(">([a-zA-Z]+ \d+, \d+)<", html)
with open("date.txt", "a", encoding="utf-8") as f:
for date in date_list:
                # Turn "July 18, 2019" into "Vixen.19.07.18"
                # First replace ", 20" with ", Vixen."
date = date.replace(", 20", ", Vixen.")
                # Regroup with the regex to swap the date order, joined by "."
date_new = re.sub(r'([a-zA-Z]+) (\d+), (Vixen.\d+)', r'\3.\1.\2', date)
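                # e.g. "July 18, 2019" -> "July 18, Vixen.19" ->
                # "Vixen.19.July.18" -> "Vixen.19.07.18" after the month
                # substitutions below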
                # Then replace each English month name with its two-digit number
date_new = date_new.replace("January", "01")
date_new = date_new.replace("February", "02")
date_new = date_new.replace("March", "03")
date_new = date_new.replace("April", "04")
date_new = date_new.replace("May", "05")
date_new = date_new.replace("June", "06")
date_new = date_new.replace("July", "07")
date_new = date_new.replace("August", "08")
date_new = date_new.replace("September", "09")
date_new = date_new.replace("October", "10")
date_new = date_new.replace("November", "11")
date_new = date_new.replace("December", "12")
                # Write out every converted date
f.write(date_new + "\n")
        # Also keep a copy of the raw scraped dates
with open("date_list.txt", "a", encoding="utf-8") as f:
f.write(url + ":爬取完成" + "\n")
f.write(str(date_list) + "\n")
print(url + ":爬取完成")
print(date_list)
if __name__ == '__main__':
url_list = ["https://www.vixen.com/videos?page=" + str(i) + "&size=12" for i in range(1, 21)]
get_date(url_list)
| [
"[email protected]"
] | |
c39633b0d4ddbeeb4101389894b53606ca92dd4e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5765824346324992_0/Python/jcb/p2.py | bde3cb4839091e6513ee3b2edad179bc816deaa6 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import fractions
def lcm(x, y):
return (x * y) / fractions.gcd(x, y)
def solve(f):
#parse
num_barbers, n = tuple([int(x) for x in f.readline().strip().split(" ")])
barbers = [[i + 1, int(x), 0] for i, x in enumerate(f.readline().strip().split(" "))]
#print num_barbers, n, barbers
if num_barbers != len(barbers):
print "Wrong number of barbers on line:", line
sys.exit(0)
l = reduce(lcm, [x[1] for x in barbers])
#print l
cycle = 0
for i in xrange(num_barbers):
cycle += l / barbers[i][1]
left = n % cycle
if left == 0:
left = cycle
while left > 0:
smallest = None
#print barbers
for i in xrange(num_barbers):
if barbers[i][2] == 0:
barbers[i][2] = barbers[i][1]
left -= 1
if left == 0:
return barbers[i][0]
if not smallest or barbers[i][2] < smallest:
smallest = barbers[i][2]
#print barbers
#print smallest
for i in xrange(num_barbers):
barbers[i][2] -= smallest
#print barbers
total = None
count = 0
f = sys.stdin
while f:
if not total:
total = int(f.readline().strip())
continue
elif count < total:
count += 1
print "Case #%d: %s" % (count, solve(f))
else:
break
if count < total:
print "Wrong number of test cases"
sys.exit(0)
| [
"[email protected]"
] | |
43efe6f40cb20df2c4eb6e4f4c25c9fcfb33a3e6 | c4d1e606d2ebbeaaf3a4dee6f851a45dfa68dabe | /docs-crawler/docs/docs/spiders/numpy_spider.py | b4dae3615cf706dc8551bad586a9d824e7f85ac2 | [
"Apache-2.0"
] | permissive | niansong1996/threepio | 4d4a976da5a199c77fca936861ba78cc3f45fa7c | 5a143e045ca56560c29d03251cabc61cc8b982f6 | refs/heads/master | 2021-05-18T23:11:50.644479 | 2020-03-30T17:01:59 | 2020-03-30T17:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | import re
from docs.items import ApiItem
from scrapy.spiders import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from w3lib.html import remove_tags
class NumpySpider(CrawlSpider):
name = "numpy"
version = "1.17.0"
allowed_domains = ['scipy.org']
    start_urls = ['https://docs.scipy.org/doc/numpy/reference/generated/']
split_def = re.compile(r'^([\w\.]+)\(([\w\,\s=\*\.]*)\)')
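    # e.g. "numpy.sum(a, axis=None)" -> groups ("numpy.sum", "a, axis=None")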
rules = (
Rule(LinkExtractor(
allow=(re.compile(r'.+\.html')),
),
callback='parse_api',),
)
def parse_api(self, response):
self.logger.info(f'Scraping {response.url}')
fdef = response.css('dl.function > dt')
defs = []
for selector in fdef:
text = (remove_tags(selector.get())
.replace('\n', '')
.replace(' ', '')
.replace('[source]', ''))
defs.append(text)
for text in defs:
split = self.split_def.match(text)
if split is None:
continue
function_name = split.groups()[0].split('.')[-1]
params = split.groups()[1].split(',')
args = [p for p in params if '=' not in p]
kwargs = [p.split('=') for p in params if '=' in p]
item = ApiItem()
item['code'] = text
item['function_name'] = function_name
item['args'] = args
item['kwargs'] = kwargs
yield item
| [
"[email protected]"
] | |
8f83835fe45446652f09806816af34d50e785cb3 | 8f7b7a910520ba49a2e614da72f7b6297f617409 | /Problemset/longest-substring-without-repeating-characters/longest-substring-without-repeating-characters.py | dc8487db359cebaaaaf3829e36b44a49590f76ec | [] | no_license | fank-cd/python_leetcode | 69c4466e9e202e48502252439b4cc318712043a2 | 61f07d7c7e76a1eada21eb3e6a1a177af3d56948 | refs/heads/master | 2021-06-16T23:41:55.591095 | 2021-03-04T08:31:47 | 2021-03-04T08:31:47 | 173,226,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py |
# @Title: Longest Substring Without Repeating Characters
# @Author: [email protected]
# @Date: 2019-07-10 11:51:35
# @Runtime: 68 ms
# @Memory: 13.3 MB
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
        Sliding-window approach.
"""
if not s:
return 0
left_index = 0
lookup = set()
leng = len(s)
max_len = 0
cur_len = 0
for i in range(leng):
            cur_len += 1
while s[i] in lookup:
lookup.remove(s[left_index])
left_index += 1
cur_len -= 1
            if cur_len > max_len:
max_len = cur_len
lookup.add(s[i])
return max_len
| [
"[email protected]"
] | |
f3dc9a759d3de427f44885d86311c78e81183969 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/web/web_app_azure_storage_accounts.py | d55fb8b2ed5c239a1fd3dd9fdf97ebe2e482e77b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,101 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppAzureStorageAccountsArgs', 'WebAppAzureStorageAccounts']
@pulumi.input_type
class WebAppAzureStorageAccountsArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
kind: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input['AzureStorageInfoValueArgs']]]] = None):
"""
The set of arguments for constructing a WebAppAzureStorageAccounts resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[Mapping[str, pulumi.Input['AzureStorageInfoValueArgs']]] properties: Azure storage accounts.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the app.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['AzureStorageInfoValueArgs']]]]:
"""
Azure storage accounts.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['AzureStorageInfoValueArgs']]]]):
pulumi.set(self, "properties", value)
class WebAppAzureStorageAccounts(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['AzureStorageInfoValueArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
AzureStorageInfo dictionary resource.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['AzureStorageInfoValueArgs']]]] properties: Azure storage accounts.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WebAppAzureStorageAccountsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
AzureStorageInfo dictionary resource.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param WebAppAzureStorageAccountsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppAzureStorageAccountsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[pulumi.InputType['AzureStorageInfoValueArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WebAppAzureStorageAccountsArgs.__new__(WebAppAzureStorageAccountsArgs)
__props__.__dict__["kind"] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppAzureStorageAccounts"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppAzureStorageAccounts")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppAzureStorageAccounts, __self__).__init__(
'azure-native:web:WebAppAzureStorageAccounts',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppAzureStorageAccounts':
"""
Get an existing WebAppAzureStorageAccounts resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WebAppAzureStorageAccountsArgs.__new__(WebAppAzureStorageAccountsArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return WebAppAzureStorageAccounts(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Mapping[str, 'outputs.AzureStorageInfoValueResponse']]:
"""
Azure storage accounts.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
1277be50c35d5f8141d67a8e05e653e78d81d442 | 4e29395020ce78f435e75e0b3f1e09b227f6f4d8 | /ataraxia/algorithm/Eval/eval.py | 5d666f6ab8a46c7ce2cea1d61d56bac77ffe1cdf | [] | no_license | luoyangustc/argus | 8b332d94af331a2594f5b1715ef74a4dd98041ad | 2ad0df5d7355c3b81484f6625b82530b38b248f3 | refs/heads/master | 2020-05-25T21:57:37.815370 | 2019-05-22T09:42:40 | 2019-05-22T09:42:40 | 188,005,059 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | import pandas as pd
from collections import Counter
import pickle
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import average_precision_score
import json
import matplotlib.pyplot as plt
import itertools
import numpy as np
import json
import os
import docopt
dic_pred = {}
dic_pred_score = {}
dic_gt = {}
dic_gt_score = {}
y_gt = []
y_pred=[]
y_gt_score = []
y_score = []
pulp_score_list = []
sexy_score_list = []
normal_score_list = []
pulp_gt_list = []
sexy_gt_list = []
normal_gt_list = []
with open("output.json") as f1 ,open("groundtruth.json") as f2:
lines2 = f2.readlines()
d = json.load(f1)
for k,v in d.items():
lst = []
dic_pred[k] = v['Top-1 Index'][0]
lst.append(float(v["Confidence"][0]))
lst.append(float(v["Confidence"][1]))
lst.append(float(v["Confidence"][2]))
dic_pred_score[k] = lst
for line in lines2:
lst = []
basename = os.path.basename(json.loads(line.strip())['url'])
classes = json.loads(line.strip())['label'][0]['data'][0]["class"]
if classes == "sexy":
index = 1
lst = [0,1,0]
elif classes == "pulp":
index = 0
lst = [1,0,0]
else:
index = 2
lst = [0,0,1]
dic_gt[basename] = index
dic_gt_score[basename] = lst
for k in dic_pred:
if k in dic_gt:
y_gt.append(dic_gt[k])
y_pred.append(dic_pred[k])
pulp_score_list.append(dic_pred_score[k][0])
sexy_score_list.append(dic_pred_score[k][1])
normal_score_list.append(dic_pred_score[k][2])
pulp_gt_list.append(dic_gt_score[k][0])
sexy_gt_list.append(dic_gt_score[k][1])
normal_gt_list.append(dic_gt_score[k][2])
y_score.append(pulp_score_list)
y_score.append(sexy_score_list)
y_score.append(normal_score_list)
y_gt_score.append(pulp_gt_list)
y_gt_score.append(sexy_gt_list)
y_gt_score.append(normal_gt_list)
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues, figsize=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
(This function is copied from the scikit docs.)
"""
    if normalize:
        # Normalize first so the heatmap and its annotations show the same values
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure(figsize=figsize)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
classes = ['pulp','sexy','normal']
cm = confusion_matrix(y_gt, y_pred, labels=np.arange(len(classes)))
p = precision_score(y_gt, y_pred, average=None)
r = recall_score(y_gt, y_pred, average=None)
# ap = average_precision_score(y_gt,y_pred)
acc = accuracy_score(y_gt, y_pred)
print('accuracy:', acc)
for i in range(len(classes)):
    ap = average_precision_score(y_gt_score[i], y_score[i])
    print('%s precision:' % classes[i], p[i])
    print('%s recall:' % classes[i], r[i])
    print('%s ap:' % classes[i], ap)
print('Top-1 error:', 1 - acc)
plot_confusion_matrix(cm, classes) | [
"[email protected]"
] | |
a08bbad48829c38821e8d071a6a442413f27293f | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-opensearch/aliyunsdkopensearch/request/v20171225/DescribeAppGroupDataReportRequest.py | 758871adcb9db7d9bd59f3a70169885a82a3c3bd | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,917 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkopensearch.endpoint import endpoint_data
class DescribeAppGroupDataReportRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'OpenSearch', '2017-12-25', 'DescribeAppGroupDataReport','opensearch')
self.set_uri_pattern('/v4/openapi/app-groups/[appGroupIdentity]/data-report')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_endTime(self):
return self.get_query_params().get('endTime')
def set_endTime(self,endTime):
self.add_query_param('endTime',endTime)
def get_startTime(self):
return self.get_query_params().get('startTime')
def set_startTime(self,startTime):
self.add_query_param('startTime',startTime)
def get_appGroupIdentity(self):
return self.get_path_params().get('appGroupIdentity')
def set_appGroupIdentity(self,appGroupIdentity):
self.add_path_param('appGroupIdentity',appGroupIdentity) | [
"[email protected]"
] | |
c06d9cfdd26f717ccda451b45843ae4a72787c06 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/367b3079-2a92-41b6-b880-54e3b3730861__squareRoot.py | 80bbf1197a618a7a41813e23e44f3e78c6e83bf1 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # import math
def closeEnough(num1,num2):
if (abs(num1-num2) < 0.001):
return True
else:
return False
def squareRoot(num,guess):
# guess = 1
if(closeEnough((num/guess),guess)):
print round(guess,4)
else:
guess = ((num/guess)+guess)/2
squareRoot(num,guess)
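# Newton/Heron iteration: averaging guess with num/guess converges quickly,
# e.g. squareRoot(9.0, 1.0) walks 1 -> 5 -> 3.4 -> 3.02... and prints 3.0001.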
if __name__ == '__main__':
num = input("Enter number:")
squareRoot(float(num),1.0)
| [
"[email protected]"
] | |
5ef4daa35ad9543b2fe5089654f203686e072f58 | 6550cc368f029b3955261085eebbddcfee0547e1 | /第9部分-flask+智能玩具(火龙果)/day118/今日代码/day118/goto_tuling.py | 1b8d9603d97063fdb0054446d7e0569040b61834 | [] | no_license | vividyellow/oldboyeduPython14qi | d00c8f45326e16464c3d4e8df200d93779f68bd3 | de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11 | refs/heads/master | 2022-09-17T21:03:17.898472 | 2020-01-31T10:55:01 | 2020-01-31T10:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | import requests
url = "http://openapi.tuling123.com/openapi/api/v2"
data_dict = {
"reqType":0,
"perception": {
"inputText": {
"text": "北京"
},
},
"userInfo": {
"apiKey": "c3a9ba0d958a43658a5acdcae50c13ae",
"userId": "jinwangbas"
}
}
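# Example call (needs a valid apiKey and network access; the reply varies):
#   tl("hello", "user-001")  ->  the bot's reply string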
def tl(text,uid):
data_dict["perception"]["inputText"]["text"] = text
data_dict["userInfo"]["userId"] = uid
res = requests.post(url,json=data_dict)
res_json = res.json()
return res_json.get("results")[0]["values"]["text"] | [
"[email protected]"
] | |
5f267713a18175d80bf3a5ca7febbc4b744eccfe | a83108c53c454102317d7bb4e769f36b661c75ed | /config/geturlParams.py | b8c892530271f545e9612e647a80ae8f630fed4e | [] | no_license | xuechao1/DX_interfaceTest | aa35a83390113c5be7d6bdf6e6c796b35059f63e | 524cce8cc06dc8f045a9b98bfafdbaecd25726df | refs/heads/master | 2022-12-05T17:00:45.289070 | 2020-08-20T10:14:28 | 2020-08-20T10:14:28 | 282,176,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from config import readConfig as readConfig
readconfig = readConfig.ReadConfig()
class geturlParams():  # builds the request URL from values read out of the config file
def get_Url(self):
new_url = readconfig.get_http('scheme') + '://' + readconfig.get_http('baseurl') + ':8888' + '/login' + '?'
# logger.info('new_url'+new_url)
return new_url
if __name__ == '__main__':  # quick check that the assembled URL is correct
print(geturlParams().get_Url())
| [
"[email protected]"
] | |
c215661b1e7f55ca27629f44a33101451779afe1 | 0aa64aa023f80c97c8ded68dee7541ca7d3aa274 | /ImageEnhancer/__init__.py | 86c7e8ae06480cd84cd538a85a494986eb1b6c22 | [] | no_license | YuMurata/gdrive_scripts | fb7751625b0d73d50ee91f9ab74aa9883a317231 | ce2cf48b757eca558e8f93892b9b929352431bfd | refs/heads/master | 2020-12-20T04:38:00.127118 | 2020-03-13T05:02:26 | 2020-03-13T05:02:26 | 235,964,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from .enhance_definer \
import (enhance_class_list, enhance_dict, enhance_name_list,
MAX_PARAM, MIN_PARAM)
from .image_enhancer \
import (ImageEnhancer, ImageEnhancerException, ResizableEnhancer)
from .generate_param import generate_random_param
| [
"[email protected]"
] | |
a076db6989aa4821c423febbc9cc4b4248a6ecb3 | 18430833920b3193d2f26ed526ca8f6d7e3df4c8 | /src/transmittals/migrations/0051_auto_20160226_1127.py | 4cfd707e588c319531e2c6f51be7efc6f132cf50 | [
"MIT"
] | permissive | providenz/phase | ed8b48ea51d4b359f8012e603b328adf13d5e535 | b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60 | refs/heads/master | 2021-01-17T06:56:07.842719 | 2016-06-28T11:17:53 | 2016-06-28T11:17:53 | 47,676,508 | 0 | 0 | null | 2015-12-09T07:45:19 | 2015-12-09T07:45:18 | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transmittals', '0050_make_entities'),
]
operations = [
migrations.AlterField(
model_name='trsrevision',
name='originator_new',
field=models.ForeignKey(verbose_name='Originator', to='accounts.Entity'),
),
]
| [
"[email protected]"
] | |
58ef51cbfb19381e058ed5c3e284c3f524d96b96 | de468d3ec6b7b69664678789e5fa71b613b29687 | /scine_heron/tests/create_molecule_animator_test.py | 2b7d9a0197b9dff66dcdc7a40c6f8a4ae1afba00 | [
"BSD-3-Clause"
] | permissive | qcscine/heron | dc566bf8bfdd5b5271ed79faed249a6552390d0d | 688d2a510fda9f6bfaf5ef3af91fa3b988703a28 | refs/heads/master | 2023-04-06T23:31:14.931706 | 2022-08-31T05:40:15 | 2022-08-31T05:40:15 | 526,650,129 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = """ This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
"""
Tests for the create_molecule_animator function.
"""
from scine_heron.energy_profile.energy_profile_status_manager import (
EnergyProfileStatusManager,
)
from scine_heron.status_manager import StatusManager
from scine_heron.haptic.haptic_client import HapticClient
from scine_heron.electronic_data.electronic_data_status_manager import (
ElectronicDataStatusManager,
)
from scine_heron.settings.settings_status_manager import SettingsStatusManager
from scine_heron.molecule.create_molecule_animator import create_molecule_animator
from scine_heron.molecule.animator import Animator
from typing import Optional, List, TYPE_CHECKING, Any
import pytest
from vtk import vtkMolecule
# TODO Disabled as long as test_updates_molecule is disabled
# from PySide2.QtWidgets import QApplication
# from PySide2.QtCore import QEventLoop
if TYPE_CHECKING:
Signal = Any
else:
from PySide2.QtCore import Signal
@pytest.fixture(name="animator") # type: ignore[misc]
def create_animator(molecule: vtkMolecule) -> Animator:
"""
Creates a molecule animator with the function
`create_molecule_animator`.
"""
settings_manager = SettingsStatusManager()
energy_status_manager = EnergyProfileStatusManager()
charge_status_manager = StatusManager[Optional[List[float]]](None)
electronic_data_status_manager = ElectronicDataStatusManager()
return create_molecule_animator(
0,
molecule,
settings_manager,
HapticClient(),
energy_status_manager,
electronic_data_status_manager,
charge_status_manager,
Signal(),
)
# TODO this test does not work without a haptic device
# def test_updates_molecule(
# _app: QApplication, animator: Animator, molecule: vtkMolecule
# ) -> None:
# """
# Checks that the animator applies the gradient to the molecule.
# """
# startX = molecule.GetAtom(0).GetPosition().GetX()
# animator.start()
# loop = QEventLoop()
# animator.render_signal.connect(loop.quit)
# loop.exec_()
# assert molecule.GetAtom(0).GetPosition().GetX() > startX
# assert molecule.GetAtom(0).GetPosition().GetY() == pytest.approx(0.0)
# assert molecule.GetAtom(0).GetPosition().GetZ() == pytest.approx(0.0)
# assert molecule.GetAtom(1).GetPosition().GetX() == pytest.approx(
# -1.0 * molecule.GetAtom(0).GetPosition().GetX()
# )
# assert molecule.GetAtom(1).GetPosition().GetY() == pytest.approx(0.0)
# assert molecule.GetAtom(1).GetPosition().GetZ() == pytest.approx(0.0)
| [
"[email protected]"
] | |
8ded2230e932e52c5fa2c4c833ee7824fad8c28e | 638af6b8c580eeae23fc1034882c4b514195137a | /Packages/vcs/Test/test_mesh_leg.py | 5b4d3ee35046a718c7cf7f1b6b698c33d332b6db | [] | no_license | doutriaux1/uvcdat | 83684a86b514b8cac4d8900a503fc13d557fc4d2 | 37e9635f988696c346b4c3cdb49144d1e21dab5d | refs/heads/master | 2021-01-17T07:57:22.897539 | 2015-02-02T22:52:12 | 2015-02-02T22:52:12 | 14,878,320 | 1 | 0 | null | 2015-02-19T20:54:25 | 2013-12-02T23:44:46 | C | UTF-8 | Python | false | false | 793 | py | # Adapted for numpy/ma/cdms2 by convertcdms.py
import vcs,cdms2 as cdms,sys,support,os
bg=support.bg
f=cdms.open(os.path.join(cdms.__path__[0],'..','..','..','..','sample_data','sampleCurveGrid4.nc'))
s=f('sample')
x=vcs.init()
t=x.createtemplate('jj')
m=x.createmeshfill('hh')
m.mesh='y'
m=x.createisofill('jj')
t.scale(.8)
t.legend.y2=.8
t.legend.x1=.8
t.legend.x2=.82
x.plot(s,t,m,bg=bg)
support.check_plot(x)
x.clear()
t.legend.x2=.78
t.legend.x1=.8
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
t.legend.y2=t.legend.y1
t.legend.y1=.8
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
t.legend.x1=.2
t.legend.x2=.8
t.legend.y1=.15
t.legend.y2=.2
x.plot(s,m,t,bg=bg)
support.check_plot(x)
x.clear()
t.legend.y1=.15
t.legend.y2=.1
x.plot(s,m,t,bg=bg)
support.check_plot(x)
| [
"[email protected]"
] | |
4ae301b5361e0b6d3d2a61c245a7ff6c6621f51a | 809a18b3dd7e90393a69d48f20be840041ef396e | /models/pointer.py | fbe08458d00cd150934dca5c3bb574b7b10f5407 | [] | no_license | wanwanaa/transformer-pointer | e58d4cb4b4f8a412515316457afac8708cdf1ef5 | aac7bff63e19d4845ac23dfcf9131f1a20812efc | refs/heads/master | 2020-05-17T17:08:05.747337 | 2019-09-08T13:02:30 | 2019-09-08T13:02:30 | 183,840,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,745 | py | import torch
import torch.nn as nn
from models.encoder import Encoder
from models.decoder import Decoder
class Pointer(nn.Module):
def __init__(self, config):
super().__init__()
self.linear = nn.Sequential(
nn.Linear(config.model_size*3, config.model_size),
nn.SELU(),
nn.Linear(config.model_size, config.model_size))
self.linear_prob = nn.Linear(config.model_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, emb, hidden, context):
"""
        :param emb: (batch, 1, model_size) embedding of the previous output token
        :param hidden: (batch, 1, model_size) current decoder hidden state
        :param context: (batch, 1, model_size) attention context vector
        :return: (batch,) copy probability used to mix the pointer and generator distributions
"""
context = self.linear(torch.cat((emb, hidden, context), dim=-1))
# -> (batch, 1, model_size) -> (batch, 1, 1)
prob = self.sigmoid(self.linear_prob(context)).squeeze()
return prob
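# In pointer-generator terms (See et al., 2017), `prob` above is the copy
# probability: the final distribution is (1 - prob) * P_vocab plus
# prob * the attention weights scattered over the source characters.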
class Luong_Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.model_size = config.model_size
self.linear_in = nn.Sequential(
nn.Linear(config.model_size, config.model_size),
nn.SELU(),
nn.Linear(config.model_size, config.model_size)
)
self.linear_out = nn.Sequential(
nn.Linear(config.model_size, config.model_size),
nn.SELU(),
nn.Linear(config.model_size, config.model_size)
)
self.softmax = nn.Softmax(dim=-1)
def forward(self, output, encoder_out):
"""
        :param output: (batch, 1, model_size) decoder output
        :param encoder_out: (batch, t_len, model_size) encoder hidden states
        :return: (attn_weights (batch, t_len), context (batch, 1, model_size))
"""
out = self.linear_in(output) # (batch, 1, hidden_size)
out = out.transpose(1, 2) # (batch, hidden_size, 1)
attn_weights = torch.bmm(encoder_out, out) # (batch, t_len, 1)
attn_weights = self.softmax(attn_weights.transpose(1, 2)) # (batch, 1, t_len)
context = torch.bmm(attn_weights, encoder_out)
context = self.linear_out(context) # (batch, 1, model_size)
return attn_weights.squeeze(), context
class Transformer_Pointer(nn.Module):
def __init__(self, config):
super().__init__()
self.encoder_word = Encoder(config, config.src_vocab_size)
self.encoder_char = Encoder(config, config.tgt_vocab_size)
self.pointer = Pointer(config)
self.attention = Luong_Attention(config)
self.decoder = Decoder(config)
self.linear_out = nn.Linear(config.model_size, config.tgt_vocab_size)
self.softmax = nn.Softmax(dim=-1)
self.s_len = config.s_len
self.bos = config.bos
# add <bos> to sentence
def convert(self, x):
"""
:param x:(batch, s_len) (word_1, word_2, ... , word_n)
:return:(batch, s_len) (<bos>, word_1, ... , word_n-1)
"""
if torch.cuda.is_available():
start = (torch.ones(x.size(0), 1) * self.bos).type(torch.cuda.LongTensor)
else:
start = (torch.ones(x.size(0), 1) * self.bos).type(torch.LongTensor)
x = torch.cat((start, x), dim=1)
return x[:, :-1]
def forward(self, x_w, x_c, y):
"""
        :param x_w: (batch, t_len) word-level source token ids
        :param x_c: (batch, c_len) character-level source ids used for copying
        :param y: (batch, s_len) target summary ids
        :return: (s_len, batch, tgt_vocab_size) per-step output distributions
"""
y_s = self.convert(y)
encoder_out = self.encoder_word(x_w)
encoder_attn = self.encoder_char(x_c)
final = []
for i in range(self.s_len):
dec_output = self.decoder(x_w, y_s[:, :i+1], encoder_out)
emb = self.decoder.embedding(y_s[:, i].unsqueeze(1))
output = self.linear_out(dec_output[:, -1, :])
# gen (batch, vocab_size)
gen = self.softmax(output)
# pointer
# ptr (batch, c_len)
# context (batch, 1, model_size)
ptr, context = self.attention(dec_output[:, -1, :].unsqueeze(1), encoder_attn)
# prob (batch, )
prob = self.pointer(emb, dec_output[:, -1, :].unsqueeze(1), context).unsqueeze(1)
final_out = (1-prob) * gen
final_out = final_out.scatter_add_(1, x_c, prob*ptr)
final.append(final_out)
return torch.stack(final)
def sample(self, x_w, x_c):
encoder_out = self.encoder_word(x_w)
encoder_attn = self.encoder_char(x_c)
start = torch.ones(x_w.size(0)) * self.bos
start = start.unsqueeze(1)
if torch.cuda.is_available():
start = start.type(torch.cuda.LongTensor)
else:
start = start.type(torch.LongTensor)
# the first <start>
out = torch.ones(x_w.size(0)) * self.bos
out = out.unsqueeze(1)
final = []
for i in range(self.s_len):
if torch.cuda.is_available():
out = out.type(torch.cuda.LongTensor)
else:
out = out.type(torch.LongTensor)
dec_output = self.decoder(x_w, out, encoder_out)
emb = self.decoder.embedding(out[:, -1].unsqueeze(1))
output = self.linear_out(dec_output[:, -1, :])
gen = self.softmax(output)
ptr, context = self.attention(dec_output[:, -1, :].unsqueeze(1), encoder_attn)
# prob (batch, )
prob = self.pointer(emb, dec_output[:, -1, :].unsqueeze(1), context).unsqueeze(1)
final_out = (1 - prob) * gen
final_out = final_out.scatter_add_(1, x_c, prob * ptr)
final.append(final_out)
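            # Note: the greedy pick below uses the generator-only distribution `gen`;
            # an alternative would be to argmax over `final_out` so the pointer
            # can influence decoding as well.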
gen = torch.argmax(gen, dim=-1).unsqueeze(1)
out = torch.cat((out, gen), dim=1)
return torch.stack(final), out | [
"[email protected]"
] | |
40f8185de6c03d4570b59f34d711c624447175de | 093b9569be9d1c4e5daf92efbebc38f680917b2d | /.history/base/views_20210828170402.py | 51bdc9919f9fd0fdfec82eaaf4503593d19de3b0 | [] | no_license | Justin-Panagos/todoList | 95b1e97ff71af1b0be58e7f8937d726a687cea4d | 10539219b59fcea00f8b19a406db3d4c3f4d289e | refs/heads/master | 2023-08-04T13:27:13.309769 | 2021-08-29T14:06:43 | 2021-08-29T14:06:43 | 400,827,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.shortcuts import render
from django.http import HttpResponse
def taskList(request):
    return HttpResponse('')
| [
"[email protected]"
] | |
8f2dc8276cd6ae79f6dc311c301f1ec35490ef60 | 7d3592b74233ee8b1afa2fac00fa5a6f045f5525 | /tutorials/inputFromDependent/inputFromDependent.py | a53bf2778a6a20a6298dff152673c8f3bb9fa422 | [
"Apache-2.0"
] | permissive | afcarl/PyPPL | eca2a3e32729d7fd65042164b82c84f21877de2d | c6c654f163d1bdd0ae0357025c5782b17c14b93c | refs/heads/master | 2020-03-25T18:51:46.872319 | 2018-08-08T16:10:21 | 2018-08-08T16:10:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py |
from pyppl import PyPPL, Proc, Channel
pSort = Proc(desc = 'Sort files.')
pSort.input = {"infile:file": Channel.fromPattern("./data/*.txt")}
pSort.output = "outfile:file:{{in.infile | fn}}.sorted"
pSort.forks = 5
pSort.script = """
sort -k1r {{in.infile}} > {{out.outfile}}
"""
pAddPrefix = Proc(desc = 'Add line number to each line.')
pAddPrefix.depends = pSort
# automatically inferred from pSort.output
pAddPrefix.input = "infile:file"
pAddPrefix.output = "outfile:file:{{in.infile | fn}}.ln"
pAddPrefix.exdir = './export'
pAddPrefix.forks = 5
pAddPrefix.script = """
paste -d. <(seq 1 $(wc -l {{in.infile}} | cut -f1 -d' ')) {{in.infile}} > {{out.outfile}}
"""
PyPPL().start(pSort).run()
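# pSort fans out over ./data/*.txt (up to 5 jobs in parallel via `forks`);
# pAddPrefix then consumes each sorted file through the input channel inferred
# from pSort.output and exports the line-numbered results to ./export.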
| [
"[email protected]"
] |