blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3-10.2M) | authors (listlengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5a5e944bd41c1e4cfadd2c3c620774ec34c22e1 | 31e41995dea5e4a41bc9b942da7e5266cd686757 | /learning/training/python/py2/pgms/sec4/outputparams.py | 5894f5ae44a48540fe4caeee5abca53df43f5154 | []
| no_license | tamle022276/python | 3b75758b8794801d202565c05d32976c146beffd | 4fec225d1e5e2bf0adac5048f7f9f3313ac76e23 | refs/heads/master | 2020-04-01T21:03:01.458768 | 2017-03-13T20:47:35 | 2017-03-13T20:47:35 | 64,878,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/env python
# outputparams.py - simulate output parameters
def assign(m, n):
m = 10
n = [3, 4]
return m, n
a = 5; b = [1, 2]
(a, b) = assign(a, b) # updates a, b
print a, b
#####################################
#
# $ outputparams.py
# 10 [3, 4]
#
| [
"[email protected]"
]
| |
5ebdc3a4b1499d03dc0954911ba0248fd4c5dfb8 | e254a1124bbe6be741159073a22898b0824e2a4f | /customuser/admin.py | 6c225a0579ce6bb67949bffc24b32ad6df83f3a0 | []
| no_license | skiboorg/stdiplom | 0df83b8e42e999abc43a01157cb24cffd10d0666 | 13101381c7db8a4b949048e8cbfcf9673cf7ecde | refs/heads/master | 2022-11-12T23:55:21.136176 | 2020-06-29T05:57:03 | 2020-06-29T05:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User,Guest
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
"""Define admin model for custom User model with no email field."""
fieldsets = (
(None, {'fields': ('email', 'password', 'used_promo')}),
(_('Personal info'), {'fields': ('fio', 'phone', 'comment', 'is_allow_email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2', 'phone'),
}),
)
list_display = ('email', 'fio', 'phone')
ordering = ('email',)
search_fields = ('email', 'fio', 'phone')
admin.site.register(Guest) | [
"[email protected]"
]
| |
d6522db0345b146f5c997b5624fec7901716705a | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.server-Zope-3.2.1/zope.server/linereceiver/linetask.py | b6e21554887b4b549e2db8b1c9d3414ff467116b | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
]
| permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Line Task
$Id: linetask.py 27442 2004-09-03 08:16:55Z shane $
"""
import socket
import time
from zope.server.interfaces import ITask
from zope.interface import implements
class LineTask(object):
"""This is a generic task that can be used with command line
protocols to handle commands in a separate thread.
"""
implements(ITask)
def __init__(self, channel, command, m_name):
self.channel = channel
self.m_name = m_name
self.args = command.args
self.close_on_finish = 0
def service(self):
"""Called to execute the task.
"""
try:
try:
self.start()
getattr(self.channel, self.m_name)(self.args)
self.finish()
except socket.error:
self.close_on_finish = 1
if self.channel.adj.log_socket_errors:
raise
except:
self.channel.exception()
finally:
if self.close_on_finish:
self.channel.close_when_done()
def cancel(self):
'See ITask'
self.channel.close_when_done()
def defer(self):
'See ITask'
pass
def start(self):
now = time.time()
self.start_time = now
def finish(self):
hit_log = self.channel.server.hit_log
if hit_log is not None:
hit_log.log(self)
| [
"[email protected]"
]
| |
09748ed4d962cf5b7f4a079ab8e5b4811299f4c0 | 99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6 | /Django/PROJECT02/PROJECT02/jobs/models.py | 5d8ee670119eeaf75fc29f8879c7f9b7d6106061 | []
| no_license | HSx3/TIL | 92acc90758015c2e31660617bd927f7f100f5f64 | 981c9aaaf09c930d980205f68a28f2fc8006efcb | refs/heads/master | 2020-04-11T21:13:36.239246 | 2019-05-08T08:18:03 | 2019-05-08T08:18:03 | 162,099,042 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.db import models
# Create your models here.
class Job(models.Model):
name = models.CharField(max_length=20)
pastjob = models.CharField(max_length=30)
def __str__(self):
return self.name | [
"[email protected]"
]
| |
b1b17de27b17b10b04b0d215f31b42d2845350ab | c31ee8136a57a96649196081e1cfde0676c2a481 | /larcv/app/arxiv/arxiv/LArOpenCVHandle/cfg/mac/arxiv/analyze.py | 1ff1c17fba6bd79428e15f9dc424c3ee27064942 | [
"MIT"
]
| permissive | DeepLearnPhysics/larcv2 | b12b46168e5c6795c70461c9495e29b427cd88b5 | 31863c9b094a09db2a0286cfbb63ccd2f161e14d | refs/heads/develop | 2023-06-11T03:15:51.679864 | 2023-05-30T17:51:19 | 2023-05-30T17:51:19 | 107,551,725 | 16 | 19 | MIT | 2023-04-10T10:15:13 | 2017-10-19T13:42:39 | C++ | UTF-8 | Python | false | false | 1,113 | py | from larcv import larcv
import ROOT, sys
from ROOT import std
if len(sys.argv) < 2:
print 'Usage: python',sys.argv[0],'CONFIG_FILE [LARCV_FILE1 LARCV_FILE2 ...]'
sys.exit(1)
proc = larcv.ProcessDriver('ProcessDriver')
print "Loading config... ",sys.argv[1]
proc.configure(sys.argv[1])
print "Loaded"
print sys.argv
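# argv layout, per the code below: [1] config, [2] ana output stem, [3] output stem,
# [4] MC larbys file, [5] reco larbys file, [6:] additional input files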
if len(sys.argv) > 1:
flist=ROOT.std.vector('std::string')()
for x in xrange(len(sys.argv)-6):
print "Pushing back...",sys.argv[x+6]
flist.push_back(sys.argv[x+6])
proc.override_input_file(flist)
proc.override_ana_file(sys.argv[2] + ".root")
proc.override_output_file(sys.argv[3] + ".root")
ana_id = proc.process_id("LArbysImageAna")
ext_id = proc.process_id("LArbysImageExtract")
out_id = proc.process_id("LArbysImageOut")
ana_proc = proc.process_ptr(ana_id)
ext_proc = proc.process_ptr(ext_id)
out_proc = proc.process_ptr(out_id)
out_proc.SetLArbysImageAna(ana_proc)
out_proc.SetLArbysImageExtract(ext_proc)
ana_proc.SetInputLArbysMCFile(sys.argv[4]);
ana_proc.SetInputLArbysRecoFile(sys.argv[5])
proc.initialize()
proc.batch_process()
proc.finalize()
| [
"[email protected]"
]
| |
c5c4b6f0b936cd29d654915642a877ac48a21b78 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03806/s696918602.py | 35f161589d7dd759d0031fd48f8415a6aae2215a | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | def main():
INF = 100 * 40 + 1
MX = 4000
N, Ma, Mb = map(int, input().split())
dp = [[INF] * (MX * 2 + 1) for _ in range(2)]
i, j = 0, 1
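    # Rolling 2-row DP: dp[row][k] = min cost to make Ma*sum(b) - Mb*sum(a) equal k;
    # negative k is addressed through Python's negative list indexing.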
for _ in range(N):
ai, bi, ci = map(int, input().split())
x = Ma * bi - Mb * ai # Σai:Σbi=Ma:Mb<->Ma*Σbi-Mb*Σai=0
for k in range(-MX, MX + 1):
dp[j][k] = dp[i][k]
dp[j][x] = min(dp[j][x], ci)
for k in range(-MX + x, MX + 1):
dp[j][k] = min(
dp[j][k],
dp[i][k - x] + ci
)
i, j = j, i
res = dp[i][0]
print(-1 if res == INF else res)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
4b076855d9faf7d4b9b52c1ba3bcabde38de220d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3477.py | 445b282b68ddf7bc5304da572da944985b261730 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | """
Problem B. Tidy Numbers
Problem
Tatiana likes to keep things tidy. Her toys are sorted from smallest to largest,
her pencils are sorted from shortest to longest and her computers from oldest to
newest. One day, when practicing her counting skills, she noticed that some
integers, when written in base 10 with no leading zeroes, have their digits
sorted in non-decreasing order. Some examples of this are 8, 123, 555, and
224488. She decided to call these numbers tidy. Numbers that do not have this
property, like 20, 321, 495 and 999990, are not tidy.
She just finished counting all positive integers in ascending order from 1 to N.
What was the last tidy number she counted?
Input
The first line of the input gives the number of test cases, T. T lines follow.
Each line describes a test case with a single integer N, the last number counted
by Tatiana.
Output
For each test case, output one line containing Case #x: y, where x is the test
case number (starting from 1) and y is the last tidy number counted by Tatiana.
Limits
1 ≤ T ≤ 100.
Small dataset
1 ≤ N ≤ 1000.
Large dataset
1 ≤ N ≤ 10^18.
Sample

Input
4
132
1000
7
111111111111111110

Output
Case #1: 129
Case #2: 999
Case #3: 7
Case #4: 99999999999999999
"""
def get_closest_tidy_number(n):
if n < 10:
return n
n_str = str(n)
n_len = len(n_str)
prev_value = -1
break_idx = -1
# find position and value of the first digit to the left that breaks
# non-decreasing order
for idx in range(len(n_str)):
value = int(n_str[idx])
if value < prev_value:
break_idx = idx
break
prev_value = value
if break_idx == -1:
return n
# decimal place from the right: 0 means 1s, 1 means 10s and so on
# place = len(n_str) - break_idx - 1
tidy_value = int(n_str[:break_idx] + '0' * (n_len - break_idx)) - 1
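    # e.g. N = 132: keep "1", zero out the rest (130), subtract 1 -> 129 (sample Case #1)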
n_str = str(tidy_value)
while break_idx > 1:
break_idx -= 1
if int(n_str[break_idx]) < int(n_str[break_idx - 1]):
tidy_value = int(n_str[:break_idx] + '0' * (n_len - break_idx)) - 1
n_str = str(tidy_value)
else:
return tidy_value
return tidy_value
test_cases = int(input())
for i in range(1, test_cases + 1):
input_str = int(input())
tidy_number = get_closest_tidy_number(input_str)
print("Case #{}: {}".format(i, tidy_number))
| [
"[email protected]"
]
| |
68e4482c14a3dab16659aa7b39e7e1d5f4c639ed | edd1adb88112045d16d3e6417117d45ceed4a634 | /classical/tidybot-opt14-strips/api.py | fb141663dc9a4e046bd1d3dc18576e2df06bd7ef | []
| no_license | AI-Planning/classical-domains | 26de25bf23622f95c877960c1d52f444922d8737 | 4bd0b42d89ea02bd38af6f93cf20a0ab0cbda9d9 | refs/heads/main | 2023-04-27T07:55:55.832869 | 2023-03-29T01:46:11 | 2023-03-29T01:46:11 | 253,298,999 | 24 | 12 | null | 2023-04-18T01:45:39 | 2020-04-05T18:02:53 | PDDL | UTF-8 | Python | false | false | 2,822 | py | domains = [
{'description': 'The Tidybot domain models a household cleaning task, in which one or more robots must pick up a set of objects and put them into goal locations. The world is structured as a 2d grid, divided into navigable locations and surfaces on which objects may lie. Robots have a gripper, which moves relative to the robot, up to some maximum radius. Existing objects block the gripper, so that it may be necessary to move one object out of the way to put another one down. Robots can carry one object at a time in the gripper, but may also make use of a cart, that can hold multiple objects. The instance generator creates worlds that contain rectangular surfaces ("tables"), as well as U-shaped enclosures ("cupboards"), which are the goal locations of objects.',
'ipc': '2014',
'name': 'tidybot',
'problems': [('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p01.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p02.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p03.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p04.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p05.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p06.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p07.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p08.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p09.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p10.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p11.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p12.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p13.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p14.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p15.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p16.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p17.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p18.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p19.pddl'),
('tidybot-opt14-strips/domain.pddl',
'tidybot-opt14-strips/p20.pddl')]}
] | [
"[email protected]"
]
| |
9704f2152ae475830dc15c917f3fe61bda494b73 | 55ceefc747e19cdf853e329dba06723a44a42623 | /_CodeTopics/LeetCode/201-400/000387/000387.py | f9281d3ccb22db12c9f847e92d1c3e8f262be557 | []
| no_license | BIAOXYZ/variousCodes | 6c04f3e257dbf87cbe73c98c72aaa384fc033690 | ee59b82125f100970c842d5e1245287c484d6649 | refs/heads/master | 2023-09-04T10:01:31.998311 | 2023-08-26T19:44:39 | 2023-08-26T19:44:39 | 152,967,312 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
def str_to_dict(s):
dic = {}
for ch in s:
if ch in dic:
dic[ch] += 1
else:
dic[ch] = 1
return dic
dic = str_to_dict(s)
for i in range(len(s)):
if dic[s[i]] == 1:
return i
return -1
"""
https://leetcode-cn.com/submissions/detail/133018753/
    104 / 104 test cases passed
    Status: Accepted
    Runtime: 108 ms
    Memory: 13.8 MB
    Runtime: 108 ms, beat 79.54% of all Python submissions
    Memory: 13.8 MB, beat 21.14% of all Python submissions
"""
| [
"[email protected]"
]
| |
7e221de13be5313611640449392570d027655ac8 | 0a7d76af2d8dced3c65fbcbda9af6d17b1e429c0 | /tests/forms_tests/tests/test_media.py | 2b73a041becbfaf07ab7b9065f321e9886cac50a | [
"Python-2.0",
"BSD-3-Clause"
]
| permissive | manhnd1112/GR | 607d4e9add987dd994c0fe20629b03631769c02a | 2ee9da122afeb33b3ee589a7f64d3f74d2654a1a | refs/heads/master | 2022-12-11T00:36:05.143147 | 2018-05-31T10:03:35 | 2018-05-31T10:03:35 | 125,654,350 | 0 | 0 | BSD-3-Clause | 2022-12-08T00:58:26 | 2018-03-17T17:42:34 | Python | UTF-8 | Python | false | false | 24,831 | py | import warnings
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
@override_settings(
STATIC_URL='http://media.example.com/static/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(
css={'all': ('path/to/css1', '/path/to/css2')},
js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'),
)
self.assertEqual(
str(m),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
self.assertEqual(
repr(m),
"Media(css={'all': ('path/to/css1', '/path/to/css2')}, "
"js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'))"
)
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(
str(m3),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Media objects can be interrogated by media type
self.assertEqual(
str(w1.media['css']),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />"""
)
self.assertEqual(
str(w1.media['js']),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(
str(w1.media + w2.media + w3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# media addition hasn't affected the original objects
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super().media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super().media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(
str(w6.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(
str(w7.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(
str(w8.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parents widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(
str(w9.media),
"""<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(
str(w11.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(
str(w12.media),
"""<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(
str(multimedia.media),
"""<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super().__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(
str(mymulti.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(
str(f1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(
str(f1.media + f2.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(
str(f3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
)
# Media works in templates
self.assertEqual(
Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />"""
)
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(str(media), media.__html__())
def test_merge(self):
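        # Each case: ((list1, list2), expected) - Media.merge deduplicates while
        # preserving the relative order of entries from both lists.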
test_values = (
(([1, 2], [3, 4]), [1, 2, 3, 4]),
(([1, 2], [2, 3]), [1, 2, 3]),
(([2, 3], [1, 2]), [1, 2, 3]),
(([1, 3], [2, 3]), [1, 2, 3]),
(([1, 2], [1, 3]), [1, 2, 3]),
(([1, 2], [3, 2]), [1, 3, 2]),
)
for (list1, list2), expected in test_values:
with self.subTest(list1=list1, list2=list2):
self.assertEqual(Media.merge(list1, list2), expected)
def test_merge_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(Media.merge([1, 2], [2, 1]), [1, 2])
self.assertEqual(
str(w[-1].message),
'Detected duplicate Media files in an opposite order:\n1\n2'
)
| [
"[email protected]"
]
| |
eb6724585a47c16c7058930111a03405d5276fc7 | 69439004c494c2d56018468d3fec8c9e56036fc8 | /tests/zeus/utils/test_auth.py | 5c2197a339a137df799456193c58afdd897db536 | [
"Apache-2.0"
]
| permissive | buchiputaobutuputaopi/zeus-1 | 6dbc54e65c925040b1c1e01683625cea49299b4e | 8a606642d9ef8f239df2e8d7079ea4d130d78cb3 | refs/heads/master | 2021-06-25T07:26:52.278251 | 2017-09-06T03:53:04 | 2017-09-06T03:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from zeus import auth
def test_login_user(client, default_user):
with client.session_transaction() as session:
auth.login_user(default_user.id, session=session)
assert session['uid'] == default_user.id
assert session['expire']
| [
"[email protected]"
]
| |
05a469cc480e500bf829d0d976976b2b96478216 | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Response/SimbaAdgroupOnlineitemsGetResponse.py | f660d7d21eb7fabf204fd071e5a8506e12f10f55 | []
| no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,175 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Get all items the user currently has listed for sale online
# @author [email protected]
# @date 2013-03-07 19:54:48
# @version: 0.0.0
from datetime import datetime
import os
import sys
import time
_jsonEnode = None
try:
import demjson
_jsonEnode = demjson.encode
except Exception:
try:
import simplejson
except Exception:
try:
import json
except Exception:
raise Exception("Can not import any json library")
else:
_jsonEnode = json.dumps
else:
_jsonEnode = simplejson.dumps
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__parentPath = os.path.normpath(os.path.join(__getCurrentPath(), os.path.pardir))
if __parentPath not in sys.path:
sys.path.insert(0, __parentPath)
from Domain.SimbaItemPartition import SimbaItemPartition
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Response: get all items the user currently has listed for sale online</SPAN>
# <UL>
# </UL>
class SimbaAdgroupOnlineitemsGetResponse(object):
def __init__(self, kargs=dict()):
super(self.__class__, self).__init__()
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Return information of the request, including status etc.</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">dict</SPAN>
# </LI>
# </UL>
self.responseStatus = None
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Response body of the request</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.responseBody = None
self.code = None
self.msg = None
self.sub_code = None
self.sub_msg = None
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Taobao items with pagination</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">SimbaItemPartition</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Object</SPAN>
# </LI>
# </UL>
self.page_item = None
self.__init(kargs)
def isSuccess(self):
return self.code == None and self.sub_code == None
def _newInstance(self, name, value):
types = self._getPropertyType(name)
propertyType = types[0]
isArray = types[1]
if propertyType == bool:
if isArray:
if not value:
return []
return [x for x in value[value.keys()[0]]]
else:
return value
elif propertyType == datetime:
format = "%Y-%m-%d %H:%M:%S"
if isArray:
if not value:
return []
return [datetime.strptime(x, format) for x in value[value.keys()[0]]]
else:
return datetime.strptime(value, format)
elif propertyType == str:
if isArray:
if not value:
return []
return [x for x in value[value.keys()[0]]]
else:
                # like taobao.simba.rpt.adgroupbase.get: response.rpt_adgroup_base_list is a JSON string, but it will be decoded into a list by the Python json lib
if not isinstance(value, basestring):
#the value should be a json string
return _jsonEnode(value)
return value
else:
if isArray:
if not value:
return []
return [propertyType(x) for x in value[value.keys()[0]]]
else:
return propertyType(value)
def _getPropertyType(self, name):
properties = {
"page_item": "SimbaItemPartition",
}
levels = {
"page_item": "Object",
}
nameType = properties[name]
pythonType = None
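        # Map the schema's declared type name onto a concrete Python type.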
if nameType == "Number":
pythonType = int
elif nameType == "String":
pythonType = str
elif nameType == 'Boolean':
pythonType = bool
elif nameType == "Date":
pythonType = datetime
elif nameType == 'Field List':
pythonType == str
elif nameType == 'Price':
pythonType = float
elif nameType == 'byte[]':
pythonType = str
else:
pythonType = getattr(sys.modules["Domain.%s" % nameType], nameType)
        # Is this a single element, or an array of elements?
level = levels[name]
if "Array" in level:
return (pythonType, True)
else:
return (pythonType, False)
def __init(self, kargs):
if kargs.has_key("page_item"):
self.page_item = self._newInstance("page_item", kargs["page_item"])
if kargs.has_key("code"):
self.code = kargs["code"]
if kargs.has_key("msg"):
self.msg = kargs["msg"]
if kargs.has_key("sub_code"):
self.sub_code = kargs["sub_code"]
if kargs.has_key("sub_msg"):
self.sub_msg = kargs["sub_msg"]
| [
"[email protected]"
]
| |
7b0209b5129a33a20957245a3ed25f1bda5ed1ce | e6d556d97081576da6469cf1e8c1dd14565db2da | /code/tkinter/icons.py | 32bdcc5c4a086dae60cb06cb946bb8bd9480cc34 | []
| no_license | Scotth72/codePractice | 0b7c795050d08a34dff2b99507b20094d233739a | 475482fab0d69f93d936dc1ba8c2511174089b7c | refs/heads/master | 2023-01-19T02:58:40.977634 | 2020-11-26T15:55:47 | 2020-11-26T15:55:47 | 313,863,106 | 0 | 0 | null | 2020-11-26T15:55:48 | 2020-11-18T08:00:39 | Python | UTF-8 | Python | false | false | 234 | py | from tkinter import *
from PIL import ImageTk, Image
root = Tk()
root.title("Learn to use Icons")
icon = ImageTk.PhotoImage(Image.open('../icons/mando.png'))  # load the PNG via PIL
root.iconphoto(False, icon)  # iconbitmap() expects a .ico file, so use iconphoto() for a PNG
btn_quit = Button(root, text="Exit Program", command=root.quit)
btn_quit.pack()
root.mainloop() | [
"[email protected]"
]
| |
96b9713d9cbdcfaf580b86638d5ce9c0f08f5285 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_4/run_cfg.py | d8c7fb4def9e957dabac9d51c5ec12ae8fa44d92 | []
| no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_23_1_Yum.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_24_1_892.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_25_1_9AW.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_26_1_347.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-100_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_27_1_dAe.root')
)
| [
"[email protected]"
]
| |
bc1e4713048fff7e4cc96fdf6e7e0c26fb0e0f23 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/dpacreativefeed/model/update_creative_feed_response_wrapper_body.py | b75f4e07354b58dd0fce557dbdad17c8b1b70fd0 | [
"Apache-2.0"
]
| permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 11,241 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.dpacreativefeed.model.dpa_creative_feed_type import DpaCreativeFeedType
globals()['DpaCreativeFeedType'] = DpaCreativeFeedType
class UpdateCreativeFeedResponseWrapperBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([DpaCreativeFeedType],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""UpdateCreativeFeedResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([DpaCreativeFeedType]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UpdateCreativeFeedResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([DpaCreativeFeedType]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
]
| |
1b2603db33a6d30fc510ef9b6fd941c16bf4721d | c4750ec6eeda0092e3a5515d4878cfe42e117e90 | /test/test_inference/test_compiled.py | d863528604a3ebdc39f003c9c320c12eab01a952 | [
"MIT"
]
| permissive | phgn0/jedi | 6e5e83778fe699d9735ab52a46ee94dec2a8be99 | eb9af151ea0f447ab9d5d00d14e8fee542bc09d1 | refs/heads/master | 2020-09-02T23:38:36.442447 | 2019-11-10T14:03:49 | 2019-11-10T14:03:49 | 219,332,443 | 1 | 0 | NOASSERTION | 2019-11-03T16:42:27 | 2019-11-03T16:42:26 | null | UTF-8 | Python | false | false | 5,139 | py | from textwrap import dedent
import math
import sys
from collections import Counter
from datetime import datetime
import pytest
from jedi.inference import compiled
from jedi.inference.compiled.access import DirectObjectAccess
from jedi.inference.gradual.conversion import _stub_to_python_value_set
def test_simple(inference_state, environment):
obj = compiled.create_simple_object(inference_state, u'_str_')
upper, = obj.py__getattribute__(u'upper')
objs = list(upper.execute_with_values())
assert len(objs) == 1
if environment.version_info.major == 2:
expected = 'unicode'
else:
expected = 'str'
assert objs[0].name.string_name == expected
def test_builtin_loading(inference_state):
string, = inference_state.builtins_module.py__getattribute__(u'str')
from_name, = string.py__getattribute__(u'__init__')
assert from_name.tree_node
assert not from_name.py__doc__() # It's a stub
def test_next_docstr(inference_state):
next_ = compiled.builtin_from_name(inference_state, u'next')
assert next_.tree_node is not None
assert next_.py__doc__() == '' # It's a stub
for non_stub in _stub_to_python_value_set(next_):
assert non_stub.py__doc__() == next.__doc__
def test_parse_function_doc_illegal_docstr():
docstr = """
test_func(o
doesn't have a closing bracket.
"""
assert ('', '') == compiled.value._parse_function_doc(docstr)
def test_doc(inference_state):
"""
Even CompiledObject docs always return empty docstrings - not None, that's
just a Jedi API definition.
"""
str_ = compiled.create_simple_object(inference_state, u'')
# Equals `''.__getnewargs__`
obj, = str_.py__getattribute__(u'__getnewargs__')
assert obj.py__doc__() == ''
def test_string_literals(Script, environment):
def typ(string):
d = Script("a = %s; a" % string).goto_definitions()[0]
return d.name
assert typ('""') == 'str'
assert typ('r""') == 'str'
if environment.version_info.major > 2:
assert typ('br""') == 'bytes'
assert typ('b""') == 'bytes'
assert typ('u""') == 'str'
else:
assert typ('b""') == 'str'
assert typ('u""') == 'unicode'
def test_method_completion(Script, environment):
code = dedent('''
class Foo:
def bar(self):
pass
foo = Foo()
foo.bar.__func__''')
assert [c.name for c in Script(code).completions()] == ['__func__']
def test_time_docstring(Script):
import time
comp, = Script('import time\ntime.sleep').completions()
assert comp.docstring(raw=True) == time.sleep.__doc__
expected = 'sleep(secs: float) -> None\n\n' + time.sleep.__doc__
assert comp.docstring() == expected
def test_dict_values(Script, environment):
if environment.version_info.major == 2:
# It looks like typeshed for Python 2 returns Any.
pytest.skip()
assert Script('import sys\nsys.modules["alshdb;lasdhf"]').goto_definitions()
def test_getitem_on_none(Script):
script = Script('None[1j]')
assert not script.goto_definitions()
issue, = script._inference_state.analysis
assert issue.name == 'type-error-not-subscriptable'
def _return_int():
return 1
@pytest.mark.parametrize(
'attribute, expected_name, expected_parent', [
('x', 'int', 'builtins'),
('y', 'int', 'builtins'),
('z', 'bool', 'builtins'),
('cos', 'cos', 'math'),
('dec', 'Decimal', 'decimal'),
('dt', 'datetime', 'datetime'),
('ret_int', '_return_int', 'test.test_inference.test_compiled'),
]
)
def test_parent_context(same_process_inference_state, attribute, expected_name, expected_parent):
import decimal
class C:
x = 1
y = int
z = True
cos = math.cos
dec = decimal.Decimal(1)
dt = datetime(2000, 1, 1)
ret_int = _return_int
o = compiled.CompiledObject(
same_process_inference_state,
DirectObjectAccess(same_process_inference_state, C)
)
x, = o.py__getattribute__(attribute)
assert x.py__name__() == expected_name
module_name = x.parent_context.py__name__()
if module_name == '__builtin__':
module_name = 'builtins' # Python 2
assert module_name == expected_parent
assert x.parent_context.parent_context is None
@pytest.mark.skipif(sys.version_info[0] == 2, reason="Ignore Python 2, because EOL")
@pytest.mark.parametrize(
'obj, expected_names', [
('', ['str']),
(str, ['str']),
(''.upper, ['str', 'upper']),
(str.upper, ['str', 'upper']),
(math.cos, ['cos']),
(Counter, ['Counter']),
(Counter(""), ['Counter']),
(Counter.most_common, ['Counter', 'most_common']),
(Counter("").most_common, ['Counter', 'most_common']),
]
)
def test_qualified_names(same_process_inference_state, obj, expected_names):
o = compiled.CompiledObject(
same_process_inference_state,
DirectObjectAccess(same_process_inference_state, obj)
)
assert o.get_qualified_names() == tuple(expected_names)
| [
"[email protected]"
]
| |
6429ff3a5cdd451090741ad95d4eb7c834662443 | 7ae0f100b49763f79b276260bbc0e87bd904da3e | /src/wdf/management/commands/prepare_dump.py | e65ea353701bb3108f1a5dec39c80cfd359756f9 | []
| no_license | wondersell/wildsearch-indexer | d88a5b3bce17acc1cb61d365f55ab5d9f63f61ae | 67d5f29f6d405c055cfa211ddf0b70521382a671 | refs/heads/master | 2023-07-19T00:33:34.371231 | 2020-12-31T11:20:00 | 2020-12-31T11:20:00 | 285,488,583 | 2 | 0 | null | 2021-07-19T06:26:44 | 2020-08-06T06:09:51 | Python | UTF-8 | Python | false | false | 1,339 | py | import logging
from django.core.management.base import BaseCommand
from wdf.exceptions import DumpStateError
from wdf.indexer import Indexer
from wdf.tasks import prepare_dump
class Command(BaseCommand):
help = 'Prepares job for importing' # noqa: VNE003
def add_arguments(self, parser):
parser.add_argument('job_id', type=str)
parser.add_argument('--chunk_size', type=int, default=5000, required=False)
parser.add_argument('--background', choices=['yes', 'no'], default='yes')
def handle(self, *args, **options):
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('[%(levelname)s] %(name)s: %(message)s'))
logger = logging.getLogger('')
logger.addHandler(console)
job_id = options['job_id']
if options['background'] == 'yes':
prepare_dump.delay(job_id=job_id)
self.stdout.write(self.style.SUCCESS(f'Job #{job_id} added to process queue for preparing'))
else:
try:
indexer = Indexer(get_chunk_size=options['chunk_size'])
indexer.prepare_dump(job_id=options['job_id'])
except DumpStateError as error:
self.stdout.write(self.style.ERROR(f'Job #{job_id} processing failed: {error}'))
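# Example invocations (hypothetical job id; the background path, which is the
# default, assumes a Celery worker is consuming the task queue):
#   python manage.py prepare_dump 12345
#   python manage.py prepare_dump 12345 --background no --chunk_size 1000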
| [
"[email protected]"
]
| |
c4e8dbc6684184e78245deb69b8a5f098817f5d9 | f6f632bee57875e76e1a2aa713fdbe9f25e18d66 | /python/CrackingTheCodingInterview_6/01_08_zero-matrix-lcci.py | 064080aa330c0cdb7f50aa9177f2c29ebc6ce08e | []
| no_license | Wang-Yann/LeetCodeMe | b50ee60beeeb3661869bb948bef4fbe21fc6d904 | 44765a7d89423b7ec2c159f70b1a6f6e446523c2 | refs/heads/master | 2023-08-07T05:31:23.428240 | 2021-09-30T15:33:53 | 2021-09-30T15:33:53 | 253,497,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-12 00:02:47
# @Last Modified : 2020-07-12 00:02:47
# @Mail : [email protected]
# @Version : 1.0.0
"""
# Write an algorithm such that if an element in an M x N matrix is 0,
# its entire row and column are set to zero.
#
#
# Example 1:
#
# Input:
# [
#   [1,1,1],
#   [1,0,1],
#   [1,1,1]
# ]
# Output:
# [
#   [1,0,1],
#   [0,0,0],
#   [1,0,1]
# ]
#
#
# Example 2:
#
# Input:
# [
#   [0,1,2,0],
#   [3,4,5,2],
#   [1,3,1,5]
# ]
# Output:
# [
#   [0,0,0,0],
#   [0,4,5,0],
#   [0,3,1,0]
# ]
#
# Related Topics: Array
# 👍 10 👎 0
"""
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
if not matrix:
return
rows = set()
cols = set()
m, n = len(matrix), len(matrix[0])
for i in range(m):
for j in range(n):
if not matrix[i][j]:
rows.add(i)
cols.add(j)
for i in range(m):
for j in range(n):
if i in rows or j in cols:
matrix[i][j] = 0
# leetcode submit region end(Prohibit modification and deletion)
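# A constant-extra-space variant (a sketch, not part of the submitted solution
# above): reuse row 0 and column 0 of the matrix itself as the zero markers,
# recording their own state in two booleans first.
class SolutionConstantSpace:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        if not matrix:
            return
        m, n = len(matrix), len(matrix[0])
        first_row_zero = any(matrix[0][j] == 0 for j in range(n))
        first_col_zero = any(matrix[i][0] == 0 for i in range(m))
        # mark zero rows/columns in the first row and column
        for i in range(1, m):
            for j in range(1, n):
                if matrix[i][j] == 0:
                    matrix[i][0] = 0
                    matrix[0][j] = 0
        # zero out inner cells according to the markers
        for i in range(1, m):
            for j in range(1, n):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0
        # finally handle the first row and column themselves
        if first_row_zero:
            for j in range(n):
                matrix[0][j] = 0
        if first_col_zero:
            for i in range(m):
                matrix[i][0] = 0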
@pytest.mark.parametrize("args,expected", [
(
[
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[0, 0, 0],
[1, 0, 1]
]),
pytest.param(
[
[0, 1, 2, 0],
[3, 4, 5, 2],
[1, 3, 1, 5]
],
[
[0, 0, 0, 0],
[0, 4, 5, 0],
[0, 3, 1, 0]
]),
])
def test_solutions(args, expected):
Solution().setZeroes(args)
assert args == expected
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=tee-sys", __file__])
| [
"[email protected]"
]
| |
0c9f915ad0956041421ba3152c8f1d36b03896a0 | b0a64cf2d36c7da2c81f920cab6f67e8a8e5b2d4 | /models/VGG_mini_BN_PReLU.py | c0390f9195d03450ae354830944220579419c08a | []
| no_license | OminiaVincit/chainer-cifar10 | 69407a114e35b9100af56142092ee9e14577a423 | 449c55f205ea5fd59313598af0f27feb51b18da4 | refs/heads/master | 2021-01-19T06:31:02.379472 | 2015-07-15T20:29:14 | 2015-07-15T20:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from chainer import Variable, FunctionSet
import chainer.functions as F
class VGG_mini_BN_PReLU(FunctionSet):
"""
VGGnet for CIFAR-10
"""
def __init__(self):
super(VGG_mini_BN_PReLU, self).__init__(
conv1_1=F.Convolution2D(3, 64, 3, stride=1, pad=1),
bn1_1=F.BatchNormalization(64, decay=0.9, eps=1e-5),
prelu1_1=F.PReLU(64),
conv1_2=F.Convolution2D(64, 64, 3, stride=1, pad=1),
bn1_2=F.BatchNormalization(64, decay=0.9, eps=1e-5),
prelu1_2=F.PReLU(64),
conv2_1=F.Convolution2D(64, 128, 3, stride=1, pad=1),
bn2_1=F.BatchNormalization(128, decay=0.9, eps=1e-5),
prelu2_1=F.PReLU(128),
conv2_2=F.Convolution2D(128, 128, 3, stride=1, pad=1),
bn2_2=F.BatchNormalization(128, decay=0.9, eps=1e-5),
prelu2_2=F.PReLU(128),
conv3_1=F.Convolution2D(128, 256, 3, stride=1, pad=1),
prelu3_1=F.PReLU(256),
conv3_2=F.Convolution2D(256, 256, 3, stride=1, pad=1),
prelu3_2=F.PReLU(256),
conv3_3=F.Convolution2D(256, 256, 3, stride=1, pad=1),
prelu3_3=F.PReLU(256),
conv3_4=F.Convolution2D(256, 256, 3, stride=1, pad=1),
prelu3_4=F.PReLU(256),
fc4=F.Linear(4096, 1024),
prelu4=F.PReLU(),
fc5=F.Linear(1024, 1024),
prelu5=F.PReLU(),
fc6=F.Linear(1024, 10)
)
def forward(self, x_data, y_data, train=True):
x = Variable(x_data, volatile=not train)
t = Variable(y_data, volatile=not train)
h = self.prelu1_1(self.bn1_1(self.conv1_1(x)))
h = self.prelu1_2(self.bn1_2(self.conv1_2(h)))
h = F.max_pooling_2d(h, 2, stride=2)
h = F.dropout(h, ratio=0.25, train=train)
h = self.prelu2_1(self.bn2_1(self.conv2_1(h)))
h = self.prelu2_2(self.bn2_2(self.conv2_2(h)))
h = F.max_pooling_2d(h, 2, stride=2)
h = F.dropout(h, ratio=0.25, train=train)
h = self.prelu3_1(self.conv3_1(h))
h = self.prelu3_2(self.conv3_2(h))
h = self.prelu3_3(self.conv3_3(h))
h = self.prelu3_4(self.conv3_4(h))
h = F.max_pooling_2d(h, 2, stride=2)
h = F.dropout(h, ratio=0.25, train=train)
h = F.dropout(self.prelu4(self.fc4(h)), train=train, ratio=0.5)
h = F.dropout(self.prelu5(self.fc5(h)), train=train, ratio=0.5)
h = self.fc6(h)
if train:
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
else:
return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
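# Minimal usage sketch (not from the original repo; assumes chainer 1.x and
# numpy, CPU execution, and one batch of 8 CIFAR-10 images):
#   import numpy as np
#   model = VGG_mini_BN_PReLU()
#   x = np.zeros((8, 3, 32, 32), dtype=np.float32)
#   t = np.zeros((8,), dtype=np.int32)
#   loss, acc = model.forward(x, t, train=True)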
| [
"[email protected]"
]
| |
d1e8d70b961b1be945693a91169e369f2646ef5b | ac216a2cc36f91625e440247986ead2cd8cce350 | /appengine/findit/pipelines/test/send_notification_for_culprit_pipeline_test.py | 511524ebf3afcb0224df7cc05d4923d14340ae07 | [
"BSD-3-Clause"
]
| permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 1,037 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from common.waterfall import failure_type
from pipelines.send_notification_for_culprit_pipeline import (
SendNotificationForCulpritPipeline)
from services import constants
from services import culprit_action
from services.parameters import SendNotificationForCulpritParameters
from waterfall.test import wf_testcase
class SendNotificationForCulpritPipelineTest(wf_testcase.WaterfallTestCase):
@mock.patch.object(
culprit_action, 'SendNotificationForCulprit', return_value=True)
def testSendNotification(self, _):
pipeline_input = SendNotificationForCulpritParameters(
cl_key='mockurlsafekey',
force_notify=True,
revert_status=constants.CREATED_BY_SHERIFF,
failure_type=failure_type.COMPILE)
pipeline = SendNotificationForCulpritPipeline(pipeline_input)
self.assertTrue(pipeline.run(pipeline_input))
| [
"[email protected]"
]
| |
ed8c41ec34cf718b87ecb1eb8f5ac9e984af1550 | 00f3468d8917ac0c1b4df8b4dc50e82c0d9be3fa | /hhalign_with_hmms.py | d7ff5f7a284a3efec299ba27c0e89dd5efbc57e9 | []
| no_license | berkeleyphylogenomics/BPG_utilities | 4e332bb401b8c057502a1a0a1d532396bfff9542 | bbf5df137a0a459598c3f9073d80f0086e5f7550 | refs/heads/master | 2021-01-01T19:21:13.740575 | 2014-11-05T18:40:31 | 2014-11-05T18:40:31 | 24,867,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,914 | py | #!/usr/bin/python
"""
run_hhalign_with_input_hmms.py Terry Farrah May 2008 Sjolander lab
Align 2 HMMs using HHalign.
Do not let HHalign generate HMMs; instead, provide HMMs.
Optionally, use PSI-PRED to generate secondary structure and
tell HHalign to use this info in its alignment.
Warnings:
Must run in same dir as input hmms (because of SAM pickiness).
Path for PSIPRED data is hard-coded.
SAM hmms tend to be here:
/home/ruchira/SHMM-SHMM/current_method/1a7w/1a7w.mod
/home/ruchira/SHMM-SHMM/current_method/1a7w/ascii_ghmm/1a7w.mod
Those on Ohana are generated using a newer version of FlowerPower.
6/11/08
WORK REMAINING:
-- get psi-pred installed properly on ohana. Does psi-pred make use of
a shell variable to tell it where the psi-pred data is? If so, set it.
Get script/binary installed in /usr/bin.
-- remove hardcoded path to script_dir (should be non-issue after above is done)
"""
import os, sys, glob, BPG_common.fasta
from optparse import OptionParser
from matchmaker.shmm_shmm_lib import *
from matchmaker.align_originals_via_shmms_and_score import *
script_dir = "/home/tfarrah/hmm-hmm"
psipred_data_dir = "/clusterfs/ohana/software/lib/psipred/data"
def add_ss_info_to_hmmer( hmm_filename, psipred_filename,
out_filename):
""" Input: 2) .horiz file generated by psipred suite
(specifically, by psipass2)
1) HMMER format HMM file
Output: HMM file with psipred secondary structure records
added, specifically for input to HHalign
"""
# open secondary structure file
psipred_file = open(psipred_filename, "r")
# read all lines, saving Pred: and Conf: lines in two lists (after cleaving the prefixes)
pred_lines = []
conf_lines = []
for line in psipred_file.readlines():
if line.startswith("Pred:"): pred_lines.append(line[6:].strip())
if line.startswith("Conf:"): conf_lines.append(line[6:].strip())
# get sequence length from this data
pred_seqlen = sum(map (len, pred_lines))
conf_seqlen = sum(map (len, conf_lines))
# if sequence length is zero, or if the two lengths differ, issue warning
if pred_seqlen == 0:
print >> sys.stderr, \
"Error in file %s: no lines beginning with Pred:, or all such lines empty" \
% (psipred_filename)
sys.exit(0)
if pred_seqlen != conf_seqlen:
print >> sys.stderr, \
"Error in file %s: lengths of Pred, Conf strings differ (%d v. %d)" \
% (psipred_filename, pred_seqlen, conf_seqlen)
sys.exit(0)
# close .horiz file
psipred_file.close()
# open hmm file and output file
hmm_file = open(hmm_filename, "r")
if out_filename:
out_file = open(out_filename, "w")
else:
out_file = sys.stdout
# read and copy all lines to output file,
# inserting SS info in appropriate place
for line in hmm_file.readlines():
if line.startswith("XT"):
print >> out_file, "SSCIT", "Secondary structure info generated by PSIPRED"
for ssline in pred_lines: print >> out_file, "SSPRD", ssline
for ssline in conf_lines: print >> out_file, "SSCNF", ssline
# skip any pre-existing SS info lines
if line.startswith("SSPRD") or line.startswith("SSCNF"):
continue
# check to see that HMM length matches length of SS prediction
if line.startswith("LENG"):
hmm_seqlen = int (line[6:])
if hmm_seqlen != pred_seqlen:
print >> sys.stderr, \
"Warning: lengths of SS prediction, HMM differ (%d v. %d)" \
% (hmm_seqlen, pred_seqlen)
print >> out_file, line.rstrip()
# count match states while doing so
# compare match state count with seq length
# and issue warning if different
# close hmm file and output file
hmm_file.close()
out_file.close()
def extract_XY_alignment(seqX, seqY, file):
# extract fasta format alignment from HHalign
# standard output, adding X's and -'s to each end
# (-ofas gives fasta output but leaves off residue #s)
def add_to_seq(hhalign_line, seq):
return seq + hhalign_line
def prefix_X_and_dash(numX_for_Z, numX_for_W, seqZ, seqW):
for i in range(0,numX_for_Z):
seqZ = 'X' + seqZ
seqW = '-' + seqW
for i in range(0,numX_for_W):
seqW = 'X' + seqW
seqZ = '-' + seqZ
return(seqZ, seqW)
def append_X_and_dash(numX_for_Z, numX_for_W, seqZ, seqW):
for i in range(0,numX_for_Z):
seqZ = seqZ + 'X'
seqW = seqW + '-'
for i in range(0,numX_for_W):
seqW = seqW + 'X'
seqZ = seqZ + '-'
return(seqZ, seqW)
# Open file and read sequences
hhalign_file = open(file, "r")
    lines = hhalign_file.readlines()
    hhalign_file.close()
seq_start = False
seqZ = ""
seqW = ""
seqZ_start = None
seqW_start = None
for line in lines:
line = line.strip()
if line.startswith("No 1"):
seq_start = True
continue
if seq_start:
if (line.startswith("Q") or line.startswith("T")) \
and not line[2:].startswith("Consensus") \
and not line[2:].startswith("ss_pred"):
(symbol, seqname, start_res_string, seq, end_res_string,
seqlen_string) = line.split()
start_res = int(start_res_string)
end_res = int(end_res_string)
seqlen = int(seqlen_string[1:-1])
if symbol == "Q":
seqZ = seqZ + seq
if seqZ_start == None: seqZ_start = start_res
seqZ_end = end_res
seqZ_len = seqlen
elif symbol == "T":
seqW = seqW + seq
if seqW_start == None: seqW_start = start_res
seqW_end = end_res
seqW_len = seqlen
(seqZ, seqW) = prefix_X_and_dash(seqZ_start-1, seqW_start-1, seqZ, seqW)
(seqZ, seqW) = append_X_and_dash(seqZ_len-seqZ_end, seqW_len-seqW_end, seqZ, seqW)
# Create X-Y alignment according to W-Z alignment.
alignment = align_four_way(seqX, seqZ, seqW, seqY)
return alignment
def main_test():
file = sys.argv[1]
extract_XY_alignment(None, None, file)
def main():
#====================
# Process command line
#====================
parser = OptionParser()
parser.add_option("--hmm1",
dest="hmm1_filename",
help="first input HMM in SAM (.mod) format. NOTE: all input files must be in the same directory, and the program needs to be run from that directory. ALSO NOTE: the number of match states in the HMMs must equal the sequence length of the corresponding seed",
metavar="FILE")
parser.add_option("--hmm2",
dest="hmm2_filename",
help="second input HMM in SAM (.mod) format",
metavar="FILE")
parser.add_option("--seq1",
dest="seq1_filename",
help="first seed seq in fasta (.fa) format",
metavar="FILE")
parser.add_option("--seq2",
dest="seq2_filename",
help="second seed seq in fasta (.fa) format",
metavar="FILE")
parser.add_option("-o", "--out",
dest="out_filename",
help="alignment output file",
metavar="FILE")
parser.add_option("-s", "--ss",
dest="use_sec_struc",
default=False,
action="store_true",
help="use and display secondary structure information",
metavar="SS",)
parser.add_option("-w", "--ssw",
dest="sec_struc_weight",
metavar="FRACTION",
type="float",
default=0.11,
help="Weight to give secondary structure info; default =0.11")
parser.add_option("-d", "--ppdata",
dest="psipred_data_dir",
metavar="DIR",
default="/clusterfs/ohana/software/lib/psipred/data",
help="Location of weight files for PSI-PRED, default /clusterfs/ohana/software/lib/psipred/data")
(options, args) = parser.parse_args()
# check that necessary options are given, and that values are valid
# assign option values to variables
hmm_filename = [None, None]
seq_filename = [None, None]
if not options.hmm1_filename:
parser.error("Option --hmm1 required")
else:
hmm_filename[0] = options.hmm1_filename
if not options.hmm2_filename:
parser.error("Option --hmm2 required")
else:
hmm_filename[1] = options.hmm2_filename
if not options.seq1_filename:
parser.error("Option --seq1 required")
else:
seq_filename[0] = options.seq1_filename
if not options.seq2_filename:
parser.error("Option --seq2 required")
else:
seq_filename[1] = options.seq2_filename
if options.out_filename:
out_file = open(options.out_filename, "w")
else:
out_file = sys.stdout
use_sec_struc = options.use_sec_struc
sec_struc_weight = options.sec_struc_weight
psipred_data_dir = options.psipred_data_dir
runname = [None, None]
for i in range(0,2):
hmm_file_basename = os.path.basename(hmm_filename[i])
#print hmm_file_basename
runname[i] = os.path.splitext(hmm_file_basename)[0]
#print runname[i]
# Create HMMER format HMM
# creates .con.hmm and .asc.mod files; we only need the first
cmd = "convert.pl %s" % hmm_filename[i]
#print cmd
os.system(cmd)
hmmer_hmm_filename = os.path.splitext(hmm_filename[i])[0] + ".con.hmm"
hhalign_hmm_filename = os.path.splitext(hmm_file_basename)[0] + ".hhm"
if use_sec_struc:
cmd = "sam2psi %s -i %s" % (runname[i], hmm_filename[i])
#print cmd
os.system(cmd)
# we must run makemat on a copy of the .ckp file, because
# it will overwrite the original
cmd = "cp %s.ckp %s.makemat.ckp" % (runname[i], runname[i])
#print cmd
os.system(cmd)
cmd = "echo %s.makemat.ckp > %s.pn" % (runname[i], runname[i])
#print cmd
os.system(cmd)
cmd = "echo %s.cks > %s.sn" % (runname[i], runname[i])
#print cmd
os.system(cmd)
cmd = "makemat -P %s" % (runname[i])
#print cmd
os.system(cmd)
# the name of the makemat output file is stored in a file
makemat_matrix_record_filename = runname[i] + ".mn"
makemat_matrix_record_file = open(makemat_matrix_record_filename, "r")
makemat_matrix_filename = makemat_matrix_record_file.readline().strip()
#print makemat_matrix_filename
cmd = "cp %s %s.mtx" % (makemat_matrix_filename, runname[i])
#print cmd
os.system(cmd)
cmd ="psipred %s.mtx %s/weights.dat %s/weights.dat2 %s/weights.dat3 %s/weights.dat4 > %s.ss" % \
(runname[i], psipred_data_dir, psipred_data_dir, psipred_data_dir,
psipred_data_dir, runname[i])
#print cmd
os.system(cmd)
cmd = "psipass2 %s/weights_p2.dat 1 1.0 1.0 %s.ss2 %s.ss > %s.horiz" % \
(psipred_data_dir, runname[i], runname[i], runname[i])
#print cmd
os.system(cmd)
# we want to make this into a function
# instead of a separate script
#cmd = "%s/add_ss_info_to_hmmer.py -i %s.con.hmm -s %s.horiz -o %s.ss.hmm" % \
#(script_dir, runname[i], runname[i], runname[i])
#print cmd
#os.system(cmd)
add_ss_info_to_hmmer("%s.con.hmm" % runname[i], "%s.horiz" % runname[i],
"%s.ss.hmm" % runname[i])
# convert HMMER format to HHalign (.hhm) format
# optional but increases performance according to HHsearch doc
# however in my experience when HHalign is run with the resulting
# .hhm files instead of the .hmm files, it does not seem to make
# use of the secondary structure information.
#cmd = "hhmake -i %s.ss.hmm -o %s.ss.hhm" % (runname[i], runname[i])
#print cmd
#os.system(cmd)
else:
pass #see above for why
# convert HMMER format to HHalign (.hhm) format
# optional but increases performance according to HHsearch doc
#cmd = "hhmake -i %s.con.hmm -o %s.hhm" % (runname[i], runname[i])
#print cmd
#os.system(cmd)
pair = "%s_%s" % (runname[0], runname[1])
if use_sec_struc:
hhalign_output_base_filename = "%s.ss.hhalign" % (pair)
hhalign_alignment_filename = "%s.align" % (hhalign_output_base_filename)
cmd = "hhalign -i %s.ss.hmm -t %s.ss.hmm -ssw %f -o %s.align -ofas %s.fa > /dev/null" % \
(runname[0], runname[1], sec_struc_weight, hhalign_output_base_filename, hhalign_output_base_filename)
#print cmd
os.system(cmd)
else:
hhalign_output_base_filename = "%s.hhalign" % (pair)
hhalign_alignment_filename = "%s.align" % (hhalign_output_base_filename)
cmd = "hhalign -i %s.con.hmm -t %s.con.hmm -o %s.align -ofas %s.fa > /dev/null" % \
(runname[0], runname[1], hhalign_output_base_filename, hhalign_output_base_filename)
#print cmd
os.system(cmd)
# get X and Y sequences
seqX = BPG_common.fasta.ReadOneSequence(seq_filename[0])
seqY = BPG_common.fasta.ReadOneSequence(seq_filename[1])
# alignment is a 4-tuple of equal-length strings: X,Z,W,Y
alignment = extract_XY_alignment(seqX, seqY, hhalign_alignment_filename)
print >> out_file, ">%s" % runname[0]
print >> out_file, alignment[0]
print >> out_file, ">%s" % runname[1]
print >> out_file, alignment[3]
# to activate the below, need to store alignment in file, and need reference alignment
# CS_score_filename = "%s_CS.out" % pair
# compute_sp_cs_score (alignment_filename, reference_alignment_filename,
#CS_score_filename)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ee28104231e39d74f9252de0420dfa501e38557a | 6efacaed48c9c2015b20baae5b1e7812cf2614a0 | /Po/test/Abackground_mouse_one.py | 533335d6d971031ab7fe5f3398b20fcedabe8681 | []
| no_license | Felixshao/play | 53e12b7b592634a3e5515addde978e1b2a2a4591 | 4364cb91141bbbca835688d19bddb87aa0beb6b4 | refs/heads/master | 2021-05-23T19:49:56.095083 | 2020-04-07T06:09:10 | 2020-04-07T06:09:10 | 253,441,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | import pygame, os
from pygame.locals import *
from sys import exit
from config.GetProjectPath import get_project_path
path = get_project_path()
background_img_filepath = os.path.join(path, 'img', 'sushiplate.jpg')
mouse_img_filepath = os.path.join(path, 'img', 'fugu.png')
# initialize pygame, preparing it for the hardware
pygame.init()
# create the window; arguments: resolution, flags (0 = no special features), color depth
screen = pygame.display.set_mode((1920, 1080), 0, 32)
# set the window title
pygame.display.set_caption('Abackground_mouse_one')
# load and convert the images: convert() turns the image data into a Surface,
# convert_alpha() preserves per-pixel transparency
background = pygame.image.load(background_img_filepath).convert()
mouse = pygame.image.load(mouse_img_filepath).convert_alpha()
# main game loop
while True:
    for event in pygame.event.get():
        # quit the game when a quit event is received
        if event.type == QUIT:
            exit()
    # draw the background; blit() takes a Surface and the top-left coordinates
    screen.blit(background, (0, 0))
    # get the mouse position
    x, y = pygame.mouse.get_pos()
    # compute the top-left corner of the cursor image
    x -= mouse.get_width() / 2
    y -= mouse.get_height() / 2
    # draw the cursor
    screen.blit(mouse, (x, y))
    # refresh the display
    pygame.display.update()
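    # Note: this loop redraws as fast as possible; a common refinement (not in
    # the original) is to cap the frame rate with pygame.time.Clock, e.g.
    # create clock = pygame.time.Clock() once before the loop and call
    # clock.tick(60) once per iteration.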
"[email protected]"
]
| |
b5ddb5c8af232999eb8ae226c25d305066c76157 | fddc2ed5301b00f668bcb772518e0348db459538 | /convokit/communityEmbedder.py | 143d36c2f5b9fefe78dddccf919d797401191a38 | [
"MIT"
]
| permissive | deepthimhegde/Cornell-Conversational-Analysis-Toolkit | 289fd22a81d9d06c7aeb5270c11acc4ec40424bf | eb9a103f8d5e34d378b0b6d6bda3fa43587363a1 | refs/heads/master | 2020-05-29T11:16:01.765154 | 2019-05-17T18:29:27 | 2019-05-17T18:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,889 | py | import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from collections import defaultdict
from .transformer import Transformer
class CommunityEmbedder(Transformer):
"""
Must be run after threadEmbedder.fit_transform()
Groups threads together into communities
in this space for visualization or other such purposes.
:param community_key: Key in "meta" dictionary of each utterance
whose corresponding value we'll use as the community label for that
utterance (see threadEmbedder)
:param n_components: Number of dimensions to embed communities into
:param method: Embedding method; "svd", "tsne" or "none"
"""
def __init__(self, community_key=None, n_components=2, method="none"):
self.community_key = community_key
self.n_components = n_components
self.method = method
def transform(self, corpus):
"""
Same as fit_transform()
"""
return self.fit_transform(corpus)
def fit_transform(self, corpus):
"""
:param corpus: the Corpus to use
:return: a Corpus with new meta key: "communityEmbedder",
value: Dict, containing "pts": an array with rows corresponding
to embedded communities, and "labels": an array whose ith entry is
the community of the ith row of X.
"""
if self.community_key is None:
raise RuntimeError("Must specify community_key to retrieve label information from utterance")
corpus_meta = corpus.get_meta()
if "threadEmbedder" not in corpus_meta:
raise RuntimeError("Missing threadEmbedder metadata: "
"threadEmbedder.fit_transform() must be run on the Corpus first")
thread_embed_data = corpus_meta["threadEmbedder"]
X_mid = thread_embed_data["X"]
roots = thread_embed_data["roots"]
if self.method.lower() == "svd":
f = TruncatedSVD
elif self.method.lower() == "tsne":
f = TSNE
elif self.method.lower() == "none":
f = None
else:
raise Exception("Invalid embed_communities embedding method")
if f is not None:
X_embedded = f(n_components=self.n_components).fit_transform(X_mid)
else:
X_embedded = X_mid
labels = [corpus.get_utterance(root).get("meta")[self.community_key]
for root in roots]
# label_counts = Counter(labels)
subs = defaultdict(list)
for x, label in zip(X_embedded, labels):
subs[label].append(x / np.linalg.norm(x))
labels, subs = zip(*subs.items())
pts = [np.mean(sub, axis=0) for sub in subs]
retval = {"pts": pts, "labels": labels}
corpus.add_meta("communityEmbedder", retval)
return corpus
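# Minimal usage sketch (hypothetical corpus and community key; assumes a
# ThreadEmbedder-style transformer has already populated
# corpus.meta["threadEmbedder"]):
#   corpus = ThreadEmbedder(n_features=7).fit_transform(corpus)
#   corpus = CommunityEmbedder(community_key="subreddit", method="tsne").fit_transform(corpus)
#   embedding = corpus.get_meta()["communityEmbedder"]  # {"pts": ..., "labels": ...}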
| [
"[email protected]"
]
| |
765478bbc01b00211d961da6d0bd4bdab237f828 | 208baab269ddffab1a93e7dc70b052d07bf50560 | /hood/migrations/0002_auto_20200120_1140.py | a6e6fe56c70f1ee382edb53a3eebe174b83a3671 | []
| no_license | marysinaida/Neighborhood | a1035f09515ae9a24bed74ddf1263e06db134c94 | a285df5528bb99d6cb69f9ab41e320682422fe9d | refs/heads/master | 2020-12-13T23:29:18.148498 | 2020-01-21T15:04:53 | 2020-01-21T15:04:53 | 234,562,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-01-20 08:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hood', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bName', models.CharField(max_length=100)),
('bEmail', models.EmailField(max_length=100)),
],
options={
'ordering': ['bName'],
},
),
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('location', models.CharField(max_length=50)),
('occupants', models.PositiveIntegerField()),
('health_contact', models.PositiveIntegerField()),
('police_contact', models.PositiveIntegerField()),
('hood_pic', models.ImageField(blank=True, upload_to='images/')),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.TextField()),
('image', models.ImageField(blank=True, upload_to='posts/')),
('date_posted', models.DateTimeField(auto_now_add=True)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_posted'],
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True)),
('email', models.EmailField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=50)),
('profile_pic', models.ImageField(blank=True, upload_to='images/')),
('hood', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['first_name']},
),
migrations.AddField(
model_name='business',
name='hood',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.Neighborhood'),
),
migrations.AddField(
model_name='business',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
d33535490a49ccc63731773d42cd5a17f661d234 | a2948d87a8f1901c6faf922f7b8cfba825f84d9b | /resources.py | c5484e2f0cc861b20e66986f69bf1105fbfacb38 | []
| no_license | sourcery-ai-bot/4x2d | 03360fdcd5cfb135acbe0dfbdf571fb1e4d98a5a | 68a5daf2410ae6ffe1220bb7ce85b95647097157 | refs/heads/main | 2023-03-11T10:38:01.353467 | 2021-02-28T06:57:06 | 2021-02-28T06:57:06 | 344,102,678 | 0 | 0 | null | 2021-03-03T11:25:28 | 2021-03-03T11:25:27 | null | UTF-8 | Python | false | false | 412 | py | import os
import sys
def resource_path(relative_path): # needed for bundling
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path) | [
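# Example (hypothetical asset path): resolves correctly both when running from
# source and inside a PyInstaller one-file bundle, where bundled data files are
# unpacked under the temporary sys._MEIPASS directory:
#   icon = resource_path(os.path.join('img', 'icon.png'))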
"[email protected]"
]
| |
48d6e9a8f1cd30cb302f9c81eb5ca4370302e805 | c190538d85c00e03bf655af83629a5bddfd6d797 | /src/dcos_e2e_cli/dcos_vagrant/commands/install_dcos.py | 111fd161ceac49b9b4021c8e8b78de2ff50e1e44 | [
"Apache-2.0"
]
| permissive | yankcrime/dcos-e2e | e8d52aa10ecfba029c28b269354fea9fe0f85f7b | 449ca9ebc98399efc00e424d9131d2634de0471c | refs/heads/master | 2020-05-30T00:00:07.725954 | 2019-05-30T15:57:37 | 2019-05-30T15:57:37 | 189,449,013 | 0 | 0 | Apache-2.0 | 2019-05-30T16:42:28 | 2019-05-30T16:42:28 | null | UTF-8 | Python | false | false | 2,927 | py | """
Install DC/OS on a provisioned Vagrant cluster.
"""
from pathlib import Path
from typing import Any, Dict, Optional
import click
from dcos_e2e.backends import Vagrant
from dcos_e2e_cli.common.arguments import installer_argument
from dcos_e2e_cli.common.create import get_config
from dcos_e2e_cli.common.doctor import get_doctor_message
from dcos_e2e_cli.common.install import (
install_dcos_from_path,
run_post_install_steps,
)
from dcos_e2e_cli.common.options import (
cluster_id_option,
extra_config_option,
genconf_dir_option,
license_key_option,
security_mode_option,
variant_option,
verbosity_option,
)
from dcos_e2e_cli.common.utils import command_path
from dcos_e2e_cli.common.variants import get_install_variant
from dcos_e2e_cli.common.workspaces import workspace_dir_option
from ._common import ClusterVMs
from ._wait_for_dcos import wait_for_dcos_option
from .doctor import doctor
from .wait import wait
@click.command('install')
@installer_argument
@extra_config_option
@workspace_dir_option
@variant_option
@license_key_option
@genconf_dir_option
@security_mode_option
@cluster_id_option
@verbosity_option
@wait_for_dcos_option
@click.pass_context
def install_dcos(
ctx: click.core.Context,
installer: Path,
extra_config: Dict[str, Any],
variant: str,
workspace_dir: Path,
license_key: Optional[Path],
security_mode: Optional[str],
cluster_id: str,
genconf_dir: Optional[Path],
wait_for_dcos: bool,
) -> None:
"""
Install DC/OS on a provisioned Vagrant cluster.
"""
doctor_command_name = command_path(sibling_ctx=ctx, command=doctor)
wait_command_name = command_path(sibling_ctx=ctx, command=wait)
doctor_message = get_doctor_message(
doctor_command_name=doctor_command_name,
)
dcos_variant = get_install_variant(
given_variant=variant,
installer_path=installer,
workspace_dir=workspace_dir,
doctor_message=doctor_message,
)
cluster_backend = Vagrant()
cluster_vms = ClusterVMs(cluster_id=cluster_id)
dcos_config = get_config(
cluster_representation=cluster_vms,
extra_config=extra_config,
dcos_variant=dcos_variant,
security_mode=security_mode,
license_key=license_key,
)
cluster = cluster_vms.cluster
install_dcos_from_path(
cluster=cluster,
cluster_representation=cluster_vms,
dcos_config=dcos_config,
ip_detect_path=cluster_backend.ip_detect_path,
doctor_message=doctor_message,
dcos_installer=installer,
local_genconf_dir=genconf_dir,
)
run_post_install_steps(
cluster=cluster,
cluster_id=cluster_id,
dcos_config=dcos_config,
doctor_command_name=doctor_command_name,
http_checks=True,
wait_command_name=wait_command_name,
wait_for_dcos=wait_for_dcos,
)
| [
"[email protected]"
]
| |
52425699c2b0f3f4f3701d850f4388930fbaf38d | 62babb33b9bede95aac217db04636956279bb2e2 | /sort/topological sort/1385E Directing Edges.py | 269591b4f10d040c69bde6a7be642cc5a8b56613 | []
| no_license | tycyd/codeforces | 0322e31daf18544944c769fd2a50c6d006015e34 | e0773f069c6c5793f9d9a07b61878a589e375a5f | refs/heads/master | 2023-08-12T05:00:39.467404 | 2021-09-30T16:39:21 | 2021-09-30T16:39:21 | 266,847,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | from sys import stdin, stdout
if __name__ == '__main__':
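    # Kahn's topological sort over the directed edges only: if every vertex
    # can be ordered, the directed part is acyclic ("YES") and each undirected
    # edge is oriented from the earlier vertex in that order to the later one;
    # otherwise print "NO".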
def directing_edges(n, m, ind, dic, seq):
q = []
res = []
for i in range(n):
if ind[i] == 0:
q.append(i)
        while q:
cur = q.pop()
res.append(cur)
if cur in dic:
                for nxt in dic[cur]:
                    ind[nxt] -= 1
                    if ind[nxt] == 0:
                        q.append(nxt)
if len(res) < n:
stdout.write("NO\n")
else:
stdout.write("YES\n")
pos = [0]*n
for i in range(n):
pos[res[i]] = i
#print(pos)
for sq in seq:
if pos[sq[0]] < pos[sq[1]]:
#stdout.write(str(sq[0]+1) + " " + str(sq[1]+1) + '\n')
print(sq[0]+1, sq[1]+1)
else:
#stdout.write(str(sq[1]+1) + " " + str(sq[0]+1) + '\n')
print(sq[1] + 1, sq[0] + 1)
t = int(stdin.readline())
for i in range(t):
n, m = map(int, stdin.readline().split())
dic = {}
ind = [0] * n
seq = []
for j in range(m):
t, x, y = map(int, stdin.readline().split())
x -= 1
y -= 1
seq.append([x, y])
if t == 1:
if x not in dic:
dic[x] = []
dic[x].append(y)
ind[y] += 1
directing_edges(n, m, ind, dic, seq) | [
"[email protected]"
]
| |
51488b6af889fd61bcc3bde0f432eebce76ef284 | fb84e82ab80f2af43d3cdcf9a6c0351228d0f682 | /validate.py | e93c4b1bb4adf2936a69d41ba81724c3c0b0e580 | []
| no_license | doctorwk007/semseg | bf1ea79e8e5f9a0084de98e0bd588a2c46af30b0 | 39f7e642014a1e8e21a84d0ff1e0057469b5d8e4 | refs/heads/master | 2020-04-12T01:10:35.164155 | 2018-12-15T03:03:27 | 2018-12-15T03:03:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,095 | py | # -*- coding: utf-8 -*-
import torch
import os
import argparse
import cv2
import time
import numpy as np
import visdom
from torch.autograd import Variable
from scipy import misc
from semseg.dataloader.camvid_loader import camvidLoader
from semseg.dataloader.cityscapes_loader import cityscapesLoader
from semseg.dataloader.freespace_loader import freespaceLoader
from semseg.loss import cross_entropy2d
from semseg.metrics import scores
from semseg.modelloader.EDANet import EDANet
from semseg.modelloader.bisenet import BiSeNet
from semseg.modelloader.deeplabv3 import Res_Deeplab_101, Res_Deeplab_50
from semseg.modelloader.drn import drn_d_22, DRNSeg, drn_a_asymmetric_18, drn_a_asymmetric_ibn_a_18, drnseg_a_50, drnseg_a_18, drnseg_a_34, drnseg_e_22, drnseg_a_asymmetric_18, drnseg_a_asymmetric_ibn_a_18, drnseg_d_22, drnseg_d_38
from semseg.modelloader.drn_a_irb import drnsegirb_a_18
from semseg.modelloader.drn_a_refine import drnsegrefine_a_18
from semseg.modelloader.duc_hdc import ResNetDUC, ResNetDUCHDC
from semseg.modelloader.enet import ENet
from semseg.modelloader.enetv2 import ENetV2
from semseg.modelloader.erfnet import erfnet
from semseg.modelloader.fc_densenet import fcdensenet103, fcdensenet56, fcdensenet_tiny
from semseg.modelloader.fcn import fcn, fcn_32s, fcn_16s, fcn_8s
from semseg.modelloader.fcn_mobilenet import fcn_MobileNet, fcn_MobileNet_32s, fcn_MobileNet_16s, fcn_MobileNet_8s
from semseg.modelloader.fcn_resnet import fcn_resnet18, fcn_resnet34, fcn_resnet18_32s, fcn_resnet18_16s, \
fcn_resnet18_8s, fcn_resnet34_32s, fcn_resnet34_16s, fcn_resnet34_8s, fcn_resnet50_32s, fcn_resnet50_16s, fcn_resnet50_8s
from semseg.modelloader.lrn import lrn_vgg16
from semseg.modelloader.segnet import segnet, segnet_squeeze, segnet_alignres, segnet_vgg19
from semseg.modelloader.segnet_unet import segnet_unet
from semseg.modelloader.sqnet import sqnet
def validate(args):
init_time = str(int(time.time()))
if args.vis:
vis = visdom.Visdom()
if args.dataset_path == '':
HOME_PATH = os.path.expanduser('~')
local_path = os.path.join(HOME_PATH, 'Data/CamVid')
else:
local_path = args.dataset_path
local_path = os.path.expanduser(args.dataset_path)
if args.dataset == 'CamVid':
dst = camvidLoader(local_path, is_transform=True, split=args.dataset_type)
elif args.dataset == 'CityScapes':
dst = cityscapesLoader(local_path, is_transform=True, split=args.dataset_type)
elif args.dataset == 'FreeSpace':
dst = freespaceLoader(local_path, is_transform=True, split=args.dataset_type)
else:
pass
val_loader = torch.utils.data.DataLoader(dst, batch_size=1, shuffle=False)
if args.validate_model != '':
model = torch.load(args.validate_model)
else:
try:
model = eval(args.structure)(n_classes=args.n_classes, pretrained=args.init_vgg16)
except:
print('missing structure or not support')
exit(0)
if args.validate_model_state_dict != '':
try:
model.load_state_dict(torch.load(args.validate_model_state_dict, map_location='cpu'))
except KeyError:
print('missing key')
if args.cuda:
model.cuda()
# some model load different mode different performance
model.eval()
# model.train()
gts, preds, errors, imgs_name = [], [], [], []
for i, (imgs, labels) in enumerate(val_loader):
print(i)
# if i==1:
# break
img_path = dst.files[args.dataset_type][i]
img_name = img_path[img_path.rfind('/')+1:]
imgs_name.append(img_name)
# print('img_path:', img_path)
# print('img_name:', img_name)
# print(labels.shape)
# print(imgs.shape)
        # wrap the numpy arrays in PyTorch Variables
imgs = Variable(imgs, volatile=True)
labels = Variable(labels, volatile=True)
if args.cuda:
imgs = imgs.cuda()
labels = labels.cuda()
outputs = model(imgs)
loss = cross_entropy2d(outputs, labels)
loss_np = loss.cpu().data.numpy()
loss_np_float = float(loss_np)
# print('loss_np_float:', loss_np_float)
errors.append(loss_np_float)
        # take the max over axis=1; outputs has shape batch_size*n_classes*height*width,
        # and max() returns two arrays, the maxima and their indices; the indices are used as the labels
pred = outputs.cpu().data.max(1)[1].numpy()
gt = labels.cpu().data.numpy()
if args.save_result:
if not os.path.exists('/tmp/'+init_time):
os.mkdir('/tmp/'+init_time)
pred_labels = outputs.cpu().data.max(1)[1].numpy()
label_color = dst.decode_segmap(labels.cpu().data.numpy()[0]).transpose(2, 0, 1)
pred_label_color = dst.decode_segmap(pred_labels[0]).transpose(2, 0, 1)
label_color_cv2 = label_color.transpose(1, 2, 0)
label_color_cv2 = cv2.cvtColor(label_color_cv2, cv2.COLOR_RGB2BGR)
cv2.imwrite('/tmp/'+init_time+'/gt_{}'.format(img_name), label_color_cv2)
pred_label_color_cv2 = pred_label_color.transpose(1, 2, 0)
pred_label_color_cv2 = cv2.cvtColor(pred_label_color_cv2, cv2.COLOR_RGB2BGR)
cv2.imwrite('/tmp/'+init_time+'/pred_{}'.format(img_name), pred_label_color_cv2)
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
# print('errors:', errors)
# print('imgs_name:', imgs_name)
errors_indices = np.argsort(errors).tolist()
# print('errors_indices:', errors_indices)
# for top_i in range(len(errors_indices)):
# for top_i in range(10):
# top_index = errors_indices.index(top_i)
# # print('top_index:', top_index)
# img_name_top = imgs_name[top_index]
# print('img_name_top:', img_name_top)
score, class_iou = scores(gts, preds, n_class=dst.n_classes)
for k, v in score.items():
print(k, v)
class_iou_list = []
for i in range(dst.n_classes):
class_iou_list.append(round(class_iou[i], 2))
# print(i, round(class_iou[i], 2))
print('classes:', range(dst.n_classes))
print('class_iou_list:', class_iou_list)
# best validate: python validate.py --structure fcn32s --validate_model_state_dict fcn32s_camvid_9.pt
if __name__=='__main__':
# print('validate----in----')
parser = argparse.ArgumentParser(description='training parameter setting')
parser.add_argument('--structure', type=str, default='fcn32s', help='use the net structure to segment [ fcn32s ResNetDUC segnet ENet drn_d_22 ]')
parser.add_argument('--validate_model', type=str, default='', help='validate model path [ fcn32s_camvid_9.pkl ]')
parser.add_argument('--validate_model_state_dict', type=str, default='', help='validate model state dict path [ fcn32s_camvid_9.pt ]')
parser.add_argument('--init_vgg16', type=bool, default=False, help='init model using vgg16 weights [ False ]')
parser.add_argument('--dataset', type=str, default='CamVid', help='train dataset [ CamVid CityScapes FreeSpace ]')
parser.add_argument('--dataset_path', type=str, default='~/Data/CamVid', help='train dataset path [ ~/Data/CamVid ~/Data/cityscapes ~/Data/FreeSpaceDataset ]')
parser.add_argument('--dataset_type', type=str, default='val', help='dataset type [ train val test ]')
parser.add_argument('--n_classes', type=int, default=12, help='train class num [ 12 ]')
parser.add_argument('--vis', type=bool, default=False, help='visualize the training results [ False ]')
parser.add_argument('--cuda', type=bool, default=False, help='use cuda [ False ]')
parser.add_argument('--save_result', type=bool, default=False, help='save the val dataset prediction result [ False True ]')
args = parser.parse_args()
# print(args.resume_model)
# print(args.save_model)
print(args)
validate(args)
# print('validate----out----')
| [
"[email protected]"
]
| |
68ac0eeb5d55a38888952d35a6cd32b67c9bde23 | d7b4e2e391e1f15fd7cb4fbf4d9aee598131b007 | /AE_Datasets/R_A/datasets/CWRUCWT.py | 66ff726731086772786eee97b0378a32b4c39b8e | [
"MIT"
]
| permissive | wuyou33/DL-based-Intelligent-Diagnosis-Benchmark | eba2ce6f948b5abe68069e749f64501a32e1d7ca | e534f925cf454d07352f7ef82d75a8d6dac5355c | refs/heads/master | 2021-01-02T15:06:29.041349 | 2019-12-28T21:47:21 | 2019-12-28T21:47:21 | 239,673,952 | 1 | 0 | MIT | 2020-02-11T04:15:21 | 2020-02-11T04:15:20 | null | UTF-8 | Python | false | false | 5,887 | py | import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from datasets.MatrixDatasets import dataset
from datasets.matrix_aug import *
from tqdm import tqdm
import pickle
import pywt
signal_size=100
datasetname = ["12k Drive End Bearing Fault Data", "12k Fan End Bearing Fault Data", "48k Drive End Bearing Fault Data",
"Normal Baseline Data"]
normalname = ["97.mat", "98.mat", "99.mat", "100.mat"]
# For 12k Drive End Bearing Fault Data
dataname1 = ["105.mat", "118.mat", "130.mat", "169.mat", "185.mat", "197.mat", "209.mat", "222.mat",
"234.mat"] # 1797rpm
dataname2 = ["106.mat", "119.mat", "131.mat", "170.mat", "186.mat", "198.mat", "210.mat", "223.mat",
"235.mat"] # 1772rpm
dataname3 = ["107.mat", "120.mat", "132.mat", "171.mat", "187.mat", "199.mat", "211.mat", "224.mat",
"236.mat"] # 1750rpm
dataname4 = ["108.mat", "121.mat", "133.mat", "172.mat", "188.mat", "200.mat", "212.mat", "225.mat",
"237.mat"] # 1730rpm
# For 12k Fan End Bearing Fault Data
dataname5 = ["278.mat", "282.mat", "294.mat", "274.mat", "286.mat", "310.mat", "270.mat", "290.mat",
"315.mat"] # 1797rpm
dataname6 = ["279.mat", "283.mat", "295.mat", "275.mat", "287.mat", "309.mat", "271.mat", "291.mat",
"316.mat"] # 1772rpm
dataname7 = ["280.mat", "284.mat", "296.mat", "276.mat", "288.mat", "311.mat", "272.mat", "292.mat",
"317.mat"] # 1750rpm
dataname8 = ["281.mat", "285.mat", "297.mat", "277.mat", "289.mat", "312.mat", "273.mat", "293.mat",
"318.mat"] # 1730rpm
# For 48k Drive End Bearing Fault Data
dataname9 = ["109.mat", "122.mat", "135.mat", "174.mat", "189.mat", "201.mat", "213.mat", "250.mat",
"262.mat"] # 1797rpm
dataname10 = ["110.mat", "123.mat", "136.mat", "175.mat", "190.mat", "202.mat", "214.mat", "251.mat",
"263.mat"] # 1772rpm
dataname11 = ["111.mat", "124.mat", "137.mat", "176.mat", "191.mat", "203.mat", "215.mat", "252.mat",
"264.mat"] # 1750rpm
dataname12 = ["112.mat", "125.mat", "138.mat", "177.mat", "192.mat", "204.mat", "217.mat", "253.mat",
"265.mat"] # 1730rpm
# label
label = [1, 2, 3, 4, 5, 6, 7, 8, 9] # The failure data is labeled 1-9
axis = ["_DE_time", "_FE_time", "_BA_time"]
def CWT(lenth,data):
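    # Continuous wavelet transform with the Mexican-hat ('mexh') mother wavelet;
    # returns a (lenth-1) x len(data) coefficient matrix used as a 2-D sample.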
scale = np.arange(1,lenth)
cwtmatr, freqs = pywt.cwt(data, scale, 'mexh')
return cwtmatr
# generate Training Dataset and Testing Dataset
def get_files(root, test=False):
'''
This function is used to generate the final training set and test set.
root:The location of the data set
normalname:List of normal data
dataname:List of failure data
'''
    data_root1 = os.path.join(root, datasetname[3])
    data_root2 = os.path.join(root, datasetname[0])
    path1 = os.path.join(data_root1, normalname[0])  # 0->1797rpm ;1->1772rpm;2->1750rpm;3->1730rpm
    data, lab = data_load(path1, axisname=normalname[0], label=0)  # The label for normal data is 0
    for i in tqdm(range(len(dataname1))):
        path2 = os.path.join(data_root2, dataname1[i])
data1, lab1 = data_load(path2, dataname1[i], label=label[i])
data += data1
lab += lab1
return [data, lab]
def data_load(filename, axisname, label):
'''
This function is mainly used to generate test data and training data.
filename:Data location
axisname:Select which channel's data,---->"_DE_time","_FE_time","_BA_time"
'''
datanumber = axisname.split(".")
    if int(datanumber[0]) < 100:
realaxis = "X0" + datanumber[0] + axis[0]
else:
realaxis = "X" + datanumber[0] + axis[0]
fl = loadmat(filename)[realaxis]
fl = fl.reshape(-1,)
data = []
lab = []
start, end = 0, signal_size
    while end <= fl.shape[0] / 10:  # only the first ~10% of each signal is used, presumably to bound the CWT cost
x = fl[start:end]
imgs = CWT(signal_size + 1, x)
data.append(imgs)
lab.append(label)
start += signal_size
end += signal_size
return data, lab,
def data_transforms(dataset_type="train", normlize_type="-1-1"):
transforms = {
'train': Compose([
ReSize(size=0.32),
Reshape(),
Normalize(normlize_type),
RandomScale(),
RandomCrop(),
Retype(),
]),
'val': Compose([
ReSize(size=0.32),
Reshape(),
Normalize(normlize_type),
Retype(),
])
}
return transforms[dataset_type]
class CWRUCWT(object):
num_classes = 10
inputchannel = 1
def __init__(self, data_dir,normlizetype):
self.data_dir = data_dir
self.normlizetype = normlizetype
def data_preprare(self, test=False):
if len(os.path.basename(self.data_dir).split('.')) == 2:
with open(self.data_dir, 'rb') as fo:
list_data = pickle.load(fo, encoding='bytes')
else:
list_data = get_files(self.data_dir, test)
with open(os.path.join(self.data_dir, "CWRUCWT.pkl"), 'wb') as fo:
pickle.dump(list_data, fo)
if test:
test_dataset = dataset(list_data=list_data, test=True, transform=None)
return test_dataset
else:
data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
train_dataset = dataset(list_data=train_pd, transform=data_transforms('train',self.normlizetype))
val_dataset = dataset(list_data=val_pd, transform=data_transforms('val',self.normlizetype))
return train_dataset, val_dataset
| [
"[email protected]"
]
| |
669a113c17fd1fe1e8f0256f0d625bbbc78a9be4 | 46404c77e04907225475e9d8be6e0fd33227c0b1 | /wildcard pattern matching.py | 0ed16783c406fd5ec5eaf2858e1c35ca373e0e95 | []
| no_license | govardhananprabhu/DS-task- | 84b46e275406fde2d56c301fd1b425b256b29064 | bf54f3d527f52f61fefc241f955072f5ed9a6558 | refs/heads/master | 2023-01-16T07:41:27.064836 | 2020-11-27T11:52:50 | 2020-11-27T11:52:50 | 272,928,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | """
Given two strings 'str' and a wildcard pattern 'pattern' of length N and M respectively, You have to print '1' if the wildcard pattern is matched with str else print '0' .
The wildcard pattern can include the characters ‘?’ and ‘*’
‘?’ – matches any single character
‘*’ – Matches any sequence of characters (including the empty sequence)
Note: The matching should cover the entire str (not partial str).
Constraints:
1 <= length of(str,pat) <= 200
H 7
T 2300
Tag yahoo string
In des
First line contain string s.
Second line contain string,denotes the pattern.
Ot des
Print 1 if it is wildcard pattern else 0.
baaabab
ba*a?
1
baaabab
*****ba*****ab
1
baaabab
a*ab
0
water
*r
1
master
m*e
0
Exp
From sample:replace '*' with "aab" and '?' with 'b'.
Hint
Each occurrence of ‘?’ character in wildcard pattern can be replaced with any other character and each occurrence of ‘*’ with a sequence of characters such that the wildcard pattern becomes identical to the input string after replacement.
"""
def strrmatch(strr, pattern, n, m):
if (m == 0):
return (n == 0)
lookup = [[False for i in range(m + 1)] for j in range(n + 1)]
lookup[0][0] = True
for j in range(1, m + 1):
if (pattern[j - 1] == '*'):
lookup[0][j] = lookup[0][j - 1]
for i in range(1, n + 1):
for j in range(1, m + 1):
if (pattern[j - 1] == '*'):
lookup[i][j] = lookup[i][j - 1] or lookup[i - 1][j]
elif (pattern[j - 1] == '?' or strr[i - 1] == pattern[j - 1]):
lookup[i][j] = lookup[i - 1][j - 1]
else:
lookup[i][j] = False
return lookup[n][m]
strr = input()
pattern = input()
if (strrmatch(strr, pattern, len(strr),len(pattern))):
print("1")
else:
print("0")
| [
"[email protected]"
]
| |
2ea59d15a88cd4a3cfba74fb74162da032c006d3 | d613fecbe4845ed4a0f1d667439640ed10c8922a | /app1/views/ajax.py | e9581d351b9923bc2a953751021d2bda01cc0396 | []
| no_license | AnyiYim/DjangoTeacherManagerDemo | e18bdb312237e39da00f62006e9e7a98d817d08c | eecfaac3bd5badfb3ac1aed5b2e3f034e505e26e | refs/heads/master | 2021-04-27T00:23:38.853148 | 2018-03-04T16:08:46 | 2018-03-04T16:08:46 | 123,805,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from django.shortcuts import render, redirect, HttpResponse
from app1 import models
def ajax1(request):
return render(request, 'ajax1.html')
def ajax2(request):
u = request.GET.get('username')
p = request.GET.get('password')
return HttpResponse('我愿意')
def ajax4(request):
nid=request.GET.get('nid')
msg='成功'
try:
models.Students.objects.get(id=nid).delete()
except Exception as e:
msg = str(e)
return HttpResponse(msg)
| [
"[email protected]"
]
| |
abeee0a41c8430a1ec73728748161b3758a74b77 | 3b662ff24ba24b09e4de7ceb2c2d3bd298591e88 | /Python/libraries/recognizers-number-with-unit/recognizers_number_with_unit/number_with_unit/extractors.py | 757cb5c9e949ff59c7d4e4ba56149821509b421a | [
"MIT"
]
| permissive | gzhebrunov/Recognizers-Text | 39d916e891a09b26032430184dc90394e197d195 | 157daf7ac85cc5b4e1e708aed8f96601fd28a612 | refs/heads/master | 2020-04-03T13:51:32.840384 | 2018-10-30T01:22:05 | 2018-10-30T01:22:05 | 155,301,539 | 0 | 0 | MIT | 2018-10-30T00:48:07 | 2018-10-30T00:48:06 | null | UTF-8 | Python | false | false | 15,594 | py | from abc import ABC, abstractmethod
from typing import List, Dict, Set, Pattern, Match
from copy import deepcopy
from collections import namedtuple
from itertools import chain
import regex
from .constants import *
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor, ExtractResult
from recognizers_number.culture import CultureInfo
PrefixUnitResult = namedtuple('PrefixUnitResult', ['offset', 'unit'])
class NumberWithUnitExtractorConfiguration(ABC):
@property
@abstractmethod
def extract_type(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def suffix_list(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def prefix_list(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def ambiguous_unit_list(self) -> List[str]:
raise NotImplementedError
@property
@abstractmethod
def unit_num_extractor(self) -> Extractor:
raise NotImplementedError
@property
@abstractmethod
def build_prefix(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def build_suffix(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def connector_token(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def compound_unit_connector_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def pm_non_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
def culture_info(self) -> CultureInfo:
return self._culture_info
def __init__(self, culture_info: CultureInfo):
self._culture_info = culture_info
class NumberWithUnitExtractor(Extractor):
def __init__(self, config: NumberWithUnitExtractorConfiguration):
self.config: NumberWithUnitExtractorConfiguration = config
if self.config.suffix_list:
self.suffix_regex: Set[Pattern] = self._build_regex_from_set(self.config.suffix_list.values())
else:
self.suffix_regex: Set[Pattern] = set()
if self.config.prefix_list:
max_length = max(map(len, ('|'.join(self.config.prefix_list.values()).split('|'))))
self.max_prefix_match_len = max_length + 2
self.prefix_regex: Set[Pattern] = self._build_regex_from_set(self.config.prefix_list.values())
else:
self.max_prefix_match_len = 0
self.prefix_regex: Set[Pattern] = set()
self.separate_regex = self._build_separate_regex_from_config()
def extract(self, source: str) -> List[ExtractResult]:
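        # Overall flow: run the number extractor first, then attach unit
        # prefixes found immediately left of each number and unit suffixes
        # found immediately right of it; finally pick up "separate" units
        # that occur with no adjacent number at all.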
if not self._pre_check_str(source):
return list()
mapping_prefix: Dict[float, PrefixUnitResult] = dict()
matched: List[bool] = [False] * len(source)
numbers: List[ExtractResult] = self.config.unit_num_extractor.extract(source)
result: List[ExtractResult] = list()
source_len = len(source)
if self.max_prefix_match_len != 0:
for num in numbers:
if num.start is None or num.length is None:
continue
max_find_prefix = min(self.max_prefix_match_len, num.start)
if max_find_prefix == 0:
continue
left: str = source[num.start - max_find_prefix:num.start]
last_index = len(left)
best_match: Match = None
for pattern in self.prefix_regex:
collection = list(filter(lambda x: len(x.group()), regex.finditer(pattern, left)))
for match in collection:
if left[match.start():last_index].strip() == match.group():
if best_match is None or best_match.start() >= match.start():
best_match = match
if best_match:
mapping_prefix[num.start] = PrefixUnitResult(
offset=last_index - best_match.start(),
unit=left[best_match.start():last_index]
)
for num in numbers:
if num.start is None or num.length is None:
continue
start = num.start
length = num.length
max_find_len = source_len - start - length
prefix_unit: PrefixUnitResult = mapping_prefix.get(start, None)
if max_find_len > 0:
right = source[start + length:start + length + max_find_len]
unit_match_list = map(lambda x: list(regex.finditer(x, right)), self.suffix_regex)
unit_match = chain.from_iterable(unit_match_list)
unit_match = list(filter(lambda x: x.group(), unit_match))
max_len = 0
for match in unit_match:
if match.group():
end_pos = match.start() + len(match.group())
if match.start() >= 0:
middle: str = right[:min(match.start(), len(right))]
if max_len < end_pos and (not middle.strip() or middle.strip() == self.config.connector_token):
max_len = end_pos
if max_len != 0:
for i in range(length + max_len):
matched[i+start] = True
ex_result = ExtractResult()
ex_result.start = start
ex_result.length = length + max_len
ex_result.text = source[start:start+length+max_len]
ex_result.type = self.config.extract_type
if prefix_unit:
ex_result.start -= prefix_unit.offset
ex_result.length += prefix_unit.offset
ex_result.text = prefix_unit.unit + ex_result.text
num.start = start - ex_result.start
ex_result.data = num
is_not_unit = False
if ex_result.type == Constants.SYS_UNIT_DIMENSION:
non_unit_match = self.config.pm_non_unit_regex.finditer(source)
for match in non_unit_match:
if ex_result.start >= match.start() and ex_result.end <= match.end():
is_not_unit = True
if is_not_unit:
continue
result.append(ex_result)
continue
if prefix_unit:
ex_result = ExtractResult()
ex_result.start = num.start - prefix_unit.offset
ex_result.length = num.length + prefix_unit.offset
ex_result.text = prefix_unit.unit + num.text
ex_result.type = self.config.extract_type
num.start = start - ex_result.start
ex_result.data = num
result.append(ex_result)
if self.separate_regex:
result = self._extract_separate_units(source, result)
return result
def validate_unit(self, source: str) -> bool:
return not source.startswith('-')
def _pre_check_str(self, source: str) -> bool:
return len(source) != 0
def _extract_separate_units(self, source: str, num_depend_source: List[ExtractResult]) -> List[ExtractResult]:
result = deepcopy(num_depend_source)
match_result: List[bool] = [False] * len(source)
for ex_result in num_depend_source:
for i in range(ex_result.start, ex_result.end+1):
match_result[i] = True
match_collection = list(filter(lambda x: x.group(), regex.finditer(self.separate_regex, source)))
for match in match_collection:
i = 0
while i < len(match.group()) and not match_result[match.start()+i]:
i += 1
            if i == len(match.group()):
                for j in range(i):
                    # Mark the matched span itself, offset by the match start.
                    match_result[match.start() + j] = True
is_not_unit = False
if match.group() == Constants.AMBIGUOUS_TIME_TERM:
non_unit_match = self.config.pm_non_unit_regex.finditer(source)
for time in non_unit_match:
if self._dimension_inside_time(match, time):
is_not_unit = True
if is_not_unit:
continue
to_add = ExtractResult()
to_add.start = match.start()
to_add.length = len(match.group())
to_add.text = match.group()
to_add.type = self.config.extract_type
result.append(to_add)
return result
def _build_regex_from_set(self, definitions: List[str], ignore_case: bool = True) -> Set[Pattern]:
return set(map(lambda x: self.__build_regex_from_str(x, ignore_case), definitions))
def __build_regex_from_str(self, source: str, ignore_case: bool) -> Pattern:
tokens = map(regex.escape, source.split('|'))
definition = '|'.join(tokens)
definition = f'{self.config.build_prefix}({definition}){self.config.build_suffix}'
flags = regex.S + regex.I if ignore_case else regex.S
return RegExpUtility.get_safe_reg_exp(definition, flags)
def _build_separate_regex_from_config(self, ignore_case: bool = True) -> Pattern:
separate_words: Set[str] = set()
for add_word in self.config.prefix_list.values():
separate_words |= set(filter(self.validate_unit, add_word.split('|')))
for add_word in self.config.suffix_list.values():
separate_words |= set(filter(self.validate_unit, add_word.split('|')))
for to_delete in self.config.ambiguous_unit_list:
separate_words.discard(to_delete)
        if not separate_words:
            # A `map` object is always truthy, so test the source set instead.
            return None
        tokens = sorted(map(regex.escape, separate_words), key=len, reverse=True)
definition = '|'.join(tokens)
definition = f'{self.config.build_prefix}({definition}){self.config.build_suffix}'
flags = regex.S + regex.I if ignore_case else regex.S
return RegExpUtility.get_safe_reg_exp(definition, flags)
def _dino_comparer(self, x: str, y: str) -> int:
if not x:
if not y:
return 0
else:
return 1
else:
if not y:
return -1
else:
if len(x) != len(y):
return len(y) - len(x)
else:
if x.lower() < y.lower():
return -1
if y.lower() < x.lower():
return 1
return 0
def _dimension_inside_time(self, dimension: Match, time: Match) -> bool:
is_sub_match = False
if dimension.start() >= time.start() and dimension.end() <= time.end():
is_sub_match = True
return is_sub_match
class BaseMergedUnitExtractor(Extractor):
def __init__(self, config: NumberWithUnitExtractorConfiguration):
self.config = config
def extract(self, source: str) -> List[ExtractResult]:
if self.config.extract_type == Constants.SYS_UNIT_CURRENCY:
result = self.__merged_compound_units(source)
else:
result = NumberWithUnitExtractor(self.config).extract(source)
return result
def __merged_compound_units(self, source:str):
ers = NumberWithUnitExtractor(self.config).extract(source)
ers = self.__merge_pure_number(source, ers)
result = []
groups = [0] * len(ers)
idx = 0
while idx < len(ers) - 1:
if ers[idx].type != ers[idx + 1].type and not ers[idx].type == Constants.SYS_NUM and not ers[idx + 1].type == Constants.SYS_NUM:
idx = idx + 1
continue
if isinstance(ers[idx].data, ExtractResult):
groups[idx + 1] = groups[idx] + 1
idx = idx + 1
continue
            middle_begin = ers[idx].start + ers[idx].length
            middle_end = ers[idx + 1].start
            # Python slices take [start:end], not [start:length].
            middle_str = source[middle_begin:middle_end].strip().lower()
            # Separated by whitespace
            if not middle_str:
                groups[idx + 1] = groups[idx]
                idx = idx + 1
                continue
            # Separated by connector
            match = self.config.compound_unit_connector_regex.match(middle_str)
            if match:
                groups[idx + 1] = groups[idx]
            else:
                groups[idx + 1] = groups[idx] + 1
idx = idx + 1
idx = 0
while idx < len(ers):
if idx == 0 or groups[idx] != groups[idx -1]:
tmp_extract_result = ers[idx]
tmp = ExtractResult()
tmp.data = ers[idx].data
tmp.length = ers[idx].length
tmp.start = ers[idx].start
tmp.text = ers[idx].text
tmp.type = ers[idx].type
tmp_extract_result.data = [tmp]
result.append(tmp_extract_result)
# reduce extract results in same group
if idx + 1 < len(ers) and groups[idx + 1] == groups[idx]:
group = groups[idx]
period_begin = result[group].start
period_end = ers[idx + 1].start + ers[idx + 1].length
result[group].length = period_end - period_begin
                result[group].text = source[period_begin:period_end]
result[group].type = Constants.SYS_UNIT_CURRENCY
if isinstance(result[group].data, list):
result[group].data.append(ers[idx + 1])
idx = idx + 1
idx = 0
while idx < len(result):
inner_data = result[idx].data
if len(inner_data) == 1:
result[idx] = inner_data[0]
idx = idx + 1
result = [x for x in result if not x.type == Constants.SYS_NUM]
return result
def __merge_pure_number(self, source: str, ers: List[ExtractResult]) -> List[ExtractResult]:
num_ers = self.config.unit_num_extractor.extract(source)
unit_numbers = []
i = j = 0
while i < len(num_ers):
has_behind_extraction = False
while j < len(ers) and ers[j].start + ers[j].length < num_ers[i].start:
has_behind_extraction = True
j = j + 1
if not has_behind_extraction:
i = i + 1
continue
middle_begin = ers[j - 1].start + ers[j - 1].length
middle_end = num_ers[i].start
            middle_str = source[middle_begin:middle_end].strip().lower()
# separated by whitespace
if not middle_str:
unit_numbers.append(num_ers[i])
i = i + 1
continue
i = i + 1
for extract_result in unit_numbers:
overlap = False
for er in ers:
if er.start <= extract_result.start and er.start + er.length >= extract_result.start:
overlap = True
if not overlap:
ers.append(extract_result)
ers = sorted(ers, key=lambda e: e.start)
return ers
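# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). It assumes a
# concrete configuration class is available; the import path and class name
# below are illustrative assumptions and may differ per distribution of the
# recognizers package.
#
#   from recognizers_number_with_unit.number_with_unit.english.extractors \
#       import EnglishCurrencyExtractorConfiguration
#
#   extractor = NumberWithUnitExtractor(EnglishCurrencyExtractorConfiguration())
#   for res in extractor.extract('It costs 5 dollars and 50 cents'):
#       print(res.start, res.length, res.text, res.type)
# ---------------------------------------------------------------------------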
| [
"[email protected]"
]
| |
3f84b9dcb1f883353278b6f06f472d8d32a06e47 | 1521332438d4e711b6fa4af825047a3466925511 | /WorkshopWeek8/problem5.py | 1925e67c31009097d9b36fdcb1b950cb256b497e | []
| no_license | JakeAttard/Python-2807ICT-NoteBook | df0907bdca9ff10f347498233260c97f41ea783b | 9a38035d467e569b3fb97f5ab114753efc32cecc | refs/heads/master | 2020-04-26T17:33:18.184447 | 2019-11-05T13:04:56 | 2019-11-05T13:04:56 | 173,717,675 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | def function(list, diff):
counter = 1
for a in list[::2]:
for b in list[1::2]:
if int(b) - int(a) == diff:
counter += 1
elif int(b) - int(a) == -1 * diff:
counter += 1
else:
break
return counter
def testString(a):
list1 = a.split()
if len(a) == 1:
print(1)
elif len(a) == 0:
exit()
else:
difference = int(list1[1]) - int(list1[0])
print(function(list1, difference))
a = input("List: ")
testString(a)
while len(a) != 0:
a = input("List: ")
testString(a) | [
"[email protected]"
]
| |
a7f24ef184928de29cb7077c5a33eb6c01eae3b5 | d8422247ecbe450c75df45dcf2c92fb4438b65af | /horizon/openstack_dashboard/dashboards/admin/instances/forms.py | 9d2bf6d665256ffd420ae81e10ff16ed18c8cfd8 | [
"Apache-2.0"
]
| permissive | yianjiajia/openstack_horizon | deb9beca534b494b587ae401904c84ddbed64c4a | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | refs/heads/master | 2016-09-12T21:34:25.718377 | 2016-04-28T05:29:56 | 2016-04-28T05:29:56 | 57,273,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("New Host"),
help_text=_("Choose a Host to migrate to."))
disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
initial=False, required=False)
block_migration = forms.BooleanField(label=_("Block Migration"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
instance_id = initial.get('instance_id')
self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
initial=instance_id)
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.host_name,
host.host_name)
for host in hosts
if (host.service.startswith('compute') and
host.host_name != current_host)]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available.")))
return sorted(host_list)
def handle(self, request, data):
try:
block_migration = data['block_migration']
disk_over_commit = data['disk_over_commit']
api.nova.server_live_migrate(request,
data['instance_id'],
data['host'],
block_migration=block_migration,
disk_over_commit=disk_over_commit)
msg = _('The instance is preparing the live migration '
'to host "%s".') % data['host']
messages.success(request, msg)
# operation log
config = '\n'.join(['Host ID: '+ data['host'], 'Instance ID: '+ data['instance_id']])
api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
resource_name='Instance', config=config,
status='Success')
return True
        except Exception:
            msg = _('Failed to live migrate instance to '
                    'host "%s".') % data['host']
            redirect = reverse('horizon:admin:instances:index')
            # operation log (recorded before exceptions.handle, which
            # redirects and would otherwise skip this call)
            api.logger.Logger(request).create(resource_type='instance', action_name='Live Migrate Instance',
                                              resource_name='Instance', config='Failed to live migrate instance',
                                              status='Error')
            exceptions.handle(request, msg, redirect=redirect)
"[email protected]"
]
| |
d893d6bda716d9a47904627e4d218b88be59669f | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pytorch_pytorch/pytorch-master/test/test_sparse.py | 11b51eaf3f1f94a07eaf3d721684547d9a17be77 | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 6,617 | py | import torch
from torch import sparse
import itertools
import random
import unittest
from common import TestCase, run_tests
from numbers import Number
SparseTensor = sparse.DoubleTensor
class TestSparse(TestCase):
@staticmethod
def _gen_sparse(d, nnz, with_size):
v = torch.randn(nnz)
if isinstance(with_size, Number):
i = (torch.rand(d, nnz) * with_size).type(torch.LongTensor)
x = SparseTensor(i, v)
else:
i = torch.rand(d, nnz) * \
torch.Tensor(with_size).repeat(nnz, 1).transpose(0, 1)
i = i.type(torch.LongTensor)
x = SparseTensor(i, v, torch.Size(with_size))
return x, i, v
def test_basic(self):
x, i, v = self._gen_sparse(3, 10, 100)
self.assertEqual(i, x.indices())
self.assertEqual(v, x.values())
x, i, v = self._gen_sparse(3, 10, [100, 100, 100])
self.assertEqual(i, x.indices())
self.assertEqual(v, x.values())
self.assertEqual(x.ndimension(), 3)
self.assertEqual(x.nnz(), 10)
for i in range(3):
self.assertEqual(x.size(i), 100)
# Make sure we can access empty indices / values
x = SparseTensor()
self.assertEqual(x.indices().numel(), 0)
self.assertEqual(x.values().numel(), 0)
def test_to_dense(self):
i = torch.LongTensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
])
v = torch.Tensor([2, 1, 3, 4])
x = SparseTensor(i, v, torch.Size([3, 4, 5]))
res = torch.Tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
])
x.to_dense() # Tests double to_dense for memory corruption
x.to_dense()
x.to_dense()
self.assertEqual(res, x.to_dense())
def test_contig(self):
i = torch.LongTensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
])
v = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x = SparseTensor(i, v, torch.Size([100, 100]))
exp_i = torch.LongTensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
])
exp_v = torch.Tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
i = torch.LongTensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
])
v = torch.Tensor([3, 2, 4, 1])
x = SparseTensor(i, v, torch.Size([3, 4, 5]))
exp_i = torch.LongTensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
])
exp_v = torch.Tensor([2, 1, 3, 4])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
# Duplicate indices
i = torch.LongTensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
])
v = torch.Tensor([3, 2, 4, 1])
x = SparseTensor(i, v, torch.Size([3, 4, 5]))
exp_i = torch.LongTensor([
[0, 2],
[0, 3],
[0, 4],
])
exp_v = torch.Tensor([6, 4])
x.contiguous()
self.assertEqual(exp_i, x.indices())
self.assertEqual(exp_v, x.values())
def test_transpose(self):
x = self._gen_sparse(4, 20, 5)[0]
y = x.to_dense()
for i, j in itertools.combinations(range(4), 2):
x = x.transpose_(i, j)
y = y.transpose(i, j)
self.assertEqual(x.to_dense(), y)
x = x.transpose(i, j)
y = y.transpose(i, j)
self.assertEqual(x.to_dense(), y)
def test_mm(self):
def test_shape(di, dj, dk):
x, _, _ = self._gen_sparse(2, 20, [di, dj])
t = torch.randn(di, dk)
y = torch.randn(dj, dk)
alpha = random.random()
beta = random.random()
expected = torch.addmm(alpha, t, beta, x.to_dense(), y)
res = torch.addmm(alpha, t, beta, x, y)
self.assertEqual(res, expected)
expected = torch.addmm(t, x.to_dense(), y)
res = torch.addmm(t, x, y)
self.assertEqual(res, expected)
expected = torch.mm(x.to_dense(), y)
res = torch.mm(x, y)
self.assertEqual(res, expected)
test_shape(10, 100, 100)
test_shape(100, 1000, 200)
test_shape(64, 10000, 300)
def test_saddmm(self):
def test_shape(di, dj, dk):
x = self._gen_sparse(2, 20, [di, dj])[0]
t = self._gen_sparse(2, 20, [di, dk])[0]
y = torch.randn(dj, dk)
alpha = random.random()
beta = random.random()
expected = torch.addmm(alpha, t.to_dense(), beta, x.to_dense(), y)
res = torch.saddmm(alpha, t, beta, x, y)
self.assertEqual(res.to_dense(), expected)
expected = torch.addmm(t.to_dense(), x.to_dense(), y)
res = torch.saddmm(t, x, y)
self.assertEqual(res.to_dense(), expected)
expected = torch.mm(x.to_dense(), y)
res = torch.smm(x, y)
self.assertEqual(res.to_dense(), expected)
test_shape(7, 5, 3)
test_shape(1000, 100, 100)
test_shape(3000, 64, 300)
def test_spadd(self):
def test_shape(*shape):
x, _, _ = self._gen_sparse(len(shape), 10, shape)
y = torch.randn(*shape)
r = random.random()
expected = y + r * x.to_dense()
res = torch.add(y, r, x)
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s).transpose_(0, len(s) - 1)
r = random.random()
expected = y + r * x.to_dense()
res = torch.add(y, r, x)
self.assertEqual(res, expected)
test_shape(5, 6)
test_shape(10, 10, 10)
test_shape(50, 30, 20)
test_shape(5, 5, 5, 5, 5, 5)
if __name__ == '__main__':
run_tests()
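# Editorial sketch: constructing a sparse tensor directly, mirroring the
# `_gen_sparse` helper above; `i` holds one row of indices per dimension.
#
#   i = torch.LongTensor([[0, 1], [2, 0]])
#   v = torch.Tensor([3.5, 1.0])
#   x = sparse.DoubleTensor(i, v, torch.Size([2, 3]))
#   print(x.to_dense())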
| [
"[email protected]"
]
| |
d7ef8890a6ce56916383b518e78a04c723e683ff | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBZinc_M_950_TuneZ2star_8TeV-madgraph_cff.py | 425c01667e5ad92ae0b9a16636c284b2b8579120 | []
| no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 6,054 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(7,1) = 950.0D0 ! bprime quarks mass',
'PMAS(7,2) = 9.50D0',
'PMAS(7,3) = 95.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4',
'MDME(57,1)=0 ! gamma b4',
'KFDP(58,2)=5 ! defines Z0 b',
'MDME(58,1)=1 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'KFDP(63,2)=5 ! defines H0 b',
'MDME(63,1)=1 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 0.5D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 0.5D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(210,1)=1 !Higgs decay into dd',
'MDME(211,1)=1 !Higgs decay into uu',
'MDME(212,1)=1 !Higgs decay into ss',
'MDME(213,1)=1 !Higgs decay into cc',
'MDME(214,1)=1 !Higgs decay into bb',
'MDME(215,1)=1 !Higgs decay into tt',
'MDME(216,1)=1 !Higgs decay into',
'MDME(217,1)=1 !Higgs decay into Higgs decay',
'MDME(218,1)=1 !Higgs decay into e nu e',
'MDME(219,1)=1 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=1 !Higgs decay into Higgs decay',
'MDME(222,1)=1 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=1 !Higgs decay into gam Z',
'MDME(225,1)=1 !Higgs decay into Z Z',
'MDME(226,1)=1 !Higgs decay into W W',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
]
| |
035f2485d9238b11a68df3adc4d304e7add9874d | 2687412dd10032667e50e74d9d3f832133bc2536 | /code/disasters/reload_landslide_data.py | 9963d89459014edca49ca7efbc21837e02e92c30 | [
"MIT"
]
| permissive | wfp-ose/sparc2-pipeline | 644e040c27517889c84598c34397c06f3d82ca96 | fdd3bd29426d9231956f449cb5e78afd33446a8a | refs/heads/master | 2021-01-17T18:07:58.641768 | 2016-12-02T12:40:54 | 2016-12-02T12:40:54 | 57,199,382 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | from geodash.enumerations import MONTHS_SHORT3
from geodash.data import GeoDashDatabaseConnection
print "Inserting Landslide Data..."
print "..."
print ""
prob_classes = [
{'input': 'low', 'output_text': 'low', "output_int": 1},
{'input': 'medium', 'output_text': 'medium', "output_int": 2},
{'input': 'high', 'output_text': 'high', "output_int": 3},
{'input': 'very_h', 'output_text': 'very_high', "output_int": 4}
]
tpl = None
with open('insert_landslide_data.tpl.sql', 'r') as f:
tpl = f.read()
with GeoDashDatabaseConnection() as geodash_conn:
try:
geodash_conn.exec_update("DELETE FROM landslide.admin2_popatrisk;")
except:
pass
for month in MONTHS_SHORT3:
for prob_class in prob_classes:
# Population at Risk Data
sql = tpl.format(** {
'month': month,
'prob_class_input': prob_class['input'],
'prob_class_output_text': prob_class['output_text'],
'prob_class_output_int': str(prob_class['output_int'])
})
geodash_conn.exec_update(sql)
print "Done Inserting Landslide Data"
| [
"[email protected]"
]
| |
19d2071c90dfbf39c31669b82ef26d4c0d376a89 | 4edd89e807ac9a70d4fb4a258015e6889b01ff27 | /md5decoder.py | f0610781b1f9b91c3f091c3120739488857dd15c | []
| no_license | karimmakynch/PYTHON | ca68576fb3079fdd56559959edb3b4e1ba8ccf04 | 4842269368d49a3954c39ce4e8f2a0bc03b2e99c | refs/heads/main | 2023-03-15T21:16:38.610893 | 2021-02-26T05:42:26 | 2021-02-26T05:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | # -*- coding: utf-8 -*-
import hashlib
import sys
#variables
count = 0
tour = 0
tourclone = 0
tourx = 0
creds = ''
part = 1
inputfilelines = 0
try:
try:
inputfile = sys.argv[1]
dicfile = sys.argv[2]
outputfile = sys.argv[3]
fout = open(outputfile,'w')
fouttx = '[+] inputfile: '+str(inputfile)+' DictionaryFile: '+str(dicfile)+' Outputfile: '+str(outputfile)+'\n'
fout.write(fouttx)
except:
print 'err: Ex: python md5decoder.py inputfile(hashes).txt dic.txt outputfile(result).txt'
sys.exit()
print 'Text Content:'
print '1)Hashes:Email'
print '2)Email :Hashes'
hashpos = input('input: ')
if hashpos == 1:
hashes = 0
emails = 1
if hashpos == 2:
hashes = 1
emails = 0
if str(hashpos) not in '12':
print '[-] err 1)Hashes:Email !!'
print '[-] err 2)Email :Hashes !!'
sys.exit()
    # Count only the input lines that contain exactly two fields (hash and
    # email); read the file once instead of re-reading it for every line.
    for line in open(inputfile, 'r'):
        if len(line.split()) == 2:
            inputfilelines += 1
dicfilelines = len(open(dicfile,'r').readlines())
print '\n'
for i in open(inputfile,'r'):
if len(i.split()) == 2:
for ii in open(dicfile,'r'):
hashtext = hashlib.md5(ii.split()[0]).hexdigest()
prog1 = int(float(tour)/dicfilelines*100)
if tourclone > inputfilelines:
tourclone = 0
prog2 = int(float(tourclone)/inputfilelines*100)
sym1 = 10*tour/dicfilelines
p1 = '▓'*sym1+'░'*(10-sym1)
sym2 = 10*tourclone/inputfilelines
p2 = '▓'*sym2+'░'*(10-sym2)
prog3 = int(float(tourx)/inputfilelines*100)
sym3 = 10*tourx/inputfilelines
p3 = '▓'*sym3+'░'*(10-sym3)
sys.stdout.write('\r '+str(prog3)+'% ['+p3+'] '+str(prog1)+'% ['+p1+'] '+str(prog2)+'% ['+p2+'] count : '+str(count)+' tested: '+str(part)+'/'+str(inputfilelines)+' ')
sys.stdout.flush()
if i.split()[hashes] == hashtext:
count += 1
creds = str(i.split()[emails])+':'+str(ii.split()[0])
fout = open(outputfile,'a')
fout.write(creds)
tourclone +=1
tour += 1
if tour > dicfilelines:
tour = 0
part +=1
tourx +=1
print '\n'
except:
pass | [
"[email protected]"
]
| |
dfa52f8f4a5c08260ca6f9c4014300383b6ab5f7 | dd9571236f35807e130bb987b4f1f5f0b2676efb | /users/admin_user_api.py | 41fce13a4ea094ff16f8ec70ab22cde148d74c67 | []
| no_license | sekhorroy/bysterdjango | 58337e6ac1191ae945fcbd2ec1c47229e598a570 | fd016bcc3414875cd874a3c69733722815a84e05 | refs/heads/master | 2022-12-13T12:49:04.802319 | 2020-09-06T06:28:50 | 2020-09-06T06:28:50 | 292,861,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | from rest_framework.exceptions import ValidationError
from rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from users.admin_serializer import AdminUserSerializer, AdminLoginSerializer
from users.models import MtAdminUser as Admin
class UserPagination(LimitOffsetPagination):
default_limit = 10
max_limit = 100
class CreateAdminUser(CreateAPIView):
# Allow authenticate users to hit this endpoint
permission_classes = (IsAuthenticated, )
serializer_class = AdminUserSerializer
def post(self, request):
#restore those native datatypes into a dictionary of validated data.
serializers = self.serializer_class(data=request.data)
#checks if the data is as per serializer fields otherwise throws an exception.
serializers.is_valid(raise_exception=True)
serializers.save()
status_code = status.HTTP_201_CREATED
response = {
'success' : 'True',
            'status_code' : status_code,
'message' : 'User registered successfully'
}
return Response(response, status=status_code)
class AdminLogin(RetrieveUpdateDestroyAPIView):
permission_classes = (AllowAny, )
serializer_class = AdminLoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success' : 'True',
'status_code' : status.HTTP_200_OK,
'firstname' : serializer.data['first_name'],
'lastname' : serializer.data['last_name'],
'email' : serializer.data['email'],
'token' : serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserListView(ListAPIView):
permission_classes=(IsAuthenticated, )
queryset = Admin.objects.all()
serializer_class = AdminUserSerializer
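# Editorial routing sketch: these views could be wired up in a hypothetical
# urls.py roughly as follows; the URL paths are assumptions, not part of
# this module.
#
#   from django.urls import path
#   from users.admin_user_api import CreateAdminUser, AdminLogin, UserListView
#
#   urlpatterns = [
#       path('admin/register/', CreateAdminUser.as_view()),
#       path('admin/login/', AdminLogin.as_view()),
#       path('admin/users/', UserListView.as_view()),
#   ]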
| [
"[email protected]"
]
| |
b24bb4d5da2b1cc530f38ea45051ecb301423349 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/explosion_spaCy/spaCy-master/spacy/language.py | bebdeab20b61bc4446c9cf4acb5b82d330363308 | [
"MIT"
]
| permissive | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 15,022 | py | from __future__ import absolute_import
from __future__ import unicode_literals
from warnings import warn
import pathlib
from contextlib import contextmanager
import shutil
import ujson as json
try:
basestring
except NameError:
basestring = str
from .tokenizer import Tokenizer
from .vocab import Vocab
from .tagger import Tagger
from .matcher import Matcher
from . import attrs
from . import orth
from . import util
from . import language_data
from .lemmatizer import Lemmatizer
from .train import Trainer
from .attrs import TAG, DEP, ENT_IOB, ENT_TYPE, HEAD, PROB, LANG, IS_STOP
from .syntax.parser import get_templates
from .syntax.nonproj import PseudoProjectivity
from .pipeline import DependencyParser, EntityRecognizer
from .syntax.arc_eager import ArcEager
from .syntax.ner import BiluoPushDown
class BaseDefaults(object):
@classmethod
def create_lemmatizer(cls, nlp=None):
if nlp is None or nlp.path is None:
return Lemmatizer({}, {}, {})
else:
return Lemmatizer.load(nlp.path, rules=cls.lemma_rules)
@classmethod
def create_vocab(cls, nlp=None):
lemmatizer = cls.create_lemmatizer(nlp)
if nlp is None or nlp.path is None:
lex_attr_getters = dict(cls.lex_attr_getters)
# This is very messy, but it's the minimal working fix to Issue #639.
# This defaults stuff needs to be refactored (again)
lex_attr_getters[IS_STOP] = lambda string: string.lower() in cls.stop_words
return Vocab(lex_attr_getters=lex_attr_getters, tag_map=cls.tag_map,
lemmatizer=lemmatizer)
else:
return Vocab.load(nlp.path, lex_attr_getters=cls.lex_attr_getters,
tag_map=cls.tag_map, lemmatizer=lemmatizer)
@classmethod
def add_vectors(cls, nlp=None):
if nlp is None or nlp.path is None:
return False
else:
vec_path = nlp.path / 'vocab' / 'vec.bin'
if vec_path.exists():
return lambda vocab: vocab.load_vectors_from_bin_loc(vec_path)
@classmethod
def create_tokenizer(cls, nlp=None):
rules = cls.tokenizer_exceptions
if cls.token_match:
token_match = cls.token_match
if cls.prefixes:
prefix_search = util.compile_prefix_regex(cls.prefixes).search
else:
prefix_search = None
if cls.suffixes:
suffix_search = util.compile_suffix_regex(cls.suffixes).search
else:
suffix_search = None
if cls.infixes:
infix_finditer = util.compile_infix_regex(cls.infixes).finditer
else:
infix_finditer = None
vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
return Tokenizer(vocab, rules=rules,
prefix_search=prefix_search, suffix_search=suffix_search,
infix_finditer=infix_finditer, token_match=token_match)
@classmethod
def create_tagger(cls, nlp=None):
if nlp is None:
return Tagger(cls.create_vocab(), features=cls.tagger_features)
elif nlp.path is False:
return Tagger(nlp.vocab, features=cls.tagger_features)
elif nlp.path is None or not (nlp.path / 'pos').exists():
return None
else:
return Tagger.load(nlp.path / 'pos', nlp.vocab)
@classmethod
def create_parser(cls, nlp=None, **cfg):
if nlp is None:
return DependencyParser(cls.create_vocab(), features=cls.parser_features,
**cfg)
elif nlp.path is False:
return DependencyParser(nlp.vocab, features=cls.parser_features, **cfg)
elif nlp.path is None or not (nlp.path / 'deps').exists():
return None
else:
return DependencyParser.load(nlp.path / 'deps', nlp.vocab, **cfg)
@classmethod
def create_entity(cls, nlp=None, **cfg):
if nlp is None:
return EntityRecognizer(cls.create_vocab(), features=cls.entity_features, **cfg)
elif nlp.path is False:
return EntityRecognizer(nlp.vocab, features=cls.entity_features, **cfg)
elif nlp.path is None or not (nlp.path / 'ner').exists():
return None
else:
return EntityRecognizer.load(nlp.path / 'ner', nlp.vocab, **cfg)
@classmethod
def create_matcher(cls, nlp=None):
if nlp is None:
return Matcher(cls.create_vocab())
elif nlp.path is False:
return Matcher(nlp.vocab)
elif nlp.path is None or not (nlp.path / 'vocab').exists():
return None
else:
return Matcher.load(nlp.path / 'vocab', nlp.vocab)
@classmethod
def create_pipeline(self, nlp=None):
pipeline = []
if nlp is None:
return []
if nlp.tagger:
pipeline.append(nlp.tagger)
if nlp.parser:
pipeline.append(nlp.parser)
if nlp.entity:
pipeline.append(nlp.entity)
return pipeline
token_match = language_data.TOKEN_MATCH
prefixes = tuple(language_data.TOKENIZER_PREFIXES)
suffixes = tuple(language_data.TOKENIZER_SUFFIXES)
infixes = tuple(language_data.TOKENIZER_INFIXES)
tag_map = dict(language_data.TAG_MAP)
tokenizer_exceptions = {}
parser_features = get_templates('parser')
entity_features = get_templates('ner')
tagger_features = Tagger.feature_templates # TODO -- fix this
stop_words = set()
lemma_rules = {}
lex_attr_getters = {
attrs.LOWER: lambda string: string.lower(),
attrs.NORM: lambda string: string,
attrs.SHAPE: orth.word_shape,
attrs.PREFIX: lambda string: string[0],
attrs.SUFFIX: lambda string: string[-3:],
attrs.CLUSTER: lambda string: 0,
attrs.IS_ALPHA: orth.is_alpha,
attrs.IS_ASCII: orth.is_ascii,
attrs.IS_DIGIT: lambda string: string.isdigit(),
attrs.IS_LOWER: orth.is_lower,
attrs.IS_PUNCT: orth.is_punct,
attrs.IS_SPACE: lambda string: string.isspace(),
attrs.IS_TITLE: orth.is_title,
attrs.IS_UPPER: orth.is_upper,
attrs.IS_BRACKET: orth.is_bracket,
attrs.IS_QUOTE: orth.is_quote,
attrs.IS_LEFT_PUNCT: orth.is_left_punct,
attrs.IS_RIGHT_PUNCT: orth.is_right_punct,
attrs.LIKE_URL: orth.like_url,
attrs.LIKE_NUM: orth.like_number,
attrs.LIKE_EMAIL: orth.like_email,
attrs.IS_STOP: lambda string: False,
attrs.IS_OOV: lambda string: True
}
class Language(object):
'''A text-processing pipeline. Usually you'll load this once per process, and
pass the instance around your program.
'''
Defaults = BaseDefaults
lang = None
@classmethod
@contextmanager
def train(cls, path, gold_tuples, *configs):
if isinstance(path, basestring):
path = pathlib.Path(path)
tagger_cfg, parser_cfg, entity_cfg = configs
dep_model_dir = path / 'deps'
ner_model_dir = path / 'ner'
pos_model_dir = path / 'pos'
if dep_model_dir.exists():
shutil.rmtree(str(dep_model_dir))
if ner_model_dir.exists():
shutil.rmtree(str(ner_model_dir))
if pos_model_dir.exists():
shutil.rmtree(str(pos_model_dir))
dep_model_dir.mkdir()
ner_model_dir.mkdir()
pos_model_dir.mkdir()
if parser_cfg['pseudoprojective']:
# preprocess training data here before ArcEager.get_labels() is called
gold_tuples = PseudoProjectivity.preprocess_training_data(gold_tuples)
parser_cfg['actions'] = ArcEager.get_actions(gold_parses=gold_tuples)
entity_cfg['actions'] = BiluoPushDown.get_actions(gold_parses=gold_tuples)
with (dep_model_dir / 'config.json').open('w') as file_:
json.dump(parser_cfg, file_)
with (ner_model_dir / 'config.json').open('w') as file_:
json.dump(entity_cfg, file_)
with (pos_model_dir / 'config.json').open('w') as file_:
json.dump(tagger_cfg, file_)
self = cls(
path=path,
vocab=False,
tokenizer=False,
tagger=False,
parser=False,
entity=False,
matcher=False,
serializer=False,
vectors=False,
pipeline=False)
self.vocab = self.Defaults.create_vocab(self)
self.tokenizer = self.Defaults.create_tokenizer(self)
self.tagger = self.Defaults.create_tagger(self)
self.parser = self.Defaults.create_parser(self)
self.entity = self.Defaults.create_entity(self)
self.pipeline = self.Defaults.create_pipeline(self)
yield Trainer(self, gold_tuples)
self.end_training()
def __init__(self, **overrides):
if 'data_dir' in overrides and 'path' not in overrides:
raise ValueError("The argument 'data_dir' has been renamed to 'path'")
path = overrides.get('path', True)
if isinstance(path, basestring):
path = pathlib.Path(path)
if path is True:
path = util.match_best_version(self.lang, '', util.get_data_path())
self.path = path
self.vocab = self.Defaults.create_vocab(self) \
if 'vocab' not in overrides \
else overrides['vocab']
add_vectors = self.Defaults.add_vectors(self) \
if 'add_vectors' not in overrides \
else overrides['add_vectors']
if self.vocab and add_vectors:
add_vectors(self.vocab)
self.tokenizer = self.Defaults.create_tokenizer(self) \
if 'tokenizer' not in overrides \
else overrides['tokenizer']
self.tagger = self.Defaults.create_tagger(self) \
if 'tagger' not in overrides \
else overrides['tagger']
self.parser = self.Defaults.create_parser(self) \
if 'parser' not in overrides \
else overrides['parser']
self.entity = self.Defaults.create_entity(self) \
if 'entity' not in overrides \
else overrides['entity']
self.matcher = self.Defaults.create_matcher(self) \
if 'matcher' not in overrides \
else overrides['matcher']
if 'make_doc' in overrides:
self.make_doc = overrides['make_doc']
elif 'create_make_doc' in overrides:
self.make_doc = overrides['create_make_doc'](self)
elif not hasattr(self, 'make_doc'):
self.make_doc = lambda text: self.tokenizer(text)
if 'pipeline' in overrides:
self.pipeline = overrides['pipeline']
elif 'create_pipeline' in overrides:
self.pipeline = overrides['create_pipeline'](self)
else:
self.pipeline = [self.tagger, self.parser, self.matcher, self.entity]
def __call__(self, text, tag=True, parse=True, entity=True):
"""Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbitrary whitespace. Alignment into the original string
is preserved.
Args:
text (unicode): The text to be processed.
Returns:
doc (Doc): A container for accessing the annotations.
Example:
>>> from spacy.en import English
>>> nlp = English()
>>> tokens = nlp('An example sentence. Another example sentence.')
>>> tokens[0].orth_, tokens[0].head.tag_
('An', 'NN')
"""
doc = self.make_doc(text)
if self.entity and entity:
# Add any of the entity labels already set, in case we don't have them.
for token in doc:
if token.ent_type != 0:
self.entity.add_label(token.ent_type)
skip = {self.tagger: not tag, self.parser: not parse, self.entity: not entity}
for proc in self.pipeline:
if proc and not skip.get(proc):
proc(doc)
return doc
def pipe(self, texts, tag=True, parse=True, entity=True, n_threads=2, batch_size=1000):
'''Process texts as a stream, and yield Doc objects in order.
Supports GIL-free multi-threading.
Arguments:
texts (iterator)
tag (bool)
parse (bool)
entity (bool)
'''
skip = {self.tagger: not tag, self.parser: not parse, self.entity: not entity}
stream = (self.make_doc(text) for text in texts)
for proc in self.pipeline:
if proc and not skip.get(proc):
if hasattr(proc, 'pipe'):
stream = proc.pipe(stream, n_threads=n_threads, batch_size=batch_size)
else:
stream = (proc(item) for item in stream)
for doc in stream:
yield doc
def end_training(self, path=None):
if path is None:
path = self.path
elif isinstance(path, basestring):
path = pathlib.Path(path)
if self.tagger:
self.tagger.model.end_training()
self.tagger.model.dump(str(path / 'pos' / 'model'))
if self.parser:
self.parser.model.end_training()
self.parser.model.dump(str(path / 'deps' / 'model'))
if self.entity:
self.entity.model.end_training()
self.entity.model.dump(str(path / 'ner' / 'model'))
strings_loc = path / 'vocab' / 'strings.json'
with strings_loc.open('w', encoding='utf8') as file_:
self.vocab.strings.dump(file_)
self.vocab.dump(path / 'vocab' / 'lexemes.bin')
if self.tagger:
tagger_freqs = list(self.tagger.freqs[TAG].items())
else:
tagger_freqs = []
if self.parser:
dep_freqs = list(self.parser.moves.freqs[DEP].items())
head_freqs = list(self.parser.moves.freqs[HEAD].items())
else:
dep_freqs = []
head_freqs = []
if self.entity:
entity_iob_freqs = list(self.entity.moves.freqs[ENT_IOB].items())
entity_type_freqs = list(self.entity.moves.freqs[ENT_TYPE].items())
else:
entity_iob_freqs = []
entity_type_freqs = []
with (path / 'vocab' / 'serializer.json').open('w') as file_:
file_.write(
json.dumps([
(TAG, tagger_freqs),
(DEP, dep_freqs),
(ENT_IOB, entity_iob_freqs),
(ENT_TYPE, entity_type_freqs),
(HEAD, head_freqs)
]))
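# Editorial usage sketch: streaming documents through the pipeline defined
# above, assuming a concrete subclass such as spacy.en.English is installed.
#
#   from spacy.en import English
#
#   nlp = English()
#   for doc in nlp.pipe(['First text.', 'Second text.'], batch_size=50):
#       print([(w.orth_, w.tag_) for w in doc])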
| [
"[email protected]"
]
| |
1a7ee7ad25d703905a1b326105e18c566f03cf65 | d7cd51a7aaa9bd5a7c39409a39d1be1944ecb9c4 | /Assignments/Python_Stack/Django/Django_ORM/users_template/users_template/wsgi.py | 5725974a941c17bdca19fd76e2fc66d918edd371 | []
| no_license | Geneveroth/Coding_Dojo_Assignments | ae525e6d95e0f3fcf10b44a6734e8996b53ec7e1 | 9643845e237d5029de03dfe1ae2d43a49350ba22 | refs/heads/master | 2022-12-23T18:46:08.971696 | 2020-07-21T20:44:17 | 2020-07-21T20:44:17 | 251,153,510 | 0 | 0 | null | 2021-01-06T03:08:14 | 2020-03-29T23:10:09 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for users_template project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'users_template.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
f5ba807cf4377fe11e6a9eac40676eed893527a6 | fe1349a9bd25586f830f2a44618a4012ea20184a | /stanford_tf_research/01_plot_histogram_random.py | 838a63c687196773d418188816a03661ad3095dc | []
| no_license | EmbraceLife/LIE | cdca29b8308f2cd7740743cea379a72d7bde51db | 8c30b6aabc5842092c18dd97a0c20aa19f62000f | refs/heads/master | 2022-12-04T05:56:37.393552 | 2017-08-16T04:54:55 | 2017-08-16T04:54:55 | 87,597,172 | 4 | 3 | null | 2022-11-26T15:26:45 | 2017-04-08T00:39:27 | Python | UTF-8 | Python | false | false | 1,477 | py | """
=========================================================
Demo of the histogram (hist) function with a few features
=========================================================
In addition to the basic histogram, this demo shows a few optional
features:
* Setting the number of data bins
* The ``normed`` flag, which normalizes bin heights so that the
integral of the histogram is 1. The resulting histogram is an
approximation of the probability density function.
* Setting the face color of the bars
* Setting the opacity (alpha value).
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
np.random.seed(0)
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(437)
num_bins = 50
fig, ax = plt.subplots()
# the histogram of the data, is add on figure
n, bins, patches = ax.hist(x, num_bins, normed=1)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
ax.plot(bins, y, '--')
# set labels, title
ax.set_xlabel('Smarts')
ax.set_ylabel('Probability density')
ax.set_title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
| [
"[email protected]"
]
| |
a2afcdbb25e5d5358991ecaf4ea9ef99624a88ba | 912021bc754e9b6f62efaf0d69e4179dda376d62 | /splatify/spopulate.py | 5d26f2fcf6d4c130bc7636eab1a4cff76fea7336 | []
| no_license | micnem/splatify | 5439cfb21ada1b194cea3f17661b9e02dd60d403 | 792e3be4bd9bcc2c34ace6dd0aea3acf512b8829 | refs/heads/master | 2023-07-22T02:39:34.123446 | 2023-02-18T21:55:37 | 2023-02-18T21:55:37 | 123,298,090 | 2 | 1 | null | 2023-07-15T00:54:42 | 2018-02-28T14:42:28 | Python | UTF-8 | Python | false | false | 7,334 | py | from django.shortcuts import render, redirect
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from requests import Request, post
from .models import Artist, TopArtist, RelatedArtist, Profile
from django.utils import timezone
from datetime import timedelta
import requests as r
import json
import base64
from splatify2.settings import CLIENT_ID, CLIENT_SECRET
BASE_URL = "https://api.spotify.com/v1/"
def execute_spotify_api_request(access_token, endpoint, post_=False, put_=False):
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + access_token}
if post_:
r.post(BASE_URL + endpoint, headers=headers)
if put_:
r.put(BASE_URL + endpoint, headers=headers)
response = r.get(BASE_URL + endpoint, {}, headers=headers)
try:
return response.json()
except:
return {'Error': 'Issue with request'}
def create_artist(items):
artist_list = []
for item in items:
spotify_id = item.get('id')
# image = item.get('images')[0].get('url')
name = item.get('name')
popularity = item.get('popularity')
uri = item.get('uri')
artist = {
'spotify_id': spotify_id,
'name': name,
# 'image': image,
'popularity': popularity,
'uri': uri
}
artist_list.append(artist)
return artist_list
def get_top_artists(profile):
access_token = refresh_tokens(profile)
endpoint = "me/top/artists?time_range=long_term&limit=20"
response = execute_spotify_api_request(access_token, endpoint)
    if response is None:
endpoint = "me/top/artists?time_range=short_term&limit=20"
response = execute_spotify_api_request(access_token, endpoint)
items = response.get('items')
artist_list = create_artist(items)
for num, artist in enumerate(artist_list[::-1]):
current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['spotify_id'], popularity = artist['popularity'], uri = artist['uri'])
endpoint = f"artists/{current_artist.spotify_id}/related-artists"
response = execute_spotify_api_request(access_token, endpoint)
items = response.get('artists')
rel_artist_list = create_artist(items)
for number, rel_artist in enumerate(rel_artist_list[::-1]):
related_artist, created = Artist.objects.get_or_create(name = rel_artist['name'], spotify_id = rel_artist['spotify_id'], popularity = rel_artist['popularity'], uri = rel_artist['uri'])
RelatedArtist.objects.get_or_create(root_artist=current_artist, artist2=related_artist, affinity=number + 1)
ta, created = TopArtist.objects.get_or_create(artist=current_artist, profile=profile, affinity=num+1)
profile.populated = True
profile.save()
def match(user_list):
master_artist_list = []
for num, user in enumerate(user_list):
top_artists = user.profile.fave_artists.all()
related_artists = RelatedArtist.objects.filter(root_artist__in = top_artists).distinct().values_list("artist2", flat=True)
artist_list = (Artist.objects.filter(id__in = related_artists)|top_artists).distinct()
if num == 0:
master_artist_list = artist_list
else:
master_artist_list = master_artist_list.intersection(artist_list)
return master_artist_list
def create_playlist(profile, user2):
access_token = refresh_tokens(profile)
user_id = profile.account.social_auth.first().uid
endpoint = f"users/{user_id}/playlists"
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token}
body = json.dumps({
"name": f"SplatList for {profile.account.first_name} and {user2.first_name}",
"description": "A playlist generated for you, by Splatify, with love.",
"public": False
})
response = r.post(BASE_URL + endpoint, body, headers=headers)
playlist_id = response.json()
return playlist_id['id']
def add_to_playlist(profile, track_uri_list, playlist_id):
access_token = refresh_tokens(profile)
track_urls = '%2c'.join(track_uri_list)
endpoint = f"playlists/{playlist_id}/tracks?uris=" + track_urls
response = execute_spotify_api_request(access_token, endpoint, post_=True)
return response
def get_artist_top_songs(artist, profile):
access_token = refresh_tokens(profile)
artist_id = artist.spotify_id
endpoint = f"artists/{artist_id}/top-tracks?country=IL"
response = execute_spotify_api_request(access_token, endpoint)
tracks = response['tracks']
    track_uri_list = []
    # Repeat the available tracks until at least 3 URIs are collected; the
    # `tracks` guard prevents an infinite loop when the artist has no tracks.
    while len(track_uri_list) < 3 and tracks:
        for track in tracks:
            track_uri_list.append(track['uri'])
    return track_uri_list
def main(master_artist_list, profile, user2):
master_artist_list = master_artist_list[0:20]
playlist_id = create_playlist(profile, user2)
if len(master_artist_list) > 5:
for artist in master_artist_list:
add_to_playlist(profile, get_artist_top_songs(artist, profile), playlist_id)
else:
track_uri_list = seeder(master_artist_list, profile)
add_to_playlist(profile, track_uri_list, playlist_id)
def refresh_tokens(profile):
endpoint = "https://accounts.spotify.com/api/token"
refresh_token = profile.account.social_auth.first().extra_data['refresh_token']
auth_str = '{}:{}'.format(CLIENT_ID, CLIENT_SECRET)
b64_auth_str = base64.urlsafe_b64encode(auth_str.encode()).decode()
headers = {'Authorization': f'Basic {b64_auth_str}'}
body = {
'grant_type': 'refresh_token',
'refresh_token':refresh_token,
}
response = r.post(endpoint, body, headers=headers)
return response.json()['access_token']
def seeder(artist_list, profile):
seed_artists = []
for artist in artist_list:
seed_artists.append(artist.spotify_id)
seed_artists = seed_artists[:5]
artists = '%2c'.join(seed_artists)
endpoint = f"recommendations?seed_artists=" + artists
access_token = refresh_tokens(profile)
headers = {'Content-Type': 'application/json',
'Authorization': "Bearer " + access_token}
response = r.get(BASE_URL + endpoint, headers = headers)
    track_uri_list = []
    data = response.json()
    # A successful response has no 'error' key, so indexing it directly
    # would raise a KeyError; check for the key instead.
    if 'error' in data:
        # Fall back to a known track URI when the recommendations call fails.
        track_uri_list.append('spotify:track:4uLU6hMCjMI75M1A2tKUQC')
    else:
        rec_tracks = data['tracks']
        for track in rec_tracks:
            track_uri_list.append(track['uri'])
    return track_uri_list
def artist_search(query, profile):
access_token = refresh_tokens(profile)
endpoint = f"https://api.spotify.com/v1/search?q={query}&type=artist"
headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + access_token}
response = r.get(endpoint, headers = headers)
artist = response.json()['artists']['items'][0]
current_artist, created = Artist.objects.get_or_create(name = artist['name'], spotify_id = artist['id'], popularity = artist['popularity'], uri = artist['uri'])
TopArtist.objects.get_or_create(profile=profile, artist=current_artist, affinity=30)
return current_artist
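# Editorial flow sketch: a hypothetical caller could tie the helpers above
# together roughly like this; `profile1` and `user2` stand in for real
# Profile/User instances and are assumptions, not part of this module.
#
#   get_top_artists(profile1)                    # cache top + related artists
#   shared = match([profile1.account, user2])    # artists both users share
#   main(list(shared), profile1, user2)          # build the shared playlist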
| [
"[email protected]"
]
| |
04d6541daf0a5a782f444e495432b9f0bc9d80a1 | fcaa0395a7c6aa74cbc47c40f35fdc312e44b9c5 | /aok/comparisons/_basics.py | 30b87c970c9d3869bf7cb89261e8ca2a4506b453 | []
| no_license | rocketboosters/a-ok | b6f1a70d262123c2df5e4969a687cbcfdfbafc8c | 06f31404a4ce34d561253ba74b533ce3fb73c60c | refs/heads/main | 2023-09-02T19:18:18.158296 | 2021-11-03T01:54:36 | 2021-11-03T01:54:36 | 388,142,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,951 | py | import typing
import yaml
from aok import _definitions
from aok import _operations
class Equals(_definitions.Comparator):
"""Compares two values as an equality."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Make an equals comparison."""
return _operations.cast_compatible(self.value, observed) == observed
class Unequals(_definitions.Comparator):
"""Compares two values as an inequality."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Make an inequality comparison."""
return _operations.cast_compatible(self.value, observed) != observed
class Anything(_definitions.Comparator):
"""Allows anything for the given value."""
def __init__(self):
"""Create an Anything comparison operation."""
super(Anything, self).__init__(None)
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Anything will always be true."""
return True
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Anything":
return cls()
class Less(_definitions.Comparator):
"""Allows anything less than the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Less than will be true."""
return _operations.cast_compatible(self.value, observed) > observed
class LessOrEqual(_definitions.Comparator):
"""Allows anything less than or equal the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Less than or equal will be true."""
return _operations.cast_compatible(self.value, observed) >= observed
class Greater(_definitions.Comparator):
"""Allows anything greater than the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than will be true."""
return _operations.cast_compatible(self.value, observed) < observed
class GreaterOrEqual(_definitions.Comparator):
"""Allows anything greater than or equal to the given value."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than or equal will be true."""
return _operations.cast_compatible(self.value, observed) <= observed
class Between(_definitions.Comparator):
"""Allows between the given values."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Greater than or equal will be true."""
casted_min = _operations.cast_compatible(self.value["min"], observed)
casted_max = _operations.cast_compatible(self.value["max"], observed)
return casted_min <= observed <= casted_max
@classmethod
def construct(cls, minimum: typing.Any, maximum: typing.Any) -> "Between":
"""Create a Between comparison operator with the specified options."""
return cls({"min": minimum, "max": maximum})
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "Between":
if isinstance(node, yaml.SequenceNode):
loaded = loader.construct_sequence(node, deep=True)
value = {"min": loaded[0], "max": loaded[1]}
else:
value = loader.construct_mapping(node, deep=True)
return cls(value)
class OneOf(_definitions.Comparator):
"""Allows a matching comparison between any of the listed values."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Succeeds if at least one of the options are equal."""
failures: typing.Dict[str, _definitions.Comparison] = {}
for index, option in enumerate(self.value["options"]):
if isinstance(option, _definitions.Comparator):
comparator = option
else:
comparator = Equals(option)
result = comparator.compare(observed, subset=subset)
if getattr(result, "success", result):
return result
if isinstance(result, _definitions.Comparison):
failures[str(index)] = result
else:
failures[str(index)] = _definitions.Comparison(
operation=comparator.operation_name(),
success=False,
expected=comparator.value,
observed=observed,
)
return _definitions.Comparison(
operation="one_of",
success=False,
expected=", ".join([f"({i}) {f.expected}" for i, f in failures.items()]),
observed=observed,
)
@classmethod
def construct(cls, options: typing.List[typing.Any]) -> "OneOf":
"""Create a OneOf comparison operator with the specified options."""
return cls({"options": options})
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "OneOf":
options = loader.construct_sequence(node, deep=True)
return cls({"options": options})
class NoneOf(_definitions.Comparator):
"""Allows a mismatching comparison between none of the listed values."""
def _compare(
self,
observed: typing.Any,
subset: bool = False,
) -> typing.Union[_definitions.Comparison, bool]:
"""Succeeds if none of the options are equal."""
for index, option in enumerate(self.value["options"]):
if isinstance(option, _definitions.Comparator):
comparator = option
else:
comparator = Equals(option)
result = comparator.compare(observed, subset=subset)
if getattr(result, "success", False):
return _definitions.Comparison(
operation=f"not {result.operation}",
success=False,
expected=result.expected,
observed=result.observed,
children=result.children,
)
return _definitions.Comparison(
operation="none_of",
success=True,
expected=self.value,
observed=observed,
)
@classmethod
def construct(cls, options: typing.List[typing.Any]) -> "NoneOf":
"""Create a NoneOf comparison operator with the specified options."""
return cls({"options": options})
@classmethod
def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "NoneOf":
options = loader.construct_sequence(node, deep=True)
return cls({"options": options})
Anything.register()
anything = getattr(Anything, "constructor", Anything)
Between.register()
between = getattr(Between, "constructor", Between)
Equals.register()
equals = getattr(Equals, "constructor", Equals)
Unequals.register()
unequals = getattr(Unequals, "constructor", Unequals)
Greater.register()
greater = getattr(Greater, "constructor", Greater)
GreaterOrEqual.register()
greater_or_equal = getattr(GreaterOrEqual, "constructor", GreaterOrEqual)
Less.register()
less = getattr(Less, "constructor", Less)
LessOrEqual.register()
less_or_equal = getattr(LessOrEqual, "constructor", LessOrEqual)
NoneOf.register()
none_of = getattr(NoneOf, "constructor", NoneOf)
OneOf.register()
one_of = getattr(OneOf, "constructor", OneOf)
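# Minimal usage sketch (illustrative, not part of the library). compare()
# returns either a bool or a Comparison record, so success is read the same
# way the classes above read it:
#
#     result = Between.construct(1, 5).compare(4)
#     success = getattr(result, "success", result)
#     options = OneOf.construct([1, 2, Greater(10)]).compare(12)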
| [
"[email protected]"
]
| |
648e5ca36c4d9b01db5a8637ad045c23b07bf7f6 | 80aabbd44790ec4feee93624f61c29e87d691d6a | /drawBot/ui/drawView.py | 24fac94c74d4a3c9c44d2a34358e011c780327b5 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | asaumierdemers/drawbot | 546961ead63f71859725a87f190f7ebbd45995f2 | 9ba1ef902bdd5c8e291d5d6835e09f05bfa00261 | refs/heads/master | 2020-12-25T19:59:00.391766 | 2016-08-05T10:04:57 | 2016-08-05T10:04:57 | 29,844,501 | 0 | 0 | null | 2015-01-26T04:12:30 | 2015-01-26T04:12:30 | null | UTF-8 | Python | false | false | 2,955 | py | from AppKit import *
from Quartz import PDFView, PDFThumbnailView, PDFDocument
from vanilla import Group
epsPasteBoardType = "CorePasteboardFlavorType 0x41494342"
class DrawBotPDFThumbnailView(PDFThumbnailView):
def draggingUpdated_(self, draggingInfo):
return NSDragOperationNone
class ThumbnailView(Group):
nsViewClass = DrawBotPDFThumbnailView
def setDrawView(self, view):
self.getNSView().setPDFView_(view.getNSView())
def getSelection(self):
try:
# sometimes this goes weirdly wrong...
selection = self.getNSView().selectedPages()
except:
return -1
if selection:
for page in selection:
document = page.document()
index = document.indexForPage_(page)
return index
return -1
class DrawBotPDFView(PDFView):
def performKeyEquivalent_(self, event):
# catch a bug in PDFView
# cmd + ` causes a traceback
# DrawBot[15705]: -[__NSCFConstantString characterAtIndex:]: Range or index out of bounds
try:
return super(DrawBotPDFView, self).performKeyEquivalent_(event)
except:
return False
class DrawView(Group):
nsViewClass = DrawBotPDFView
def __init__(self, posSize):
super(DrawView, self).__init__(posSize)
pdfView = self.getNSView()
pdfView.setAutoScales_(True)
view = pdfView.documentView()
scrollview = view.enclosingScrollView()
scrollview.setBorderType_(NSBezelBorder)
def get(self):
pdf = self.getNSView().document()
if pdf is None:
return None
return pdf.dataRepresentation()
def set(self, pdfData):
pdf = PDFDocument.alloc().initWithData_(pdfData)
self.setPDFDocument(pdf)
def setPath(self, path):
url = NSURL.fileURLWithPath_(path)
document = PDFDocument.alloc().initWithURL_(url)
self.setPDFDocument(document)
def setPDFDocument(self, document):
if document is None:
document = PDFDocument.alloc().init()
self.getNSView().setDocument_(document)
def getPDFDocument(self):
return self.getNSView().document()
def setScale(self, scale):
self.getNSView().setScaleFactor_(scale)
def scale(self):
return self.getNSView().scaleFactor()
def scrollDown(self):
document = self.getNSView().documentView()
document.scrollPoint_((0, 0))
def scrollToPageIndex(self, index):
pdf = self.getPDFDocument()
if pdf is None:
self.scrollDown()
elif 0 <= index < pdf.pageCount():
try:
# sometimes this goes weirdly wrong...
page = pdf.pageAtIndex_(index)
self.getNSView().goToPage_(page)
except:
self.scrollDown()
else:
self.scrollDown()
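# Minimal usage sketch (illustrative; assumes the usual vanilla Window setup):
#
#     from vanilla import Window
#     w = Window((500, 500))
#     w.drawView = DrawView((0, 0, -0, -0))
#     w.drawView.setPath("/path/to/document.pdf")
#     w.open()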
| [
"[email protected]"
]
| |
cd25ad9f9b621517a8f79e725e360604777f67c1 | a34d9458832a033bb05b1cec9f13c9f997c6e8d0 | /eola/two_d_space.py | 8b4cfdaa771b30e15433839b87f4c8762caa38f3 | []
| no_license | scottopell/manim | 9de0ca8fd1f4a203e557dc5503b38591e0ef66bc | a3fa16ebcbc9b3eb462c0a3434840c954a92e0d1 | refs/heads/master | 2021-01-14T12:44:51.483245 | 2016-09-03T19:29:17 | 2016-09-03T19:29:17 | 59,712,661 | 0 | 0 | null | 2016-05-26T02:18:09 | 2016-05-26T02:18:09 | null | UTF-8 | Python | false | false | 15,698 | py | import numpy as np
from scene import Scene
from mobject import Mobject
from mobject.vectorized_mobject import VMobject, Group
from mobject.tex_mobject import TexMobject, TextMobject
from animation import Animation
from animation.transform import ApplyPointwiseFunction, Transform, \
ApplyMethod, FadeOut, ApplyFunction
from animation.simple_animations import ShowCreation, Write
from topics.number_line import NumberPlane, Axes
from topics.geometry import Vector, Line, Circle, Arrow, Dot, \
BackgroundRectangle, Square
from helpers import *
from eola.matrix import Matrix, VECTOR_LABEL_SCALE_FACTOR, vector_coordinate_label
X_COLOR = GREEN_C
Y_COLOR = RED_C
Z_COLOR = BLUE_D
class VectorScene(Scene):
CONFIG = {
"basis_vector_stroke_width" : 6
}
def add_plane(self, animate = False, **kwargs):
plane = NumberPlane(**kwargs)
if animate:
self.play(ShowCreation(plane, submobject_mode = "lagged_start"))
self.add(plane)
return plane
def add_axes(self, animate = False, color = WHITE, **kwargs):
axes = Axes(color = color, tick_frequency = 1)
if animate:
self.play(ShowCreation(axes, submobject_mode = "one_at_a_time"))
self.add(axes)
return axes
def lock_in_faded_grid(self, dimness = 0.7, axes_dimness = 0.5):
plane = self.add_plane()
axes = plane.get_axes()
plane.fade(dimness)
axes.highlight(WHITE)
axes.fade(axes_dimness)
self.add(axes)
self.freeze_background()
def add_vector(self, vector, color = YELLOW, animate = True, **kwargs):
if not isinstance(vector, Arrow):
vector = Vector(vector, color = color, **kwargs)
if animate:
self.play(ShowCreation(vector))
self.add(vector)
return vector
def write_vector_coordinates(self, vector, **kwargs):
coords = vector_coordinate_label(vector, **kwargs)
self.play(Write(coords))
return coords
def get_basis_vectors(self):
return [
Vector(
vect, color = color,
stroke_width = self.basis_vector_stroke_width
)
for vect, color in [
([1, 0], X_COLOR),
([0, 1], Y_COLOR)
]
]
def get_basis_vector_labels(self, **kwargs):
i_hat, j_hat = self.get_basis_vectors()
return Group(*[
self.get_vector_label(
vect, label, color = color,
label_scale_factor = 1,
**kwargs
)
for vect, label , color in [
(i_hat, "\\hat{\\imath}", X_COLOR),
(j_hat, "\\hat{\\jmath}", Y_COLOR),
]
])
def get_vector_label(self, vector, label,
direction = "left",
rotate = False,
color = None,
label_scale_factor = VECTOR_LABEL_SCALE_FACTOR):
if not isinstance(label, TexMobject):
if len(label) == 1:
label = "\\vec{\\textbf{%s}}"%label
label = TexMobject(label)
if color is None:
color = vector.get_color()
label.highlight(color)
label.scale(label_scale_factor)
label.add_background_rectangle()
angle = vector.get_angle()
if not rotate:
label.rotate(-angle)
if direction is "left":
label.shift(-label.get_bottom() + 0.1*UP)
else:
label.shift(-label.get_top() + 0.1*DOWN)
label.rotate(angle)
label.shift((vector.get_end() - vector.get_start())/2)
return label
def label_vector(self, vector, label, animate = True, **kwargs):
label = self.get_vector_label(vector, label, **kwargs)
if animate:
self.play(Write(label, run_time = 1))
self.add(label)
return label
def position_x_coordinate(self, x_coord, x_line, vector):
x_coord.next_to(x_line, -np.sign(vector[1])*UP)
x_coord.highlight(X_COLOR)
return x_coord
def position_y_coordinate(self, y_coord, y_line, vector):
y_coord.next_to(y_line, np.sign(vector[0])*RIGHT)
y_coord.highlight(Y_COLOR)
return y_coord
def coords_to_vector(self, vector, coords_start = 2*RIGHT+2*UP, clean_up = True):
starting_mobjects = list(self.mobjects)
array = Matrix(vector)
array.shift(coords_start)
arrow = Vector(vector)
x_line = Line(ORIGIN, vector[0]*RIGHT)
y_line = Line(x_line.get_end(), arrow.get_end())
x_line.highlight(X_COLOR)
y_line.highlight(Y_COLOR)
x_coord, y_coord = array.get_mob_matrix().flatten()
self.play(Write(array, run_time = 1))
self.dither()
self.play(ApplyFunction(
lambda x : self.position_x_coordinate(x, x_line, vector),
x_coord
))
self.play(ShowCreation(x_line))
self.play(
ApplyFunction(
lambda y : self.position_y_coordinate(y, y_line, vector),
y_coord
),
FadeOut(array.get_brackets())
)
y_coord, brackets = self.get_mobjects_from_last_animation()
self.play(ShowCreation(y_line))
self.play(ShowCreation(arrow))
self.dither()
if clean_up:
self.clear()
self.add(*starting_mobjects)
def vector_to_coords(self, vector, integer_labels = True, clean_up = True):
starting_mobjects = list(self.mobjects)
show_creation = False
if isinstance(vector, Arrow):
arrow = vector
vector = arrow.get_end()[:2]
else:
arrow = Vector(vector)
show_creation = True
array = vector_coordinate_label(arrow, integer_labels = integer_labels)
x_line = Line(ORIGIN, vector[0]*RIGHT)
y_line = Line(x_line.get_end(), arrow.get_end())
x_line.highlight(X_COLOR)
y_line.highlight(Y_COLOR)
x_coord, y_coord = array.get_mob_matrix().flatten()
x_coord_start = self.position_x_coordinate(
x_coord.copy(), x_line, vector
)
y_coord_start = self.position_y_coordinate(
y_coord.copy(), y_line, vector
)
brackets = array.get_brackets()
if show_creation:
self.play(ShowCreation(arrow))
self.play(
ShowCreation(x_line),
Write(x_coord_start),
run_time = 1
)
self.play(
ShowCreation(y_line),
Write(y_coord_start),
run_time = 1
)
self.dither()
self.play(
Transform(x_coord_start, x_coord, submobject_mode = "all_at_once"),
Transform(y_coord_start, y_coord, submobject_mode = "all_at_once"),
Write(brackets, run_time = 1),
)
self.dither()
self.remove(x_coord_start, y_coord_start, brackets)
self.add(array)
if clean_up:
self.clear()
self.add(*starting_mobjects)
return array, x_line, y_line
def show_ghost_movement(self, vector):
if isinstance(vector, Arrow):
vector = vector.get_end() - vector.get_start()
elif len(vector) == 2:
vector = np.append(np.array(vector), 0.0)
x_max = int(SPACE_WIDTH + abs(vector[0]))
y_max = int(SPACE_HEIGHT + abs(vector[1]))
dots = VMobject(*[
Dot(x*RIGHT + y*UP)
for x in range(-x_max, x_max)
for y in range(-y_max, y_max)
])
dots.set_fill(BLACK, opacity = 0)
dots_halfway = dots.copy().shift(vector/2).set_fill(WHITE, 1)
dots_end = dots.copy().shift(vector)
self.play(Transform(
dots, dots_halfway, rate_func = rush_into
))
self.play(Transform(
dots, dots_end, rate_func = rush_from
))
self.remove(dots)
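# Minimal usage sketch (illustrative; rendered through the usual manim
# entry point):
#
#     class ShowVector(VectorScene):
#         def construct(self):
#             self.add_plane(animate = True)
#             v = self.add_vector([2, 1])
#             self.label_vector(v, "v")
#             self.vector_to_coords(v)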
class LinearTransformationScene(VectorScene):
CONFIG = {
"include_background_plane" : True,
"include_foreground_plane" : True,
"foreground_plane_kwargs" : {
"x_radius" : 2*SPACE_WIDTH,
"y_radius" : 2*SPACE_HEIGHT,
"secondary_line_ratio" : 0
},
"background_plane_kwargs" : {
"color" : GREY,
"secondary_color" : DARK_GREY,
"axes_color" : GREY,
"stroke_width" : 2,
},
"show_coordinates" : False,
"show_basis_vectors" : True,
"i_hat_color" : X_COLOR,
"j_hat_color" : Y_COLOR,
"leave_ghost_vectors" : False,
"t_matrix" : [[3, 0], [1, 2]],
}
def setup(self):
if hasattr(self, "has_setup"):
return
self.has_setup = True
        ##^the guard above avoids re-running setup and breaking all the old Scenes
self.background_mobjects = []
self.foreground_mobjects = []
self.transformable_mobjects = []
self.moving_vectors = []
self.transformable_labels = []
self.moving_mobjects = []
self.t_matrix = np.array(self.t_matrix)
self.background_plane = NumberPlane(
**self.background_plane_kwargs
)
if self.show_coordinates:
self.background_plane.add_coordinates()
if self.include_background_plane:
self.add_background_mobject(self.background_plane)
if self.include_foreground_plane:
self.plane = NumberPlane(**self.foreground_plane_kwargs)
self.add_transformable_mobject(self.plane)
if self.show_basis_vectors:
self.i_hat, self.j_hat = [
self.add_vector(
coords, color, animate = False, stroke_width = 6
)
for coords, color in [
((1, 0), self.i_hat_color),
((0, 1), self.j_hat_color),
]
]
def add_special_mobjects(self, mob_list, *mobs_to_add):
for mobject in mobs_to_add:
if mobject not in mob_list:
mob_list.append(mobject)
self.add(mobject)
def add_background_mobject(self, *mobjects):
self.add_special_mobjects(self.background_mobjects, *mobjects)
def add_foreground_mobject(self, *mobjects):
self.add_special_mobjects(self.foreground_mobjects, *mobjects)
def add_transformable_mobject(self, *mobjects):
self.add_special_mobjects(self.transformable_mobjects, *mobjects)
def add_moving_mobject(self, mobject, target_mobject = None):
mobject.target = target_mobject
self.add_special_mobjects(self.moving_mobjects, mobject)
def add_unit_square(self, color = YELLOW, opacity = 0.3, animate = False):
square = Square(color = color, side_length = 1)
square.shift(-square.get_corner(DOWN+LEFT))
if animate:
added_anims = map(Animation, self.moving_vectors)
self.play(ShowCreation(square), *added_anims)
self.play(square.set_fill, color, opacity, *added_anims)
else:
square.set_fill(color, opacity)
self.add_transformable_mobject(square)
self.bring_to_front(*self.moving_vectors)
self.square = square
return self
def add_vector(self, vector, color = YELLOW, **kwargs):
vector = VectorScene.add_vector(
self, vector, color = color, **kwargs
)
self.moving_vectors.append(vector)
return vector
def write_vector_coordinates(self, vector, **kwargs):
coords = VectorScene.write_vector_coordinates(self, vector, **kwargs)
self.add_foreground_mobject(coords)
return coords
def add_transformable_label(self, vector, label, new_label = None, **kwargs):
label_mob = self.label_vector(vector, label, **kwargs)
if new_label:
label_mob.target_text = new_label
else:
label_mob.target_text = "L(%s)"%label_mob.expression
label_mob.vector = vector
label_mob.kwargs = kwargs
if "animate" in label_mob.kwargs:
label_mob.kwargs.pop("animate")
self.transformable_labels.append(label_mob)
return label_mob
def add_title(self, title, scale_factor = 1.5, animate = False):
if not isinstance(title, Mobject):
title = TextMobject(title).scale(scale_factor)
title.to_edge(UP)
title.add_background_rectangle()
if animate:
self.play(Write(title))
self.add_foreground_mobject(title)
self.title = title
return self
def get_matrix_transformation(self, transposed_matrix):
transposed_matrix = np.array(transposed_matrix)
if transposed_matrix.shape == (2, 2):
new_matrix = np.identity(3)
new_matrix[:2, :2] = transposed_matrix
transposed_matrix = new_matrix
elif transposed_matrix.shape != (3, 3):
raise "Matrix has bad dimensions"
return lambda point: np.dot(point, transposed_matrix)
def get_piece_movement(self, pieces):
start = VMobject(*pieces)
target = VMobject(*[mob.target for mob in pieces])
if self.leave_ghost_vectors:
self.add(start.copy().fade(0.7))
return Transform(start, target, submobject_mode = "all_at_once")
def get_moving_mobject_movement(self, func):
for m in self.moving_mobjects:
if m.target is None:
m.target = m.copy()
target_point = func(m.get_center())
m.target.move_to(target_point)
return self.get_piece_movement(self.moving_mobjects)
def get_vector_movement(self, func):
for v in self.moving_vectors:
v.target = Vector(func(v.get_end()), color = v.get_color())
return self.get_piece_movement(self.moving_vectors)
def get_transformable_label_movement(self):
for l in self.transformable_labels:
l.target = self.get_vector_label(
l.vector.target, l.target_text, **l.kwargs
)
return self.get_piece_movement(self.transformable_labels)
def apply_transposed_matrix(self, transposed_matrix, **kwargs):
func = self.get_matrix_transformation(transposed_matrix)
if "path_arc" not in kwargs:
net_rotation = np.mean([
angle_of_vector(func(RIGHT)),
angle_of_vector(func(UP))-np.pi/2
])
kwargs["path_arc"] = net_rotation
self.apply_function(func, **kwargs)
def apply_inverse_transpose(self, t_matrix, **kwargs):
t_inv = np.linalg.inv(np.array(t_matrix).T).T
self.apply_transposed_matrix(t_inv, **kwargs)
def apply_nonlinear_transformation(self, function, **kwargs):
self.plane.prepare_for_nonlinear_transform()
self.apply_function(function, **kwargs)
def apply_function(self, function, added_anims = [], **kwargs):
if "run_time" not in kwargs:
kwargs["run_time"] = 3
anims = [
ApplyPointwiseFunction(function, t_mob)
for t_mob in self.transformable_mobjects
] + [
self.get_vector_movement(function),
self.get_transformable_label_movement(),
self.get_moving_mobject_movement(function),
] + [
Animation(f_mob)
for f_mob in self.foreground_mobjects
] + added_anims
self.play(*anims, **kwargs)
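# Minimal usage sketch (illustrative shear matrix; setup() builds the planes
# and basis vectors declared in CONFIG):
#
#     class ShearScene(LinearTransformationScene):
#         def construct(self):
#             self.setup()
#             self.add_unit_square()
#             self.apply_transposed_matrix([[1, 0], [1, 1]])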
| [
"[email protected]"
]
| |
a98ee3453af8e367bb94991bb6722e190e0aab83 | 604c7b40f58830c16c51b4514765a6c1915769c4 | /bnop_source/b_code/core/object_model/bnop_repositories.py | e07a5d0d940ed0673114233db8b6b95c9beac9aa | [
"MIT"
]
| permissive | boro-alpha/bnop | 2e3a0654ddf73dce357928d399853c8d0fc936e7 | ae80ce88f12f3b9d509f416aea4f19dc20f1081b | refs/heads/master | 2023-06-11T17:34:42.743589 | 2021-06-30T07:24:51 | 2021-06-30T07:24:51 | 381,096,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | class BnopRepositories(object):
def __init__(
self,
uuid):
        self.uuid = uuid
| [
"[email protected]"
]
| |
4ed7b0073e5f3f21e7883ee46de2d41af70f1429 | b00840e56173dc2a196442bd354b9e3cc13b17df | /code_util/createJobScript.py | c360a93fc09e90dace29b76e6b66c43797d94224 | []
| no_license | Sportsfan77777/vortex | 56c28fb760f6c98de4a7c8fdcf1168d78b4e57af | 780ec14937d1b79e91a367d58f75adc905b8eef2 | refs/heads/master | 2023-08-31T02:50:09.454230 | 2023-08-24T10:55:05 | 2023-08-24T10:55:05 | 41,785,163 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | """
makes a new job script
"""
import argparse
def new_argument_parser(description = "Make a new job script."):
parser = argparse.ArgumentParser()
# File
parser.add_argument("fn",
                        help = 'job file name (required; .sh is appended to the end)')
# Basic Parameters
parser.add_argument('-c', dest = "num_cores", type = int, default = 1,
help = 'number of cores (default: 1)')
parser.add_argument('-p', dest = "ptile", type = int, default = None,
help = 'number of cores needed on each computer (default: num_cores)')
    parser.add_argument('--err', dest = "err_name", default = "err_%I",
                        help = 'job error file name (default: err_%%I)')
    parser.add_argument('--out', dest = "out_name", default = "out_%I",
                        help = 'job output file name (default: out_%%I)')
parser.add_argument('-q', dest = "queue", default = "medium",
help = 'queue (default: medium)')
parser.add_argument('--name', dest = "name", default = None,
                        help = 'job name (default: fn)')
parser.add_argument('--gpu', dest = "gpu", action = 'store_true', default = False,
help = 'request gpu resource (default: no gpus)')
# Modules
parser.add_argument('--python_off', dest = "python", action = 'store_false', default = True,
help = 'include python module (default: include)')
parser.add_argument('--fftw_off', dest = "fftw", action = 'store_false', default = True,
help = 'include fftw module (default: include)')
parser.add_argument('--openmpi_off', dest = "openmpi", action = 'store_false', default = True,
help = 'include openmpi module (default: include)')
# Job
parser.add_argument('--mpi', dest = "mpirun", action = 'store_true', default = False,
help = 'use mpirun (default: do not use mpirun)')
parser.add_argument('-j', dest = "job", default = "",
help = 'job command (default: empty string)')
parser.add_argument('-o', dest = "output", default = None,
help = 'output file (.out appended to the end) (default: name)')
return parser
###############################################################################
### Parse Arguments ###
args = new_argument_parser().parse_args()
# Names
if args.name is None:
args.name = args.fn
if args.output is None:
args.output = args.name
args.fn = "%s.sh" % args.fn
args.output = "%s.out" % args.output
# Cores
if (args.ptile is None) or (args.ptile > args.num_cores):
args.ptile = args.num_cores
###############################################################################
### Write File ###
with open(args.fn, 'w') as f:
f.write("#!/bin/bash\n")
### Basic Parameters ###
f.write("#BSUB -n %d\n" % args.num_cores)
f.write("#BSUB -e %s\n" % args.err_name)
f.write("#BSUB -o %s\n" % args.out_name)
f.write('#BSUB -q "%s"\n' % args.queue)
f.write("#BSUB -u mhammer\n")
f.write("#BSUB -J %s\n" % args.name)
if args.gpu:
f.write("#BSUB -R gpu\n")
f.write('#BSUB -R "span[ptile=%d]"\n' % args.ptile)
# Line Break #
f.write("\n")
### Modules ###
if args.python:
f.write("module load python/2.7.3\n")
if args.fftw:
f.write("module load fftw/2.1.5\n")
if args.openmpi:
f.write("module load openmpi\n")
# Line Break
f.write("\n")
### Job ###
if args.mpirun:
f.write("mpirun -np %d " % args.num_cores)
f.write("%s " % args.job)
f.write("> %s\n" % args.output)
# Line Break
f.write("\n")
| [
"[email protected]"
]
| |
fe0881db35f3f5d538b836ae7ffdbb95c3e3210e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/308/84669/submittedfiles/testes.py | 9138abd6a192264d1bfcda194bb1960c01f572ad | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
def mostrar():
print(a+b)
    print('Result')
a = 3
b = 4
mostrar() | [
"[email protected]"
]
| |
d3be12214002bf0e8ed2b4e329795a1e62b70612 | b2f755bdb8c5a73cf28679b14de1a7100cd48b35 | /Interview/4/31.py | 398dfb0ecf5c8643733ea6c6524bdb8f8ed60db3 | []
| no_license | Futureword123456/Interview | cc50e1a3e4e85e4ac570469fc8a839029cdc6c50 | 5cb36dc5f2459abd889e1b29f469d5149139dc5f | refs/heads/master | 2023-03-25T15:24:23.939871 | 2021-03-13T08:15:54 | 2021-03-13T08:15:54 | 345,374,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# @Time : 2021/3/8 0008
# @Author : yang
# @Email : [email protected]
# @File : 31.py
"""Python 获取昨天日期"""
import datetime
def getyesterday():
days = datetime.date.today()
"""
datetime.timedelta对象代表两个时间之间的时间差
两个date或datetime对象相减就可以返回一个timedelta对象。
"""
day = datetime.timedelta(days=1)
return days-day
if __name__ == "__main__":
print(getyesterday())
print(datetime.datetime.now()) | [
"[email protected]"
]
| |
4238d3e59229db3f82e82deeaea7ce90768f81e6 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /未完成题目/LCP/LCP25/LCP25_Python_1.py | dafd8c2c8eabcccd19a5f5df0444b87409140e43 | []
| no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | class Solution:
    def keyboard(self, k: int, n: int) -> int:
        # Count length-n key sequences over 26 letters with each letter used
        # at most k times, mod 1e9+7. Letter-by-letter DP: dp[j] = ways to
        # arrange j presses with the letters handled so far; a new letter
        # pressed t times occupies C(j, t) of the j positions.
        from math import comb  # requires Python 3.8+
        MOD = 10 ** 9 + 7
        dp = [1] + [0] * n
        for _ in range(26):
            dp = [sum(comb(j, t) * dp[j - t] for t in range(min(k, j) + 1)) % MOD
                  for j in range(n + 1)]
        return dp[n]
if __name__ == "__main__":
print(Solution().keyboard(1, 1)) # 26
print(Solution().keyboard(1, 2)) # 650
| [
"[email protected]"
]
| |
aac36e5e97effc021d51bddce00836cf86108ad9 | e1fe1ed4f2ba8ab0146ce7c08d65bc7947150fc8 | /credit11315/pipelines.py | 6e80a0ff0684dd2011f6c21e58ced8a6f581ef7f | []
| no_license | yidun55/credit11315 | 0d88ceef314efa444de58eb5da8939c1acff3abe | b048ec9db036a382287d5faacb9490ccbf50735c | refs/heads/master | 2021-01-20T01:03:30.617914 | 2015-07-31T09:58:24 | 2015-07-31T09:58:24 | 38,853,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import log
import os
os.chdir("/home/dyh/data/credit11315/infoDetail")
class Credit11315Pipeline(object):
def process_item(self, item, spider):
        if len(item.keys()) == 1:  # the item only holds the raw page content
try:
os.chdir("/home/dyh/data/credit11315/infoDetail")
with open(spider.writeInFile,"a") as f:
f.write(item["content"])
except Exception,e:
log.msg("content pipeline error_info=%s"%e, level=log.ERROR)
else:
for key in item.iterkeys():
try:
os.chdir("/home/dyh/data/credit11315/infoDetail")
with open('detailInfoScrapy_'+key,"a") as f:
f.write(item[key]+"\n")
except Exception,e:
log.msg("DetailInformation(Item) pipeline error_info=%s"%e, level=log.ERROR)
| [
"[email protected]"
]
| |
7f7be7515b49d2339d45739a3d6096151dc8de80 | 9381c0a73251768441dc45c7e181548742b9bdbc | /src/educative/fibonacci_numbers/house_thief_memo.py | dfe266791fa02380306c6208bd07804a7c2fbd97 | []
| no_license | Flaeros/leetcode | 45cc510ec513bfb26dbb762aa1bd98f3b42dce18 | 1dcea81a21bd39fee3e3f245a1418526bd0a5e8f | refs/heads/master | 2022-06-02T14:15:31.539238 | 2022-04-18T14:44:18 | 2022-04-18T14:49:05 | 250,183,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | def find_max_steal(wealth):
    # Top-down "house robber" DP: memo[i] caches the best haul from house i on.
    memo = [-1 for _ in range(len(wealth))]
return find_max_steal_rec(wealth, memo, 0)
def find_max_steal_rec(wealth, memo, index):
if index >= len(wealth):
return 0
    if memo[index] == -1:
        # Either rob this house and jump two ahead, or skip to the next one.
        inclusive = wealth[index] + find_max_steal_rec(wealth, memo, index + 2)
        exclusive = find_max_steal_rec(wealth, memo, index + 1)
        memo[index] = max(inclusive, exclusive)
return memo[index]
def main():
print(find_max_steal([2, 5, 1, 3, 6, 2, 4]))
print(find_max_steal([2, 10, 14, 8, 1]))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
75ff04700bbef3333f0e5b04408e5c6d166a6e34 | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/ABC117/b/main.py | d60a1bb7a504f56ae3ca6140c9cb43c9ca6653d3 | [
"CC0-1.0"
]
| permissive | mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!/usr/bin/env python3
_ = input()
*l, m = sorted(map(int, input().split()))
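# A polygon exists iff its longest side is shorter than the sum of the others.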
print("Yes" if sum(l) > m else "No") | [
"[email protected]"
]
| |
9ad86092e385a8f8238bb7bb27ac3740c79a39f7 | 1ecb282756c95d9ae19035761c6e4bb480fdaf26 | /python/lsst/ctrl/stats/records/generic.py | a07b96fbfc651a578c7b2e48c3f7924b5d26cf16 | []
| no_license | provingground-moe/ctrl_stats | 58cba09f95a30007fc5df10d6d8992719b0f5368 | 14790770765b3a167d0d9f318b40e12bbb5df0bb | refs/heads/master | 2020-06-10T20:42:34.260304 | 2017-08-24T21:26:34 | 2017-08-24T21:26:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #
# LSST Data Management System
# Copyright 2008-2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from __future__ import absolute_import
from .record import Record
class Generic(Record):
"""Generic log event
    Listed in the documentation as not used, but included for completeness.
Parameters
----------
year: `str`
the year to tag the job with
lines: list
the strings making up this record
"""
def __init__(self, year, lines):
Record.__init__(self, year, lines)
eventClass = Generic
eventCode = "008"
| [
"[email protected]"
]
| |
bec987b46ec463a48ccfb01582519267edeb81fd | 8cb210f5a7b9a46dcdd1c4f4cdebb9b006e16d2f | /scripts/gridengine/paramsearch/runScript.py | 8f25665b32d3f6f045302200aca7832f8ad4e096 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | drwiner/Py3Dial | 0ed4572c3d110907a27a8a8f3167299db0de1919 | 0aa5b68f4548bb15e9d167165c17306fd267ee4f | refs/heads/master | 2020-03-26T19:13:53.511897 | 2018-08-18T21:45:32 | 2018-08-18T21:45:32 | 145,254,529 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,102 | py | import os
import sys
import argparse
""" DETAILS:
# THIS FILE EXPLORES GP/REGR/FF/LSTM MODELS
-- Try varying AT LEAST the following network parameters:
a) network structures: n_hidden, L1, L2, activation
b) learning rate, decay, and regularisation
"""
################################################
### repository path
################################################
repository_path = os.path.abspath(os.path.join(os.getcwd(),'../../../'))
def config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello, inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type, minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in, features, max_k, \
learning_algorithm, architecture, h1_size, h2_size,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots,
informmask,
informcountaccepted,
requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling):
text = '[GENERAL]' + '\n'
text += 'domains = ' + domains + '\n'
text += 'singledomain = True' + '\n'
text += 'root = ' + root + '\n'
text += 'seed = ' + seed + '\n'
text += '\n'
text += '[conditional]' + '\n'
text += 'conditionalsimuser = True\n'
text += 'conditionalbeliefs = True\n'
text += '\n'
text += '[agent]' + '\n'
text += 'maxturns = ' + maxturns + '\n'
text += '\n'
text += '[logging]' + '\n'
text += 'screen_level = ' + screen_level + '\n'
text += 'file_level = ' + file_level + '\n'
text += '\n'
text += '[simulate]' + '\n'
text += 'mindomainsperdialog = 1\n'
text += 'maxdomainsperdialog = 1\n'
text += 'forcenullpositive = ' + forcenullpositive + '\n'
text += '\n'
text += '[policy]' + '\n'
text += 'maxinformslots = ' + maxinformslots + '\n'
text += 'informmask = ' + informmask + '\n'
text += 'informcountaccepted = ' + informcountaccepted + '\n'
text += 'requestmask = ' + requestmask + '\n'
text += 'byemask = ' + byemask + '\n'
text += '\n'
text += '[policy_' + domains + ']' + '\n'
text += 'belieftype = ' + belieftype + '\n'
text += 'useconfreq = ' + useconfreq + '\n'
text += 'policytype = ' + policytype + '\n'
text += 'startwithhello = ' + startwithhello + '\n'
text += 'inpolicyfile = ' + inpolicyfile + '\n'
text += 'outpolicyfile = ' + outpolicyfile + '\n'
text += 'learning = ' + learning + '\n'
text += 'save_step = ' + save_step + '\n'
text += '\n'
text += '[dqnpolicy_' + domains + ']' + '\n'
text += 'maxiter = ' + maxiter + '\n'
text += 'gamma = ' + gamma + '\n'
text += 'learning_rate = ' + learning_rate + '\n'
text += 'tau = ' + tau + '\n'
text += 'replay_type = ' + replay_type + '\n'
text += 'minibatch_size = ' + minibatch_size + '\n'
text += 'capacity = ' + capacity + '\n'
text += 'exploration_type = ' + exploration_type + '\n'
text += 'epsilon_start = ' + epsilon_start + '\n'
text += 'epsilon_end = ' + epsilon_end + '\n'
text += 'n_in = ' + n_in + '\n'
text += 'features = ' + features + '\n'
text += 'max_k = ' + max_k + '\n'
text += 'learning_algorithm = ' + learning_algorithm + '\n'
text += 'architecture = ' + architecture + '\n'
text += 'h1_size = ' + h1_size + '\n'
text += 'h2_size = ' + h2_size + '\n'
text += 'training_frequency = ' + training_frequency + '\n'
# Bayesian parameters
text += 'n_samples = ' + n_samples + '\n'
text += 'stddev_var_mu = ' + stddev_var_mu + '\n'
text += 'stddev_var_logsigma = ' + stddev_var_logsigma + '\n'
text += 'mean_log_sigma = ' + mean_log_sigma + '\n'
text += 'sigma_prior = ' + sigma_prior + '\n'
    text += 'alpha = ' + alpha + '\n'
    text += 'alpha_divergence = ' + alpha_divergence + '\n'
text += 'sigma_eps = ' + sigma_eps + '\n'
text += 'no_head = ' + no_head + '\n'
text += 'keep_prob = ' + keep_prob + '\n'
text += 'dropout_start = ' + dropout_start + '\n'
text += '\n'
# ACER
text += 'delta = ' + delta + '\n'
text += 'beta = ' + beta + '\n'
text += 'is_threshold = ' + is_threshold + '\n'
text += 'train_iters_per_episode = ' + train_iters_per_episode + '\n'
text += '\n'
text += '[gppolicy_' + domains + ']' + '\n'
text += 'kernel = ' + kernel + '\n'
text += '\n'
text += '[gpsarsa_' + domains + ']' + '\n'
text += 'random = ' + random + '\n'
text += 'scale = ' + scale + '\n'
text += '\n'
text += '[usermodel]' + '\n'
text += 'usenewgoalscenarios = ' + usenewgoalscenarios + '\n'
text += 'sampledialogueprobs = ' + sampledialogueprobs + '\n'
text += 'oldstylepatience = ' + oldstylepatience + '\n'
text += 'oldstylesampling = ' + old_style_parameter_sampling + '\n'
text += '\n'
text += '[errormodel]' + '\n'
text += 'nbestsize = ' + nbestsize + '\n'
text += 'confusionmodel = ' + confusionmodel + '\n'
text += 'nbestgeneratormodel = ' + nbestgeneratormodel + '\n'
text += 'confscorer = ' + confscorer + '\n'
text += '\n'
text += '[goalgenerator]' + '\n'
text += 'patience = ' + patience + '\n'
text += '\n'
text += '[eval]' + '\n'
text += 'rewardvenuerecommended = 0' + '\n'
text += 'penaliseallturns = ' + penaliseallturns + '\n'
text += 'wrongvenuepenalty = ' + wrongvenuepenalty + '\n'
text += 'notmentionedvaluepenalty = ' + notmentionedvaluepenalty + '\n'
text += '\n'
text += '[eval_' + domains + ']' + '\n'
text += 'successmeasure = objective' + '\n'
text += 'successreward = 20' + '\n'
text += '\n'
return text
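# For reference, the generated file is plain INI text; a fragment looks like
# this (values are whatever the caller passed in):
#
#     [policy_CamRestaurants]
#     belieftype = focus
#     policytype = dqn
#     learning = True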
def run_on_grid(targetDir, step, iter_in_step, test_iter_in_step, parallel, execDir, configName, text, mode,
error):
################################################
### config file
config = repository_path + configName + '.cfg'
    # if the directory does not exist, create it
config_dir = repository_path + 'configures/'
if not os.path.exists(config_dir):
os.makedirs(config_dir)
with open(config, 'w') as f:
f.write(text)
runStr = 'running ' + config
print '{0:*^60}'.format(runStr)
# command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' 3 10000 1 ' + execDir + ' 15 1 ' + config
if mode == ('train', 'grid'):
command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('test', 'grid'):
command = 'python run_grid_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('train', 'own'):
command = 'python run_own_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('test', 'own'):
command = 'python run_own_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
print command
os.system(command)
def main(argv):
step = '10'
iter_in_step = '100'
test_iter_in_step = '100'
save_step = '100'
parallel = '1'
maxiter = str(int(step) * int(iter_in_step))
################################################
### Domain information
################################################
domains = 'CamRestaurants' # SF restaurants
if len(argv) > 4:
repository_path = argv[4]
root = repository_path
seed = argv[3]
screen_level = 'warning'
file_level = 'warning'
maxturns = '25'
################################################
### General policy information
################################################
belieftype = 'focus'
useconfreq = 'False'
policytype_vary = ['bdqn']#dropout', 'concrete', 'bootstrapped'] #'dqn', 'bbqn', 'bdqn'] # 'dropout', 'concrete'
startwithhello = 'False'
inpolicyfile = 'policyFile'
outpolicyfile = 'policyFile'
learning = 'True'
maxinformslots = '5' # Maximum number of slot values that are presented in the inform summary action
informmask = 'True' # Decides if the mask over inform type actions is used or not (having the mask active speeds up learning)
informcountaccepted = '4' # number of accepted slots needed to unmask the inform_byconstraints action
requestmask = 'True' # Decides if the mask over inform type actions is used or not
byemask = 'True'
################################################
### DNN architecture options
################################################
gamma = '0.99' # discount factor
learning_rate = '0.001' # learning rate
tau_vary = ['0.02'] # target policy network update frequency 0.02 is equal to update policy after 50 epochs
replay_type_vary = ['vanilla'] # ['vanilla'] experience replay
minibatch_size_vary = ['64'] # how many turns are in the batch
capacity_vary = ['1000'] # how many turns/dialogues are in ER
exploration_type_vary = ['e-greedy'] # 'e-greedy', 'Boltzman'
epsilon_s_e_vary = [('0.9', '0.0')] # , ('0.3', '0.0')]#, ('0.5', '0.1')]
training_frequency = '2' # how often train the model, episode_count % frequency == 0
features = '["discourseAct", "method", "requested", "full", "lastActionInformNone", "offerHappened", "inform_info"]'
max_k = '5'
learning_algorithm = 'dqn'
architecture = 'vanilla'
h1_size = ['130']#, '200', '300']
h2_size = ['50']#, '75', '100']
################################################
### Bayesian estimation parameters
################################################
n_samples = '1' # number of samples for action choice
alpha_divergence = 'False' # use alpha divergence?
alpha = '0.85'
sigma_eps = '0.01' # variance size for sampling epsilon
sigma_prior = '1.5' # prior for variance in KL term
stddev_var_mu = '0.01' # stdv for weights
stddev_var_logsigma = '0.01' # stdv of variance for variance
mean_log_sigma = '0.000001' # prior mean for variance
no_head = '3' # number of heads used for
keep_prob = '0.9' # dropout level
dropout_start = '0.2' # concrete dropout level
################################################
### ACER parameters
################################################
beta = '0.95'
delta = '1.0'
is_threshold = '5.0'
train_iters_per_episode = '1'
################################################
### User model and environment model info.
################################################
usenewgoalscenarios = 'True'
sampledialogueprobs = 'True'
old_style_parameter_sampling = 'True' # for bdqn True
confusionmodel = 'RandomConfusions'
confscorer = 'additive' # 'additive'
nbestgeneratormodel = 'SampledNBestGenerator'
nbestsize = '3'
patience = '3'
penaliseallturns = 'True'
wrongvenuepenalty = '0'
notmentionedvaluepenalty = '0'
oldstylepatience = 'True'
forcenullpositive = 'False'
runError_vary = ['0']
    if domains == 'CamRestaurants':
        n_in = '268'
    elif domains == 'CamHotels':
        n_in = '111'
    elif domains == 'SFRestaurants':
        n_in = '636'
    elif domains == 'SFHotels':
        n_in = '438'
    elif domains == 'Laptops11':
        n_in = '257'
    elif domains == 'TV':
        n_in = '188'
    elif domains == 'Booking':
        n_in = '188'
################################################
### GP policy training options
################################################
kernel = 'polysort'
random = 'False'
scale = '3'
ConfigCounter = 0
listFile = open(argv[0], 'w')
runMode = ('train', 'grid')
if argv[1] not in ('train', 'test') or argv[2] not in ('grid', 'own'):
print '\n!!!!! WRONG COMMAND !!!!!\n'
print 'EXAMPLE: python runScript.py list [train|test] [grid|own]\n'
exit(1)
elif argv[1] == 'train':
if argv[2] == 'grid':
runMode = ('train', 'grid')
elif argv[2] == 'own':
runMode = ('train', 'own')
elif argv[1] == 'test':
if argv[2] == 'grid':
runMode = ('test', 'grid')
elif argv[2] == 'own':
runMode = ('test', 'own')
listOutput = '{0: <6}'.format('PARAM') + '\t'
    listOutput += '{0: <10}'.format('target') + '\t'
    listOutput += '{0: <10}'.format('type') + '\t'
    listOutput += '{0: <10}'.format('lr') + '\t'
listOutput += '{0: <10}'.format('replaytype') + '\t'
listOutput += '{0: <10}'.format('nMini') + '\t'
listOutput += '{0: <10}'.format('capacity') + '\t'
listOutput += '{0: <10}'.format('runError') + '\t'
listFile.write(listOutput + '\n')
for policytype in policytype_vary:
for tau in tau_vary:
for replay_type in replay_type_vary:
for minibatch_size in minibatch_size_vary:
for exploration_type in exploration_type_vary:
for capacity in capacity_vary:
for epsilon_s_e in epsilon_s_e_vary:
epsilon_start, epsilon_end = epsilon_s_e
for h1 in h1_size:
for h2 in h2_size:
for runError in runError_vary:
execDir = repository_path
if policytype == 'gp':
targetDir = 'CamRestaurants_gp_'
elif policytype == 'dqn' or policytype == 'dqn_vanilla':
targetDir = 'CamRestaurants_dqn_'
elif policytype == 'a2c':
targetDir = 'CamRestaurants_a2c_'
elif policytype == 'enac':
targetDir = 'CamRestaurants_enac_'
elif policytype == 'bdqn':
targetDir = 'CamRestaurants_bdqn_'
elif policytype == 'bbqn':
targetDir = 'CamRestaurants_bbqn_'
elif policytype == 'concrete':
targetDir = 'CamRestaurants_concrete_'
elif policytype == 'bootstrapped':
targetDir = 'CamRestaurants_bootstrapped_'
elif policytype == 'dropout':
targetDir = 'CamRestaurants_dropout_'
elif policytype == 'acer':
targetDir = 'CamRestaurants_acer_'
elif policytype == 'a2cis':
targetDir = 'CamRestaurants_a2cis_'
elif policytype == 'tracer':
targetDir = 'CamRestaurants_tracer_'
listOutput = '{0: <10}'.format(targetDir) + '\t'
listOutput += '{0: <10}'.format(policytype) + '\t'
listOutput += '{0: <10}'.format(learning_rate) + '\t'
listOutput += '{0: <10}'.format(replay_type) + '\t'
listOutput += '{0: <10}'.format(minibatch_size) + '\t'
listOutput += '{0: <10}'.format(capacity) + '\t'
listOutput += '{0: <10}'.format(runError) + '\t'
targetDir += 'learning_rate' + learning_rate + '_replay_type' + replay_type + \
'_minibatch_size' + minibatch_size + '_capacity' + capacity + '_runError' + runError
text = config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello,
inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type,
minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in,
features, max_k, learning_algorithm, architecture, h1,
h2,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots, informmask,informcountaccepted,requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling)
# run_on_grid(targetDir, execDir, configName, text)
tmpName = 'gRun' + str(ConfigCounter)
run_on_grid(tmpName, step, iter_in_step, test_iter_in_step, parallel, execDir, tmpName, text,
runMode, runError)
listFile.write(tmpName + '\t' + listOutput + '\n')
ConfigCounter += 1
if __name__ == "__main__":
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description='DeepRL parameter search')
parser.add_argument('-s', '--seed', help='set the random seed', required=False, type=str, default="123")
parser.add_argument('-tn', '--train', help='script is set to train policies (default)', action='store_true')
parser.add_argument('-tt', '--test', help='script is set to test/evaluate policies', action='store_true')
parser.add_argument('--own', help='run on local machine (default)', action='store_true')
parser.add_argument('--grid', help='run on grid', action='store_true')
parser.add_argument('-f', '--file', help='the list file', required=False, type=str, default='list')
parser.add_argument('-p', '--pydial', help='the path to pydial', required=False, type=str, default='../../../')
if len(argv) > 0 and not argv[0][0] == '-':
if len(sys.argv) != 5:
parser.print_help()
            exit(1)
        main(argv)
else:
own = not args.grid
grid = not args.own and args.grid
if own == grid:
pass # issue error with parameter help
train = not args.test
test = not args.train and args.test
if train == test:
pass # issue error with parameter help
pydialpath = os.path.abspath(os.path.join(os.getcwd(),args.pydial))
argv = [args.file, 'test' if test else 'train', 'grid' if grid else 'own', args.seed, pydialpath]
# print argv
main(argv)
# END OF FILE
| [
"[email protected]"
]
| |
0ad73be05ea4a42a3b2118023282236427d3145d | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.3.0/examples/stats.py | 86a1e5c5b193afe5fb375e4eef30098d3dbc84b2 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
]
| permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 4,052 | py | # Copyright (c) 2009-2015 Tom Keffer <[email protected]>
# See the file LICENSE.txt for your rights.
"""Example of how to extend the search list used by the Cheetah generator.
*******************************************************************************
This search list extension offers two extra tags:
'alltime': All time statistics.
For example, "what is the all time high temperature?"
'seven_day': Statistics for the last seven days.
That is, since midnight seven days ago.
*******************************************************************************
To use this search list extension:
1) Copy this file to the user directory. See https://bit.ly/33YHsqX for where your user
directory is located.
2) Modify the option search_list_extensions in the skin.conf configuration file, adding
the name of this extension. When you're done, it will look something like
this:
[CheetahGenerator]
search_list_extensions = user.stats.MyStats
You can then use tags such as $alltime.outTemp.max for the all-time max
temperature, or $seven_day.rain.sum for the total rainfall in the last
seven days.
*******************************************************************************
"""
import datetime
import time
from weewx.cheetahgenerator import SearchList
from weewx.tags import TimespanBinder
from weeutil.weeutil import TimeSpan
class MyStats(SearchList): # 1
def __init__(self, generator): # 2
SearchList.__init__(self, generator)
def get_extension_list(self, timespan, db_lookup): # 3
"""Returns a search list extension with two additions.
Parameters:
timespan: An instance of weeutil.weeutil.TimeSpan. This will
hold the start and stop times of the domain of
valid times.
db_lookup: This is a function that, given a data binding
as its only parameter, will return a database manager
object.
"""
# First, create TimespanBinder object for all time. This one is easy
# because the object timespan already holds all valid times to be
# used in the report.
all_stats = TimespanBinder(timespan,
db_lookup,
context='year',
formatter=self.generator.formatter,
converter=self.generator.converter,
skin_dict=self.generator.skin_dict) # 4
# Now get a TimespanBinder object for the last seven days. This one we
# will have to calculate. First, calculate the time at midnight, seven
# days ago. The variable week_dt will be an instance of datetime.date.
week_dt = datetime.date.fromtimestamp(timespan.stop) \
- datetime.timedelta(weeks=1) # 5
# Convert it to unix epoch time:
week_ts = time.mktime(week_dt.timetuple()) # 6
# Form a TimespanBinder object, using the time span we just
# calculated:
seven_day_stats = TimespanBinder(TimeSpan(week_ts, timespan.stop),
db_lookup,
context='week',
formatter=self.generator.formatter,
converter=self.generator.converter,
skin_dict=self.generator.skin_dict) # 7
# Now create a small dictionary with keys 'alltime' and 'seven_day':
search_list_extension = {'alltime' : all_stats,
'seven_day' : seven_day_stats} # 8
# Finally, return our extension as a list:
return [search_list_extension] # 9
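# In a Cheetah template the new tags can then be used, for example:
#
#     All-time max temperature: $alltime.outTemp.max
#     Rain over the last seven days: $seven_day.rain.sum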
| [
"[email protected]"
]
| |
f51558bfe5192cb59b9736a74ce3591e50b861b9 | d8b201ba6bf57db0101d88836429bbcb3a10b857 | /Math/TriangleQuest.py | c1a6cd2a1b0e05a20a09b0582cb16f0e25c80188 | [
"MIT"
]
| permissive | MaxCodeXTC/PythonHackerRankSolutions | 32ad41df3fbd33f8651cdc5099c8ec3d37d9bc17 | 987618b61b71fe5e9a40275fb348476657bbea57 | refs/heads/master | 2022-06-28T06:00:19.126751 | 2020-05-07T09:23:37 | 2020-05-07T09:23:37 | 262,471,271 | 1 | 0 | null | 2020-05-09T02:24:11 | 2020-05-09T02:24:10 | null | UTF-8 | Python | false | false | 127 | py | '''
Title : Triangle Quest
Subdomain : Math
Domain : Python
Author : codeperfectplus
Created : 17 January 2020
'''
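# A minimal sketch of one common accepted answer (prints 1, 22, 333, ... for
# i in 1..N-1 via the repunit trick; illustrative, not necessarily the
# author's intended solution):
for i in range(1, int(input())):
    print((10 ** i // 9) * i)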
| [
"[email protected]"
]
| |
8dd6db002b7cfee421083e2f1a14012671d69f19 | 3941f6b431ccb00ab75f19c52e40e5dad2e98b9b | /Dasymetric/dasym_tables.py | 41bc20d47b5ee4b9da2c2f6b66632d0c1d6ba20e | [
"Apache-2.0"
]
| permissive | scw/global-threats-model | 70c375c1633e8578f1e41f278b443f1501ceb0ec | 11caa662373c5dbfbb08bb0947f3dd5eedc0b4e0 | refs/heads/master | 2016-09-05T11:25:13.056352 | 2013-08-22T22:10:13 | 2013-08-22T22:10:13 | 3,566,652 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,033 | py | # ---------------------------------------------------------------------------
# dasym_tables.py
# Created on: Wed Jan 11 2006
# Written by: Matthew Perry
# Usage: See the "script arguments" section
# ---------------------------------------------------------------------------
#================================================================#
# Prepare Environment
# Import system modules
import sys, string, os, win32com.client
# Create the Geoprocessor object
gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")
# Set the necessary product code
gp.SetProduct("ArcInfo")
# Check out any necessary licenses
gp.CheckOutExtension("spatial")
# Load required toolboxes...
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Conversion Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Data Management Tools.tbx")
#----------------------------------------#
# Script Arguments
Temp_Workspace = "C:\\WorkSpace\\temp"
try:
#INPUTS
Spatial_Units_Raster = sys.argv[1] # raster containing country code
Attribute_Lookup_Table = sys.argv[2] # dbf containing countries and all attributes of interest
Attribute_Lookupt_Table_Join_Item = sys.argv[3] # country code
Attribute_Lookup_Table_Value_Item = sys.argv[4] # the variable of interest
Aux_Raster = sys.argv[5] # landcover
Weighting_Table = sys.argv[6] # Table relating land cover classes to relative weights
Weighting_Table_Join_Field = sys.argv[7] # column with landcover codes
Weighting_Table_Weight_Field = sys.argv[8] # column with relative wieghts
#OUTPUTS
Combined_Raster = sys.argv[9] # output of aml, input to gp script
Combined_Raster_Table = sys.argv[10] # output of aml, input to gp script
Output_Raster = sys.argv[11] # the dasymetric map
except:
#INPUTS
Spatial_Units_Raster = "C:\\WorkSpace\\FAO\\dasym\\units\\units_as"
Attribute_Lookup_Table = "C:\\WorkSpace\\FAO\\dasym\\lookups\\faocia.dbf"
Attribute_Lookupt_Table_Join_Item = "CODE"
Attribute_Lookup_Table_Value_Item = "FERT"
Aux_Raster = "C:\\WorkSpace\\clipped_rusle_inputs\\as_igbp"
Weighting_Table = "C:\\WorkSpace\\FAO\\dasym\\weights\\C.dbf"
Weighting_Table_Join_Field = "LANDCOVER"
Weighting_Table_Weight_Field = "WEIGHT"
#OUTPUTS
Combined_Raster = Temp_Workspace + "\\ctpc"
Combined_Raster_Table = Temp_Workspace + "\\ctpc.dbf"
Output_Raster = "C:\\WorkSpace\\FAO\\dasym\\outputs\\as_fertC"
#--------------------------------#
# Constants
Joined_Output_Table_Name = "combine_weight_join"
Joined_Output_Table = Temp_Workspace + "\\" + Joined_Output_Table_Name + ".dbf"
Combine_Reclass = Temp_Workspace + "\\combine2_rcl"
Temp_Raster = Temp_Workspace + "\\temp_dasy"
Combined_Raster_Table_Variable_Field = "VOI" # Should be constant
#================================================================#
# Main
#---------------------------------#
# Call the AML as the first step
# b/c ArcGIS can't handle raster attribute tables
amlPath = os.path.dirname(sys.argv[0]) + "\\"
sCommandLine = "arc.exe \"&run\" \"" + amlPath + "dasym_combine.aml \" "
sCommandLine += Spatial_Units_Raster + " " + Attribute_Lookup_Table + " "
sCommandLine += Attribute_Lookupt_Table_Join_Item + " " + Attribute_Lookup_Table_Value_Item + " "
sCommandLine += Aux_Raster + " "
sCommandLine += Combined_Raster + " " + Combined_Raster_Table + " " + Temp_Workspace + "'"
os.system(sCommandLine)
# gp.AddMessage(" ****** Combined Layers")
print " ****** Combined Layers"
#------------------------------------------------#
# Determine the column names based on user input
base = os.path.basename(Combined_Raster_Table)
split = base.split(".")
combinedPrefix = split[0]
base = os.path.basename(Weighting_Table)
split = base.split(".")
weightedPrefix = split[0]
base = os.path.basename(Aux_Raster)
split = base.split(".")
auxprefix = split[0]
auxprefix = auxprefix[:10]
Variable_Field = combinedPrefix + "_VOI" # "ctfc_VOI" # Combined_Raster_Table _ VOI
Variable_Field = Variable_Field[:10]
Weight_Field = weightedPrefix + "_" + Weighting_Table_Weight_Field # "TFC_WEIGHT"
Weight_Field = Weight_Field[:10]
Count_Field = combinedPrefix + "_COUNT" # Combined_Raster_Table _ COUNT
Count_Field = Count_Field[:10]
Value_Field = combinedPrefix + "_VALUE" # Combined_Raster_Table _ VALUE
Value_Field = Value_Field[:10]
Combined_Raster_Table_Join_Field = auxprefix.upper() # "LANDCOVER2" # Name of aux raster truncated and caps
try:
#------------------------------------------------#
# Join Tables and create new output table
gp.MakeTableView_management(Combined_Raster_Table, "ctable")
gp.AddJoin_management("ctable", Combined_Raster_Table_Join_Field, Weighting_Table, Weighting_Table_Join_Field, "KEEP_ALL")
gp.TableToTable_conversion("ctable", Temp_Workspace, Joined_Output_Table_Name)
print " ****** Created joined table"
#------------------------------------------------#
# Add fields
gp.AddField_management(Joined_Output_Table, "totalpc", "DOUBLE", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
gp.AddField_management(Joined_Output_Table, "valuepp", "LONG", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
gp.MakeTableView_management(Joined_Output_Table, "jtable")
print " ****** Added Fields and reloaded table view"
#------------------------------------------------#
# Calculate Total of Variable Per Auxillary Data Class
gp.CalculateField_management("jtable", "totalpc", "[" + Variable_Field + "] * [" + Weight_Field + "]")
# Calculate Value of variable per pixel
gp.CalculateField_management("jtable", "valuepp", "int( [totalpc] * 10000.0 / [" + Count_Field + "]) ")
print " ****** Calculated New Fields"
#------------------------------------------------#
# Reclass by Table...
gp.ReclassByTable_sa(Combined_Raster, "jtable", Value_Field , Value_Field, "valuepp", Temp_Raster , "DATA")
print " ****** Reclassed Raster"
#------------------------------------------------#
# Scale Raster to original units
Map_Algebra_expression = Temp_Raster + " / 10000.0"
gp.SingleOutputMapAlgebra_sa(Map_Algebra_expression, Output_Raster)
print " ****** Scaled raster"
except:
print gp.GetMessages()
sys.exit(1)
| [
"[email protected]"
]
| |
4e59b315ea0eefd8f148888dc07903bba562e531 | 59182ffe28c054d9f33ee9b8885a52fd5944440c | /twilio/rest/wireless/v1/usage_record.py | 91867ba874f25cc0138ed624ec37517e3a4b31c9 | [
"MIT"
]
| permissive | NCPlayz/twilio-python | 652b508e086ee7e6658015e74f3bd19572012502 | 08898a4a1a43b636a64c9e98fbb0b6ee1792c687 | refs/heads/master | 2020-08-12T22:24:06.816467 | 2019-10-09T19:25:08 | 2019-10-09T19:25:08 | 214,854,286 | 0 | 0 | MIT | 2019-10-13T16:29:39 | 2019-10-13T16:29:39 | null | UTF-8 | Python | false | false | 9,237 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class UsageRecordList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the UsageRecordList
:param Version version: Version that contains the resource
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordList
"""
super(UsageRecordList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/UsageRecords'.format(**self._solution)
def stream(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Streams UsageRecordInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(end=end, start=start, granularity=granularity, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Lists UsageRecordInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
return list(self.stream(
end=end,
start=start,
granularity=granularity,
limit=limit,
page_size=page_size,
))
def page(self, end=values.unset, start=values.unset, granularity=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of UsageRecordInstance records from the API.
Request is executed immediately
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
params = values.of({
'End': serialize.iso8601_datetime(end),
'Start': serialize.iso8601_datetime(start),
'Granularity': granularity,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return UsageRecordPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UsageRecordInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UsageRecordPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordList>'
class UsageRecordPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the UsageRecordPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordPage
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
super(UsageRecordPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UsageRecordInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
return UsageRecordInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordPage>'
class UsageRecordInstance(InstanceResource):
""" """
class Granularity(object):
HOURLY = "hourly"
DAILY = "daily"
ALL = "all"
def __init__(self, version, payload):
"""
Initialize the UsageRecordInstance
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
super(UsageRecordInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'period': payload['period'],
'commands': payload['commands'],
'data': payload['data'],
}
# Context
self._context = None
self._solution = {}
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def period(self):
"""
:returns: The time period for which usage is reported
:rtype: dict
"""
return self._properties['period']
@property
def commands(self):
"""
:returns: An object that describes the aggregated Commands usage for all SIMs during the specified period
:rtype: dict
"""
return self._properties['commands']
@property
def data(self):
"""
:returns: An object that describes the aggregated Data usage for all SIMs over the period
:rtype: dict
"""
return self._properties['data']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordInstance>'
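# Usage sketch (not part of the generated file): reading daily aggregated
# account usage through a configured REST client. The credential variables
# are placeholders, and the import path is an assumption about the SDK.
#
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   for record in client.wireless.usage_records.list(granularity='daily', limit=5):
#       print(record.period, record.data)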
| [
"[email protected]"
]
| |
5aae57fc607a70052c54ad09b04cbd25840d0f28 | 9fc6604ae98e1ae91c490e8201364fdee1b4222a | /eg_msg_base/models/msg_status.py | 4501bdc8b00a217bae754eaa0a5b5c32b395123c | []
| no_license | nabiforks/baytonia | b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4 | 58cb304d105bb7332f0a6ab685015f070988ba56 | refs/heads/main | 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from odoo import models, fields
class MsgStatus(models.Model):
_name = "msg.status"
name = fields.Char(string="Status", readonly=True)
is_last_status = fields.Boolean(string="Is Last Status")
sms_instance_id = fields.Many2one(comodel_name="sms.instance", string="Sms Instance", readonly=True)
| [
"[email protected]"
]
| |
afe69ae31a6285f10b876f9f4c269a0bde8cf181 | 0049d7959ff872e2ddf6ea3ce83b6c26512425a6 | /advtempproject/advtempproject/wsgi.py | 0eb3d019ab04a1ac8d64047a343bd0a726103e5c | []
| no_license | srazor09/Django_projects | 9806ab25d966af780cdabe652a1792220c7806a8 | 8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93 | refs/heads/master | 2023-04-18T02:13:15.993393 | 2021-05-04T20:34:05 | 2021-05-04T20:34:05 | 364,379,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for advtempproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advtempproject.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
3707942092f8a2717e1e159fd36fc8769e28c5ee | 5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e | /py/topcoder/TCCC 2003 Semifinals 2/TicSolver.py | a7805ff035c64217729de5ff4c0bd9d4ebc789e0 | [
"MIT"
]
| permissive | shhuan/algorithms | 36d70f1ab23dab881bf1a15573fbca7b2a3f4235 | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | refs/heads/master | 2021-05-07T14:21:15.362588 | 2017-11-07T08:20:16 | 2017-11-07T08:20:16 | 109,799,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class TicSolver:
def whoWins(self, board):
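        # Solution body left unimplemented in this snippet; the generated
        # harness below feeds it the cases from TicSolver.sample.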
return ""
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
try:
_t = type(expected)
received = _t(received)
if _t == list or _t == tuple:
if len(expected) != len(received): return False
return all(tc_equal(e, r) for (e, r) in zip(expected, received))
elif _t == float:
eps = 1e-9
d = abs(received - expected)
return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
else:
return expected == received
except:
return False
def pretty_str(x):
if type(x) == str:
return '"%s"' % x
elif type(x) == tuple:
return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )
else:
return str(x)
def do_test(board, __expected):
startTime = time.time()
instance = TicSolver()
exception = None
try:
__result = instance.whoWins(board);
except:
import traceback
exception = traceback.format_exc()
elapsed = time.time() - startTime # in sec
if exception is not None:
sys.stdout.write("RUNTIME ERROR: \n")
sys.stdout.write(exception + "\n")
return 0
if tc_equal(__expected, __result):
sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
return 1
else:
sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
return 0
def run_tests():
sys.stdout.write("TicSolver (500 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("TicSolver.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
board = []
for i in range(0, int(f.readline())):
board.append(f.readline().rstrip())
board = tuple(board)
f.readline()
__answer = f.readline().rstrip()
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(board, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1430750694
PT, TT = (T / 60.0, 75.0)
points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
| [
"[email protected]"
]
| |
f095c17c392697ec5fb7da951dd4309508663a2f | c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8 | /Hacker Rank/Day3_04_02_20.py | c7cd53f8193dae0cfdc503af27bf0d8b26745ef5 | []
| no_license | Silentsoul04/FTSP_2020 | db0dae6cd9c371f3daa9219f86520dfa66348236 | 7e603af918da2bcfe4949a4cf5a33107c837894f | refs/heads/master | 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 22:00:31 2020
@author: Rajesh
"""
def swap_case(s):
return s.swapcase()
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
#########################
a = "this is a string"
b = a.split(" ") # a is converted to a list of strings.
print(b)
c= "-".join(b)
print(c)
#####################
def b(a):
c= a.split()
d = "-".join(c)
return d
if __name__ == '__main__':
line = input()
result = b(line)
print(result)
######################
def print_full_name(a, b):
print("Hello" , a , b+"! You just delved into python.")
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
##############################
def mutate_string(string, position, character):
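    # strings are immutable, so rebuild the value through a mutable list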
l = list(string)
l[position] = character
string = ''.join(l)
return string
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
| [
"[email protected]"
]
| |
175b341a56c39c15bc473eabefdea8436aba734f | 09d79c3509252cfccac35bb28de9a0379094823a | /alx/movies/migrations/0002_auto_20201123_1045.py | 1ac4f1ab4103dc7788ff628ea113fe1d93025510 | []
| no_license | marianwitkowski/python2311 | 73ad491016cd6d0010d0203db43aca2c6debe0ad | 9bbeca3fb6d8658a1321ab099ff2102cd7de76e0 | refs/heads/master | 2023-01-22T13:13:56.695680 | 2020-12-02T14:58:15 | 2020-12-02T14:58:15 | 315,350,865 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.1.3 on 2020-11-23 09:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('movies', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='movie',
options={'verbose_name': 'Film', 'verbose_name_plural': 'Filmy'},
),
]
| [
"[email protected]"
]
| |
9252178bd560c85b23332610a4299b0ec0f71f57 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /q4bBcq5NET4CH5Rcb_16.py | 5f42fed2a73573979ea8acc56462e2f23301b0ed | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
def jay_and_bob(txt):
a={"half":"14 grams","quarter":"7 grams","eighth":"3.5 grams","sixteenth":"1.75 grams"}
return a[txt]
| [
"[email protected]"
]
| |
8c5fb8bc6094cee02d62818ed1fdba969117d0ea | 57235e5fbd29dc5e0b3f24649e15a48935edd65f | /boa3_test/test_sc/built_in_methods_test/IsInstanceListLiteral.py | 11e0d91468bec39a5306a5e643f06dec8b69c858 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | DanPopa46/neo3-boa | ae75543bdc4e0aeadf45578b6b5e4c45b9253557 | e4ef340744b5bd25ade26f847eac50789b97f3e9 | refs/heads/development | 2023-04-01T19:25:08.216180 | 2021-04-15T17:45:38 | 2021-04-15T17:45:38 | 358,663,228 | 0 | 0 | Apache-2.0 | 2021-04-16T16:46:46 | 2021-04-16T16:46:31 | null | UTF-8 | Python | false | false | 94 | py | from boa3.builtin import public
@public
def Main() -> bool:
return isinstance([], list)
| [
"[email protected]"
]
| |
f0f7d898a452de3ce1b9a7940f8dcd61c38c6500 | 18f8abb90efece37949f5b5758c7752b1602fb12 | /py/django_tools/django-haystack/tests/simple_tests/tests/simple_backend.py | d9b5120d942eb0f05a4fcbd1769c58de0da181cd | [
"BSD-3-Clause",
"MIT"
]
| permissive | marceltoben/evandrix.github.com | caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516 | abc3fbfb34f791f84e9a9d4dc522966421778ab2 | refs/heads/master | 2021-08-02T06:18:12.953567 | 2011-08-23T16:49:33 | 2011-08-23T16:49:33 | 2,267,457 | 3 | 5 | null | 2021-07-28T11:39:25 | 2011-08-25T11:18:56 | C | UTF-8 | Python | false | false | 5,799 | py | from datetime import date
from django.conf import settings
from django.test import TestCase
from haystack import connections, connection_router
from haystack import indexes
from haystack.query import SearchQuerySet
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel
from core.tests.mocks import MockSearchResult
class SimpleMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class SimpleSearchBackendTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SimpleSearchBackendTestCase, self).setUp()
self.backend = connections['default'].get_backend()
self.index = connections['default'].get_unified_index().get_index(MockModel)
self.sample_objs = MockModel.objects.all()
def test_update(self):
self.backend.update(self.index, self.sample_objs)
def test_remove(self):
self.backend.remove(self.sample_objs[0])
def test_clear(self):
self.backend.clear()
def test_search(self):
# No query string should always yield zero results.
self.assertEqual(self.backend.search(u''), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'*')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'*')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'daniel')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'daniel')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'should be a string')['hits'], 1)
self.assertEqual([result.pk for result in self.backend.search(u'should be a string')['results']], [8])
# Ensure the results are ``SearchResult`` instances...
self.assertEqual(self.backend.search(u'should be a string')['results'][0].score, 0)
self.assertEqual(self.backend.search(u'index document')['hits'], 6)
self.assertEqual([result.pk for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
# Regression-ville
self.assertEqual([result.object.id for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
self.assertEqual(self.backend.search(u'index document')['results'][0].model, MockModel)
# No support for spelling suggestions
self.assertEqual(self.backend.search(u'Indx')['hits'], 0)
self.assertFalse(self.backend.search(u'Indx').get('spelling_suggestion'))
# No support for facets
self.assertEqual(self.backend.search(u'', facets=['name']), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', facets=['name'])['hits'], 23)
self.assertEqual(self.backend.search(u'', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})['hits'], 23)
self.assertEqual(self.backend.search(u'', query_facets={'name': '[* TO e]'}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', query_facets={'name': '[* TO e]'})['hits'], 23)
self.assertFalse(self.backend.search(u'').get('facets'))
self.assertFalse(self.backend.search(u'daniel').get('facets'))
        # Note that only textual fields are supported.
self.assertEqual(self.backend.search(u'2009-06-18')['hits'], 0)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.backend.search(u'index document', result_class=MockSearchResult)['results'][0], MockSearchResult))
def test_more_like_this(self):
self.backend.update(self.index, self.sample_objs)
self.assertEqual(self.backend.search(u'*')['hits'], 23)
# Unsupported by 'simple'. Should see empty results.
self.assertEqual(self.backend.more_like_this(self.sample_objs[0])['hits'], 0)
class LiveSimpleSearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveSimpleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SimpleMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sample_objs = MockModel.objects.all()
self.sqs = SearchQuerySet()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
settings.DEBUG = self.old_debug
super(LiveSimpleSearchQuerySetTestCase, self).tearDown()
def test_general_queries(self):
# For now, just make sure these don't throw an exception.
# They won't work until the simple backend is improved.
self.assertTrue(len(self.sqs.auto_query('daniel')) > 0)
self.assertTrue(len(self.sqs.filter(text='index')) > 0)
self.assertTrue(len(self.sqs.exclude(name='daniel')) > 0)
self.assertTrue(len(self.sqs.order_by('-pub_date')) > 0)
| [
"[email protected]"
]
| |
1031decef22a5f8e9fa6d0446887620f1a17bbd6 | cb95b3a2714f003e76c5e1db1d3e4726f87f14d8 | /pstests/launch_schevers.py | 50881808378a6bad2b948300e21df85af51ae09c | [
"Apache-2.0"
]
| permissive | DMALab/Het | 5aaa9fda1b8c77c0db24a477fe1eccd9665a9fe0 | 81b7e9f0f593108db969fc46a1af3df74b825230 | refs/heads/main | 2023-03-30T13:22:03.085283 | 2021-04-04T05:31:43 | 2021-04-04T05:31:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | from athena import gpu_ops as ad
import os
import sys
import yaml
import multiprocessing
import signal
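# Expected shape of the settings YAML, based on how the dict is consumed in
# main() below (a sketch -- apart from the special 'shared' and 'sched' keys
# and DMLC_ROLE, the exact key names and DMLC_* variables are assumptions;
# every other top-level key is launched as a server process):
#
#   shared: { ...common settings... }
#   sched:
#     DMLC_ROLE: scheduler
#   server0:
#     DMLC_ROLE: server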
def main():
def start_scheduler(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "scheduler"
print('Scheduler starts...')
ad.scheduler_init()
ad.scheduler_finish()
def start_server(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "server"
print('Server starts...')
ad.server_init()
ad.server_finish()
def signal_handler(sig, frame):
print("SIGINT signal caught, stop Training")
for proc in server_procs:
proc.kill()
sched_proc.kill()
exit(0)
if len(sys.argv) == 1:
settings = yaml.load(open('./settings/dist_s1.yml').read(), Loader=yaml.FullLoader)
else:
file_path = sys.argv[1]
suffix = file_path.split('.')[-1]
if suffix == 'yml':
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
else:
assert False, 'File type not supported.'
print('Scheduler and servers settings:')
print(settings)
server_procs = []
for key, value in settings.items():
if key == 'shared':
continue
elif key == 'sched':
sched_proc = multiprocessing.Process(target=start_scheduler, args=(value,))
sched_proc.start()
else:
server_procs.append(multiprocessing.Process(target=start_server, args=(value,)))
server_procs[-1].start()
signal.signal(signal.SIGINT, signal_handler)
for proc in server_procs:
proc.join()
sched_proc.join()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
dcff318e512f694dc31b8f2936e54ae11ce40712 | 638fdc6e95bee246cd8784a3a442bda584295d77 | /prj/main/management/commands/import_step.py | 79b97b35a8bf7a0f1d0f2e01c90b812ded017f8c | []
| no_license | zdimon/loyer | ac00faf94c4277eb77d6cdc51e8bf99ef2f7ecb2 | 6df6bc76599bc0fab9ef2bdb600cc3b92daf38c1 | refs/heads/master | 2020-03-27T16:03:36.358793 | 2019-04-18T10:05:18 | 2019-04-18T10:05:18 | 146,757,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,631 | py | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
import json
from prj.settings import BASE_DIR
import os
from main.models import *
import requests
from bs4 import BeautifulSoup
import os.path
import time
from main.tools import headers
from main.tools import getSessionId, readSessionId, readPHPSessionId
import sys
import random
from datetime import datetime, timedelta
from optparse import make_option
from header import copy_cookie, copy_header
def randomDelay():
sl = random.randint(3,6)
print 'sleeping %s' % sl
time.sleep(sl)
def getCountDocs(txt):
import re
if txt.find('Найдено дел: 0')>0:
return 0
#print txt
result = re.search('Найдено дел:(.*)\(отображаются', txt)
cnt = result.group(1)
cnt = cnt.replace(' ','')
#txt = txt.replace(' ','')
return int(cnt)
def getFiles(d):
soup = BeautifulSoup(d.doc_html, 'html.parser')
divs = soup.findAll('td')
#import pdb; pdb.set_trace()
for div in divs:
try:
a = div.find('a')
url = 'http://online.zakon.kz/sud/'+a['href']
txt = makeRequest(url)
f = Files()
f.document = d
f.html = txt
f.save()
f.clearHtml()
print 'File saved!!!'
except Exception, e:
pass
#print str(e)
def makeDateFormat(dt):
tmp = dt.split('-')
return '%s-%s-%s' % (tmp[2],tmp[1],tmp[0])
def makeRequest(url):
    try:
        txt = requests.get(url, headers=copy_header, cookies=copy_cookie).text
        return txt
    except requests.exceptions.ReadTimeout:
        print 'Http Error: timeout, retrying'
        randomDelay()
        return makeRequest(url)
    except:
        randomDelay()
        print 'Repeat request'
        # return the retried result instead of silently dropping it
        return makeRequest(url)
def clear(txt):
return txt.replace('\n','').replace('\t','')
def getDate():
f = open('date','r')
date = f.read()
f.close()
return date
def addDay(date):
    dt = datetime.strptime(date, '%d-%m-%Y')
    # timedelta is imported directly above; datetime.timedelta would raise
    return dt + timedelta(days=1)
def parseRow(row):
tds = row.findAll("td")
if len(tds) == 4:
out = {}
try:
out['date'] = clear(tds[2].text)
except:
pass
try:
name = tds[3].find('a')
out['name'] = clear(name.text)
out['number'] = name.find('b').text
except Exception, e:
print str(e)
try:
out['href'] = name['href']
except Exception, e:
print str(e)
return out
else:
return False
def savePage(html):
soup = BeautifulSoup(html, 'html.parser')
for tr in soup.findAll("tr",{"class": "row"}):
data = parseRow(tr)
if data:
#print 'Saving %s' % page
c = Documents()
c.href = data['href']
c.uid = data['number']
c.title = data['name']
c.date = makeDateFormat(data['date'])
try:
c.save()
print 'Done!!! %s' % c.uid
except Exception, e:
print 'Error saving. Duplicate!'
print str(e)
#else:
# import pdb; pdb.set_trace()
cnt = Documents.objects.all().count()
print 'Total: %s' % cnt
def getListTmp(date,page=1):
    url = 'http://online.zakon.kz/sud//search?check=1&sort=date_desc&region=-&court=-&date_start=%s&date_end=%s&type=-&files=1&number=&sides=&sides_phrase=1&judge=&context=&context_phrase=1&page=%s' % (date,date,page)
    # makeRequest already sends the shared cookies/headers and returns the page text
    return makeRequest(url)
#def test():
# def int():
def gidrateList(dict):
out = []
for i in dict:
if len(i['content']) == 0:
url = i['url']
i['content'] = makeRequest(url)
print 'gidrating %s' % i['page']
print 'url: %s' % url;
randomDelay()
out.append(i)
return out
def getList(date):
out = []
'''
f = open('test/1.html', 'r')
txt = f.read()
f.close()
'''
params = {
'sort': 'date_desc',
'date_start': date,
'date_end': date,
'type': '-',
'files': '1',
'sides_phrase': '1',
'context_phrase': '1',
'page': '1'
}
url = 'http://online.zakon.kz/sud//search'
url = 'http://online.zakon.kz/sud/search?date_start=%s&date_end=%s&sides_phrase=1&context_phrase=1&files=1&sort=date_desc' % (date,date)
txt = makeRequest(url)
#print url
f = open('log.html', 'w')
f.write(txt.encode('utf-8'))
f.close()
cnt = getCountDocs(txt.encode('utf-8'))
l = Log()
l.date = makeDateFormat(date)
l.cnt = cnt
try:
l.save()
l.update()
except:
print 'Log for %s is exist' % date
if cnt == 0:
return False
if cnt<=30:
cp = 1
elif cnt%30>0:
cp = (cnt/30)+1
else:
cp = cnt/30
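    # i.e. cp = ceil(cnt / 30): the search results come 30 cases per page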
for p in range(1,cp+1):
url = 'http://online.zakon.kz/sud//search?&date_start=%s&date_end=%s&sides_phrase=1&context_phrase=1&files=1&page=%s&sort=date_desc' % (date,date, p)
if p == 1:
out.append({
"url": url,
"page": p,
"content": txt
})
else:
out.append({
"url": url,
"page": p,
"content": ''
})
'''
soup = BeautifulSoup(txt, 'html.parser')
ul = soup.find('ul',{"class": "pagination"})
cnt = 1
try:
lis = ul.findAll('li')
except:
print 'No data for date %s' % date
return False
for li in lis:
try:
if cnt == 1:
out.append({
"url": li.find('a')['href'],
"content": txt
})
else:
out.append({
"url": li.find('a')['href'],
"content": ''
})
cnt = cnt+1
except:
pass
'''
return out
def loadDocs(date):
print 'Loading documents'
for d in Documents.objects.filter(date=date):
if d.is_document_downloaded == False:
print 'case %s' % d.uid
url = 'http://online.zakon.kz/sud//'+d.href
print 'Loading %s' % url
txt = makeRequest(url)
d.doc_html = txt
d.is_document_downloaded = True
d.save()
d.clearDocHtml()
print 'Saving done %s!!!' % d.uid
randomDelay()
else:
print 'Already downloaded!!!'
#getFiles(d)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-s', dest='start')
parser.add_argument('-e', dest='end')
def handle(self, *args, **options):
start_date = options["start"]
end_date = options["end"]
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
date_generated = [end_date - timedelta(days=x) for x in range(0, (end_date-start_date).days)]
print 'Start step importing from %s to %s' % (start_date, end_date)
#print date_generated
#sys.exit()
#dt = getDate()
#dt = '29-08-2018'
#print 'Process %s' % dt
add = []
for l in Log.objects.all():
if l.cnt!=l.fact:
add.append(l.date)
#print add
#sys.exit()
print date_generated
#sys.exit()
#for date in add:
for date in date_generated:
try:
l = Log.objects.get(date=date)
if l.cnt==l.fact:
print 'Date %s is full of data!!!!' % l.date
continue
except:
pass
            ## Delete all documents already stored for this date
Documents.objects.filter(date=date).delete()
dt = date.strftime("%d-%m-%Y")
lst = getList(dt)
if lst:
for p in gidrateList(lst):
try:
savePage(p['content'])
except:
pass
l = Log.objects.get(date=date)
l.update()
#loadDocs(makeDateFormat(dt))
#for url in getListMock(dt):
# print 'load %s' % url
| [
"[email protected]"
]
| |
e538aa28b1bd9e8f0574539f2c5075b7eea00ec2 | ba962c2441572ba45ff97a97bb713eb8a603a269 | /lunchmap/models.py | 2c9b100ab61729344379c83188a3554f131dd623 | []
| no_license | melonpan777/my-first-blog | 8158104ba2b3c97a8e6350ac57aac77edf85be26 | 9ff5eee69523d8fbbbd004e566090ea715b043d5 | refs/heads/master | 2020-06-04T13:58:16.704685 | 2019-06-15T11:14:00 | 2019-06-15T11:14:00 | 192,051,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from django.db import models
from django.urls import reverse
class Category(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Shop(models.Model):
name = models.CharField(max_length=255)
address = models.CharField(max_length=255)
memo = models.CharField(max_length=255, null=True)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
category = models.ForeignKey(
Category,
on_delete=models.PROTECT,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('lunchmap:detail', kwargs={'pk': self.pk})
| [
"[email protected]"
]
| |
f6b131bbddadded5e915501ce5a719b1e74ce352 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/cloudformation/checks/resource/aws/APIGatewayXray.py | 79b7ec85c6b5ac40b0aaa6c2c422267e4a656db6 | [
"Apache-2.0"
]
| permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 710 | py | from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class APIGatewayXray(BaseResourceValueCheck):
def __init__(self):
name = "Ensure API Gateway has X-Ray Tracing enabled"
id = "CKV_AWS_73"
supported_resources = ['AWS::ApiGateway::Stage', "AWS::Serverless::Api"]
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'Properties/TracingEnabled'
def get_expected_value(self):
return True
check = APIGatewayXray()
| [
"[email protected]"
]
| |
a809cf5f7c25bbfabfc4c575d1a07b237ec8bc9c | 018d3ade7ce3c9797ec53e5b29e93c343cbd41e3 | /test/test_dynamic_shapes.py | 0d421b04008d224758407fdd0b571cdfd72af613 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | aiot-tech/pytorch | 016574055a306f58a46308e4971cf180ffc92e4d | 46730aec35ee047b92b288e0366da0f7e993e5ae | refs/heads/master | 2022-11-18T14:01:22.576441 | 2022-11-04T23:11:17 | 2022-11-05T05:42:07 | 102,860,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,297 | py | # -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
from torch._C import _disabled_torch_function_impl
import torch.fx
import torch.nn.functional as F
from torch.testing._internal.common_utils import run_tests, TestCase, skipIfTorchDynamo, \
IS_WINDOWS, parametrize, instantiate_parametrized_tests
import unittest
import torch
import operator
import itertools
import random
import contextlib
import math
import builtins
import atexit
import io
import os
from torch.utils._pytree import tree_map
from torch.fx.experimental import symbolic_shapes
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.experimental.symbolic_shapes import ShapeEnv, sym_float, guard_int, SymNode
from torch.utils._python_dispatch import TorchDispatchMode
from torch import SymInt
aten = torch.ops.aten
try:
import sympy
# TODO(jansel): these tests fail on windows
HAS_SYMPY = not IS_WINDOWS
except ImportError:
HAS_SYMPY = False
skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy")
meta_funcs = {}
def register_meta(op):
def decorator(f):
def add_func(op):
meta_funcs[op] = f
tree_map(add_func, op)
return f
return decorator
@register_meta([aten.add.Tensor, aten.sub.Tensor])
def binary_meta(a, b):
return a.new_empty(a.shape)
@register_meta(aten.cat.default)
def cat_meta(tensors, dim=0):
concat_length = 0
shape = tensors[0].shape
for tensor in tensors:
for idx, (common_length, length) in enumerate(zip(shape, tensor.shape)):
if idx == dim:
concat_length = concat_length + length
else:
assert length == common_length
new_shape = list(shape)
new_shape[dim] = concat_length
return tensors[0].new_empty(new_shape)
@register_meta([aten.narrow_copy.default])
def narrow_copy_symint_meta(a, dim, start, length, **kwargs):
shape = []
for i, x in enumerate(a.shape):
if i == dim:
shape.append(length)
else:
shape.append(x)
return a.new_empty(tuple(shape))
@register_meta([aten.expand.default])
def expand_symint_meta(a, size, implicit=False):
return a.new_empty(size)
def create_contiguous(shape):
    # row-major contiguous strides: stride[i] = prod(shape[i + 1:]),
    # so accumulate over the trailing dims, not the leading ones
    strides = [1]
    for dim in reversed(shape[1:]):
        strides.append(dim * strides[-1])
    return list(reversed(strides))
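# e.g. create_contiguous([5, 4, 3]) -> [12, 3, 1]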
class FakeSymbolicTensor(torch.Tensor):
@staticmethod
def __new__(cls, sym_shape, sym_strides, dtype, layout, requires_grad, device, storage_offset=0):
# TODO: this is wrong in general
sym_stride = create_contiguous(sym_shape)
r = torch.Tensor._make_wrapper_subclass(
cls, sym_shape,
sym_stride, storage_offset,
dtype=dtype, layout=layout, requires_grad=requires_grad,
device=device,
)
return r
__torch_function__ = _disabled_torch_function_impl
def new_empty(self, shape):
return FakeSymbolicTensor(shape, None, self.dtype, self.layout, self.requires_grad, self.device)
@classmethod
def __torch_dispatch__(cls, func_overload, types, args=(), kwargs=None):
if func_overload in meta_funcs:
return meta_funcs[func_overload](*args, **kwargs)
if func_overload == torch.ops.aten.new_empty.default:
self = args[0]
shape = args[1]
return FakeSymbolicTensor(shape, self.stride(), self.dtype, self.layout, self.requires_grad, self.device)
raise RuntimeError(f"operator {func_overload} not supported")
def create_symbolic_tensor(name, arg, shape_env, storage_offset=0):
sym_shapes, sym_strides = shape_env.create_symbolic_sizes_strides(arg)
return FakeSymbolicTensor(sym_shapes, sym_strides, arg.dtype, arg.layout, arg.requires_grad, arg.device, storage_offset)
def create_symint(shape_env, i):
return shape_env.create_symintnode(shape_env.create_symbol(i))
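# e.g. create_symint(ShapeEnv(), 5) yields a SymInt that compares equal to 5
# while recording guards on the ShapeEnv instead of collapsing to a plain int.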
@skipIfTorchDynamo("Creating ShapeEnv fails for confusing reasons (also we never expect dynamo to see code like this)")
class TestPySymInt(TestCase):
@skipIfNoSympy
def test_arith_ops(self):
shape_env = ShapeEnv()
symints = []
for i in range(2, 5):
symints.append((i, create_symint(shape_env, i)))
ops = [operator.add, operator.sub, operator.floordiv, operator.mul, operator.mod]
for op in ops:
for args in itertools.permutations(symints, 2):
if not isinstance(args[0][1], int) and ((op != operator.mod or op != operator.floordiv) and args[1][0] != 0):
self.assertTrue(op(args[0][1], args[1][1]) == op(args[0][0], args[1][0]))
@skipIfNoSympy
def test_reverse_arith_ops(self):
shape_env = ShapeEnv()
a = create_symint(shape_env, 2)
self.assertTrue(5 // a == 5 // 2)
a = create_symint(shape_env, 2)
self.assertTrue(5 * a == 5 * 2)
@skipIfNoSympy
def test_roundtrip(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
self.assertTrue(not isinstance(x.shape[0], SymNode))
self.assertTrue(isinstance(x.shape[0], SymInt))
self.assertTrue(x.shape[0] == 5)
self.assertTrue(x.shape[1] == 4)
        self.assertTrue(x.shape[2] == 3)
        self.assertTrue(x.size()[0] == 5)
        self.assertTrue(x.size()[1] == 4)
self.assertTrue(isinstance(x.size()[1], SymInt))
self.assertTrue(x.size()[2] == 3)
self.assertTrue(x.size(0) == 5)
self.assertTrue(x.size(1) == 4)
self.assertTrue(x.size(2) == 3)
self.assertTrue(isinstance(x.size(2), SymInt))
offset = create_symint(shape_env, 2)
y = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env, offset)
self.assertTrue(isinstance(y.storage_offset(), SymInt))
self.assertTrue(y.storage_offset() == 2)
offset = 2
z = create_symbolic_tensor("z", torch.randn(5, 4, 3), shape_env, offset)
self.assertTrue(isinstance(z.storage_offset(), int))
self.assertTrue(z.storage_offset() == 2)
@skipIfNoSympy
def test_binary(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(5, 4, 3), shape_env)
z = x + y
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# broadcasting
y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
z = x + y
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
@skipIfNoSympy
def test_symint_args(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(5, 4, 1), shape_env)
LAST_DIM = 2
z = x.narrow_copy(LAST_DIM, 0, y.shape[LAST_DIM])
self.assertTrue(z.shape[2] == y.shape[2])
# arithmetic expr with two symints
z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - y.shape[LAST_DIM])
self.assertTrue(z.shape[2] == 2)
# arithmetic expr with a symint and python int
z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - 1)
self.assertTrue(z.shape[2] == 2)
@skipIfNoSympy
def test_symint_vargs(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
# varargs
z = y.expand(x.shape[0], y.shape[1], x.shape[2])
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# shape list
z = y.expand((x.shape[0], y.shape[1], x.shape[2]))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints
z = y.expand(x.shape[0], y.shape[1], 3)
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints in a list
z = y.expand((x.shape[0], y.shape[1], 3))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints
z = y.expand(5, y.shape[1], x.shape[2])
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python ints and symints in a list
z = y.expand((5, y.shape[1], x.shape[2]))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
z = y.expand((y.shape[1],))
z = y.expand(y.shape[1])
@skipIfNoSympy
def test_stride(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 5), shape_env)
self.assertIsInstance(x.stride()[0], SymInt)
@skipIfNoSympy
def test_size_expressions(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
expand_x = x.expand(x.shape[0], x.shape[0])
if expand_x.shape[0] > 3:
result = expand_x + expand_x
else:
result = expand_x + expand_x
gt_op = shape_env.guards[0][0]
self.assertTrue(isinstance(gt_op, sympy.core.relational.StrictGreaterThan))
        self.assertEqual(str(x.shape[0]), str(gt_op.args[0]))
        self.assertEqual(str(expand_x.shape[1]), str(x.shape[0]))
        self.assertEqual(str(expand_x.shape[1]), str(result.shape[0]))
@skipIfNoSympy
def test_int_to_float(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
r = sym_float(x.shape[0])
self.assertIsInstance(r, torch.SymFloat, msg=type(r))
@skipIfNoSympy
def test_aten_ops(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
torch.ops.aten.narrow_copy.default(x, 0, 0, x.shape[0])
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
torch.ops.aten.expand.default(x, [x.shape[0], x.shape[1], x.shape[2]])
def test_fx_trace_intlist(self):
class CustomModule(torch.nn.Module):
def forward(self, x):
bs, c, h, w = x.shape
return F.pad(x, (0, w % 2, 0, h % 2, 0, 0))
m = CustomModule()
x = torch.rand(1, 3, 4, 4)
# should not TypeError: pad(): argument 'pad' (position 2) must be
# tuple of ints, not tuple
torch.fx.symbolic_trace(m)
@skipIfNoSympy
def test_meta_symint(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
r = torch.empty(a0, device='meta')
self.assertIsInstance(r.shape[0], SymInt)
@skipIfNoSympy
def test_guard_int(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
self.assertEqual(guard_int(a0), 2)
self.assertEqual(str(shape_env.guards[0][0]), "Eq(s0, 2)")
@skipIfNoSympy
def test_int_conversion(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
self.assertRaisesRegex(RuntimeError, "Trying to extract", lambda: int(a0))
@skipIfNoSympy
def test_symint_as_scalar(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
sym_int_encountered = False
class TestSymInt(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
assert func == torch.ops.aten.add.Tensor
nonlocal sym_int_encountered
# WARNING: do not do identity tests on the outer
# SymInt/SymFloat, they are NOT STABLE
sym_int_encountered = kwargs["alpha"].node is a0.node
kwargs["alpha"] = 0
return func(*args)
x = torch.rand([4, 4])
with TestSymInt():
y = torch.add(x, x, alpha=a0)
self.assertTrue(sym_int_encountered)
@skipIfNoSympy
@unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
def test_print_readable_with_symints(self, mock_stdout):
def f(a, b):
dim0 = a.shape[0] + b.shape[0]
dim1 = a.shape[1] + b.shape[1]
d = a.new_empty(dim0, dim1)
d = torch.ops.aten.native_dropout(d, 0.5, train=True)
return d
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(5, 3), torch.randn(4, 3))
fx_g.print_readable()
self.assertExpectedInline(mock_stdout.getvalue().strip(), """\
class f(torch.nn.Module):
def forward(self, a_1: f32[s0, s1], b_1: f32[s2, s1]):
# No stacktrace found for following nodes
sym_size: Sym(s0) = torch.ops.aten.sym_size(a_1, 0)
sym_size_1: Sym(s2) = torch.ops.aten.sym_size(b_1, 0)
add: Sym(s0 + s2) = sym_size + sym_size_1; sym_size = sym_size_1 = None
sym_size_2: Sym(s1) = torch.ops.aten.sym_size(a_1, 1)
sym_size_3: Sym(s1) = torch.ops.aten.sym_size(b_1, 1); b_1 = None
add_1: Sym(2*s1) = sym_size_2 + sym_size_3; sym_size_2 = sym_size_3 = None
new_empty: f32[s0 + s2, 2*s1] = torch.ops.aten.new_empty.default(a_1, [add, add_1], dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False); a_1 = add = add_1 = None
native_dropout = torch.ops.aten.native_dropout.default(new_empty, 0.5, True); new_empty = None
getitem: f32[s0 + s2, 2*s1] = native_dropout[0]
getitem_1: b8[s0 + s2, 2*s1] = native_dropout[1]; native_dropout = None
return (getitem, getitem_1)""") # noqa: B950
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_dynamic_shapes.py -k TestSymNumberMagicMethods`.
# 2. Given the printed xfail list, add them to the set expected_failure_sym_magic_methods.
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_failed = []
def print_seen():
out = []
for key, reason in seen_failed:
# Make sure the generated line is lint clean
out.append(f" {key}, # {reason}"[:120])
print("expected_failure_sym_magic_methods = {")
print("\n".join(out))
print("}")
if COLLECT_EXPECT:
atexit.register(print_seen)
expected_failure_sym_magic_methods = {
('floordiv', 'SymInt', 'float'), # Cannot convert complex to float
('floordiv', 'int', 'SymFloat'), # unsupported operand type(s) for //: 'int' and 'SymFloat'
('floordiv', 'SymInt', 'SymFloat'), # Cannot convert complex to float
('mod', 'int', 'SymFloat'), # unsupported operand type(s) for %: 'int' and 'SymFloat'
('sym_int', 'int', 'float'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'float'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'SymFloat'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'SymFloat'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'int'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'int'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'SymInt'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'SymInt'), # sym_int() takes 1 positional argument but 2 were given
}
@skipIfTorchDynamo("Creating ShapeEnv fails for confusing reasons (also we never expect dynamo to see code like this)")
class TestSymNumberMagicMethods(TestCase):
def _do_test(self, fn, inp1, inp2, shape_env, is_unary_fn):
# Helper function
seed_node = (create_symint(shape_env, 1) / 1.).get_pyobj()
def get_sym_inp(inp):
if isinstance(inp, int):
return torch.SymInt(seed_node.to_node(inp))
else:
return torch.SymFloat(seed_node.to_node(inp))
def maybe_xfail(inp1, inp2):
key = (fn, type(inp1).__name__, type(inp2).__name__)
if COLLECT_EXPECT:
@contextlib.contextmanager
def context():
try:
yield
except TypeError as e:
seen_failed.append((key, str(e)))
return context()
if key in expected_failure_sym_magic_methods:
return self.assertRaises(TypeError)
else:
return contextlib.nullcontext()
# These functions might return plain int/float
has_valid_downcast = fn in ["min", "max"]
if fn in symbolic_shapes.magic_methods_on_builtins:
lambda_apply = getattr(builtins, fn)
elif fn in symbolic_shapes.magic_methods_on_math:
lambda_apply = getattr(math, fn)
elif fn in symbolic_shapes.magic_methods_on_submodule:
lambda_apply = getattr(symbolic_shapes, fn)
else:
lambda_apply = getattr(operator, fn)
if fn in symbolic_shapes.always_float_magic_methods:
tp = "float"
elif fn in symbolic_shapes.always_int_magic_methods:
tp = "int"
elif is_unary_fn:
tp = "float" if isinstance(inp1, float) else "int"
else:
tp = "float" if any(isinstance(i, float) for i in [inp1, inp2]) else "int"
def guard_fn(v):
try:
if fn in symbolic_shapes.always_bool_magic_methods:
return bool(v)
else:
return getattr(v.node, f"guard_{tp}")("", 0)
except Exception as e:
if has_valid_downcast:
return v
else:
raise e
# Get reference result
with maybe_xfail(inp1, inp2):
if is_unary_fn:
ref_out = lambda_apply(inp1)
else:
ref_out = lambda_apply(inp1, inp2)
# Symified first arg
sym_inp1 = get_sym_inp(inp1)
with maybe_xfail(sym_inp1, inp2):
if is_unary_fn:
out = lambda_apply(sym_inp1)
else:
out = lambda_apply(sym_inp1, inp2)
self.assertEqual(guard_fn(out), ref_out)
if is_unary_fn:
return
# Symified second arg
sym_inp2 = get_sym_inp(inp2)
with maybe_xfail(inp1, sym_inp2):
out = lambda_apply(inp1, sym_inp2)
self.assertEqual(guard_fn(out), ref_out)
# Symified both args
with maybe_xfail(sym_inp1, sym_inp2):
out = lambda_apply(sym_inp1, sym_inp2)
self.assertEqual(guard_fn(out), ref_out)
@parametrize("fn", list(symbolic_shapes.magic_methods.keys()))
@parametrize("first_type", ["int", "float"])
@parametrize("second_type", ["int", "float"])
def test_method(self, fn, first_type, second_type):
if first_type == "float" and fn not in symbolic_shapes.float_magic_methods:
self.skipTest(f"{fn} is not a float magic method")
is_unary_fn = fn in symbolic_shapes.unary_magic_methods
# Second argument is ignored for unary function. So only run for one type
if is_unary_fn and second_type == "float":
self.skipTest(f"{fn} is unary and already tested")
# We could pass int/float directly for types but then the
# mangled test name is bad
inp1 = random.random() * 2.5
if first_type == "int":
inp1 = int(inp1)
inp2 = random.random() * 2.5
if second_type == "int":
inp2 = int(inp2)
shape_env = ShapeEnv()
self._do_test(fn, inp1, inp2, shape_env, is_unary_fn)
instantiate_parametrized_tests(TestSymNumberMagicMethods)
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
]
| |
fd9e1af03b971a1db1d6893bbd1eb4399fbcb3d6 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /1720. 解码异或后的数组.py | 62dc31fa20f8ded1e4528d692e236b11be60047e | []
| no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | """
An unknown integer array arr consists of n non-negative integers.
It is encoded into an integer array encoded of length n - 1, where encoded[i] = arr[i] XOR arr[i + 1]. For example, arr = [1,0,2,1] encodes to encoded = [1,2,3].
You are given the encoded array encoded and the first element of the original array arr (first = arr[0]).
Decode and return the original array arr. It can be shown that the answer exists and is unique.
Example 1:
Input: encoded = [1,2,3], first = 1
Output: [1,0,2,1]
Explanation: if arr = [1,0,2,1], then first = 1 and encoded = [1 XOR 0, 0 XOR 2, 2 XOR 1] = [1,2,3]
Example 2:
Input: encoded = [6,2,7,3], first = 4
Output: [4,2,0,7,4]
"""
def decode(encoded, first):
res = [first]
for i in encoded:
res.append(first ^ i)
first = res[-1]
return res
print(decode(encoded = [1,2,3], first = 1))
print(decode(encoded = [6,2,7,3], first = 4))
| [
"[email protected]"
]
| |
345bcde7408a2d774ec98727350693d566242b99 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /Geometry/CMSCommonData/python/cmsExtendedGeometry2015XML_RPC2Gap_cfi.py | 400cbd05035b4ff5689fd24ee8fd31feea7fbe54 | []
| permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 15,587 | py | import FWCore.ParameterSet.Config as cms
## Everything that is currently in the PostLS1 geometry (rpc,csc,beampipe)
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/extend/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/CMSCommonData/data/cmsTracker.xml',
'Geometry/CMSCommonData/data/caloBase.xml',
'Geometry/CMSCommonData/data/cmsCalo.xml',
'Geometry/CMSCommonData/data/muonBase.xml',
'Geometry/CMSCommonData/data/cmsMuon.xml',
'Geometry/CMSCommonData/data/mgnt.xml',
'Geometry/CMSCommonData/data/PhaseI/beampipe.xml',
'Geometry/CMSCommonData/data/cmsBeam.xml',
'Geometry/CMSCommonData/data/muonMB.xml',
'Geometry/CMSCommonData/data/muonMagnet.xml',
'Geometry/CMSCommonData/data/cavern.xml',
'Geometry/TrackerCommonData/data/pixfwdMaterials.xml',
'Geometry/TrackerCommonData/data/pixfwdCommon.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml',
'Geometry/TrackerCommonData/data/pixfwdPanel.xml',
'Geometry/TrackerCommonData/data/pixfwdBlade.xml',
'Geometry/TrackerCommonData/data/pixfwdNipple.xml',
'Geometry/TrackerCommonData/data/pixfwdDisk.xml',
'Geometry/TrackerCommonData/data/pixfwdCylinder.xml',
'Geometry/TrackerCommonData/data/pixfwd.xml',
'Geometry/TrackerCommonData/data/pixbarmaterial.xml',
'Geometry/TrackerCommonData/data/pixbarladder.xml',
'Geometry/TrackerCommonData/data/pixbarladderfull.xml',
'Geometry/TrackerCommonData/data/pixbarladderhalf.xml',
'Geometry/TrackerCommonData/data/pixbarlayer.xml',
'Geometry/TrackerCommonData/data/pixbarlayer0.xml',
'Geometry/TrackerCommonData/data/pixbarlayer1.xml',
'Geometry/TrackerCommonData/data/pixbarlayer2.xml',
'Geometry/TrackerCommonData/data/pixbar.xml',
'Geometry/TrackerCommonData/data/tibtidcommonmaterial.xml',
'Geometry/TrackerCommonData/data/tibmaterial.xml',
'Geometry/TrackerCommonData/data/tibmodpar.xml',
'Geometry/TrackerCommonData/data/tibmodule0.xml',
'Geometry/TrackerCommonData/data/tibmodule0a.xml',
'Geometry/TrackerCommonData/data/tibmodule0b.xml',
'Geometry/TrackerCommonData/data/tibmodule2.xml',
'Geometry/TrackerCommonData/data/tibstringpar.xml',
'Geometry/TrackerCommonData/data/tibstring0ll.xml',
'Geometry/TrackerCommonData/data/tibstring0lr.xml',
'Geometry/TrackerCommonData/data/tibstring0ul.xml',
'Geometry/TrackerCommonData/data/tibstring0ur.xml',
'Geometry/TrackerCommonData/data/tibstring0.xml',
'Geometry/TrackerCommonData/data/tibstring1ll.xml',
'Geometry/TrackerCommonData/data/tibstring1lr.xml',
'Geometry/TrackerCommonData/data/tibstring1ul.xml',
'Geometry/TrackerCommonData/data/tibstring1ur.xml',
'Geometry/TrackerCommonData/data/tibstring1.xml',
'Geometry/TrackerCommonData/data/tibstring2ll.xml',
'Geometry/TrackerCommonData/data/tibstring2lr.xml',
'Geometry/TrackerCommonData/data/tibstring2ul.xml',
'Geometry/TrackerCommonData/data/tibstring2ur.xml',
'Geometry/TrackerCommonData/data/tibstring2.xml',
'Geometry/TrackerCommonData/data/tibstring3ll.xml',
'Geometry/TrackerCommonData/data/tibstring3lr.xml',
'Geometry/TrackerCommonData/data/tibstring3ul.xml',
'Geometry/TrackerCommonData/data/tibstring3ur.xml',
'Geometry/TrackerCommonData/data/tibstring3.xml',
'Geometry/TrackerCommonData/data/tiblayerpar.xml',
'Geometry/TrackerCommonData/data/tiblayer0.xml',
'Geometry/TrackerCommonData/data/tiblayer1.xml',
'Geometry/TrackerCommonData/data/tiblayer2.xml',
'Geometry/TrackerCommonData/data/tiblayer3.xml',
'Geometry/TrackerCommonData/data/tib.xml',
'Geometry/TrackerCommonData/data/tidmaterial.xml',
'Geometry/TrackerCommonData/data/tidmodpar.xml',
'Geometry/TrackerCommonData/data/tidmodule0.xml',
'Geometry/TrackerCommonData/data/tidmodule0r.xml',
'Geometry/TrackerCommonData/data/tidmodule0l.xml',
'Geometry/TrackerCommonData/data/tidmodule1.xml',
'Geometry/TrackerCommonData/data/tidmodule1r.xml',
'Geometry/TrackerCommonData/data/tidmodule1l.xml',
'Geometry/TrackerCommonData/data/tidmodule2.xml',
'Geometry/TrackerCommonData/data/tidringpar.xml',
'Geometry/TrackerCommonData/data/tidring0.xml',
'Geometry/TrackerCommonData/data/tidring0f.xml',
'Geometry/TrackerCommonData/data/tidring0b.xml',
'Geometry/TrackerCommonData/data/tidring1.xml',
'Geometry/TrackerCommonData/data/tidring1f.xml',
'Geometry/TrackerCommonData/data/tidring1b.xml',
'Geometry/TrackerCommonData/data/tidring2.xml',
'Geometry/TrackerCommonData/data/tid.xml',
'Geometry/TrackerCommonData/data/tidf.xml',
'Geometry/TrackerCommonData/data/tidb.xml',
'Geometry/TrackerCommonData/data/tibtidservices.xml',
'Geometry/TrackerCommonData/data/tibtidservicesf.xml',
'Geometry/TrackerCommonData/data/tibtidservicesb.xml',
'Geometry/TrackerCommonData/data/tobmaterial.xml',
'Geometry/TrackerCommonData/data/tobmodpar.xml',
'Geometry/TrackerCommonData/data/tobmodule0.xml',
'Geometry/TrackerCommonData/data/tobmodule2.xml',
'Geometry/TrackerCommonData/data/tobmodule4.xml',
'Geometry/TrackerCommonData/data/tobrodpar.xml',
'Geometry/TrackerCommonData/data/tobrod0c.xml',
'Geometry/TrackerCommonData/data/tobrod0l.xml',
'Geometry/TrackerCommonData/data/tobrod0h.xml',
'Geometry/TrackerCommonData/data/tobrod0.xml',
'Geometry/TrackerCommonData/data/tobrod1l.xml',
'Geometry/TrackerCommonData/data/tobrod1h.xml',
'Geometry/TrackerCommonData/data/tobrod1.xml',
'Geometry/TrackerCommonData/data/tobrod2c.xml',
'Geometry/TrackerCommonData/data/tobrod2l.xml',
'Geometry/TrackerCommonData/data/tobrod2h.xml',
'Geometry/TrackerCommonData/data/tobrod2.xml',
'Geometry/TrackerCommonData/data/tobrod3l.xml',
'Geometry/TrackerCommonData/data/tobrod3h.xml',
'Geometry/TrackerCommonData/data/tobrod3.xml',
'Geometry/TrackerCommonData/data/tobrod4c.xml',
'Geometry/TrackerCommonData/data/tobrod4l.xml',
'Geometry/TrackerCommonData/data/tobrod4h.xml',
'Geometry/TrackerCommonData/data/tobrod4.xml',
'Geometry/TrackerCommonData/data/tobrod5l.xml',
'Geometry/TrackerCommonData/data/tobrod5h.xml',
'Geometry/TrackerCommonData/data/tobrod5.xml',
'Geometry/TrackerCommonData/data/tob.xml',
'Geometry/TrackerCommonData/data/tecmaterial.xml',
'Geometry/TrackerCommonData/data/tecmodpar.xml',
'Geometry/TrackerCommonData/data/tecmodule0.xml',
'Geometry/TrackerCommonData/data/tecmodule0r.xml',
'Geometry/TrackerCommonData/data/tecmodule0s.xml',
'Geometry/TrackerCommonData/data/tecmodule1.xml',
'Geometry/TrackerCommonData/data/tecmodule1r.xml',
'Geometry/TrackerCommonData/data/tecmodule1s.xml',
'Geometry/TrackerCommonData/data/tecmodule2.xml',
'Geometry/TrackerCommonData/data/tecmodule3.xml',
'Geometry/TrackerCommonData/data/tecmodule4.xml',
'Geometry/TrackerCommonData/data/tecmodule4r.xml',
'Geometry/TrackerCommonData/data/tecmodule4s.xml',
'Geometry/TrackerCommonData/data/tecmodule5.xml',
'Geometry/TrackerCommonData/data/tecmodule6.xml',
'Geometry/TrackerCommonData/data/tecpetpar.xml',
'Geometry/TrackerCommonData/data/tecring0.xml',
'Geometry/TrackerCommonData/data/tecring1.xml',
'Geometry/TrackerCommonData/data/tecring2.xml',
'Geometry/TrackerCommonData/data/tecring3.xml',
'Geometry/TrackerCommonData/data/tecring4.xml',
'Geometry/TrackerCommonData/data/tecring5.xml',
'Geometry/TrackerCommonData/data/tecring6.xml',
'Geometry/TrackerCommonData/data/tecring0f.xml',
'Geometry/TrackerCommonData/data/tecring1f.xml',
'Geometry/TrackerCommonData/data/tecring2f.xml',
'Geometry/TrackerCommonData/data/tecring3f.xml',
'Geometry/TrackerCommonData/data/tecring4f.xml',
'Geometry/TrackerCommonData/data/tecring5f.xml',
'Geometry/TrackerCommonData/data/tecring6f.xml',
'Geometry/TrackerCommonData/data/tecring0b.xml',
'Geometry/TrackerCommonData/data/tecring1b.xml',
'Geometry/TrackerCommonData/data/tecring2b.xml',
'Geometry/TrackerCommonData/data/tecring3b.xml',
'Geometry/TrackerCommonData/data/tecring4b.xml',
'Geometry/TrackerCommonData/data/tecring5b.xml',
'Geometry/TrackerCommonData/data/tecring6b.xml',
'Geometry/TrackerCommonData/data/tecpetalf.xml',
'Geometry/TrackerCommonData/data/tecpetalb.xml',
'Geometry/TrackerCommonData/data/tecpetal0.xml',
'Geometry/TrackerCommonData/data/tecpetal0f.xml',
'Geometry/TrackerCommonData/data/tecpetal0b.xml',
'Geometry/TrackerCommonData/data/tecpetal3.xml',
'Geometry/TrackerCommonData/data/tecpetal3f.xml',
'Geometry/TrackerCommonData/data/tecpetal3b.xml',
'Geometry/TrackerCommonData/data/tecpetal6f.xml',
'Geometry/TrackerCommonData/data/tecpetal6b.xml',
'Geometry/TrackerCommonData/data/tecpetal8f.xml',
'Geometry/TrackerCommonData/data/tecpetal8b.xml',
'Geometry/TrackerCommonData/data/tecwheel.xml',
'Geometry/TrackerCommonData/data/tecwheela.xml',
'Geometry/TrackerCommonData/data/tecwheelb.xml',
'Geometry/TrackerCommonData/data/tecwheelc.xml',
'Geometry/TrackerCommonData/data/tecwheeld.xml',
'Geometry/TrackerCommonData/data/tecwheel6.xml',
'Geometry/TrackerCommonData/data/tecservices.xml',
'Geometry/TrackerCommonData/data/tecbackplate.xml',
'Geometry/TrackerCommonData/data/tec.xml',
'Geometry/TrackerCommonData/data/trackermaterial.xml',
'Geometry/TrackerCommonData/data/tracker.xml',
'Geometry/TrackerCommonData/data/trackerpixbar.xml',
'Geometry/TrackerCommonData/data/trackerpixfwd.xml',
'Geometry/TrackerCommonData/data/trackertibtidservices.xml',
'Geometry/TrackerCommonData/data/trackertib.xml',
'Geometry/TrackerCommonData/data/trackertid.xml',
'Geometry/TrackerCommonData/data/trackertob.xml',
'Geometry/TrackerCommonData/data/trackertec.xml',
'Geometry/TrackerCommonData/data/trackerbulkhead.xml',
'Geometry/TrackerCommonData/data/trackerother.xml',
'Geometry/EcalCommonData/data/eregalgo.xml',
'Geometry/EcalCommonData/data/ebalgo.xml',
'Geometry/EcalCommonData/data/ebcon.xml',
'Geometry/EcalCommonData/data/ebrot.xml',
'Geometry/EcalCommonData/data/eecon.xml',
'Geometry/EcalCommonData/data/eefixed.xml',
'Geometry/EcalCommonData/data/eehier.xml',
'Geometry/EcalCommonData/data/eealgo.xml',
'Geometry/EcalCommonData/data/escon.xml',
'Geometry/EcalCommonData/data/esalgo.xml',
'Geometry/EcalCommonData/data/eeF.xml',
'Geometry/EcalCommonData/data/eeB.xml',
'Geometry/HcalCommonData/data/hcalrotations.xml',
'Geometry/HcalCommonData/data/hcalalgo.xml',
'Geometry/HcalCommonData/data/hcalbarrelalgo.xml',
'Geometry/HcalCommonData/data/hcalendcapalgo.xml',
'Geometry/HcalCommonData/data/hcalouteralgo.xml',
'Geometry/HcalCommonData/data/hcalforwardalgo.xml',
'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mbCommon.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb1.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb2.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb3.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb4.xml',
'Geometry/MuonCommonData/data/design/muonYoke.xml',
'Geometry/MuonCommonData/data/v2/mf.xml',
'Geometry/MuonCommonData/data/RPC2Gap/rpcf.xml',
'Geometry/MuonCommonData/data/v2/csc.xml',
'Geometry/MuonCommonData/data/v2/mfshield.xml',
'Geometry/ForwardCommonData/data/forward.xml',
'Geometry/ForwardCommonData/data/v2/forwardshield.xml',
'Geometry/ForwardCommonData/data/brmrotations.xml',
'Geometry/ForwardCommonData/data/brm.xml',
'Geometry/ForwardCommonData/data/totemMaterials.xml',
'Geometry/ForwardCommonData/data/totemRotations.xml',
'Geometry/ForwardCommonData/data/totemt1.xml',
'Geometry/ForwardCommonData/data/totemt2.xml',
'Geometry/ForwardCommonData/data/ionpump.xml',
'Geometry/ForwardCommonData/data/castor.xml',
'Geometry/ForwardCommonData/data/zdcmaterials.xml',
'Geometry/ForwardCommonData/data/lumimaterials.xml',
'Geometry/ForwardCommonData/data/zdcrotations.xml',
'Geometry/ForwardCommonData/data/lumirotations.xml',
'Geometry/ForwardCommonData/data/zdc.xml',
'Geometry/ForwardCommonData/data/zdclumi.xml',
'Geometry/ForwardCommonData/data/cmszdc.xml')+cms.vstring(
'Geometry/MuonCommonData/data/RPC2Gap/muonNumbering.xml',
'Geometry/TrackerCommonData/data/trackerStructureTopology.xml',
'Geometry/TrackerSimData/data/trackersens.xml',
'Geometry/TrackerRecoData/data/trackerRecoMaterial.xml',
'Geometry/EcalSimData/data/ecalsens.xml',
'Geometry/HcalCommonData/data/hcalsenspmf.xml',
'Geometry/HcalSimData/data/hf.xml',
'Geometry/HcalSimData/data/hfpmt.xml',
'Geometry/HcalSimData/data/hffibrebundle.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/MuonSimData/data/muonSens.xml',
'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',
'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml',
'Geometry/ForwardCommonData/data/brmsens.xml',
'Geometry/ForwardSimData/data/castorsens.xml',
'Geometry/ForwardSimData/data/zdcsens.xml',
'Geometry/HcalSimData/data/HcalProdCuts.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'Geometry/EcalSimData/data/ESProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml',
'Geometry/MuonSimData/data/muonProdCuts.xml',
'Geometry/ForwardSimData/data/CastorProdCuts.xml',
'Geometry/ForwardSimData/data/zdcProdCuts.xml',
'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml'),
rootNodeName = cms.string('cms:OCMS')
)
| [
"[email protected]"
]
| |
47c3d8019181b00a4cc6f1e528455517694034d1 | 1662507ec7104531e4e54209fc32bfdf397b60cd | /backend/wallet/models.py | c0d4a9fbfaf096d0cda2c061ebe3a3c6041ebd63 | []
| no_license | crowdbotics-apps/home-trend-24478 | 4b2397fbefc9469e2d8f00240dff0b3fc3eaa368 | 850309d0bb282cf824f8b8d42ef8c6ab3c43bc1c | refs/heads/master | 2023-03-07T18:34:15.590576 | 2021-02-20T00:34:25 | 2021-02-20T00:34:25 | 338,431,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | from django.conf import settings
from django.db import models
class PaymentMethod(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.CustomerWallet",
on_delete=models.CASCADE,
related_name="paymentmethod_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
class PaymentTransaction(models.Model):
"Generated Model"
price = models.FloatField()
tip = models.FloatField()
tracking_id = models.CharField(
max_length=50,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
tasker = models.ForeignKey(
"task_profile.TaskerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_tasker",
)
customer = models.ForeignKey(
"task_profile.CustomerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_customer",
)
transaction = models.ForeignKey(
"task.TaskTransaction",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_transaction",
)
payment_method = models.ForeignKey(
"wallet.PaymentMethod",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_payment_method",
)
class TaskerWallet(models.Model):
"Generated Model"
tasker = models.OneToOneField(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="taskerwallet_tasker",
)
balance = models.FloatField(
max_length=254,
)
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class CustomerWallet(models.Model):
"Generated Model"
customer = models.OneToOneField(
"task_profile.CustomerProfile",
on_delete=models.CASCADE,
related_name="customerwallet_customer",
)
balance = models.FloatField()
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class TaskerPaymentAccount(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.TaskerWallet",
on_delete=models.CASCADE,
related_name="taskerpaymentaccount_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
# Create your models here.
| [
"[email protected]"
]
| |
bd40e87cf094c91dcb5d4c15d6fec0e2daf3068f | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/spaCy/2016/4/test_flag_features.py | 880704e28905500ee8aa5b21c6e60fc6e73fdc58 | [
"MIT"
]
| permissive | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 1,333 | py | from __future__ import unicode_literals
import pytest
from spacy.orth import is_alpha
from spacy.orth import is_digit
from spacy.orth import is_punct
from spacy.orth import is_space
from spacy.orth import is_ascii
from spacy.orth import is_upper
from spacy.orth import is_lower
from spacy.orth import is_title
@pytest.fixture
def words():
return ["1997", "19.97", "hello9", "Hello", "HELLO", "Hello9", "\n", "!",
"!d", "\nd"]
def test_is_alpha(words):
assert not is_alpha(words[0])
assert not is_alpha(words[1])
assert not is_alpha(words[2])
assert is_alpha(words[3])
assert is_alpha(words[4])
assert not is_alpha(words[5])
assert not is_alpha(words[6])
assert not is_alpha(words[7])
assert not is_alpha(words[8])
assert not is_alpha(words[9])
def test_is_digit(words):
assert is_digit(words[0])
assert not is_digit(words[1])
assert not is_digit(words[2])
assert not is_digit(words[3])
assert not is_digit(words[4])
assert not is_digit(words[5])
assert not is_digit(words[6])
assert not is_digit(words[7])
assert not is_digit(words[8])
assert not is_digit(words[9])
def test_is_quote(words):
pass
def test_is_bracket(words):
pass
def test_is_left_bracket(words):
pass
def test_is_right_bracket(words):
pass
| [
"[email protected]"
]
| |
24875a336f66ccd4c114ada3a3e42c2d603c2639 | e81d274d6a1bcabbe7771612edd43b42c0d48197 | /数据库/03_Redis/day48(主从服务器)/demo/02_python操作redis/01.py | 9cef9a735360a75455cde6d390c9cebd36992a94 | [
"MIT"
]
| permissive | ChWeiking/PythonTutorial | 1259dc04c843382f2323d69f6678b9431d0b56fd | 1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61 | refs/heads/master | 2020-05-15T00:50:10.583105 | 2016-07-30T16:03:45 | 2016-07-30T16:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | #推荐网站
#http://python.jobbole.com/87305/
import redis
#获取连接对象 当我们用Redis和StrictRedis创建连接时,其实内部实现并没有主动给我创建一个连接,我们获得的连接是连接池提供的连接,这个连接由连接池管理,所以我们无需关注连接是否需要主动释放关闭的问题。另外连接池有自己的关闭连接的接口,一旦调用该接口,所有连接都将被关闭,连接池的操作不需要程序员管理,系统redis模块自动管理好了。
conn = redis.StrictRedis('127.0.0.1',6379,password=123456)
#如果是多个增删改,使用管道对象,默认先存在管道中,当execute时候,保存到数据库文件中
pip = conn.pipeline()
pip.set('a',1)
pip.set('b',2)
pip.set('c',3)
#提交
pip.execute()
#查询的时候,可以使用pip,也可以使用conn对象
print(conn.get('a'))
print('哦了')
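#A minimal sketch of the explicit ConnectionPool mentioned above (assumes the
#same local Redis instance and password; StrictRedis builds an equivalent pool
#internally when given host/port directly):
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, password=123456)
conn2 = redis.StrictRedis(connection_pool=pool)
conn2.set('d', 4)
print(conn2.get('d'))
#the pool's own interface for closing every connection it manages
pool.disconnect()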
| [
"[email protected]"
]
| |
794e2904caebb85aa81ccb41eaed66721843747f | 09301c71638abf45230192e62503f79a52e0bd80 | /besco_erp/besco_warehouse/general_stock_fifo/__openerp__.py | 7aa0010772448e6c5236add7f97c1eec77d47520 | []
| no_license | westlyou/NEDCOFFEE | 24ef8c46f74a129059622f126401366497ba72a6 | 4079ab7312428c0eb12015e543605eac0bd3976f | refs/heads/master | 2020-05-27T06:01:15.188827 | 2017-11-14T15:35:22 | 2017-11-14T15:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
##############################################################################
#
##############################################################################
{
"name" : "General Stock FIFO",
"version" : "9.0",
"author" : "Le Truong Thanh <[email protected]>",
'category': 'General 90',
"depends" : ["general_stock",
"general_account",
],
"init_xml" : [],
"demo_xml" : [],
"description": """
""",
'data': [
# 'security/ir.model.access.csv',
# 'security/security.xml',
'cron.xml',
'stock_fifo_view.xml',
'menu.xml',
],
'test': [
],
'installable': True,
'auto_install': False,
'certificate': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
1a4a84046bb067d8317cba7a3dfb51fef729d588 | abf44e8ac8325e1c95b0d0569baee19b8f725b0a | /1_slide_window/7.py | 79fadc05e2ce3816ac627747f460e59868bd8734 | []
| no_license | terrifyzhao/educative2 | 05994b0e7f4e0c8d4319106eddd48ba1dfe5317d | 00e9d630da117fa9550f2efb2191709734c63c8a | refs/heads/master | 2022-12-24T02:51:18.671842 | 2020-09-24T07:43:08 | 2020-09-24T07:43:08 | 276,569,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | def length_of_longest_substring(arr, k):
start = 0
max_len = 0
count_1 = 0
for i in range(len(arr)):
num = arr[i]
if num == 1:
count_1 += 1
if i - start + 1 - count_1 > k:
num = arr[start]
if num == 1:
count_1 -= 1
start += 1
max_len = max(i - start + 1, max_len)
return max_len
def main():
print(length_of_longest_substring([0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1], 2))
print(length_of_longest_substring(
[0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], 3))
main()
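# Expected output for the two calls above: 6 and 9, the longest windows
# with at most 2 and 3 zeros respectively.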
| [
"[email protected]"
]
| |
81f5316150af9c908dd4b3ef8628cca2b90db2b0 | 8fc7635b84b42e61b7efb9eaf7215394b5b5790a | /aliennor-backend copy/aliennorDjangoBackend/aliennorDjangoBackend/wsgi.py | 021b6e96cb9641200f626f50804bb038f497e40a | []
| no_license | phamcong/aliennor-platform | f1e8470aab7ed634859e071f6028931f576ddf3e | e1d71532426ac9414d2158d50ee34c32257618f0 | refs/heads/master | 2021-05-14T17:08:08.629564 | 2018-02-17T23:35:07 | 2018-02-17T23:35:07 | 116,038,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for aliennorDjangoBackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aliennorDjangoBackend.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
40f118a930e06e6edf455277d99dddcc1d85aa9a | 2e6c95871bd255873fb563347c0f070e6fcdde74 | /ngram_2_model_pca.py | 6b077e23c628515f969ffa99bba1c5e5f09cec87 | []
| no_license | MSBradshaw/BioHackathon2020 | 3203c5232bebd70d2c2a88b7f49063a09da023c4 | 31826b698a408541200b6f75bfe9c03217bf2d1a | refs/heads/master | 2022-08-05T11:57:32.221444 | 2020-05-29T17:30:29 | 2020-05-29T17:30:29 | 258,961,184 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,036 | py | import re
import pandas as pd
from bs4 import BeautifulSoup
import datetime
import time
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
import pickle
import os
def date_to_unix_time(date):
if date is None or date == '':
return None
dt = datetime.datetime.strptime(date, '%B %d, %Y')
return int(time.mktime(dt.timetuple()))
def get_n_grams(_text, _n, _gram_dict=None):
    # avoid a shared mutable default dict across calls
    if _gram_dict is None:
        _gram_dict = {}
# if a special character is being used as punctuation (not in a name) add a space
_text = re.sub('(: )', ' \\g<1>', _text)
_text = re.sub('(- )', ' \\g<1>', _text)
_text = re.sub('(, )', ' \\g<1>', _text)
_text = re.sub('(\\. )', ' \\g<1>', _text)
_text = re.sub('(- )', ' \\g<1>', _text)
_text = re.sub('(\\? )', ' \\g<1>', _text)
_text = re.sub('(; )', ' \\g<1>', _text)
_text = re.sub('(! )', ' \\g<1>', _text)
# remove paranthesis arounda single word
_text = re.sub(' \\(([^ ])\\) ', ' \\g<1> ', _text)
# remove leading and trailing parenthesis
_text = re.sub(' \\(', ' ', _text)
_text = re.sub('\\) ', ' ', _text)
_text_list = _text.split(' ')
# create the n-grams
_done = False
# gram_dict = {}
for _i in range(len(_text_list)):
_gram = ''
_skip = False
for _j in range(_n):
if _i + _j >= len(_text_list):
_done = True
break
# check if the current item is punctuation, if so skip this gram
if _text_list[_i + _j] in ['.', ',', '?', ';', '!', ':', '-']:
_skip = True
break
_gram += _text_list[_i + _j] + ' '
if not _done and not _skip:
# remove trailing space
_gram = _gram[:-1]
# if gram has already been made
if _gram in _gram_dict:
# increment count
_gram_dict[_gram] += 1
else:
# else create new entry
_gram_dict[_gram] = 1
_gram_df = pd.DataFrame({'gram': list(_gram_dict.keys()), 'count': list(_gram_dict.values())})
return _gram_df, _gram_dict
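# Quick illustration (hypothetical input) of the counter above:
#   get_n_grams("the cat sat. the cat", 2) yields the counts
#   {'the cat': 2, 'cat sat': 1} (grams spanning the '.' token are skipped),
#   both as a dict and as the returned DataFrame view of it.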
def get_df_of_n_grams(_texts, _n):
_dic = {}
_final_df = None
for _ab in _texts:
_final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n, _dic)
_grams = list(set(_final_df['gram']))
_article_n_grams = {_x: [] for _x in _grams}
for _ab in _texts:
_final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n,{})
for _key in _grams:
if _key in _dic:
_article_n_grams[_key].append(_dic[_key])
else:
_article_n_grams[_key].append(0)
fake_df_n_grams = pd.DataFrame(_article_n_grams)
return fake_df_n_grams
train = pd.read_csv('train.csv')
pickle_cache = 'grams_2_df.pickle'
if os.path.exists(pickle_cache):
grams_2 = pickle.load(open(pickle_cache,'rb'))
else:
grams_2 = get_df_of_n_grams(list(train['abstract']),2)
pickle.dump(grams_2,open(pickle_cache,'wb'),protocol=4)
X = grams_2.to_numpy()
y = train['type'].to_numpy()
pca2 = PCA(n_components=10)
pca2.fit(grams_2.to_numpy().transpose())
# pca = pickle.load(open('real_fake_pca.pickle','rb'))
clf = svm.SVC(kernel='linear', C=1)
scores = cross_val_score(clf,pca2.components_.transpose(), y, cv=5)
#
# with open('svm-cross-val-pca.txt','w') as outfile:
# outfile.write(str(scores))
X_train, X_test, y_train, y_test = train_test_split(pca2.components_.transpose(), y, test_size=0.33, random_state=42)
clf.fit(X_train, y_train)
with open('svm-results-pca.txt','w') as outfile:
outfile.write('Cross Val scores: ' + str(scores) + '\n')
outfile.write('SVM SCore: ' + str(clf.score(X_test,y_test)) + '\n')
preds = clf.predict(X_test)
outfile.write('Predictions: ')
for p in preds:
outfile.write(',' + str(p))
| [
"[email protected]"
]
| |
5d701f0a48dd6c81ab978a9683db47f0cf9fb515 | 587ac0749473666c2bcdfe558bdba8517cb1c0a0 | /sp2020/j.py | 9470133d7c7fc37f2ee060305d69a2e6d4c99a9d | []
| no_license | katrinafyi/cpg | fc2f408baf19791fa7260561a55d29464a42b212 | 0631d1983ec6a45cbe1a8df63963ab8caac51440 | refs/heads/main | 2023-02-21T13:07:02.517306 | 2021-01-23T06:09:39 | 2021-01-23T06:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | def ints(): return [int(x.strip()) for x in input().split()]
# t is interval of measurement
# d is time considered
# p is percentile required
# r is response delay required for responsiveness
num_meas, t, d, p, r = ints()
num_attempts = ints()[0]
SUBMIT = 'S'
REPLY = 'R'
MEASURE = 'M'
timeline = [] # list of (time, response time)
submitted = [None] * num_attempts
for i in range(2 * num_attempts):
a, b, c = input().strip().split()
a = int(a)
b = int(b) - 1
if c == SUBMIT:
submitted[b] = a
else:
timeline.append((a, 'R', a - submitted[b]))
# for i in range(1, num_meas + 1):
# timeline.append((i * t, MEASURE, None))
# timeline.sort()
from collections import deque
from math import ceil, floor
considering = deque()
def measure():
if not considering: return True
l = [x[1] for x in considering]
l.sort()
# print(l)
i = (p/100 * len(l))
if i == int(i): i = int(i) - 1
else: i = floor(i)
return l[i] <= r
# print(num_meas, t, d, p, r)
# print(timeline)
num_responsive = 0
prev_measure = -1
prev_measure_time = 0
changed = True
for time, event, value in timeline:
if event == REPLY:
if time > prev_measure_time + t:
next_measure_time = floor(time / t) * t
while considering and considering[0][0] < next_measure_time - d:
considering.popleft()
m = measure()
num_responsive += m * (time - prev_measure_time + t) // t
prev_measure_time = next_measure_time
considering.append((time, value))
changed = True
print(num_responsive) | [
"[email protected]"
]
| |
4fe2f24ace7a19b1acc48f98e1b7555884e1392c | 6e2e476c5764d5e75c7afe5a531ac5b890ef0c64 | /Models_barAllExecutionTimes.py | 6dd46654dd04bc45d58214343e8245ce54d8db3f | []
| no_license | BrunoDatoMeneses/PythonPloting | d4611f62f2709465e32d3ab2dc4e0d5cef65e783 | b5bd1c7aa5a50144d2db82f29ab754b01084f230 | refs/heads/master | 2023-05-07T14:08:17.225336 | 2021-06-02T09:06:13 | 2021-06-02T09:06:13 | 297,996,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | import _PLOT
from Utils import transpose
import os
import csv
# transpose.transposeFiles()
from _FIG import PLOTTING
from _PARAMS import PARAMETERS
figEndName = "-AllNCS"
#xlabel = 'Learning Cycles (#)'
ylabel = 'Times (ms)'
yStringLong ="ExecuyionTimes"
# figVaryingParamString = "learningCycles"
# varyingParamStringValues = ["500","1000","1500","2000"]
# varyingParamStrings = []
# paramlabelString = " Learning Cycles"
# PARAMETERS.learningCycles= "("
# for value in varyingParamStringValues:
# # precisionRange+= str(int(100*float(label))) + "_"
# # labelStrings.append(labelString + str(int(100*float(label))) + " %")
# PARAMETERS.learningCycles += value + "_"
# varyingParamStrings.append(value + paramlabelString)
#
# PARAMETERS.learningCycles += ")"
PARAMETERS.figSize = (4.5, 3.75)
yStrings = ["perceptsTimeExecution","contextsTimeExecution","headTimeExecution",
"NCSTimeExecution",
"NCS_UselessnessTimeExecution","NCS_IncompetendHeadTimeExecution","NCS_ConcurrenceAndConflictTimeExecution",
"NCS_Create_New_ContextTimeExecution","NCS_OvermappingTimeExecution","NCS_ChildContextTimeExecution","NCS_PotentialRequestTimeExecution"]
yStringsAvg = []
yStringsDev = []
yStringsMin = []
yStringsMax = []
for string in yStrings:
yStringsAvg.append(string+"_Average")
yStringsDev.append(string+"_Deviation")
yStringsMin.append(string+"_Min")
yStringsMax.append(string+"_Max")
xLabelStrings = ["Pcts","Ctxt","Head",
"NCSs",
"NCS Useless.","NCS Unprod.","NCS Conf. and Conc.",
"NCS Ctxt Creation","NCS Redun.","NCS Model","NCS Endo."]
logXScale = False
logYScale = False
# for label in labelStrings:
# yStringLong += label + "_"
XYDevMinMax = []
for y,yDev,min,max,yString in zip(yStringsAvg, yStringsDev, yStringsMin, yStringsMax,yStrings):
if(yString == "endoRequests"):
XYDevMinMax.append([y, yDev, min, max,0.1])
else:
XYDevMinMax.append([y, yDev, min, max, 1])
figName = "ToFill_" + yStringLong + "-" + PARAMETERS.getFigName() + figEndName
print(figName)
PARAMETERS.isActiveLearning = "false"
PARAMETERS.isSelfLearning = "true"
PARAMETERS.isLearnFromNeighbors = "true"
PARAMETERS.isActiveExploitation = "true"
PARAMETERS.activeExploitationCycles = "4000"
PARAMETERS.learningCycles = "500"
varyingParamStrings=[""]
constrains = []
constrains.append(PARAMETERS.getConstainsLabelsAreYStrings(xLabelStrings, XYDevMinMax))
PLOTTING.ROTATION = 45
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
figName, ylabel, False, False,
constrains, 1, 1, PARAMETERS.figSize)
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
figName, ylabel, False, True,
constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, False, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviationWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, True, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
# figName, xlabel, ylabel, True, logYScale,
# constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviation(labels, colors, markers, figName, xlabel, ylabel, logXScale, logYScale, xString, yString, deviationString, constrains, 1, 1)
| [
"[email protected]"
]
| |
419bee1b9fe65c8d11a7d4b70693ec15423d958f | cc578cec7c485e2c1060fd075ccc08eb18124345 | /cs15211/24Game.py | ea18464f94d06f61725723f26fa46ca83987f4e3 | [
"Apache-2.0"
]
| permissive | JulyKikuAkita/PythonPrac | 18e36bfad934a6112f727b4906a5e4b784182354 | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | refs/heads/master | 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 | Apache-2.0 | 2019-02-07T06:15:30 | 2017-05-20T18:12:53 | Python | UTF-8 | Python | false | false | 4,885 | py | __source__ = 'https://leetcode.com/problems/24-game/description/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 679. 24 Game
#
# You have 4 cards each containing a number from 1 to 9.
# You need to judge whether they could be operated through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator.
#
# For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together.
# For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
#
# Companies
# Google
# Related Topics
# Depth-first Search
#
#868ms 6.09%
import unittest
import itertools
from operator import truediv, mul, add, sub
from fractions import Fraction
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def apply(A, B):
ans = set()
for x, y, op in itertools.product(A, B, (truediv, mul, add, sub)):
if op is not truediv or y: ans.add(op(x, y))
if op is not truediv or x: ans.add(op(y, x))
return ans
A = [{x} for x in map(Fraction, nums)]
for i, j in itertools.combinations(range(4), 2):
r1 = apply(A[i], A[j])
k, l = {0, 1, 2, 3} - {i, j}
if 24 in apply(apply(r1, A[k]), A[l]): return True
if 24 in apply(apply(r1, A[l]), A[k]): return True
if 24 in apply(r1, apply(A[k], A[l])): return True
return False
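# e.g. judgePoint24([4, 1, 8, 7]) -> True via (8-4)*(7-1) = 24; using Fraction
# keeps the arithmetic exact, so 4 / (1 - Fraction(2, 3)) == 12 with no
# floating-point error.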
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/24-game/solution/
Given: (a, b, c, d) - (A tuple of 4)
Generate:
((a+b),c,d) ((a-b),c,d) ((b-a),c,d) ((a*b),c,d) ((a/b),c,d) ((b/a),c,d)
((a+c),b,d) ................................................................. ((c/a),b,d)
((a+d),b,c) ................................................................. ((d/a),b,c)
(a,(b+c),d) ................................................................. (a,(c/b),d)
(a,(b+d),d) ................................................................. (a,(d/b),d)
(a,b,(c+d)) ................................................................. (a,b,(d/c))
There are 36 (6*6) such tuples. Of these, + & - are not order dependent. That is 2+3 = 3+2.
But / & - are order dependent. i.e. 2/3 != 3/2. These look like (e,f,g) i.e. a tuple of 3 now.
Carrying out similar reductions gives 18 (6*3) tuples for each of the above-generated tuples.
These now look like (h, i) i.e. a tuple of 2 now.
Similarly, the final reduction now yields 6 answers (a+b, a-b, a*b, a/b, b-a, b/a)
for each of the above-generated tuples.
Thus in total 36x18x6 final values can be generated using the 4 operators and 4 initial values.
Algo: Generate all such answers using dfs method and stop when it's 24.
Catches:
Use double instead of int
Be careful about the classical divide by zero error
#18ms 56.03%
class Solution {
public boolean judgePoint24(int[] nums) {
ArrayList A = new ArrayList<Double>();
for (int v: nums) A.add((double) v);
return solve(A);
}
private boolean solve(ArrayList<Double> nums) {
if (nums.size() == 0) return false;
if (nums.size() == 1) return Math.abs(nums.get(0) - 24) < 1e-6;
for (int i = 0; i < nums.size(); i++) {
for (int j = 0; j < nums.size(); j++) {
if (i != j) {
ArrayList<Double> nums2 = new ArrayList<Double>();
for (int k = 0; k < nums.size(); k++) if (k != i && k != j) {
nums2.add(nums.get(k));
}
for (int k = 0; k < 4; k++) {
if (k < 2 && j > i) continue;
if (k == 0) nums2.add(nums.get(i) + nums.get(j));
if (k == 1) nums2.add(nums.get(i) * nums.get(j));
if (k == 2) nums2.add(nums.get(i) - nums.get(j));
if (k == 3) {
if (nums.get(j) != 0) {
nums2.add(nums.get(i) / nums.get(j));
} else {
continue;
}
}
if (solve(nums2)) return true;
nums2.remove(nums2.size() - 1);
}
}
}
}
return false;
}
}
''' | [
"[email protected]"
]
| |
26dfeac08449167a930a80d1d44fae4e247ac8ed | d364123a0655bff7e9d725382934fe2c15b5bfc4 | /python3Test/Test/test009.py | d27fb2cf894790461b15d3b1ec2464ef190ccbb4 | []
| no_license | yuan1093040152/SeleniumTest | 88d75361c8419354f56856c326f843a0a89d7ca6 | d155b98702bc46c174499042b43257696b861b5e | refs/heads/master | 2023-08-31T15:00:25.415642 | 2023-08-30T09:26:42 | 2023-08-30T09:26:42 | 227,269,300 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,398 | py | #coding=utf-8
"""
@Author : Yuan Meng
@Time : 2022/6/28 17:24
@Software: PyCharm
Ctrl+Shift+V  paste from clipboard history
Ctrl+Alt+Space  autocomplete
Ctrl+Alt+D  split screen
Ctrl+/  toggle line comment
"""
import base64,hashlib
import json
from Crypto.Cipher import AES
import binascii
from Crypto.Util.Padding import pad
aa = "0DIGbxUpV2a8TQTf6l4kiKTtrnNaRI1qcMPLxXma82ix0dUGGqNeCE4eYG1ifS8xWL6DR0NHKmflYgcsmW/XZ2MROaTxKQbXq3yLcWZqc8pKcmJbGVhcSXY0mDT5MsmcMCYgti1NJ0WUOHU2Zv6WIP//Q4AstV7/6yBHdFo6UJTGkRmddGAGxO4jdIciDy8vmGk6ot3cCa6GKYfJIhn4/cPXsTkjLY+OQa7BIUsye8+UgjyuiOJx8bMSyWu/FnCK6awaKVLAYdUceayuEvizT4zzGdhDLV+YjqJu7nITL5IupBXSbF5Q4lQO6glviPFZEcKM9RdlPcMhsROzPVtDZswrC7mLphcX5vrZypjmeXdnaP8VfLN7I2xfSozWp5cGZxheCNTnVgX7ayhkHjiBVQoEeuUeESYYlIH0pyJyGOI7uF79e4+jUS2OqNuOlNEg4HSBkX59HggXwiu+3yKsZ/Mfrfhm46VPbWqXHiGRiyXWKnWZd1L//KD927SFpM5UgfP8etVxDjn8tNKKK4kN0FWcYMUBbLd0mQrL8QWAihay2yvpowTlgWZPga4Cr464QRrrj0CJI9lvO1ej3THKTr3K89gWGlzapQqnW+Ot+9sN7PozUQIePBCyVG6myWXcVoTGO2YTQ/TVxvty7YNwZHyMycV7DZUim8HmcMvp/RiAwaY2IOa80bnxWIfFcTnMaGUio2nyB1SX2QPrfg/47OtYkvI+FP0oDIhDhb87+hEdLJ+11LaTUZPIVO2b2d8N7biVGafJLUU2p3NpGuuGdoPYgHLmcXvRGMdI1wxoaUuhW7e1XgSc87qJOG9xViYyJ5ec+yiECDHq8Z91nEvgC1evfPYETY2XtMp3eNSgg5EqFQBMjEYK+z9v92+fObZBmiVqEk+eMgBh98/XOXJFLRnSWQIOFgiuRuCtgUMRX4myo8X+9h8m1f6UENWHV4r+H1qLuNiLnBiu0E/dSaOinSMzVzpG3yzydTwWWtx5W/lgQOFQz5fJTBfj8QNi9+VsZBEe65m93kt5etClJc3FZzesK0R6h0oG67EhQi+dOCmbDE+f3C9uC9u7p+DLvp+sP1oqqu4klwMf/hrcdMwXnwL7rPPTMonXU7Cv4RpNKRu21YIO33JJOM6gdIXedma7PsuV4KVOYazp08yj1fIOv6kgnM4/nI5qDtV4H3zYLc269uAhGa1kg0lO8lAFCXHm3AGKg6agdGahv0aOCtZh77r0gsr9FeZANNMAGb5HtGfpYKixO9PP/I2u4mHVfcNCk1Vs/wvUiTAwAWeUwFHxrvPm9biGJ+Wrjk1M7Gwxorc69rBxmp/XqqRroGUpECS8hAI5M+BLaYp1hDIXk9nUlHR2USpVRkaiLDOLDGmU7af1l3QnZ57r3kpnyTyJY+cYlcuTeDeJmjNtRSsBymlho7Ls4QKSKOf4it+wq7dLoQCroDbdRX+eEibKJe6rrnFOUb1ws3uZPKzygdEFC86XAD5doYVzRCNdSdIxJscRmNcXsrMTXLLRsCsTEF11DWT4jZId1r3nX2EtWx4dfg/bjqkWvTX88WdjIUnIBUJX4nRGH5OItrYrMil8azy3MoMPNxGvzh1Eb6F70EBt+lBOy5oCYHPoaTFaA213Uzldn8mITiL5k4S6tTPpsGdoOEpezMuB69vtW115mP8UgGzo3Lx9ycBQJm9tc6suYCfU4JQN+HY1+DH6QGji099V0E9r2vyqg8qYYB5yjcGL/qLAqvZH1Q0+RZ8Ye9K6jtywIMWwdU3o1s9PRdhoFV5LbZhEw4xBFeC0R1vfh72PyE1I1w=="
bb = "yjGG1V9JYO4/ezGJw8yY3lm390MgKwDjHV1jxZUz+/8="
password = 'ODcyYTUxNGM1N2M2'
# class EncryptDate:
#     def __init__(self, key):
#         # initialize the key
#         self.key = key
#         # initialize the block size
#         self.length = AES.block_size
#         # initialize an AES instance in ECB mode
#         self.aes = AES.new(self.key.encode("utf-8"), AES.MODE_ECB)
#         # truncation helper: strip the padding characters
#         self.unpad = lambda date: date[0:-ord(date[-1])]
#     def fill_method(self, aes_str):
#         '''pkcs7 padding'''
#         pad_pkcs7 = pad(aes_str.encode('utf-8'), AES.block_size, style='pkcs7')
#
#         return pad_pkcs7
#
#     def encrypt(self, encrData):
#         # encrypt, padding the input with pkcs7
#         res = self.aes.encrypt(self.fill_method(encrData))
#         # convert to base64
#         msg = str(base64.b64encode(res), encoding="utf-8")
#
#         return msg
#     def decrypt(self, decrData):
#         # base64 decode
#         res = base64.decodebytes(decrData.encode("utf-8"))
#         # decrypt
#         msg = self.aes.decrypt(res).decode("utf-8")
#
#         return self.unpad(msg)
def xx(aa):
    # create the AES instance in ECB mode
    aes = AES.new(password.encode("utf-8"), AES.MODE_ECB)
    # truncation helper: strip the pkcs7 padding characters
    unpad = lambda date: date[0:-ord(date[-1])]
    # base64 decode
    res = base64.decodebytes(aa.encode("utf-8"))
    # decrypt
    msg = aes.decrypt(res).decode("utf-8")
    dd = unpad(msg)
    print(type(dd))
    # print(dd[1]['list'])
    ee = json.loads(dd)
    return ee
    # unreachable debug prints (after the return):
    # print(ee)
    # print(ee['data']['list'][0]['authStatusStrForXcx'])
# print(xx(aa))
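# A minimal round-trip sanity check (illustrative only; reuses the same
# 16-byte key and the pad() import above):
_aes = AES.new(password.encode('utf-8'), AES.MODE_ECB)
_raw = _aes.encrypt(pad('{"ok": 1}'.encode('utf-8'), AES.block_size, style='pkcs7'))
_token = str(base64.b64encode(_raw), encoding='utf-8')
print(xx(_token))  # expected: {'ok': 1}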
a = ''
b = json.dumps(a)
print(b)
# if __name__ == '__main__':
# the key must be padded to a multiple of 16 bytes; the padding scheme depends on the use case, and an unpadded key raises an error
# the key length decides the variant: 16 chars -> AES-128, 32 chars -> AES-256
# eg = EncryptDate("ODcyYTUxNGM1N2M2")
# # the plaintext likewise needs a multiple-of-16 length; note that the pad() call in the code handles this padding (pkcs7)
# en = eg.encrypt(aa)
# de = eg.decrypt(aa)
# # print(f"encrypted: {en}")
# print(f"decrypted: {de}")
| [
"[email protected]"
]
| |
f2db0f815309f934b46da888e24855c0aad96a91 | 914b504e13df945a50f35eca4d850eb2c5b52c0b | /test/compute/test_base.py | f8c9bd3ba3a0fd128e1401b5f2e96d9796badcc2 | [
"Apache-2.0"
]
| permissive | cloudkick/libcloud | d05c0401bd232279cb38b5abacd3d4c85d7d072f | 9c8605e1518c6b5e2511f0780e1946089a7256dd | refs/heads/master | 2021-01-01T19:51:41.895189 | 2011-03-14T02:34:57 | 2011-03-14T02:34:57 | 258,426 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.base import Response
from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver
from test import MockResponse
class FakeDriver(object):
type = 0
class BaseTests(unittest.TestCase):
def test_base_node(self):
node = Node(id=0, name=0, state=0, public_ip=0, private_ip=0,
driver=FakeDriver())
def test_base_node_size(self):
node_size = NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0,
driver=FakeDriver())
def test_base_node_image(self):
node_image = NodeImage(id=0, name=0, driver=FakeDriver())
def test_base_response(self):
resp = Response(MockResponse(status=200, body='foo'))
def test_base_node_driver(self):
node_driver = NodeDriver('foo')
def test_base_connection_key(self):
conn = ConnectionKey('foo')
def test_base_connection_userkey(self):
conn = ConnectionUserAndKey('foo', 'bar')
# def test_drivers_interface(self):
# failures = []
# for driver in DRIVERS:
# creds = ProviderCreds(driver, 'foo', 'bar')
# try:
# verifyObject(INodeDriver, get_driver(driver)(creds))
# except BrokenImplementation:
# failures.append(DRIVERS[driver][1])
#
# if failures:
# self.fail('the following drivers do not support the \
# INodeDriver interface: %s' % (', '.join(failures)))
# def test_invalid_creds(self):
# failures = []
# for driver in DRIVERS:
# if driver == Provider.DUMMY:
# continue
# conn = connect(driver, 'bad', 'keys')
# try:
# conn.list_nodes()
# except InvalidCredsException:
# pass
# else:
# failures.append(DRIVERS[driver][1])
#
# if failures:
# self.fail('the following drivers did not throw an \
# InvalidCredsException: %s' % (', '.join(failures)))
if __name__ == '__main__':
sys.exit(unittest.main())
| [
"[email protected]"
]
| |
ab5f894430e4173d4f912b2ff27306986e39d566 | 146c71808bdd5fa458ef73df4a9b5837c83e779d | /tests/check_accuracy/check_accuracy_tests.py | 5bad11a87167ac8aab9336ec6f018f0846e9a884 | [
"MIT"
]
| permissive | aladdinpersson/aladdin | 62cff7ed8c014db91505545986e17b85e1656f98 | 4fd92ff3b6e74761fff75b01070930c9ec6ce29f | refs/heads/main | 2023-04-15T14:41:27.236738 | 2021-04-15T10:39:11 | 2021-04-15T10:39:11 | 352,296,885 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("aladdin/")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression')
from check_accuracy import check_accuracy
class TestCheckAccuracy(unittest.TestCase):
def setUp(self):
pass
def test(self):
pass
if __name__ == "__main__":
print("Running Check Accuracy tests")
unittest.main()
| [
"[email protected]"
]
| |
fe6df273d0824aeb08610dde5812f46f73da6587 | 17cb31350a9d0996e19dd111fc31980df03f82bf | /strawberryfields/devicespecs/device_specs.py | a32f9ef474ec3c53c88b0f76495cc55a33f98019 | [
"Apache-2.0"
]
| permissive | zeta1999/strawberryfields | 3eee705b711bd195cc6f1510461d75f6e7d9821b | 1bf05585be3553a7bb5c2f687dc45b7a064ddb17 | refs/heads/master | 2020-06-09T02:56:19.840324 | 2019-06-21T16:50:59 | 2019-06-21T16:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,785 | py | # Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for storing device data for validation"""
from typing import List, Set, Dict, Union
import abc
import blackbird
from blackbird.utils import to_DiGraph
class DeviceSpecs(abc.ABC):
"""Abstract base class for backend data"""
@property
@abc.abstractmethod
def modes(self) -> Union[int, None]:
"""The supported number of modes of the device.
If the device supports arbitrary number of modes, set this to 0.
Returns:
int: number of supported modes
"""
@property
@abc.abstractmethod
def local(self) -> bool:
"""Whether the backend supports local execution.
Returns:
bool: ``True`` if the backend supports local execution
"""
@property
@abc.abstractmethod
def remote(self) -> bool:
"""Whether the backend supports remote execution.
Returns:
bool: ``True`` if the backend supports remote execution
"""
@property
@abc.abstractmethod
def interactive(self) -> bool:
"""Whether the backend can be used interactively, that is,
the backend state is not reset between engine executions.
Returns:
bool: ``True`` if the backend supports interactive use
"""
@property
@abc.abstractmethod
def primitives(self) -> Set[str]:
"""The primitive set of quantum operations directly supported
by the backend.
Returns:
set[str]: the quantum primitives the backend supports
"""
@property
@abc.abstractmethod
def decompositions(self) -> Dict[str, Dict]:
"""Quantum operations that are not quantum primitives for the
backend, but are supported via specified decompositions.
This should be of the form
.. code-block:: python
{'operation_name': {'option1': val, 'option2': val,...}}
For each operation specified in the dictionary, the
:meth:`~Operation.decompose` method will be called during
:class:`Program` compilation, with keyword arguments
given by the dictionary value.
Returns:
dict[str, dict]: the quantum operations that are supported
by the backend via decomposition
"""
@property
def parameter_ranges(self) -> Dict[str, List[List[float]]]:
"""Allowed parameter ranges for supported quantum operations.
This property is optional.
Returns:
dict[str, list]: a dictionary mapping an allowed quantum operation
to a nested list of the form ``[[p0_min, p0_max], [p1_min, p0_max], ...]``.
where ``pi`` corresponds to the ``i`` th gate parameter.
"""
return dict()
@property
def graph(self):
"""The allowed circuit topology of the backend device as a directed
acyclic graph.
This property is optional; if arbitrary topologies are allowed by the device,
this will simply return ``None``.
Returns:
networkx.DiGraph: a directed acyclic graph
"""
if self.circuit is None:
return None
# returned DAG has all parameters set to 0
bb = blackbird.loads(self.circuit)
if bb.is_template():
params = bb.parameters
kwargs = {p: 0 for p in params}
# initialize the topology with all template
# parameters set to zero
topology = to_DiGraph(bb(**kwargs))
else:
topology = to_DiGraph(bb)
return topology
@property
def circuit(self):
"""The Blackbird circuit that will be accepted by the backend device.
This property is optional. If arbitrary topologies are allowed by the device,
**do not define this property**. In such a case, it will simply return ``None``.
    If the device expects a specific template for the received Blackbird
script, this method will return the serialized Blackbird circuit in string
form.
Returns:
Union[str, None]: Blackbird program or template representing the circuit
"""
return None
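# A minimal illustrative subclass (a hypothetical toy device, not one of the
# real Strawberry Fields backends) showing how the abstract API above can be
# satisfied; plain class attributes are enough to override the abstract
# properties:
class ToyDeviceSpecs(DeviceSpecs):
    modes = 2
    local = True
    remote = False
    interactive = True
    primitives = {"Sgate", "BSgate", "MeasureFock"}
    decompositions = {"Interferometer": {"mesh": "rectangular"}}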
| [
"[email protected]"
]
| |
1cbeaf068eba123dc4966e2c3b506aa29148b80b | 3ae62276c9aad8b9612d3073679b5cf3cb695e38 | /easyleetcode/leetcodes/Leetcode_105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py | 1485bb44b74ef3a8f62d1d7d1e19faff930fb29d | [
"Apache-2.0"
]
| permissive | gongtian1234/easy_leetcode | bc0b33c3c4f61d58a6111d76707903efe0510cb4 | d2b8eb5d2cafc71ee1ca633ce489c1a52bcc39ce | refs/heads/master | 2022-11-16T17:48:33.596752 | 2020-07-13T02:55:03 | 2020-07-13T02:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py |
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def buildTree(self, preorder, inorder):
n = len(inorder)
inOrderMap = {inorder[i]: i for i in range(n)}
return self.buildTreeUtil(preorder, inorder, inOrderMap, 0, n - 1, 0, n - 1)
def buildTreeUtil(self, preorder, inorder, inOrderMap, pStart, pEnd, iStart, iEnd):
if pStart > pEnd or iStart > iEnd:
return None
        # the root is always the first element of the preorder range
        root = TreeNode(preorder[pStart])
        # index of the root value in the inorder traversal
        rootIdx = inOrderMap[root.val]
        # left subtree: rootIdx - iStart gives the number of nodes on the left
        # preorder range (pStart + 1, pStart + rootIdx - iStart): new left start, new left end (start + left-node count)
        root.left = self.buildTreeUtil(preorder, inorder, inOrderMap, pStart + 1, pStart + rootIdx - iStart, iStart,
                                       rootIdx - 1)
        # right subtree
        # preorder start (pStart + rootIdx - iStart + 1): the element right after the left range's end, i.e. the start of the right part
        root.right = self.buildTreeUtil(preorder, inorder, inOrderMap, pStart + rootIdx - iStart + 1, pEnd, rootIdx + 1,
                                        iEnd)
return root
def buildTree2(self, preorder, inorder):
if not preorder:
return None
        # preorder: root, left, right
        # inorder: left, root, right
        x = preorder.pop(0)
        node = TreeNode(x)
        i = inorder.index(x)
        # after preorder.pop(0), preorder holds the left then right parts; the first i items are the left part
node.left = self.buildTree2(preorder[:i], inorder[:i])
node.right = self.buildTree2(preorder[i:], inorder[i + 1:])
return node
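# Quick sanity check (the classic LeetCode example):
if __name__ == '__main__':
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    assert (root.val, root.left.val, root.right.val) == (3, 9, 20)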
| [
"[email protected]"
]
| |
90904f213074558cd90e413783c1a851ce07f3da | 55550afe1c18aacba9a481c690755cb7395d35f1 | /Week_01/G20190343020019/LeetCode_26_0019.py | 84af779cd76ff44c31d90633db3c8cc0cfbca318 | []
| no_license | algorithm005-class02/algorithm005-class02 | eb5c0865fbb2c58362fddcd4fc8f8b9d02bb208c | 1a1abf5aabdd23755769efaa6c33579bc5b0917b | refs/heads/master | 2020-09-22T11:48:20.613692 | 2020-03-02T05:31:11 | 2020-03-02T05:31:11 | 225,177,649 | 45 | 153 | null | 2020-03-02T05:31:13 | 2019-12-01T14:47:06 | Java | UTF-8 | Python | false | false | 366 | py | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
size = len(nums)
if size == 0:
return 0
j, pre = 1, nums[0]
for i in range(1, size):
if nums[i] != pre:
if i != j:
nums[j] = nums[i]
j += 1
pre = nums[i]
return j | [
"[email protected]"
]
| |
0f03a302c230541b088a7d1a1fe72c11c2e23cb3 | 473035074bd546694d5e3dbe6decb900ba79e034 | /traffic fluid simulator/backend/env_4_6/model/ExportData.py | e92e4e0140bedbeb4290ef2eb08d29b3a966c9a7 | []
| no_license | johny1614/magazyn | 35424203036191fb255c410412c195c8f41f0ba5 | a170fea3aceb20f59716a7b5088ccdcb6eea472f | refs/heads/master | 2022-03-26T01:10:04.472374 | 2019-09-19T16:34:22 | 2019-09-19T16:34:22 | 171,033,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import json
from typing import List
import attr
from numpy.core.multiarray import ndarray
from model.Net import Net
@attr.s(auto_attribs=True)
class ExportData:
learningMethod: str
learningEpochs: int
nets: List[Net]
netName: str
densityName: str
def __attrs_post_init__(self):
for net_index in range(len(self.nets)):
if isinstance(self.nets[net_index].densities, ndarray):
self.nets[net_index].densities = self.nets[net_index].densities.tolist()
if isinstance(self.nets[net_index].lights, ndarray):
self.nets[net_index].lights = self.nets[net_index].lights.tolist()
def saveToJson(self):
# self.shift_lights()
dicSelf = attr.asdict(self)
try:
jsonData = json.dumps(dicSelf)
outfile = open('../../front/src/assets/densities/' + self.netName + '_' + self.densityName + '.json', 'w')
except:
outfile = open('../../../front/src/assets/densities/' + self.netName + '_' + self.densityName + '.json',
'w')
outfile.write(str(jsonData))
outfile.close()
def shift(lista, n):
return lista[n:] + lista[:n]
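# Illustrative use (argument values are hypothetical; the output path is the
# frontend assets folder the try/except above resolves):
#   ExportData('Q-learning', 100, nets, 'env_4_6', 'uniform').saveToJson()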
| [
"[email protected]"
]
| |
7f7a6e5e0d461fb8690c5fcb3502db66bced6184 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/ba/b0bdbe08c6f800161174a93fd5908e78 | d28f1b3c11c1df4b08726984489283823c27df6f | []
| no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class g_table(object):
    pass  # snapshot truncated here; pass keeps the stub importable
| [
"[email protected]"
]
| ||
1f521210b944fba4b071cab3142d9a054dcff27a | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Hackerrank/Algorithms/The_Time_in_Words.py3 | 6a108c9d2715cc2b096f03b911b89a2ab181b31e | []
| no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 983 | py3 | #!/bin/python3
import sys
table = {1:'one',2:'two',3:'three',4:'four',
5:'five',6:'six',7:'seven',8:'eight',
9:'nine',10:'ten',11:'eleven',12:'twelve',
13:'thirteen',14:'fourteen',15:'fifteen',
16:'sixteen',17:'seventeen',18:'eighteen',
19:'nineteen',20:'twenty',30:'thirty',40:'forty',
50:'fifty'}
def handle(n):
global table
if n <= 20:
return table[n]
if n <= 100 and n % 10 == 0:
return table[n]
return table[n // 10 * 10] + ' ' + table[n - n // 10 * 10]
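# e.g. handle(28) -> 'twenty eight', handle(40) -> 'forty'; only values
# 1..29 ever reach it in this program, so the table can stop at fifty.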
h = int(input().strip())
m = int(input().strip())
if m == 0:
print(table[h] + " o' clock")
elif m == 30:
print("half past " + table[h])
elif m == 45:
print("quarter to " + table[h + 1])
elif m == 15:
print("quarter past " + table[h])
elif m > 30:
    # special-case m == 59 so we don't print "one minutes to"
    if m == 59:
        print("one minute to " + table[h + 1])
    else:
        print(handle(60 - m) + " minutes to " + table[h + 1])
elif m == 1:
print("one minute past " + table[h])
else:
print(handle(m) + " minutes past " + table[h])
| [
"[email protected]"
]
| |
889178905a0c94d6f492f3c62559edfd6bc207fe | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_quill.py | 606bcee98c72edaf39b621ab0b0cf03cce527925 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py |
#class header
class _QUILL():
def __init__(self,):
self.name = "QUILL"
self.definitions = [u'any of the long sharp pointed hairs on the body of a porcupine', u"a pen made from a bird's feather, used in the past"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
1d41eb6ae4fc12acb15c60378e1c758be087de68 | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_ZCGG/ZX_ZCGG_SJS_SJSGG.py | 88a9dab2b0df420ba808ab19e9438d674c65ae30 | [
"Apache-2.0"
]
| permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
if data.get('URL_')[-3:] == 'pdf':
data['PDF_'] = data.get('URL_')
    # drop an empty PDF_ field without raising KeyError when it is absent
    if not data.get('PDF_'):
        data.pop('PDF_', None)
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_ZCGG_SJS_SJSGG", mongo_collection="ZX_ZCGG")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"[email protected]"
]
| |
05488b74e06f143a147e1b5d9892a1eb406e1b21 | a08fc91ecafa7f2b6c8aed7e1ceb33822d4caa49 | /python/algorithms/tree/segmenttree.py | aec0172273542d5f029e5d57384084e6aba33d5d | []
| no_license | bryand1/snippets | 1fcdd4b67809aa27b58e1239d5cca22cfb962f3d | f779bf147c420996613b0778e243154cd750c3dd | refs/heads/master | 2023-01-23T18:47:07.389246 | 2020-12-31T20:10:13 | 2020-12-31T20:10:13 | 138,767,383 | 0 | 0 | null | 2023-01-19T13:02:49 | 2018-06-26T16:56:15 | Python | UTF-8 | Python | false | false | 853 | py | from sys import maxsize
# sentinel for "empty" ranges in a max-query tree (uses the imported maxsize)
minsize = -maxsize
def maxquery(segtree, qlo, qhi, lo, hi, pos):
if qlo <= lo and qhi >= hi:
return segtree[pos]
if qlo > hi or qhi < lo:
return minsize
mid = (lo + hi) // 2
return max(
maxquery(segtree, qlo, qhi, lo, mid, 2 * pos + 1),
maxquery(segtree, qlo, qhi, mid + 1, hi, 2 * pos + 2))
def construct(arr, segtree, lo, hi, pos):
if lo == hi:
segtree[pos] = arr[lo]
return
mid = (lo + hi) // 2
construct(arr, segtree, lo, mid, 2 * pos + 1)
construct(arr, segtree, mid + 1, hi, 2 * pos + 2)
segtree[pos] = max(segtree[2 * pos + 1], segtree[2 * pos + 2])
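# A point-update sketch under the same layout as construct() above
# (illustrative; not part of the original snippet):
def update(arr, segtree, lo, hi, pos, idx, value):
    if lo == hi:
        arr[idx] = value
        segtree[pos] = value
        return
    mid = (lo + hi) // 2
    if idx <= mid:
        update(arr, segtree, lo, mid, 2 * pos + 1, idx, value)
    else:
        update(arr, segtree, mid + 1, hi, 2 * pos + 2, idx, value)
    # recompute this node from its two children
    segtree[pos] = max(segtree[2 * pos + 1], segtree[2 * pos + 2])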
if __name__ == '__main__':
A = [-1, 0, 3, 2, 5]
tree = [minsize] * 2 * (len(A))
construct(A, tree, 0, len(A) - 1, 0)
print(maxquery(tree, 2, 4, 0, 4, 0))
print(tree)
| [
"[email protected]"
]
| |
7cf482daf8a47cd604c5fa2b83bb75aa350f97dd | aee5f372ba1b5fbb1c8acf6080c4c86ae195c83f | /cern-stubs/lsa/client/rest/cern/api/v1/feign/__init__.pyi | 96054066f4590355d07f8781d938bb4307bcfd26 | []
| no_license | rdemaria/pjlsa | 25221ae4a4b6a4abed737a41a4cafe7376e8829f | e64589ab2203338db4253fbc05ff5131142dfd5f | refs/heads/master | 2022-09-03T13:18:05.290012 | 2022-08-16T13:45:57 | 2022-08-16T13:45:57 | 51,926,309 | 1 | 5 | null | 2019-07-11T11:50:44 | 2016-02-17T13:56:40 | Python | UTF-8 | Python | false | false | 5,523 | pyi | import cern.lsa.client.rest.api.v1.dto
import cern.lsa.client.rest.api.v1.feign
import cern.lsa.domain.cern.settings
import java.util
import typing
class IncaFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
"""
public interface IncaFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
"""
def findIncaPropertyFieldInfos(self, incaPropertyFieldInfosRequestDto: cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfosRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]: ...
def saveIncaPropertyFieldInfos(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]]) -> None: ...
class Urls:
FIND_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
SAVE_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
class ParameterFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
"""
public interface ParameterFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
"""
def addParametersToParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
def deleteCriticalProperty(self, long: int, string: str) -> None: ...
def deleteParameterGroup(self, long: int) -> None: ...
def deleteParameterTypes(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
def deleteParameters(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
def findAllHierarchies(self) -> java.util.List[str]: ...
def findCommonHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
def findHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
def findMakeRuleForParameterRelation(self, long: int, long2: int) -> cern.lsa.client.rest.api.v1.dto.MakeRuleConfigInfoDto: ...
def findParameterGroupsByAccelerator(self, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterGroupDto]: ...
def findParameterTrees(self, parameterTreesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTreesRequestDto) -> cern.lsa.client.rest.api.v1.dto.ParameterTreeDataDto: ...
def findParameterTypes(self, parameterTypesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTypesRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]: ...
def findParameters(self, parametersRequestDto: cern.lsa.client.rest.api.v1.dto.ParametersRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
def findParametersWithSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
def findParametersWithoutSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
def getMaxDelta(self, long: int) -> float: ...
def removeParametersFromParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
def saveCriticalProperty(self, propertyAndDeviceDto: cern.lsa.client.rest.api.v1.dto.PropertyAndDeviceDto) -> None: ...
def saveParameterGroup(self, parameterGroupDto: cern.lsa.client.rest.api.v1.dto.ParameterGroupDto) -> None: ...
def saveParameterTypes(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]]) -> None: ...
def saveParameters(self, list: java.util.List[cern.lsa.client.rest.api.v1.dto.ParameterAttributesDto]) -> None: ...
class Urls:
FIND_PARAMETERS_URL: typing.ClassVar[str] = ...
PARAMETERS_URL: typing.ClassVar[str] = ...
PARAMETER_TYPES_URL: typing.ClassVar[str] = ...
FIND_ALL_HIERARCHIES_URL: typing.ClassVar[str] = ...
FIND_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
FIND_COMMON_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
SAVE_PARAMETER_RELATIONS: typing.ClassVar[str] = ...
CRITICAL_PROPERTIES_URL: typing.ClassVar[str] = ...
PARAMETER_GROUPS_URL: typing.ClassVar[str] = ...
PARAMETER_GROUP_BY_ID_URL: typing.ClassVar[str] = ...
PARAMETER_GROUP_PARAMETERS_URL: typing.ClassVar[str] = ...
PARAMETER_RELATION_MAKE_RULE_URL: typing.ClassVar[str] = ...
FIND_PARAMETER_TREES_URL: typing.ClassVar[str] = ...
PARAMETERS_WITHOUT_SETTINGS_URL: typing.ClassVar[str] = ...
PARAMETERS_WITH_SETTINGS_URL: typing.ClassVar[str] = ...
PARAMETER_MAX_DELTA_URL: typing.ClassVar[str] = ...
class ReDriveSettingsFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
"""
public interface ReDriveSettingsFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
"""
def reDriveDeviceSettings(self, reDriveRequest: cern.lsa.domain.cern.settings.ReDriveRequest) -> cern.lsa.domain.cern.settings.ReDriveResponse: ...
class Urls:
REDRIVE_DEVICES: typing.ClassVar[str] = ...
class __module_protocol__(typing.Protocol):
# A module protocol which reflects the result of ``jp.JPackage("cern.lsa.client.rest.cern.api.v1.feign")``.
IncaFeignService: typing.Type[IncaFeignService]
ParameterFeignService: typing.Type[ParameterFeignService]
ReDriveSettingsFeignService: typing.Type[ReDriveSettingsFeignService]
| [
"[email protected]"
]
| |
9a36090e137b6c733f445cb587a0720eccd62adb | 3bb70650b4b83e4653dcc18c8233c106c7a5611a | /sale_shortcut/shortcut_getter.py | 44c35030e8a22f587ada781c66cd6059851922bb | []
| no_license | khanhlu2013/pos_connect_code | 48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef | fdf70de858c10b175832af31ecc0cf770d028396 | refs/heads/master | 2023-04-08T02:35:46.181265 | 2016-10-18T21:12:51 | 2016-10-18T21:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from sale_shortcut.models import Parent,Child
def get_shortcut(id):
return Parent.objects.prefetch_related('child_set').get(pk=id)
def get_shorcut_lst(store_id):
return Parent.objects.filter(store_id=store_id).prefetch_related('child_set') | [
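# Example usage (hypothetical caller):
#   parent = get_shortcut(42)
#   children = parent.child_set.all()  # served from the prefetch cache, no extra query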
"[email protected]"
]
| |
f3b575a591741af71ff96affecc01aa7f7b1eeef | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/109/usersdata/188/63757/submittedfiles/av2_p3_civil.py | a916fc1a714178afe44fb0829312c99cd7cc0417 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf-8 -*-
import numpy as np
def linhas (a,m):
soma=0
for i in range(0,m.shape[1],1):
soma=soma+m[a,i]
return(soma)
def colunas (a,m):
soma=0
for i in range(0,m.shape[0],1):
soma=soma+m[i,a]
return(soma)
h=int(input("Digite a dimensão da matriz:"))
x=int(input("Digite x:"))
y=int(input("Digite y:"))
q=np.zeros((h,h))
print(q)
for i in range(0,q.shape[0],1):
for j in range(0,q.shape[1],1):
q[i,j]=float(input("Digite o termo:"))
b=(linhas(x,q)+colunas(y,q)-(2*q[x,y]))
printJ
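# Worked check (hypothetical run): for h=2, q=[[1, 2], [3, 4]], x=0, y=1:
#   linhas(0, q)=3, colunas(1, q)=6, q[0, 1]=2, so b = 3 + 6 - 4 = 5.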
| [
"[email protected]"
]
|