Dataset schema: each row describes one source file. The `content` column holds the full file text; the remaining columns carry repository and GitHub metadata. Columns marked nullable may be empty (⌀).

| Column | Dtype | Observed range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |

The example rows below follow this column order.
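As a hedged illustration of how rows with this schema might be consumed, the sketch below streams records and keeps only permissively licensed, non-generated Python files under a size cap. It assumes each row arrives as a plain dict keyed by the column names above and that the corpus is loadable through the Hugging Face `datasets` API; the dataset identifier used here is a placeholder, not the real name.

from datasets import load_dataset

# "user/code-corpus" is a placeholder, not the actual dataset identifier.
rows = load_dataset("user/code-corpus", split="train", streaming=True)

for row in rows:
    if (row["license_type"] == "permissive"
            and row["language"] == "Python"
            and not row["is_generated"]
            and row["length_bytes"] < 100_000):
        print(row["repo_name"], row["path"], row["length_bytes"])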
177ec5ab1a07b52261f471748deaed236f5d9924 | 99e88bd6c2bb50e38f5bb68f0d5242def0442f7f | /tests/test_struct.py | a378da250be0b66d31d8f26b5b0d02c745c21343 | []
| no_license | vahtras/vb | ea1bb59a8e6125203d9498f2808a7bf8e6ad5916 | 425402e619aab7d69d7f5d3971439d532d36de0b | refs/heads/master | 2021-01-15T15:42:46.158363 | 2016-10-13T09:06:22 | 2016-10-13T09:06:22 | 47,419,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | import unittest
from util.full import init
from findifftool.core import clgrad, DELTA
from . import vb
from vb.core import *
class StructTest(unittest.TestCase):
def setUp(self):
Nod.S = init([[1.0, 0.1], [0.1, 1.0]])
Nod.C = init([[0.7, 0.7], [0.7, -0.7]])
self.alpha0 = Nod([0], [])
self.alpha1 = Nod([1], [])
self.beta0 = Nod([], [0])
self.beta1 = Nod([], [1])
self.ab00 = Nod([0], [0])
def tearDown(self):
pass
def test_structure_coefficients_consistent(self):
with self.assertRaises(StructError):
struct = Structure([Nod([0], [0])], [])
def test_structure_output(self):
struct_a = Structure([self.alpha0], [1.0])
self.assertEqual(str(struct_a), "0.963143 (0|)")
def test_structure_ms(self):
with self.assertRaises(StructError):
struct = Structure([self.alpha0, self.beta0], [1, 1])
def test_normalized(self):
ab = Structure([self.ab00], [1.0])
self.assertAlmostEqual(ab*ab, 1.0)
def test_keep_unnormalized(self):
ab = Structure([self.ab00], [1.0], normalize=False)
self.assertAlmostEqual(ab*ab, 1.162084)
def test_norm_gradient(self):
ab = Structure([self.ab00], [1.0])
#assert False
num_diff = clgrad(ab, 'overlap', 'C')()
ana_diff = ab.overlap_gradient()
np.testing.assert_allclose(ana_diff, num_diff, rtol=DELTA, atol=DELTA)
def test_struct_mo_propagated(self):
ab = Structure([self.ab00], [1.0])
ab.C = init([1,2,3,4])
self.assertEqual(id(ab.C), id(ab.nods[0].C))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
0bfa8bd26f28131255750f7fceb64d05ccfe39e6 | 360c777a2b77be466b1cf7c8fd74d6fd04f56b55 | /migrations/versions/1e5cd35569af_.py | eabfd29c2469753fa35c275467e477d5e0946760 | [
"MIT"
]
| permissive | hreeder/nexus-auth | 790a3b2623ddf443138a4b0f0af1380dbc4db8ae | 8d51aef01647e32ba4a284f02de73a2caad7cf49 | refs/heads/master | 2021-01-10T10:08:37.190558 | 2016-02-29T12:27:21 | 2016-02-29T12:27:21 | 52,789,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | """empty message
Revision ID: 1e5cd35569af
Revises: 51d27a60b822
Create Date: 2014-06-24 22:26:10.421081
"""
# revision identifiers, used by Alembic.
revision = '1e5cd35569af'
down_revision = '51d27a60b822'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('character_keys',
sa.Column('character_id', sa.Integer(), nullable=True),
sa.Column('key_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['character_id'], ['character.id'], ),
sa.ForeignKeyConstraint(['key_id'], ['api_key.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('character_keys')
### end Alembic commands ###
| [
"[email protected]"
]
| |
4bbfa20fd19cd5b5194cd16b2af4adc22c20a60d | ded13e921c8365c6113911a5834969ec3d33f989 | /082/Remove Duplicates from Sorted List II.py | 2dd27f6dcafbf95d6357d10e5156dc9fc2abbec7 | []
| no_license | ArrayZoneYour/LeetCode | b7b785ef0907640623e5ab8eec1b8b0a9d0024d8 | d09f56d4fef859ca4749dc753d869828f5de901f | refs/heads/master | 2021-04-26T23:03:10.026205 | 2018-05-09T15:49:08 | 2018-05-09T15:49:08 | 123,922,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | # /usr/bin/python
# coding: utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        Given 1->2->3->3->4->4->5, return 1->2->5.
        Given 1->1->1->2->3, return 2->3.
        """
        dummyHead = ListNode(0)
        dummyHead.next = head
        # Walk the list with a current pointer
        if not head:
            return head
        cur = head
        if cur.next is None:
            return head
        # dummyHead serves as the previous element
        pre = dummyHead
        while cur is not None and cur.next is not None:
            # Check whether the next element has the same value as the current one
            if cur.next.val == cur.val:
                # Same value: keep advancing cur.next past the duplicates
                # until the value changes or the list ends
                while cur.next is not None and cur.next.val == cur.val:
                    cur.next = cur.next.next
                # Link the previous element past the whole duplicated run
                pre.next = cur.next
            # The next element's value differs from the current one
            else:
                pre = pre.next
            # Advance the current pointer
            cur = cur.next
        # Once cur or cur.next is None, return dummyHead.next
        return dummyHead.next
node1 = ListNode(1)
node2 = ListNode(1)
node3 = ListNode(2)
node4 = ListNode(2)
node1.next = node2
node2.next = node3
node3.next = node4
Solution().deleteDuplicates(node1)
print() | [
"[email protected]"
]
| |
1ae02112a6b34c47b25fc53f5eeae25ccc13eca9 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/071_Get_Movie_Summary/GetMovieSummary.py | a84b1602f2c2ed7a0ca49bbc1cb1d69b1c6bb938 | []
| no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # GetMovieSummary.py
# Simple app using TheMovieDb.org to retrieve movie information
# pip install tmdbsimple
#
# 2020-07-15 PV
# 2023-01-03 PV Added .mp4 suffix
import os
import tmdbsimple as tmdb # type: ignore
from typing import Iterable
# Full path of every file under a root directory
def get_all_files(path: str) -> Iterable[str]:
for root, subs, files in os.walk(path):
for file in files:
yield os.path.join(root, file)
tmdb.API_KEY = 'ecdd67089c844d17e9f72a053609ed9f'
search = tmdb.Search()
processed = []
source = r'V:\Films\# A_Trier'
for fullpath in get_all_files(source):
path, file = os.path.split(fullpath)
basename, ext = os.path.splitext(file)
if ext.lower() in ['.mkv', '.avi', '.mp4']:
segments = basename.split(' - ')
title = segments[0]
s2 = title.split(' (')
title = s2[0]
if not title in processed:
processed.append(title)
print(title)
textfile = os.path.join(path, title+'.txt')
if not os.path.exists(textfile):
with open(textfile, mode='w', encoding='utf-8') as out:
response = search.movie(query=title)
s:dict
for s in search.results:
out.write(s['title']+'\n')
out.write(s.get('release_date','')+'\n')
out.write(s['overview']+'\n\n')
# #response = search.movie(query='A Few Good Men')
# #response = search.movie(query='The Black Hole')
# response = search.movie(query='La vie de Brian')
# for s in search.results:
# print(s['title'], s['release_date'], s['overview'])
# #print(s['title'], s['id'], s['release_date'], s['popularity'])
| [
"[email protected]"
]
| |
93df35b7e5445736b838d299bc73a4b524517d1e | 8ab6330e149fb4bcd303f3ca12b3e10bb08eda3e | /RPA-python/rpa_basic/excel/9_move.py | 92be2254f0d09f796407a904a104807222dc4477 | []
| no_license | jongin1004/python | 73e72b9187a0a707777e1474f5bb48f33a603e8f | 997e6bf59c71943d65447d11729a225b8e323a16 | refs/heads/main | 2023-08-05T02:44:01.673614 | 2023-08-02T07:51:56 | 2023-08-02T07:51:56 | 300,539,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from openpyxl import load_workbook
wb = load_workbook('sample.xlsx')
ws = wb.active
# Columns: number, English, math -> number, Korean, English, math
# Move the values of the target range within the same row (rows=0),
# shifting the columns one step to the right (cols=1)
# ws.move_range("B1:C11", rows=0, cols=1)
# ws['B1'].value = '국어'
# A negative offset moves in the opposite direction
ws.move_range("C1:C11", rows=5, cols=-1)
wb.save('sample_modify.xlsx')
wb.close()
| [
"[email protected]"
]
| |
b20d1fbac2a2d855a426c601759521a07a9efb5c | 88620a5d4526493112c157cd7a80b160e794f956 | /testgames.py | 24bae21126ca311af9db144921e21f6d2c8abd0a | []
| no_license | sweettea/python-airmash | 6da7b225642ca188e9a3f0e33895bf91de286c4d | 3640fc96d46cce5360b4a7a866eccabea3616de6 | refs/heads/master | 2021-05-14T18:40:00.661754 | 2018-01-07T02:38:09 | 2018-01-07T02:38:09 | 116,082,157 | 0 | 1 | null | 2018-01-03T02:39:24 | 2018-01-03T02:39:24 | null | UTF-8 | Python | false | false | 441 | py | from airmash import games
url = games.get_url('eu', 'ffa1')
print("Game URL: {}".format(url))
data = games.get_data()
for region_id in data['regions']:
region = data['regions'][region_id]
print('\nRegion: {} ({})'.format(region['name'], region_id))
for game_id in region['games']:
game = region['games'][game_id]
print('{}, {} players - URL: {}'.format(game['name'], game['players'], game['url']))
print('') | [
"[email protected]"
]
| |
6f5fc2de343a2b0e2c06629972e900ea90911b5c | fda201d7cca34e216a17d97665c8457c72e66cb2 | /voting/tests/factories.py | effa73194a92f887e852dba761788d9759e92dba | [
"Apache-2.0"
]
| permissive | SmartElect/SmartElect | 94ab192beb32320e9ae8ae222f90ee531037c1c6 | d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8 | refs/heads/develop | 2020-12-26T04:04:42.753741 | 2019-07-17T17:08:25 | 2019-07-17T17:08:25 | 44,687,036 | 24 | 12 | Apache-2.0 | 2020-06-06T07:16:48 | 2015-10-21T15:47:07 | Python | UTF-8 | Python | false | false | 1,479 | py | import random
from datetime import timedelta
from factory import DjangoModelFactory, SubFactory, Sequence
from factory.declarations import LazyAttribute
from factory.fuzzy import FuzzyDateTime
from django.utils.timezone import now
from voting.models import Ballot, Candidate, Election, RegistrationPeriod
start_dt = now()
class ElectionFactory(DjangoModelFactory):
class Meta:
model = Election
name_english = Sequence(lambda n: "Election %d" % n)
name_arabic = Sequence(lambda n: "Election %d (ar)" % n)
polling_start_time = FuzzyDateTime(start_dt=start_dt - timedelta(days=2),
end_dt=start_dt - timedelta(days=1))
polling_end_time = FuzzyDateTime(start_dt=start_dt + timedelta(days=2),
end_dt=start_dt + timedelta(days=3))
class BallotFactory(DjangoModelFactory):
class Meta:
model = Ballot
ballot_type = LazyAttribute(lambda o: random.choice(Ballot.VALID_RACE_TYPES))
election = SubFactory(ElectionFactory)
internal_ballot_number = Sequence(int)
class CandidateFactory(DjangoModelFactory):
class Meta:
model = Candidate
ballot = SubFactory(BallotFactory)
name_english = Sequence(lambda n: "Candidate %d" % n)
name_arabic = Sequence(lambda n: "Candidate %d (ar)" % n)
candidate_number = Sequence(int)
class RegistrationPeriodFactory(DjangoModelFactory):
class Meta:
model = RegistrationPeriod
| [
"[email protected]"
]
| |
d428a0c2923c810ba7fe622e0a9c3497156c3348 | 2979d177a9388b25a84179127a06728b44955268 | /First-Year/CA117-Labs/Lab3/reversecomp_0311.py | 597ac50e665839a62aba123af4dcd35b3601a859 | []
| no_license | BrendanSimms8898/Python | ccb2b8284aa4e187ab89d4fc34b7fe7a980950cc | 920c39fe02a26e7b131f299d5d082d13021df78b | refs/heads/master | 2023-08-16T00:58:28.742295 | 2021-10-07T12:42:14 | 2021-10-07T12:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python3
import sys
def revcom(words):
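    # Keep words of length >= 5 whose reversal also appears in the word list.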
return([word for word in words if len(word) >= 5 and word[::-1] in words])
def main():
words = [word.strip() for word in sys.stdin]
print(revcom(words))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
daf1e49afe7cc6634eb7d0c2bc13eb678c4fa7a3 | 07527179eef5debf8932e6f8ba52742fb078c8ab | /styleguide_example/users/apis.py | 56c2e717e136029ddcef47ba199949afd095e169 | [
"MIT"
]
| permissive | brunofvpp/Styleguide-Example | 5389c0ca757c2e3d0e836f3e0b3457bc4ba9960d | 0514a7dd534b1eea2a0baa5e29d05a51ff8bc41c | refs/heads/master | 2023-08-15T04:23:09.642185 | 2021-10-04T09:38:09 | 2021-10-04T09:38:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | from rest_framework.views import APIView
from rest_framework import serializers
from styleguide_example.api.mixins import ApiErrorsMixin
from styleguide_example.api.pagination import get_paginated_response, LimitOffsetPagination
from styleguide_example.users.selectors import user_list
from styleguide_example.users.models import BaseUser
# TODO: When JWT is resolved, add authenticated version
class UserListApi(ApiErrorsMixin, APIView):
class Pagination(LimitOffsetPagination):
default_limit = 1
class FilterSerializer(serializers.Serializer):
id = serializers.IntegerField(required=False)
# Important: If we use BooleanField, it will default to False
is_admin = serializers.NullBooleanField(required=False)
email = serializers.EmailField(required=False)
class OutputSerializer(serializers.ModelSerializer):
class Meta:
model = BaseUser
fields = (
'id',
'email',
'is_admin'
)
def get(self, request):
# Make sure the filters are valid, if passed
filters_serializer = self.FilterSerializer(data=request.query_params)
filters_serializer.is_valid(raise_exception=True)
users = user_list(filters=filters_serializer.validated_data)
return get_paginated_response(
pagination_class=self.Pagination,
serializer_class=self.OutputSerializer,
queryset=users,
request=request,
view=self
)
| [
"[email protected]"
]
| |
cc62bcc7205e8c86cefdb8329f467b67aa8ad039 | 9545652800884f0e54fe6595d8634c29ea4827a2 | /模拟面试/leetCode_168_串联字符串的最大长度.py | 4d63432852efd76698194bbf3a2a16c37ecacc49 | []
| no_license | challeger/leetCode | 662d9f600a40fd8970568679656f6911a6fdfb05 | d75c35b6f8ab33c158de7fa977ab0b16dac4fc25 | refs/heads/master | 2023-01-13T07:34:42.464959 | 2020-11-13T02:40:31 | 2020-11-13T02:40:31 | 286,426,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | """
day: 2020-09-14
url: https://leetcode-cn.com/problems/maximum-length-of-a-concatenated-string-with-unique-characters/
Problem: Maximum Length of a Concatenated String with Unique Characters
Given an array of strings arr, a string s is formed by concatenating some subsequence of arr.
s is a feasible solution if every character in it appears exactly once.
Return the maximum length over all feasible solutions s.
Example:
    Input: arr = ["un","iq","ue"]
    Output: 4
Approach:
    Depth-first search over every subsequence, checking each candidate for repeated characters.
"""
from typing import List


class Solution:
    def maxLength(self, arr: List[str]) -> int:
        def is_repeat(s):
            return len(s) == len(set(s))

        res = 0
        n = len(arr)

        def dfs(index, path):
            nonlocal res
            if index >= n:
                res = max(res, len(path))
                return
            foo = path + arr[index]
            if is_repeat(foo):
                dfs(index + 1, foo)
            dfs(index + 1, path)

        dfs(0, '')
        return res
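

# Hedged usage sketch (added for illustration; not part of the original submission):
# the example from the problem statement above should evaluate to 4.
if __name__ == '__main__':
    print(Solution().maxLength(["un", "iq", "ue"]))  # expected: 4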
| [
"[email protected]"
]
| |
054c3c33e78838f6a9ba28c44196908020f21232 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02554/s866916139.py | 1d67b2a68376143ed1bd7e8e9e1004056e4ac21c | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | '''
Reference:
https://qiita.com/u2dayo/items/98917c94c89c77b9b3a1#c%E5%95%8F%E9%A1%8Cubiquity
'''
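# Inclusion-exclusion: count length-N digit strings that contain at least one 0
# and at least one 9 as 10^N (all strings) - 2*9^N (strings missing a 0 or a 9)
# + 8^N (strings missing both, which were subtracted twice).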
MOD = 10 ** 9 + 7
N = int(input())
ans = pow(10, N)
ans -= 2 * pow(9, N)
ans += pow(8, N)
ans %= MOD
print(ans)
| [
"[email protected]"
]
| |
34a096c61f612a6449607b8e24560f2e5a7200d2 | 93a7f579adc1882939a6ace69deefa4127c7c3cb | /code/chp12-scraping/selenium_form_submit.py | 5bd9167669e44eee13e7268d87a0dd337b1c45aa | []
| no_license | tschoi6712/dataWrangling | 251060bfc6c9075042d649e59a17662e0c92545c | 28df17afc706bb5ab3786d144615eb80957495b8 | refs/heads/master | 2022-12-08T17:09:56.194795 | 2019-09-24T07:19:48 | 2019-09-24T07:19:48 | 210,534,336 | 0 | 0 | null | 2022-12-07T23:33:32 | 2019-09-24T07:01:45 | HTML | UTF-8 | Python | false | false | 698 | py | from selenium import webdriver
from time import sleep
#browser = webdriver.Firefox()
browser = webdriver.Chrome('C:/chromedriver/chromedriver.exe')
browser.get('http://google.com')
inputs = browser.find_elements_by_css_selector('form input')
for i in inputs:
if i.is_displayed():
search_bar = i
break
search_bar.send_keys('web scraping with python')
search_button = browser.find_element_by_css_selector('form button')
search_button.click()
browser.implicitly_wait(10)
results = browser.find_elements_by_css_selector('div h3 a')
for r in results:
action = webdriver.ActionChains(browser)
action.move_to_element(r)
action.perform()
sleep(2)
browser.quit()
| [
"[email protected]"
]
| |
c00c986d9c0b1b0a62d76888e0ac40192dd9453d | 210e88536cd2a917fb66010ff69f6710b2261e8e | /environment/admin.py | 8c6af3c1d35fcc6084684ab6ba65039f0c4c7fd7 | []
| no_license | tlam/multiverse_sidekick | e5ef1fa908c6fd3fee4d816aa1776b7243075e8c | 9211e4cb36611088420a79666f0c40ecb0a6b645 | refs/heads/master | 2020-04-17T08:30:28.396623 | 2015-08-27T03:36:47 | 2015-08-27T03:36:47 | 9,423,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.contrib import admin
from environment.models import Environment
admin.site.register(Environment)
| [
"[email protected]"
]
| |
2359b782828f9d49623622307e680a93981e9e5f | b66c3ec94db4f6ced5d7fb3099c7af1227ea8c02 | /unit/test_zuoye1_2.py | c7bebbd30ec3ead23ed23d4c512451b5191ce0d3 | []
| no_license | moshang1003/hogwartsSDET11 | 0c6b6a0bc7caedaade17b9b39607cefdf4fde5e4 | e0eb30826a8f23b08c964c805dfe2cd5ae503826 | refs/heads/master | 2021-03-29T09:02:47.045166 | 2020-03-21T10:46:16 | 2020-03-21T10:46:16 | 247,939,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | import os
import time
import allure
import pytest
# Integer-like test data expected to return a result normally; format: (expected, a, b, title)
data_int = [
    (5, 10, 2, 'normal integers'),
    (1024, 4294967296, 4194304, 'large integers'),
    (-6.25, -25, 4, 'integers including a negative'),
    (0, 0, 999999, 'dividend of zero'),
    (1.25 - 0.25j, 3 + 2j, 2 + 2j, 'complex numbers')
]
# Float-like test data expected to return a result normally; format: (expected, a, b, precision, title)
data_float = [
    (5, 10.5, 2.1, 0, 'floats whose quotient is an integer'),
    (3.38709677, 10.5, 3.1, 0.00000001, 'floats whose quotient is a non-terminating float'),
    (3.6, 7.92, 2.2, 0.00000001, 'floats whose quotient is a terminating float'),
    (10, 10, True, 0, 'boolean data - True'),
    (121.91780821, 89e-5, 73e-7, 1e-8, 'scientific-notation data')
]
# Test data expected to raise an error instead of returning; format: (expected error, a, b, title)
data_error = [
    ('ZeroDivisionError', 10, 0, 'division by zero'),
    ('TypeError', 10, {1, 2}, 'set data'),
    ('TypeError', 10, {1: 2}, 'dict data'),
    ('TypeError', 10, (1, 2), 'tuple data'),
    ('TypeError', 10, [1], 'list data'),
    ('TypeError', 10, 'a', 'string data'),
    ('ZeroDivisionError', 10, False, 'boolean data - False')
]
# Function under test
def div(a, b):
    return a / b
# Test cases for integer-like data expected to return a result
@allure.suite('integer data group')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,title', data_int)
def test_int_div(expected, a, b, title):
    assert expected == div(a, b)
# Test cases for float-like data expected to return a result
@allure.suite('float data group')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,precision,title', data_float)
def test_float_div(expected, a, b, precision, title):
    assert precision >= abs(div(a, b) - expected)  # compare floats against the given precision
    assert pytest.approx(expected) == div(a, b)  # pytest's built-in approximate comparison (default tolerance 1e-6)
# Test cases expected to raise an error
@allure.suite('error data group')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,title', data_error)
def test_error_div(expected, a, b, title):
    with pytest.raises(eval(expected)):
        div(a, b)
# Clean up old test data, then generate and display the report
if __name__ == "__main__":
    # Empty the allure_results folder to clear allure history
    for i in os.listdir(r'allure_results'): os.remove('allure_results/{}'.format(i))
    time.sleep(1)
    # Run the tests and save the results that allure needs
    os.system('pytest -v --alluredir=allure_results {}'.format(__file__))
    time.sleep(1)
    # Serve the test report with allure
    os.system(r'allure serve allure_results')
| [
"[email protected]"
]
| |
9ffd2532080f8ef3fdc3d8345d8fdf308689efd5 | 0e8d49afd0e35510d8fa6901cf216896604240d8 | /lib/pyfrc/tests/docstring_test.py | 02ef66ff67747fdf692f6f5d0fb08d2fa37ddd6b | [
"MIT"
]
| permissive | ThunderDogs5613/pyfrc | 3878a3d887d7adcb957128333ee71fc874c56f2b | d8e76a9284690f71ea7fab7d2aa9022cb6eec27d | refs/heads/master | 2021-08-29T14:21:13.124227 | 2017-12-04T05:46:40 | 2017-12-04T05:46:40 | 114,410,477 | 1 | 0 | null | 2017-12-15T20:55:31 | 2017-12-15T20:55:30 | null | UTF-8 | Python | false | false | 4,104 | py |
import inspect
import os
import re
import sys
# if you want to be really pedantic, enforce sphinx docstrings. Ha.
pedantic_docstrings = True
# regex to use to detect the sphinx docstrings
param_re = re.compile("^:param (\S+?):\s*(.+)$")
def ignore_object(o, robot_path):
'''Returns true if the object can be ignored'''
if inspect.isbuiltin(o):
return True
try:
src = inspect.getsourcefile(o)
except TypeError:
return True
return src is None or not os.path.abspath(src).startswith(robot_path)
def print_fn_err(msg, parent, fn, errors):
if inspect.isclass(parent):
name = '%s.%s' % (parent.__name__, fn.__name__)
else:
name = '%s' % fn.__name__
err = "ERROR: %s '%s()'\n-> See %s:%s" % (msg, name,
inspect.getsourcefile(fn),
inspect.getsourcelines(fn)[1])
print(err)
errors.append(err)
def check_function(parent, fn, errors):
doc = inspect.getdoc(fn)
if doc is None:
print_fn_err('No docstring for', parent, fn, errors)
elif pedantic_docstrings:
# find the list of parameters
args, varargs, keywords, defaults = inspect.getargspec(fn)
if len(args) > 0 and args[0] == 'self':
del args[0]
if varargs is not None:
args.append(varargs)
if keywords is not None:
args.append(keywords)
params = []
for line in doc.splitlines():
match = param_re.match(line)
if not match:
continue
arg = match.group(1)
if arg not in args:
print_fn_err("Param '%s' is documented but isn't a parameter for" % arg, parent, fn, errors)
params.append(arg)
if len(params) != len(args):
diff = set(args).difference(params)
if len(diff) == 1:
print_fn_err("Param '%s' is not documented in docstring for" % diff.pop(), parent, fn, errors)
elif len(diff) > 1:
print_fn_err("Params '%s' are not documented in docstring for" % "','".join(diff), parent, fn, errors)
else:
for param, arg in zip(params, args):
if param != arg:
print_fn_err("Param '%s' is out of order, does not match param '%s' in docstring for" % (param, arg), parent, fn, errors)
def check_object(o, robot_path, errors):
if inspect.isclass(o) and inspect.getdoc(o) is None:
err = "ERROR: Class '%s' has no docstring!\n-> See %s:%s" % (o.__name__,
inspect.getsourcefile(o),
inspect.getsourcelines(o)[1])
print(err)
errors.append(err)
for name, value in inspect.getmembers(o):
if ignore_object(value, robot_path):
continue
check_thing(o, value, robot_path, errors)
def check_thing(parent, thing, robot_path, errors):
if inspect.isclass(thing):
check_object(thing, robot_path, errors)
elif inspect.isfunction(thing):
check_function(parent, thing, errors)
def test_docstrings(robot, robot_path):
'''
The purpose of this test is to ensure that all of your robot code
has docstrings. Properly using docstrings will make your code
more maintainable and look more professional.
'''
# this allows abspath() to work correctly
os.chdir(robot_path)
errors = []
for module in sys.modules.values():
if ignore_object(module, robot_path):
continue
check_object(module, robot_path, errors)
# if you get an error here, look at stdout for the error message
assert len(errors) == 0
| [
"[email protected]"
]
| |
868e68a80dd8cf6eb36fcb64aa2b5052364a0f01 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02639/s328215249.py | db1cbfb4146ce4bd4f05ce69c14a0e3c330caba3 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | x = list(map(int, input().split()))
i = x.index(0)
print(i + 1)
| [
"[email protected]"
]
| |
6942cf2821993577ff91f07dbc207ed4d122aff8 | f3a7b2b71af1ca16e87fcc2c6063670d056f59c6 | /libs/models/detectors/r3det_gwd/build_whole_network_pb.py | 2f41c02e883d228bd72e6bd4fa158a571f3ccff2 | [
"Apache-2.0"
]
| permissive | DLPerf/RotationDetection | 3af165ab00ea6d034774a7289a375b90e4079df4 | c5d3e604ace76d7996bc461920854b2c79d8c023 | refs/heads/main | 2023-07-16T06:01:42.496723 | 2021-08-28T03:17:39 | 2021-08-28T03:17:39 | 400,690,285 | 0 | 0 | Apache-2.0 | 2021-08-28T03:16:55 | 2021-08-28T03:16:55 | null | UTF-8 | Python | false | false | 28,153 | py | # -*-coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from libs.models.detectors.single_stage_base_network import DetectionNetworkBase
from libs.models.losses.losses_gwd import LossGWD
from libs.utils import bbox_transform, nms_rotate
from libs.models.samplers.retinanet.anchor_sampler_retinenet import AnchorSamplerRetinaNet
from libs.models.samplers.r3det.refine_anchor_sampler_r3det import RefineAnchorSamplerR3Det
class DetectionNetworkR3DetGWD(DetectionNetworkBase):
def __init__(self, cfgs, is_training):
super(DetectionNetworkR3DetGWD, self).__init__(cfgs, is_training)
self.anchor_sampler_retinenet = AnchorSamplerRetinaNet(cfgs)
self.refine_anchor_sampler_r3det = RefineAnchorSamplerR3Det(cfgs)
self.losses = LossGWD(self.cfgs)
def refine_cls_net(self, inputs, scope_list, reuse_flag, level):
rpn_conv2d_3x3 = inputs
for i in range(self.cfgs.NUM_SUBNET_CONV):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
num_outputs=self.cfgs.FPN_CHANNEL,
kernel_size=[3, 3],
stride=1,
activation_fn=None if self.cfgs.USE_GN else tf.nn.relu,
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
trainable=self.is_training,
scope='{}_{}'.format(scope_list[0], i),
reuse=reuse_flag)
if self.cfgs.USE_GN:
rpn_conv2d_3x3 = tf.contrib.layers.group_norm(rpn_conv2d_3x3)
rpn_conv2d_3x3 = tf.nn.relu(rpn_conv2d_3x3)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,
num_outputs=self.cfgs.CLASS_NUM,
kernel_size=[3, 3],
stride=1,
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.FINAL_CONV_BIAS_INITIALIZER,
scope=scope_list[2],
trainable=self.is_training,
activation_fn=None,
reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, self.cfgs.CLASS_NUM],
name='refine_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='refine_{}_classification_sigmoid'.format(level))
return rpn_box_scores, rpn_box_probs
def refine_reg_net(self, inputs, scope_list, reuse_flag, level):
rpn_conv2d_3x3 = inputs
for i in range(self.cfgs.NUM_SUBNET_CONV):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
num_outputs=self.cfgs.FPN_CHANNEL,
kernel_size=[3, 3],
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
stride=1,
activation_fn=None if self.cfgs.USE_GN else tf.nn.relu,
scope='{}_{}'.format(scope_list[1], i),
trainable=self.is_training,
reuse=reuse_flag)
if self.cfgs.USE_GN:
rpn_conv2d_3x3 = tf.contrib.layers.group_norm(rpn_conv2d_3x3)
rpn_conv2d_3x3 = tf.nn.relu(rpn_conv2d_3x3)
rpn_delta_boxes = slim.conv2d(rpn_conv2d_3x3,
num_outputs=5,
kernel_size=[3, 3],
stride=1,
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
scope=scope_list[3],
trainable=self.is_training,
activation_fn=None,
reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5],
name='refine_{}_regression_reshape'.format(level))
return rpn_delta_boxes
def refine_net(self, feature_pyramid, name):
refine_delta_boxes_list = []
refine_scores_list = []
refine_probs_list = []
with tf.variable_scope(name):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
for level in self.cfgs.LEVEL:
if self.cfgs.SHARE_NET:
reuse_flag = None if level == self.cfgs.LEVEL[0] else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'refine_classification', 'refine_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,
'refine_classification_' + level, 'refine_regression_' + level]
refine_box_scores, refine_box_probs = self.refine_cls_net(feature_pyramid[level],
scope_list, reuse_flag,
level)
refine_delta_boxes = self.refine_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)
refine_scores_list.append(refine_box_scores)
refine_probs_list.append(refine_box_probs)
refine_delta_boxes_list.append(refine_delta_boxes)
return refine_delta_boxes_list, refine_scores_list, refine_probs_list
def refine_feature_op(self, points, feature_map, name):
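        # Feature refinement: bilinearly interpolate the feature map at each (sub-pixel)
        # refined-box centre and add the sampled responses back onto the original map,
        # so later heads see features re-aligned to the refined anchors.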
h, w = tf.cast(tf.shape(feature_map)[1], tf.int32), tf.cast(tf.shape(feature_map)[2], tf.int32)
xmin = tf.maximum(0.0, tf.floor(points[:, 0]))
xmin = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(xmin))
ymin = tf.maximum(0.0, tf.floor(points[:, 1]))
ymin = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(ymin))
xmax = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(points[:, 0]))
xmax = tf.maximum(0.0, tf.floor(xmax))
ymax = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(points[:, 1]))
ymax = tf.maximum(0.0, tf.floor(ymax))
left_top = tf.cast(tf.transpose(tf.stack([ymin, xmin], axis=0)), tf.int32)
right_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmax], axis=0)), tf.int32)
left_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmin], axis=0)), tf.int32)
right_top = tf.cast(tf.transpose(tf.stack([ymin, xmax], axis=0)), tf.int32)
# feature_1x5 = slim.conv2d(inputs=feature_map,
# num_outputs=self.cfgs.FPN_CHANNEL,
# kernel_size=[1, 5],
# weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
# biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
# stride=1,
# activation_fn=None,
# trainable=self.is_training,
# scope='refine_1x5_{}'.format(name))
#
# feature5x1 = slim.conv2d(inputs=feature_1x5,
# num_outputs=self.cfgs.FPN_CHANNEL,
# kernel_size=[5, 1],
# weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
# biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
# stride=1,
# activation_fn=None,
# trainable=self.is_training,
# scope='refine_5x1_{}'.format(name))
#
# feature_1x1 = slim.conv2d(inputs=feature_map,
# num_outputs=self.cfgs.FPN_CHANNEL,
# kernel_size=[1, 1],
# weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
# biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
# stride=1,
# activation_fn=None,
# trainable=self.is_training,
# scope='refine_1x1_{}'.format(name))
#
# feature = feature5x1 + feature_1x1
feature = feature_map
left_top_feature = tf.gather_nd(tf.squeeze(feature), left_top)
right_bottom_feature = tf.gather_nd(tf.squeeze(feature), right_bottom)
left_bottom_feature = tf.gather_nd(tf.squeeze(feature), left_bottom)
right_top_feature = tf.gather_nd(tf.squeeze(feature), right_top)
refine_feature = right_bottom_feature * tf.tile(
tf.reshape((tf.abs((points[:, 0] - xmin) * (points[:, 1] - ymin))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ left_top_feature * tf.tile(
tf.reshape((tf.abs((xmax - points[:, 0]) * (ymax - points[:, 1]))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ right_top_feature * tf.tile(
tf.reshape((tf.abs((points[:, 0] - xmin) * (ymax - points[:, 1]))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ left_bottom_feature * tf.tile(
tf.reshape((tf.abs((xmax - points[:, 0]) * (points[:, 1] - ymin))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL])
refine_feature = tf.reshape(refine_feature, [1, tf.cast(h, tf.int32), tf.cast(w, tf.int32), self.cfgs.FPN_CHANNEL])
# refine_feature = tf.reshape(refine_feature, [1, tf.cast(feature_size[1], tf.int32),
# tf.cast(feature_size[0], tf.int32), 256])
return refine_feature + feature
def refine_feature_five_op(self, points, feature_map, name):
h, w = tf.cast(tf.shape(feature_map)[1], tf.int32), tf.cast(tf.shape(feature_map)[2], tf.int32)
feature_1x5 = slim.conv2d(inputs=feature_map,
num_outputs=self.cfgs.FPN_CHANNEL,
kernel_size=[1, 5],
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
stride=1,
activation_fn=None,
trainable=self.is_training,
scope='refine_1x5_{}'.format(name))
feature5x1 = slim.conv2d(inputs=feature_1x5,
num_outputs=self.cfgs.FPN_CHANNEL,
kernel_size=[5, 1],
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
stride=1,
activation_fn=None,
trainable=self.is_training,
scope='refine_5x1_{}'.format(name))
feature_1x1 = slim.conv2d(inputs=feature_map,
num_outputs=self.cfgs.FPN_CHANNEL,
kernel_size=[1, 1],
weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
stride=1,
activation_fn=None,
trainable=self.is_training,
scope='refine_1x1_{}'.format(name))
feature = feature5x1 + feature_1x1
for i in range(5):
xmin = tf.maximum(0.0, tf.floor(points[:, 0+2*(i-1)]))
ymin = tf.maximum(0.0, tf.floor(points[:, 1+2*(i-1)]))
xmax = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(points[:, 0+2*(i-1)]))
ymax = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(points[:, 1+2*(i-1)]))
left_top = tf.cast(tf.transpose(tf.stack([ymin, xmin], axis=0)), tf.int32)
right_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmax], axis=0)), tf.int32)
left_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmin], axis=0)), tf.int32)
right_top = tf.cast(tf.transpose(tf.stack([ymin, xmax], axis=0)), tf.int32)
left_top_feature = tf.gather_nd(tf.squeeze(feature), left_top)
right_bottom_feature = tf.gather_nd(tf.squeeze(feature), right_bottom)
left_bottom_feature = tf.gather_nd(tf.squeeze(feature), left_bottom)
right_top_feature = tf.gather_nd(tf.squeeze(feature), right_top)
refine_feature = right_bottom_feature * tf.tile(
tf.reshape((tf.abs((points[:, 0+2*(i-1)] - xmin) * (points[:, 1+2*(i-1)] - ymin))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ left_top_feature * tf.tile(
tf.reshape((tf.abs((xmax - points[:, 0+2*(i-1)]) * (ymax - points[:, 1+2*(i-1)]))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ right_top_feature * tf.tile(
tf.reshape((tf.abs((points[:, 0+2*(i-1)] - xmin) * (ymax - points[:, 1+2*(i-1)]))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL]) \
+ left_bottom_feature * tf.tile(
tf.reshape((tf.abs((xmax - points[:, 0+2*(i-1)]) * (points[:, 1+2*(i-1)] - ymin))), [-1, 1]),
[1, self.cfgs.FPN_CHANNEL])
refine_feature = tf.reshape(refine_feature, [1, tf.cast(h, tf.int32), tf.cast(w, tf.int32), self.cfgs.FPN_CHANNEL])
feature += refine_feature
return feature
def refine_stage(self, input_img_batch, gtboxes_batch_r, box_pred_list, cls_prob_list, proposal_list,
feature_pyramid, gpu_id, pos_threshold, neg_threshold,
stage, proposal_filter=False):
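        # Refinement stage: decode the incoming predictions into rotated boxes, re-sample
        # the FPN features at each box centre, run the refine heads on the re-aligned
        # features and, during training, add this stage's focal / GWD losses.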
with tf.variable_scope('refine_feature_pyramid{}'.format(stage)):
refine_feature_pyramid = {}
refine_boxes_list = []
for box_pred, cls_prob, proposal, stride, level in \
zip(box_pred_list, cls_prob_list, proposal_list,
self.cfgs.ANCHOR_STRIDE, self.cfgs.LEVEL):
if proposal_filter:
box_pred = tf.reshape(box_pred, [-1, self.num_anchors_per_location, 5])
proposal = tf.reshape(proposal, [-1, self.num_anchors_per_location, 5 if self.method == 'R' else 4])
cls_prob = tf.reshape(cls_prob, [-1, self.num_anchors_per_location, self.cfgs.CLASS_NUM])
cls_max_prob = tf.reduce_max(cls_prob, axis=-1)
box_pred_argmax = tf.cast(tf.reshape(tf.argmax(cls_max_prob, axis=-1), [-1, 1]), tf.int32)
indices = tf.cast(tf.cumsum(tf.ones_like(box_pred_argmax), axis=0), tf.int32) - tf.constant(1, tf.int32)
indices = tf.concat([indices, box_pred_argmax], axis=-1)
box_pred = tf.reshape(tf.gather_nd(box_pred, indices), [-1, 5])
proposal = tf.reshape(tf.gather_nd(proposal, indices), [-1, 5 if self.method == 'R' else 4])
if self.cfgs.METHOD == 'H':
x_c = (proposal[:, 2] + proposal[:, 0]) / 2
y_c = (proposal[:, 3] + proposal[:, 1]) / 2
h = proposal[:, 2] - proposal[:, 0] + 1
w = proposal[:, 3] - proposal[:, 1] + 1
theta = -90 * tf.ones_like(x_c)
proposal = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))
else:
box_pred = tf.reshape(box_pred, [-1, 5])
proposal = tf.reshape(proposal, [-1, 5])
bboxes = bbox_transform.rbbox_transform_inv(boxes=proposal, deltas=box_pred)
refine_boxes_list.append(bboxes)
center_point = bboxes[:, :2] / stride
refine_feature_pyramid[level] = self.refine_feature_op(points=center_point,
feature_map=feature_pyramid[level],
name=level)
# points = coordinate5_2_8_tf(bboxes) / stride
# refine_feature_pyramid[level] = self.refine_feature_five_op(points=points,
# feature_map=feature_pyramid[level],
# name=level)
refine_box_pred_list, refine_cls_score_list, refine_cls_prob_list = self.refine_net(refine_feature_pyramid,
'refine_net{}'.format(stage))
refine_box_pred = tf.concat(refine_box_pred_list, axis=0)
refine_cls_score = tf.concat(refine_cls_score_list, axis=0)
# refine_cls_prob = tf.concat(refine_cls_prob_list, axis=0)
refine_boxes = tf.concat(refine_boxes_list, axis=0)
if self.is_training:
with tf.variable_scope('build_refine_loss{}'.format(stage)):
refine_labels, refine_target_delta, refine_box_states, refine_target_boxes = tf.py_func(
func=self.refine_anchor_sampler_r3det.refine_anchor_target_layer,
inp=[gtboxes_batch_r, refine_boxes, pos_threshold, neg_threshold, gpu_id],
Tout=[tf.float32, tf.float32,
tf.float32, tf.float32])
self.add_anchor_img_smry(input_img_batch, refine_boxes, refine_box_states, 1)
refine_cls_loss = self.losses.focal_loss(refine_labels, refine_cls_score, refine_box_states)
refine_reg_loss = self.losses.wasserstein_distance_loss(refine_box_pred,
refine_box_states,
refine_target_boxes,
refine_boxes, is_refine=True,
tau=self.cfgs.GWD_TAU,
func=self.cfgs.GWD_FUNC)
self.losses_dict['refine_cls_loss{}'.format(stage)] = refine_cls_loss * self.cfgs.CLS_WEIGHT
self.losses_dict['refine_reg_loss{}'.format(stage)] = refine_reg_loss * self.cfgs.REG_WEIGHT
return refine_box_pred_list, refine_cls_prob_list, refine_boxes_list
def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None, gpu_id=0):
if self.is_training:
gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)
if self.cfgs.USE_GN:
input_img_batch = tf.reshape(input_img_batch, [1, self.cfgs.IMG_SHORT_SIDE_LEN,
self.cfgs.IMG_MAX_LENGTH, 3])
# 1. build backbone
feature_pyramid = self.build_backbone(input_img_batch)
# 2. build rpn
rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list = self.rpn_net(feature_pyramid, 'rpn_net')
rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)
rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)
# rpn_cls_prob = tf.concat(rpn_cls_prob_list, axis=0)
# 3. generate anchors
anchor_list = self.make_anchors(feature_pyramid, use_tf=True)
anchors = tf.concat(anchor_list, axis=0)
# 4. build loss
if self.is_training:
with tf.variable_scope('build_loss'):
labels, target_delta, anchor_states, target_boxes = tf.py_func(func=self.anchor_sampler_retinenet.anchor_target_layer,
inp=[gtboxes_batch_h,
gtboxes_batch_r, anchors, gpu_id],
Tout=[tf.float32, tf.float32, tf.float32,
tf.float32])
if self.method == 'H':
self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)
else:
self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)
cls_loss = self.losses.focal_loss(labels, rpn_cls_score, anchor_states)
# reg_loss = self.losses.wasserstein_distance_loss(rpn_box_pred, anchor_states,
# target_boxes, anchors, is_refine=False,
# tau=self.cfgs.GWD_TAU,
# func=self.cfgs.GWD_FUNC)
reg_loss = self.losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)
# reg_loss = self.losses.iou_smooth_l1_loss_exp(target_delta, rpn_box_pred, anchor_states,
# target_boxes, anchors, alpha=self.cfgs.ALPHA,
# beta=self.cfgs.BETA)
self.losses_dict['cls_loss'] = cls_loss * self.cfgs.CLS_WEIGHT
self.losses_dict['reg_loss'] = reg_loss * self.cfgs.REG_WEIGHT
box_pred_list, cls_prob_list, proposal_list = rpn_box_pred_list, rpn_cls_prob_list, anchor_list
all_box_pred_list, all_cls_prob_list, all_proposal_list = [], [], []
for i in range(self.cfgs.NUM_REFINE_STAGE):
box_pred_list, cls_prob_list, proposal_list = self.refine_stage(input_img_batch,
gtboxes_batch_r,
box_pred_list,
cls_prob_list,
proposal_list,
feature_pyramid,
gpu_id,
pos_threshold=self.cfgs.REFINE_IOU_POSITIVE_THRESHOLD[i],
neg_threshold=self.cfgs.REFINE_IOU_NEGATIVE_THRESHOLD[i],
stage='' if i == 0 else '_stage{}'.format(i + 2),
proposal_filter=True if i == 0 else False)
if not self.is_training:
all_box_pred_list.extend(box_pred_list)
all_cls_prob_list.extend(cls_prob_list)
all_proposal_list.extend(proposal_list)
else:
all_box_pred_list, all_cls_prob_list, all_proposal_list = box_pred_list, cls_prob_list, proposal_list
box_pred = tf.concat(all_box_pred_list, axis=0)
cls_prob = tf.concat(all_cls_prob_list, axis=0)
proposal = tf.concat(all_proposal_list, axis=0)
return box_pred, cls_prob, proposal
# # 5. postprocess
# with tf.variable_scope('postprocess_detctions'):
# box_pred = tf.concat(all_box_pred_list, axis=0)
# cls_prob = tf.concat(all_cls_prob_list, axis=0)
# proposal = tf.concat(all_proposal_list, axis=0)
#
# boxes, scores, category = self.postprocess_detctions(refine_bbox_pred=box_pred,
# refine_cls_prob=cls_prob,
# anchors=proposal, gpu_id=gpu_id)
# boxes = tf.stop_gradient(boxes)
# scores = tf.stop_gradient(scores)
# category = tf.stop_gradient(category)
#
# if self.is_training:
# return boxes, scores, category, self.losses_dict
# else:
# return boxes, scores, category
def postprocess_detctions(self, refine_bbox_pred, refine_cls_prob, anchors, gpu_id):
def filter_detections(boxes, scores):
"""
:param boxes: [-1, 4]
:param scores: [-1, ]
:param labels: [-1, ]
:return:
"""
if self.is_training:
indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.VIS_SCORE)), [-1, ])
else:
indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.FILTERED_SCORE)), [-1, ])
if self.cfgs.NMS:
filtered_boxes = tf.gather(boxes, indices)
filtered_scores = tf.gather(scores, indices)
# perform NMS
nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes,
scores=filtered_scores,
iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,
max_output_size=100 if self.is_training else 1000,
use_gpu=True,
gpu_id=gpu_id)
# filter indices based on NMS
indices = tf.gather(indices, nms_indices)
# add indices to list of all indices
return indices
boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors, deltas=refine_bbox_pred,
scale_factors=self.cfgs.ANCHOR_SCALE_FACTORS)
return_boxes_pred = []
return_scores = []
return_labels = []
for j in range(0, self.cfgs.CLASS_NUM):
indices = filter_detections(boxes_pred, refine_cls_prob[:, j])
tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, indices), [-1, 5])
tmp_scores = tf.reshape(tf.gather(refine_cls_prob[:, j], indices), [-1, ])
return_boxes_pred.append(tmp_boxes_pred)
return_scores.append(tmp_scores)
return_labels.append(tf.ones_like(tmp_scores) * (j + 1))
return_boxes_pred = tf.concat(return_boxes_pred, axis=0)
return_scores = tf.concat(return_scores, axis=0)
return_labels = tf.concat(return_labels, axis=0)
return return_boxes_pred, return_scores, return_labels
| [
"[email protected]"
]
| |
712cbef7c9caa13001d11892e7ebfa5ca34642d5 | af3e249753fbf04ce10a01e4dbeab549cb4ae34d | /oscar/apps/catalogue/migrations/0014_auto_20181115_1953.py | fb912bfd712eb0dc972432ad7a40d44467e3d751 | []
| no_license | rwozniak72/sklep_oscar_test | 79588b57470c9245324cc5396aa472192953aeda | fb410dc542e6cb4deaf870b3e7d5d22ca794dc29 | refs/heads/master | 2020-08-12T04:55:25.084998 | 2019-10-16T21:14:08 | 2019-10-16T21:14:08 | 214,692,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # Generated by Django 2.0.7 on 2018-11-15 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0013_auto_20170821_1548'),
]
operations = [
migrations.AlterField(
model_name='product',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date created'),
),
migrations.AlterField(
model_name='productimage',
name='display_order',
field=models.PositiveIntegerField(db_index=True, default=0, help_text='An image with a display order of zero will be the primary image for a product', verbose_name='Display order'),
),
migrations.AlterField(
model_name='productrecommendation',
name='ranking',
field=models.PositiveSmallIntegerField(db_index=True, default=0, help_text='Determines order of the products. A product with a higher value will appear before one with a lower ranking.', verbose_name='Ranking'),
),
]
| [
"[email protected]"
]
| |
789af4de56c556d7d9dc75336e4c423b18ab8af2 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /1.2/_downloads/52b26bfb61145291f5108dc7fd05ccee/35_artifact_correction_regression.py | 9a81f74c53bb80927e67f3d1046b2516c45e9edf | []
| permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 9,910 | py | # -*- coding: utf-8 -*-
"""
.. _tut-artifact-regression:
===================================
Repairing artifacts with regression
===================================
This tutorial covers removal of artifacts using regression as in Gratton et al.
(1983) :footcite:`GrattonEtAl1983` and Croft & Barry (2000)
:footcite:`CroftBarry2000`.
Generally speaking, artifacts that result in time waveforms on the sensors
that are accurately reflected by some reference signal can be removed by
regression. Blink artifacts captured by bipolar EOG channels provide a good
example of this, so we will demonstrate this here.
Although ECG signals are well captured by bipolar ECG electrodes,
regression-based removal of ECG artifacts usually does not work very well.
This is likely because the heart acts like a rotating dipole, and
therefore the ECG channel time waveform recorded from the ECG electrode sites
does not reflect the same temporal dynamics that manifest at each MEG channel
(obtained by sampling some component of the related magnetic vector field).
Other approaches like :ref:`ICA <tut-artifact-ica>` or
:ref:`SSP <tut-artifact-ssp>` will likely work better for ECG.
Furthermore, regression approaches are usually performed in situations where
there are few channels available, and removing an entire signal component is
undesirable. Hence, most articles on the topic concern EEG and it is
unusual to see the technique applied to MEG. For this reason, we will restrict
the analysis in this tutorial to EEG data only.
Prepare the data
^^^^^^^^^^^^^^^^
We begin as always by importing the necessary Python modules and loading some
data. The :ref:`MNE-Sample <sample-dataset>` dataset has some clear, large
blink artifacts, especially during the presentation of visual stimuli.
"""
# %%
import numpy as np
import mne
from mne.preprocessing import EOGRegression
data_path = mne.datasets.sample.data_path()
raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
raw.pick(['eeg', 'eog', 'stim'])
raw.load_data()
# The regression technique works regardless of chosen reference. However, it is
# important to choose a reference before proceeding with the analysis.
raw.set_eeg_reference('average')
# Removing slow drifts makes for more stable regression coefficients. Make sure
# to apply the same filter to both EEG and EOG channels!
raw.filter(0.3, 40)
# make epochs
events = mne.find_events(raw)
event_id = {'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id=event_id, preload=True)
# %%
# Visualize the original data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Let's first look at the `~mne.Evoked` data (average across epochs) without
# any corrections applied.
# we'll try to keep a consistent ylim across figures
plot_kwargs = dict(picks='all', ylim=dict(eeg=(-10, 10), eog=(-5, 15)))
# plot the evoked for the EEG and the EOG sensors
fig = epochs.average('all').plot(**plot_kwargs)
fig.set_size_inches(6, 6)
# %%
# We can see there is some EOG activity that is likely bleeding into the EEG
# evoked response. At around 250ms this becomes especially noticeable. Let's
# apply regression to subtract the EOG signal from the EEG signals to clean it
# up.
# %%
# Compute and apply EOG regression
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Now, we'll compare the evoked response before and after we regress out the
# EOG signal. First, let's try plain regression, and then we'll explore more
# advanced techniques.
# Perform regression using the EOG sensor as independent variable and the EEG
# sensors as dependent variables.
model_plain = EOGRegression(picks='eeg', picks_artifact='eog').fit(epochs)
fig = model_plain.plot(vlim=(None, 0.4)) # regression coefficients as topomap
fig.set_size_inches(3, 2)
# %%
# The regression coefficients show the linear relationship between each EEG
# sensor and the EOG sensor. Note that occipital sensors have a positive
# relationship, as we set a common-average reference when we loaded the data
# above.
#
# Now we are ready to use these coefficients to subtract the EOG signal from
# the EEG signals.
epochs_clean_plain = model_plain.apply(epochs)
# After regression, we should redo the baseline correction
epochs_clean_plain.apply_baseline()
# Show the evoked potential computed on the corrected data
fig = epochs_clean_plain.average('all').plot(**plot_kwargs)
fig.set_size_inches(6, 6)
# %%
# Regressing the EOG signal out of the EEG signals has reduced the peak around
# 250ms that was partly there because of eye artifacts.
#
# In the :ref:`MNE-Sample dataset <sample-dataset>`, there are no segments of
# data that are particularly unstable, so the basic form of regression produces
# robust coefficients. However, this may not be the case in every dataset, so
# let's explore some variations that may improve the estimation of the
# regression coefficients.
#
# One potential problem is that the EOG sensor does not only pick up eye
# artifacts, but also a bit of EEG signal. This means we are prone to
# overestimating the regression coefficients if the EOG sensors are placed too
# close to the EEG sensors. However, there is a correction we can apply to
# alleviate this.
#
# Subtract the evoked response from the epoch data before regression
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Gratton et al. (1983) :footcite:`GrattonEtAl1983` suggest computing
# regression coefficients on epoch data with the evoked response subtracted
# out. The idea is that the EEG signal components relevant to the study are in
# the evoked, so by removing them, mostly noise components will be left. Since
# EOG artifacts are unlikely to be strictly time-locked to the stimulus onset,
# enough EOG information will likely remain to be able to estimate robust
# regression coefficients.
# create epochs with the evoked subtracted out
epochs_sub = epochs.copy().subtract_evoked()
# perform regression
model_sub = EOGRegression(picks='eeg', picks_artifact='eog').fit(epochs_sub)
fig = model_sub.plot(vlim=(None, 0.4))
fig.set_size_inches(3, 2)
# apply the regression coefficients to the original epochs
epochs_clean_sub = model_plain.apply(epochs).apply_baseline()
fig = epochs_clean_sub.average('all').plot(**plot_kwargs)
fig.set_size_inches(6, 6)
# %%
# We see that we obtain the same regression coefficients, even with the evoked
# removed from the epochs.
#
# Create EOG evoked before regression
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# It is advantageous to estimate the regression coefficients on a piece of data
# with lots of EOG activity. As EOG activity is typically much larger than EEG,
# the EOG artifacts will dominate the signal and the regression coefficients
# will reflect mostly the influence of the EOG. To amplify this effect, Croft &
# Barry (2000) :footcite:`CroftBarry2000` suggest creating epochs based on
# blink onsets and computing the evoked blink response. The averaging procedure
# will suppress EEG signals that are not strictly time-locked with the blink
# response. Ideally, one would create evokeds for both blinks and saccades, and
# create two separate regression models. However, we will restrict ourselves to
# just blink epochs, since MNE-Python contains an automated method for creating
# those.
#
# .. note:: This is very similar to the approach taken by :ref:`SSP
# <tut-artifact-ssp>`. The difference is that :ref:`SSP
# <tut-artifact-ssp>` estimates signal components that are maximally
# correlated with the artifact and removes any data along that
# component (thereby reducing the rank of the non-EOG data), whereas
# the regression approach uses the ongoing EOG signal to determine
# how much data to remove (thereby not necessarily reducing the rank
# of the non-EOG data). Generally, SSP tends to err on the side of
# removing too much data, eliminating artifacts and true brain
# signals alike, whereas regression will err on the side of not
# removing enough, leaving some artifact signals still present in the
# signal.
eog_epochs = mne.preprocessing.create_eog_epochs(raw)
# We need to explicitly specify that we want to average the EOG channel too.
eog_evoked = eog_epochs.average('all')
eog_evoked.plot('all')
fig.set_size_inches(6, 6)
# perform regression on the evoked blink response
model_evoked = EOGRegression(picks='eeg', picks_artifact='eog').fit(eog_evoked)
fig = model_evoked.plot(vlim=(None, 0.4))
fig.set_size_inches(3, 2)
# apply the regression coefficients to the original epochs
epochs_clean_evoked = model_evoked.apply(epochs).apply_baseline()
fig = epochs_clean_evoked.average('all').plot(**plot_kwargs)
fig.set_size_inches(6, 6)
# for good measure, also show the effect on the blink evoked
eog_evoked_clean = model_evoked.apply(eog_evoked)
eog_evoked_clean.apply_baseline()
eog_evoked_clean.plot('all')
fig.set_size_inches(6, 6)
# %%
# We see that again, the regression weights have been correctly estimated.
#
# Visualize the effect on raw data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Once we have obtained robust regression weights, we can use them to apply the
# regression directly to raw, epoched, and evoked data. Here, we will use the
# regression weights obtained from the blink evoked and apply it to an instance
# of `~mne.io.Raw`.
order = np.concatenate([ # plotting order: EOG first, then EEG
mne.pick_types(raw.info, meg=False, eog=True),
mne.pick_types(raw.info, meg=False, eeg=True),
])
raw_kwargs = dict(events=eog_epochs.events, order=order, start=13, duration=3,
n_channels=10, scalings=dict(eeg=50e-6, eog=250e-6))
# plot original data
raw.plot(**raw_kwargs)
# regress (using coefficients computed previously) and plot
raw_clean = model_evoked.apply(raw)
raw_clean.plot(**raw_kwargs)
# %%
# References
# ^^^^^^^^^^
# .. footbibliography::
| [
"[email protected]"
]
| |
22fc07b80e8a0195b0e11cd601c09efe7a51bedf | 30b98382e8621ec45bc52b8f69a3ca6285e83a6a | /python/1-GeneticAlgorithm/GA_on_Knapsack_problem.py | 94e532ea3dba37eeaa81a29e5124a564d4f03e07 | []
| no_license | JG-cmd/algrithm | 404c2d7f0c7ab677ae3f4913ffbd57370627366f | 93d8ebc7074e5411f281b1882d92d5f11bcbb652 | refs/heads/master | 2023-03-15T19:24:07.025977 | 2020-02-15T17:20:06 | 2020-02-15T17:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # encoding=utf-8
#####
# Genetic algorithm applied to the knapsack problem
# Python 3.6
# http://www.myzaker.com/article/59855a9c1bc8e0cf58000015/
#####
import os
import random
from copy import deepcopy


# One individual of the population
class GAType(object):
    def __init__(self, obj_cnt):
        # the individual's genes
        self.gene = [0 for _ in range(0, obj_cnt)]
        # the individual's fitness
        self.fitness = 0
        # selection probability
        self.choose_freq = 0
        # cumulative probability
        self.cummulative_freq = 0


# Genetic algorithm
class genetic(object):
    def __init__(self, value, weight, max_weight, population_size):
        self.value = value
        self.weight = weight
        self.max_weight = max_weight
        self.obj_count = len(weight)
        self._gatype = [GAType(self.obj_count) for x in range(0, population_size, 1)]  # initialize the 32 individuals of the population
        self.total_fitness = 0
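

# --- Hedged sketch (not in the original file) ---
# The original script never defines the `genetic_result()` method referenced in the
# commented-out call inside the __main__ block below. The helper here is one plausible,
# simplified way to wire up selection / crossover / mutation for this knapsack setup;
# it is an illustrative assumption, not the author's implementation.
def knapsack_ga_sketch(value, weight, max_weight, population_size=32,
                       max_generations=500, p_cross=0.8, p_mutation=0.15):
    n = len(weight)
    # random initial population of 0/1 gene strings
    population = [[random.randint(0, 1) for _ in range(n)] for _ in range(population_size)]

    def fitness(gene):
        w = sum(g * wt for g, wt in zip(gene, weight))
        v = sum(g * val for g, val in zip(gene, value))
        return v if w <= max_weight else 0  # infeasible individuals score zero

    for _ in range(max_generations):
        # tournament selection: keep the fitter of two randomly drawn individuals
        parents = [max(random.sample(population, 2), key=fitness)
                   for _ in range(population_size)]
        children = []
        for a, b in zip(parents[::2], parents[1::2]):
            # single-point crossover with probability p_cross
            if random.random() < p_cross:
                cut = random.randint(1, n - 1)
                a, b = a[:cut] + b[cut:], b[:cut] + a[cut:]
            children.extend([a[:], b[:]])
        # bit-flip mutation with probability p_mutation per gene
        for child in children:
            for i in range(n):
                if random.random() < p_mutation:
                    child[i] = 1 - child[i]
        population = children
    return max(population, key=fitness)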
if __name__ == '__main__':
# 各物品的重量和价值
pair = [[35,10], [30,40], [60,30], [50,50], [40,35], [10,40], [25,30]]
# weight = [35,30,60,50,40,10,25]
# value = [10,40,30,50,35,40,30]
# weight = zip(*pair)[0] # (35,30,60,50,40,10,25)
# weight = zip(*pair)[1] # (35,30,60,50,40,10,25)
weight = [x[0] for x in pair]
value = [x[1] for x in pair]
# maximum weight capacity of the knapsack
max_weight = 150
# known optimal solution
opt_result = [1,1,0,1,0,1,1] # global optimum: items [1,2,4,6,7] -> weights [35,30,50,10,25] = 150, values [10,40,50,40,30] = 170
population_size = 32 # population size
max_generations = 500 # number of generations
p_cross = 0.8 # crossover probability
p_mutation = 0.15 # mutation probability
# genetic(value, weight, max_weight).genetic_result()
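# --- Editor's sketch (not part of the original script) ----------------------
# The `genetic` class above only builds the initial population, and the call
# commented out above refers to a `genetic_result()` method that is never
# defined. The standalone helper below is a minimal sketch of the missing
# evaluate/select/crossover/mutate loop; the name `run_ga` and its internal
# choices (tournament selection, one-point crossover, bit-flip mutation,
# steady-state replacement) are illustrative assumptions.
def run_ga(weight, value, max_weight, pop_size, generations, p_cross, p_mutation):
    n = len(weight)
    pop = [[random.randint(0, 1) for _ in range(n)] for _ in range(pop_size)]

    def fitness(gene):
        # total value if the gene respects the weight limit, otherwise 0
        w = sum(wi * g for wi, g in zip(weight, gene))
        v = sum(vi * g for vi, g in zip(value, gene))
        return v if w <= max_weight else 0

    for _ in range(generations):
        # tournament selection of two parents
        parents = [max(random.sample(pop, 3), key=fitness) for _ in range(2)]
        child = list(parents[0])
        if random.random() < p_cross:  # one-point crossover
            cut = random.randint(1, n - 1)
            child = parents[0][:cut] + parents[1][cut:]
        if random.random() < p_mutation:  # bit-flip mutation
            i = random.randint(0, n - 1)
            child[i] = 1 - child[i]
        # steady-state replacement: the worst individual is replaced
        worst = min(range(pop_size), key=lambda j: fitness(pop[j]))
        pop[worst] = child
    best = max(pop, key=fitness)
    return best, fitness(best)

# Example call (kept commented out, mirroring the original script):
# best_gene, best_value = run_ga(weight, value, max_weight, population_size,
#                                max_generations, p_cross, p_mutation)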
| [
"[email protected]"
]
| |
45c0af97d21af7351b881ee9681d2dc86db4a4c9 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/handlers/lr_schedule_handler.py | 3b300537b273be71ed40e34dc2b2f45a984dd082 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 3,437 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler
from monai.utils import ensure_tuple, exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
class LrScheduleHandler:
"""
Ignite handler to update the Learning Rate based on PyTorch LR scheduler.
"""
def __init__(
self,
lr_scheduler: Union[_LRScheduler, ReduceLROnPlateau],
print_lr: bool = True,
name: Optional[str] = None,
epoch_level: bool = True,
step_transform: Callable[[Engine], Any] = lambda engine: (),
) -> None:
"""
Args:
lr_scheduler: typically, lr_scheduler should be PyTorch
lr_scheduler object. If customized version, must have `step` and `get_last_lr` methods.
print_lr: whether to print out the latest learning rate with logging.
name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
epoch_level: execute lr_scheduler.step() after every epoch or every iteration.
`True` is epoch level, `False` is iteration level.
step_transform: a callable that is used to transform the information from `engine`
to expected input data of lr_scheduler.step() function if necessary.
Raises:
TypeError: When ``step_transform`` is not ``callable``.
"""
self.lr_scheduler = lr_scheduler
self.print_lr = print_lr
self.logger = logging.getLogger(name)
self.epoch_level = epoch_level
if not callable(step_transform):
raise TypeError(f"step_transform must be callable but is {type(step_transform).__name__}.")
self.step_transform = step_transform
self._name = name
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
if self.epoch_level:
engine.add_event_handler(Events.EPOCH_COMPLETED, self)
else:
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
def __call__(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
args = ensure_tuple(self.step_transform(engine))
self.lr_scheduler.step(*args)
if self.print_lr:
self.logger.info(f"Current learning rate: {self.lr_scheduler._last_lr[0]}") # type: ignore[union-attr]
| [
"[email protected]"
]
| |
7ea2028e24f5008ab25e293b157929cc4359f7a9 | 2567e10b9c713b0a6064147885db9628de4fca30 | /subdomain.py | a148b45749aa95eb47603ed03c443079d251ff9a | []
| no_license | Gamerited/subpyforce | 759ea42b37532ddeecbcb76020fb0fd49b02abfa | 5cc1ddd6a6d200c3a4b40b604b80317c24a29ac8 | refs/heads/master | 2022-11-13T05:29:52.809233 | 2020-06-26T07:16:54 | 2020-06-26T07:16:54 | 275,091,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | import requests
from threading import Thread
from queue import Queue
from colored import fg, bg, attr
x = Queue()  # queue holding the candidate subdomains
def subs(domain):
global x
while True:
sdomain = x.get()
# probe both the http and the https variant of each candidate subdomain
for scheme in ("http", "https"):
location = f"{scheme}://{sdomain}.{domain}"
try:
requests.get(location)
except requests.ConnectionError:
pass
except requests.exceptions.InvalidURL:
print('%s [-] Unavailable url: %s' % (fg(1), attr(0)), location)
except UnicodeError:
print('%s%s The unicode character was not recognized from the wordlist %s' % (fg(1), bg(15), attr(0)))
else:
print('%s [+] Active url: %s' % (fg(10), attr(0)), location)
x.task_done()
def main(domain,sub,nthreads):
global x
for j in sub:
x.put(j)
for t in range(nthreads):
kam = Thread(target=subs, args=(domain,))
kam.daemon = True
kam.start()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Noob script to brute-force subdomains, by @gamerited')
parser.add_argument("domain", help="The domain you want to brute-force (e.g. google.com)")
parser.add_argument("-w", "--wordlist", help="Path to the wordlist used to brute-force the domain")
parser.add_argument("-t", "--num-threads", help="Number of threads to use (default is 20)", default=20, type=int)
args = parser.parse_args()
domain = args.domain
wordlist = args.wordlist
nthreads = args.num_threads
main(domain=domain, nthreads=nthreads, sub=open(wordlist, encoding="ISO-8859-1").read().splitlines())
x.join()
| [
"[email protected]"
]
| |
03dfadc9b08f7a78c163125f7724ce3c71849df2 | 43cb8b3e0a730e2a24e90c430b3399129541f328 | /2. django-models/example/migrations/0001_initial.py | 6e5f4a85a6c212fef6b7fc8649bf06b283b16b30 | []
| no_license | josancamon19/django-studies | 817c2b17b3c7c0d8fddd9a8bf938eddaa56e0019 | 875d08fc615bdc86ec8075e665aeb8a135f83efb | refs/heads/master | 2020-09-23T10:50:19.658173 | 2019-12-12T13:29:20 | 2019-12-12T13:29:20 | 225,477,236 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | # Generated by Django 3.0 on 2019-12-03 04:26
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('location', models.CharField(max_length=20)),
('date_created', models.DateField(default=datetime.datetime(2019, 12, 3, 4, 26, 14, 952007, tzinfo=utc))),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('creator', models.CharField(max_length=20)),
('paradigm', models.CharField(max_length=20)),
('date_created', models.DateField(default=datetime.datetime(2019, 12, 3, 4, 26, 14, 970563, tzinfo=utc))),
],
),
migrations.CreateModel(
name='Programmer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('age', models.IntegerField()),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='example.Company')),
('languages', models.ManyToManyField(to='example.Language')),
],
),
]
| [
"[email protected]"
]
| |
2767f917d3fa9e9be187ea894b815bd79dc4b39d | 4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18 | /main/migrations/0010_add_metric_group_20160225_2311.py | 6353404be3d139d5fdd25fa4257e8756ae3c7861 | []
| no_license | WealthCity/django-project | 6668b92806d8c61ef9e20bd42daec99993cd25b2 | fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e | refs/heads/dev | 2021-01-19T14:10:52.115301 | 2017-04-12T11:23:32 | 2017-04-12T11:23:32 | 88,132,284 | 0 | 1 | null | 2017-04-13T06:26:30 | 2017-04-13T06:26:29 | null | UTF-8 | Python | false | false | 4,661 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def check_db(apps, schema_editor):
GoalMetric = apps.get_model("main", "GoalMetric")
Portfolio = apps.get_model("main", "Portfolio")
PortfolioItem = apps.get_model("main", "PortfolioItem")
db_alias = schema_editor.connection.alias
invalid_metrics = list(GoalMetric.objects.using(db_alias).filter(setting=None).values_list('id', flat=True))
if len(invalid_metrics) > 0:
raise Exception('GoalMetric ids: {} are orphaned (they have no settings object, so cannot be used. Please delete them.'.format(invalid_metrics))
invalid_portfolios = list(Portfolio.objects.using(db_alias).filter(goal_setting=None).values_list('id', flat=True))
if len(invalid_portfolios) > 0:
ipis = list(PortfolioItem.objects.using(db_alias).filter(portfolio__in=invalid_portfolios).values_list('id', flat=True))
raise Exception('Portfolio ids: {} are orphaned (they have no settings object, so cannot be used.'
'Their portfolioitem ids: {} are also orphaned. Please delete them both.'.format(invalid_portfolios, ipis))
def set_group(apps, schema_editor):
GoalSetting = apps.get_model("main", "GoalSetting")
GoalMetricGroup = apps.get_model("main", "GoalMetricGroup")
db_alias = schema_editor.connection.alias
for setting in GoalSetting.objects.using(db_alias).all():
metric_group = GoalMetricGroup.objects.using(db_alias).create()
for metric in setting.metrics.using(db_alias).all():
metric.group = metric_group
metric.setting = None
metric.save()
setting.metric_group = metric_group
setting.save()
def set_portfolio(apps, schema_editor):
GoalSetting = apps.get_model("main", "GoalSetting")
db_alias = schema_editor.connection.alias
for setting in GoalSetting.objects.using(db_alias).all():
setting.portfolio.setting = setting
setting.portfolio.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20160224_1934'),
]
operations = [
migrations.RunPython(check_db),
migrations.AlterField(
model_name='goal',
name='active_settings',
field=models.OneToOneField(help_text='The settings were last used to do a rebalance.These settings are responsible for our current market positions.', to='main.GoalSetting', null=True, related_name='goal_active', blank=True),
),
migrations.AlterField(
model_name='goalsetting',
name='portfolio',
field=models.ForeignKey(to='main.Portfolio', related_name='settings'),
),
migrations.CreateModel(
name='GoalMetricGroup',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
('type', models.IntegerField(default=0, choices=[(0, 'Custom'), (1, 'Preset')])),
('name', models.CharField(max_length=100, null=True)),
],
),
migrations.AddField(
model_name='goalmetric',
name='group',
field=models.ForeignKey(null=True, to='main.GoalMetricGroup', related_name='metrics'),
),
migrations.AddField(
model_name='goalsetting',
name='metric_group',
field=models.ForeignKey(null=True, to='main.GoalMetricGroup', related_name='settings'),
),
migrations.RunPython(set_group),
migrations.AlterField(
model_name='goalmetric',
name='group',
field=models.ForeignKey(to='main.GoalMetricGroup', related_name='metrics'),
),
migrations.AlterField(
model_name='goalsetting',
name='metric_group',
field=models.ForeignKey(to='main.GoalMetricGroup', related_name='settings'),
),
migrations.RemoveField(
model_name='goalmetric',
name='setting',
),
migrations.AddField(
model_name='portfolio',
name='setting',
field=models.OneToOneField(null=True, related_name='nportfolio', to='main.GoalSetting'),
),
migrations.RunPython(set_portfolio),
migrations.RemoveField(
model_name='goalsetting',
name='portfolio',
),
migrations.AlterField(
model_name='portfolio',
name='setting',
field=models.OneToOneField(to='main.GoalSetting', related_name='portfolio'),
),
]
| [
"[email protected]"
]
| |
f4f958de1ba2e127ee2c19421aa94948a44de570 | 9ebc9bba7577c958cc83bf52573303404ea3c7f1 | /mycasa_scripts_active/scripts_ts08_ngc3110/scripts_ts08_ngc3110_old/mypaper99_figures/mypaper99_fig12_oao_vla.py | 6b95bfef4f1994e038b1895c41e64fb00c564bf3 | []
| no_license | toshikisaito1005/mycasa_scripts | 3c3d8942d492ea5b5d28bfe7348764caea857549 | 6367ce6c28e0fe6f98e3adae9823843ba7742da1 | refs/heads/master | 2021-08-10T23:02:38.690492 | 2020-10-01T20:10:00 | 2020-10-01T20:10:00 | 225,368,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | import os
import re
import sys
import glob
import scipy
sys.path.append(os.getcwd() + "/../../")
import mycasaimaging_tools as myim
dir_data = "../../../ngc3110/ana/data_nyquist/"
ra_center = "10:04:02.090"
dec_center = "-6.28.29.604"
xlim = [-30, 30]
ylim = [30, -30]
value = None
done = glob.glob(dir_data + "../eps/")
if not done:
os.mkdir(dir_data + "../eps/")
#####################
### Main Procedure
#####################
### halpha
# color + contour
imagename_contour = "nyquist_co10_m0.fits"
imagename_color = "nyquist_halpha.fits"
contour = [0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 0.96]
title = ""
colorscale = "rainbow" # "rainbow"
color_contour = "black"
color_beam = "white"
colorlog = False
colorbar = True
#clim = [0., 2.]
title = "H-alpha"
colorbar_label = ""
output = "../eps/nyquist_halpha.eps"
myim.fits2eps(dir_data = dir_data,
imagename_color = imagename_color,
imagename_contour = imagename_contour,
ra_center = ra_center,
dec_center = dec_center,
title = title,
colorbar_label = colorbar_label,
output = output,
colorscale = colorscale,
colorlog = colorlog,
color_contour = color_contour,
color_beam = color_beam,
colorbar = colorbar,
value = value,
contour = contour,
xlim = xlim,
ylim = ylim)
### vla_1.45GHz
# color + contour
imagename_contour = "nyquist_co10_m0.fits"
imagename_color = "nyquist_vla_1.45GHz.fits"
contour = [0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 0.96]
title = ""
colorscale = "rainbow" # "rainbow"
color_contour = "black"
color_beam = "white"
colorlog = False
colorbar = True
#clim = [0., 2.]
title = "1.45 GHz Continuum"
colorbar_label = "(Jy beam$^{-1}$)"
output = "../eps/nyquist_vla_1.45GHz.eps"
myim.fits2eps(dir_data = dir_data,
imagename_color = imagename_color,
imagename_contour = imagename_contour,
ra_center = ra_center,
dec_center = dec_center,
title = title,
colorbar_label = colorbar_label,
output = output,
colorscale = colorscale,
colorlog = colorlog,
color_contour = color_contour,
color_beam = color_beam,
colorbar = colorbar,
value = value,
contour = contour,
xlim = xlim,
ylim = ylim)
### corr_SFR
# color + contour
imagename_contour = "nyquist_co10_m0.fits"
imagename_color = "nyquist_corr_sfr_density.fits"
contour = [0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 0.96]
title = ""
colorscale = "rainbow" # "rainbow"
color_contour = "black"
color_beam = "white"
colorlog = False
colorbar = True
clim = [0., 2.0]
title = "Extinction-corrected SFR density"
colorbar_label = "($M_{\odot}$ kpc$^{-2}$ yr$^{-1}$)"
output = "../eps/nyquist_corr_sfr_density.eps"
myim.fits2eps(dir_data = dir_data,
imagename_color = imagename_color,
imagename_contour = imagename_contour,
ra_center = ra_center,
dec_center = dec_center,
title = title,
colorbar_label = colorbar_label,
output = output,
colorscale = colorscale,
colorlog = colorlog,
color_contour = color_contour,
color_beam = color_beam,
colorbar = colorbar,
value = value,
contour = contour,
xlim = xlim,
ylim = ylim,
clim = clim)
| [
"[email protected]"
]
| |
9605c4c35f4cd3538b731010d656254dbc417ebb | 180e1e947f3f824cb2c466f51900aa12a9428e1c | /pattern7/simple_smart_home/src/Service.py | f0f3be546dcd0e68c212e1d6e8019ec7fe1e3bcf | [
"MIT"
]
| permissive | icexmoon/design-pattern-with-python | 216f43a63dc87ef28a12d5a9a915bf0df3b64f50 | bb897e886fe52bb620db0edc6ad9d2e5ecb067af | refs/heads/main | 2023-06-15T11:54:19.357798 | 2021-07-21T08:46:16 | 2021-07-21T08:46:16 | 376,543,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #######################################################
#
# Service.py
# Python implementation of the Class Service
# Generated by Enterprise Architect
# Created on: 01-Jul-2021 11:07:50
# Original author: 70748
#
#######################################################
from abc import ABC, abstractmethod
class Service(ABC):
@abstractmethod
def restart(self):
pass
@abstractmethod
def shutdown(self):
pass
@abstractmethod
def start(self):
pass
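# --- Editor's sketch (not part of the generated file) ------------------------
# A minimal concrete implementation, showing how the abstract interface above
# is meant to be used; the class name and messages are invented for
# illustration.
class LightService(Service):
    def start(self):
        print("light service started")

    def shutdown(self):
        print("light service stopped")

    def restart(self):
        self.shutdown()
        self.start()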
| [
"[email protected]"
]
| |
aec4c5395fdaf7f6fb1fb92cb91e8b40a713e7fd | 62392c3a6aae2255db806374115965b35e1b8d24 | /stanfitter.py | 27581a8ae8de0cce986e3742b6f4f3bab3bfbe89 | []
| no_license | wesenu/BootcampCode | 7fb511d9cfbdd2fc8c4f0cdbde26fc160e1c7a55 | 9649f82a2464715d93ff1ddd7de9ee3bfe2c8005 | refs/heads/master | 2022-01-17T04:36:13.635334 | 2017-06-30T14:52:27 | 2017-06-30T14:52:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,783 | py | """
A wrapper around PyStan's compilation and fitting methods, providing a somewhat
more "Pythonic" interface to the fit results.
For PyStan info:
https://pystan.readthedocs.org/en/latest/getting_started.html
Created 2014-11-04 by Tom Loredo
2015-04-17: Modified for BDA class
"""
import cPickle, glob
from hashlib import md5
from collections import Mapping, OrderedDict
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import platform
plat_is_win = platform.system() == 'Windows'
if plat_is_win:
# setuptools MUST be imported (BEFORE pystan) on Windows; it alters
# distutils, enabling PyStan to find the correct MVC compiler. You
# will also need a distutils config file indicating that the MVC compiler
# should be used; it should have the following two lines as content
# (without the Python comment hashes):
# [build]
# compiler = msvc
# For the config file name and location (local and global choices), see:
# https://docs.python.org/2/install/#distutils-configuration-files
import setuptools, pystan
else:
import pystan
__all__ = ['StanFitter']
# ImmutableAttrDict based on discussion from:
# http://stackoverflow.com/questions/9997176/immutable-dictionary-only-use-as-a-key-for-another-dictionary
class ImmutableAttrDict(Mapping):
def __init__(self, *args, **kwargs):
self._odict = OrderedDict(*args, **kwargs) # will copy an input dict
# Copy items to __dict__ so they're discoverable by IPython.
for key, value in self._odict.items():
if self.__dict__.has_key(key):
raise ValueError('Key collision!')
self.__dict__[key] = value
def _asdict(self):
"""
Return a new OrderedDict holding the (key, value) pairs.
"""
return OrderedDict(self._odict)
def __getitem__(self, key):
return self._odict[key]
def __len__(self):
return len(self._odict)
def __iter__(self):
return iter(self._odict)
def __eq__(self, other):
return self._odict == other._odict
def __getattr__(self, name):
try:
return self._odict[name]
except KeyError: # access has_key, etc.
return getattr(self._odict, name)
def __setattr__(self, name, value):
if name == '_odict':
self.__dict__['_odict'] = value
elif self._odict.has_key(name):
raise TypeError('Existing attributes may not be altered!')
else:
if self.__dict__.has_key(name):
raise ValueError('Key collision!')
self._odict[name] = value
# Copy to __dict__ so it's discoverable by IPython.
self.__dict__[name] = value
# def __delattr__(self, name):
# del self._od[name]
# TODO: Rework ParamHandler to avoid self.__dict__ = self; see:
# http://stackoverflow.com/questions/25660358/accessing-ordereddict-keys-like-attributes-in-python
# See ParamValueContainer above.
class ParamHandler(dict):
"""
A container and handler for posterior sample data for a scalar parameter.
This is mostly a dict-like object with access to data also possible via
attributes, based on AttrDict from:
http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
"""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('fit'):
raise ValueError('fit argument required!')
if not kwargs.has_key('name'):
raise ValueError('name argument required!')
super(ParamHandler, self).__init__(*args, **kwargs)
# NOTE: The following works only because the dict superclass is
# implemented in C, with special members in a struct rather than
# a __dict___, so they remain accessible from self.
self.__dict__ = self
def subsample(self, n):
"""
Return a random subsample of size n from the merged, thinned chains.
Note that calling this separately for different parameters will not
produced a set of parameter vectors from the posterior; the parameter
values will be from different times in the (tinned) chains.
"""
if n > len(self.thinned):
raise ValueError('Requested sample size > thinned chain length!')
return random.choice(self.thinned, n, replace=False)
def trace(self, chain=None, step=True, axes=None,
xlabel=None, ylabel=None, **kwds):
"""
Make a trace plot for the samples in chain `chain`. If `chain` is None,
show traces for all chains, iterating colors according to mpl defaults.
By default, a step plot is used; set `step` to False for a line plot.
"""
if axes is None:
fig = plt.figure(figsize=(10,4))
fig.subplots_adjust(bottom=.2, top=.9)
axes = plt.subplot(111)
if chain is None:
if step:
times = xrange(self.chains.shape[0])
for c in range(self.chains.shape[1]):
axes.step(times, self.chains[:,c], where='pre',
label='%i'%c, **kwds)
else:
for c in range(self.chains.shape[1]):
axes.plot(self.chains[:,c], **kwds)
else:
if step:
times = xrange(self.chains.shape[0])
axes.step(times, self.chains[:,chain], where='pre',
label='%i' % chain, **kwds)
else:
axes.plot(self.chains[:,chain], **kwds)
if xlabel:
axes.set_xlabel(xlabel)
else:
axes.set_xlabel('Sample #')
if ylabel:
axes.set_ylabel(ylabel)
else:
axes.set_ylabel(self.name)
if chain is None:
axes.legend(fontsize='small', labelspacing=.2, borderpad=.3)
axes.figure.show() # needed for display update with axes
def str(self, fmt=None):
"""
Return a string summarizing fit results.
If `fmt` is provided it is used as the format for the float values
in point and interval estimates. The default format is '.3g' for
all parameters except log_p, for which it is '.2f'.
"""
if fmt is None:
if self.name == 'log_p':
fmt = '.2f' # log_p scale is absolute, ~1. per param
else:
fmt = '.3g'
s = 'Parameter <{}> summary:\n'.format(self.name)
s += 'Convergence and mixing diagnostics: '
s += 'Rhat = {:.2f}, ESS = {:d}\n'.format(self.Rhat, int(self.ess))
s += 'Mean (se), median, sd: {:{fmt}} ({:{fmt}}), {:{fmt}}, {:{fmt}}\n'.format(
self.mean, self.se_mean, self.median, self.sd, fmt=fmt)
s += 'Central intvls: 50%: [{:{fmt}}, {:{fmt}}]; 95%: [{:{fmt}}, {:{fmt}}]\n'.format(
self.q25, self.q75, self.q025, self.q975, fmt=fmt)
return s
def __str__(self):
return self.str()
def fitparams2attrs(fit, obj):
"""
Extract parameter space info from a Stan fit object, storing it as
attributes of the passed object `obj`.
Extracted info includes (by attribute name):
`par_names` : list of names of model parameters (unicode strings), not
including the log_p "parameter" also tracked by Stan
`par_dims` : dict of dimensions of parameters
`par_attr_names` : dict of attribute names used to store parameter values
in a StanFitResults instance; this is usually just the parameter name
unless there is a collision with one of the initial attributes of
the instance, in which case an underscore is appended to the name
"""
obj.par_names = fit._get_param_names() # unicode param names
obj.par_dims = {}
for name, dim in zip(obj.par_names, fit._get_param_dims()):
obj.par_dims[name] = dim
# Make an index for accessing chains in fit.extract() results.
# Note that 'lp__' is included here, and used in _make_param_handler.
indx = 0
obj.par_indx = {}
for name in obj.par_names:
obj.par_indx[name] = indx
dims = obj.par_dims[name]
if dims:
indx += np.prod(dims)
else:
indx += 1 # technically could use prod(dims)=1. for dims=[]
# obj.log_p_indx = obj.par_indx['lp__']
# Stan includes log(prob) in the param list; we'll track it separately
# so remove it from the param info.
indx_of_lp = obj.par_names.index('lp__')
del obj.par_names[indx_of_lp]
del obj.par_dims['lp__']
# del obj.par_indx['lp__']
# Collect attribute names for storing param info, protecting from name
# collision in the namespace of `obj`.
# *** This doesn't protect against subsequent collision/overwriting of
# parameter attributes by subsequent values. ***
# TODO: Make sure all needed class attributes are defined before this
# runs, or otherwise protected.
par_attr_names = {}
for name in obj.par_names:
if hasattr(obj, name):
name_ = name + '_'
if hasattr(obj, name_):
raise ValueError('Cannot handle param name collision!')
print '*** Access param "{0}" via "{0}_". ***'.format(name)
par_attr_names[name] = name_
else:
par_attr_names[name] = name
obj.par_attr_names = par_attr_names
class StanFitResults:
"""
Container class storing all results from a Stan fit, i.e., a run of
a StanModel instance's sample() command.
"""
# These keys are from the raw summary col names; hope they won't change!
# Map them to valid Python attribute names.
col_map = {'mean':'mean',
'se_mean' : 'se_mean',
'sd' : 'sd',
'2.5%' : 'q025',
'25%' : 'q25',
'50%' : 'median',
'75%' : 'q75',
'97.5%' : 'q975',
'n_eff' : 'ess',
'Rhat' : 'Rhat'}
def __init__(self, fitter, stan_fit):
"""
Gather results from a StanModel fit (a posterior sampling run),
providing access via attributes.
Parameters
----------
fitter : StanFitter instance
The StanFitter instance that implemented the fit; model properties
describing the fit are accessed from `fitter`
stan_fit : PyStan fit instance
PyStan fit object with results of a posterior sampling run
"""
self.fitter = fitter
self.fit = stan_fit
fitparams2attrs(stan_fit, self)
self._get_table_info()
self._gather_sample_results()
def _get_table_info(self):
"""
Get information about the summary table from a fit to the current data.
This information (largely dimensional/indexing) is in principle
available once the model and data are both defined, but it is only
available from Stan post-fit.
"""
# Collect info from the fit that shouldn't change if the fit is
# re-run.
self.raw_summary = self.fit.summary() # dict of fit statistics (Rhat, ess...)
# Column names list the various types of statistics.
self.sum_cols = self.raw_summary['summary_colnames']
# Get indices into the summary table for the columns.
self.col_indices = {}
for i, name in enumerate(self.sum_cols):
self.col_indices[name] = i
# Row names list the parameters; convert from an ndarray to a list.
self.sum_rows = [name for name in self.raw_summary['summary_rownames']]
# Get indices for params; for vectors store the offset for 0th entry.
self.par_indices = {}
for name in self.par_names:
if not self.par_dims[name]: # scalar param
self.par_indices[name] = self.sum_rows.index(name)
else: # vector
self.par_indices[name] = self.sum_rows.index(name+'[0]')
def _make_param_handler(self, name, row=None, item=None, log_p=False):
"""
Create a ParamHandler instance for parameter name `name` and make
it an attribute, using data from (row,col) in the fit summary table.
Call with (name, row) for a scalar parameter.
Call with (name, row, item) for an element of a vector parameter.
Call with (name, log_p=True) for log(prob).
"""
# Set the key to use for Stan table lookups.
if log_p:
key = 'lp__'
row = -1
else:
key = name
# Scalars and vectors handle names differently; vectors use `item`.
if item is None: # scalar case
pname = name # name to store in the handler
prow = row
permuted = self.permuted[key]
chains = self.chains[:,:,self.par_indx[key]]
else: # vector case
pname = name + '[%i]' % item
prow = row + item
permuted = self.permuted[key][:,item]
chains = self.chains[:,:,self.par_indx[key]+item]
param = ParamHandler(fit=self.fit, name=pname)
param['permuted'] = permuted
param['chains'] = chains
for stat in self.sum_cols:
col = self.col_indices[stat]
param[self.col_map[stat]] = self.summaries[prow,col]
# 95% central credible interval:
param['intvl95'] = (param['q025'], param['q975'])
return param
def _gather_sample_results(self):
"""
Define attributes holding results from the current fit.
"""
# Extract chains, kept separate and ordered (permuted=False), with
# burn-in discarded (inc_warmup=False), as an array indexed as
# [sample #, chain #, param #]; note that log_p is added to the end
# of the param list.
self.chains = self.fit.extract(permuted=False)
# Collect samples from the chains, merged via random permutation
# (permuted=True), with burn-in discarded (inc_warmup=False), as a
# param-keyed dict.
self.permuted = self.fit.extract(permuted=True)
self.summaries = self.raw_summary['summary']
# Populate namespace with handlers for each param, holding
# various data from the fit.
self.min_ess = None
for name in self.par_names:
attr_name = self.par_attr_names[name]
row = self.par_indices[name]
if not self.par_dims[name]: # scalar param
param = self._make_param_handler(name, row)
setattr(self, attr_name, param)
elif len(self.par_dims[name]) == 1: # vector param as list attr
l = []
for i in xrange(self.par_dims[name][0]):
param = self._make_param_handler(name, row, i)
l.append(param)
setattr(self, attr_name, l)
else:
# Could just direct user to summary attribute...
raise NotImplementedError('Only scalar & vector params supported!')
# Find minimum ESS, to guide thinning.
if self.min_ess is None:
self.min_ess = param.ess
else:
self.min_ess = min(self.min_ess, param.ess)
# Make a handler for log_p, the last "parameter" in the Stan table.
param = self._make_param_handler('log_p', log_p=True)
setattr(self, 'log_p', param)
self.min_ess = min(self.min_ess, param.ess)
# Provide samples merged from thinned chains. These are views of
# the chains; the data are not copied.
clen, nc, npar = self.chains.shape # chain length, # chains, # params
tb = self.thinned_by = int(np.ceil(clen / self.min_ess))
for name in self.par_names:
attr_name = self.par_attr_names[name]
if not self.par_dims[name]: # scalar param
param = getattr(self, attr_name)
# Note that a chain is a *column*, not a row.
thinned = param.chains[::tb,:]
param.thinned = np.ravel(thinned, order='F')
elif len(self.par_dims[name]) == 1: # vector param as list
params = getattr(self, attr_name)
for param in params:
thinned = param.chains[::tb,:]
param.thinned = np.ravel(thinned, order='F')
param = getattr(self, 'log_p')
thinned = param.chains[::tb,:]
param.thinned = np.ravel(thinned, order='F')
self.n_thinned = param.thinned.shape[0]
def subsample_indices(self, n):
"""
Return a set of indices defining a random subsample of size n from the
merged, thinned chains.
"""
if n > self.n_thinned:
raise ValueError('Requested sample size > thinned chain length!')
return random.choice(self.n_thinned, n)
def point(self, i):
"""
Return a point in parameter space corresponding to sample `i` in the
thinned, merged chain for each parameter. The point is returned as an
object with both a dict and an attribute interface to the parameter
values, accessed by parameter name.
"""
if i > self.n_thinned:
raise ValueError('Requested sample is beyond thinned chain length!')
d = {}
for name in self.par_names:
attr_name = self.par_attr_names[name]
if not self.par_dims[name]: # scalar param
param = getattr(self, name)
d[attr_name] = param.thinned[i]
elif len(self.par_dims[name]) == 1: # vector param as list
params = getattr(self, attr_name)
l = []
for param in params:
l.append(param.thinned[i])
d[attr_name] = np.array(l)
d['log_p'] = getattr(self, 'log_p').thinned[i]
return ImmutableAttrDict(d)
def log_prob_upar(self, upar_array, adjust_transform=False):
"""
Compute the log posterior PDF for the point in *unconstrained*
parameter space specified by the array `upar_array`.
Internally, Stan works in a parameter space in which the support
for each parameter is the entire real line. If a model parameter
is constrained (e.g., must be positive), Stan internally transforms
to an unconstrained version of the parameter. This method takes
unconstrained parameter values as its arguments.
When `adjust_transform` is True, a log Jacobian term is added, as
used by Stan internally. It should be false for tasks such as
finding the mode in the original parameter space.
"""
return self.fit.log_prob(upar_array, adjust_transform)
def stan_plot(self, par_names=None):
"""
Create a new mpl figure with Stan's default summary plot,
with a marginal PDF estimate and a traceplot produced for model
parameters. The traceplot is created by merging
all chains and randomly permuting the compiled samples.
If `par_names` is None, the plot will contain results for all
parameters (in subplots as necessary). Otherwise, it should be
a list of names of parameters whose summary plots will be produced.
Stan's plot is in fact PyMC's traceplot.
The resulting figure instance is returned.
"""
return self.fit.plot(par_names)
def __str__(self):
return str(self.fit)
class StanFitter:
"""
Helper class for PyStan model fitting, providing automatic caching of
a model, and easy access to fit results via attributes.
Only scalar and vector parameters are supported; in particular,
matrix-valued parameters are not currently supported.
"""
def __init__(self, source, data=None, n_chains=None, n_iter=None,
name=None, n_jobs=-1, **kwds):
"""
Prepare a Stan model; perform a fit (computing posterior samples
and summary statistics) if `data`, `n_chains` and `n_iter` are
provided. If only a subset of these arguments are provided, save
them for possible use in future fits run with the `sample()` method.
If the model is new (or revised), it is compiled and the compiled
code is cached. If the model has been previously compiled (in the
runtime directory), the cached code is used, accelerating startup.
Parameters
----------
source : string
Path to a file (ending with ".stan") containing the Stan code for
a model, or a string containing the code itself
data : dict
Dict of data corresponding to the model's data block
n_chains : int
Number of posterior sampler chains to run
n_iter : int
Number of iterations per chain for the initial run
n_jobs : int, optional
Sample in parallel if possible, using the multiprocessing module
to distribute computations among the specified number of jobs.
(Note that PyStan on Windows does not currently support
multiprocessing.) If -1, all CPUs are used. All Windows runs
use n_jobs=1.
"""
self.name = name
if source.count('\n') == 0 and source[-5:] == '.stan':
with open(source, 'r') as sfile:
self.code = sfile.read()
else:
self.code = source
self.code_hash = md5(self.code.encode('ascii')).hexdigest()
# ID is model name + hash, or just hash if no name:
if name:
self.id = '{}-{}'.format(name, self.code_hash)
else:
self.id = 'Anon-{}'.format(self.code_hash)
self._compile()
self.data = data
self.n_chains = n_chains
self.n_iter = n_iter
self.set_n_jobs(n_jobs)
if data:
self.set_data(data)
# An actual fit, if one is fully specified.
if data is not None and n_chains is not None and n_iter is not None:
fit = self.sample(n_iter=n_iter, n_chains=n_chains, data=data, **kwds)
self.fits = [fit]
else:
self.fits = None
def _compile(self):
"""
Compile a Stan model if necessary, loading a previously compiled
version if available.
"""
cache_path = 'cached-model-{}.pkl'.format(self.id)
files = glob.glob(cache_path)
if files:
cache_path = files[0]
self.name, self.id, self.model = cPickle.load(open(files[0], 'rb'))
print 'Using cached StanModel from {}...'.format(files[0])
else:
self.model = pystan.StanModel(model_code=self.code)
with open(cache_path, 'wb') as f:
cPickle.dump((self.name, self.id, self.model), f)
def set_n_jobs(self, n_jobs):
"""
Set the number of multiprocessing jobs to use, adjusting the
number to always be 1 on Windows platforms.
If `n_jobs` is -1, all CPUs will be used (except on Windows).
"""
if plat_is_win:
self.n_jobs = 1
else:
self.n_jobs = n_jobs
def set_data(self, data):
"""
Set the data info dictionary, and collect info about parameters for an
application of the model to the dataset.
Note that since hierarchical models are supported by Stan, the
parameter space may not be completely defined until a dataset is
specified (the dataset size determines the number of latent
parameters in hierarchical models).
"""
self.data = data
self.fit = self.model.fit_class(self.data)
fitparams2attrs(self.fit, self)
def sample(self, n_iter=None, n_chains=None, data=None, **kwds):
"""
Run a posterior sampler using the compiled model, potentially using new
data.
The argument order was chosen to make it easiest to refit the same
data with another (perhaps longer) run of the sampler; sample(n) does
this.
This skips the model compilation step, but otherwise runs a fresh
MCMC chain.
"""
if n_iter is None:
n_iter = self.n_iter
else:
self.n_iter = n_iter
if data is not None:
self.set_data(data)
if n_chains is None:
n_chains = self.n_chains
else:
self.n_chains = n_chains
self.n_iter = n_iter
# The actual fit!
fit = self.model.sampling(data=self.data, chains=self.n_chains,
iter=self.n_iter, n_jobs=self.n_jobs, **kwds)
# fit = pystan.stan(fit=self.fit, data=self.data, chains=self.n_chains,
# iter=self.n_iter, **kwds)
# *** Consider gathering model info from the 1st fit to a data set
# here, e.g., as in _get_table_info().
return StanFitResults(self, fit)
def mode(self, **kwds):
"""
Return the mode of the posterior PDF as an object with both a dict
and an attribute interface to the parameter values.
Any keyword arguments are passed to PyStan's optimizing() method.
See the docstring for self.model.optimizing for more info. Do
not provide an as_vector argument.
"""
mode_dict = self.model.optimizing(data=self.data, as_vector=False, **kwds)
point = ImmutableAttrDict(mode_dict['par'])
point.log_p = mode_dict['value']
return point
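# --- Editor's note: minimal usage sketch (not part of the original module). --
# The Stan model code and the data below are invented purely for illustration.
if __name__ == '__main__':
    demo_code = """
    data { int<lower=1> N; vector[N] y; }
    parameters { real mu; real<lower=0> sigma; }
    model { y ~ normal(mu, sigma); }
    """
    demo_data = dict(N=5, y=[1.1, 0.4, -0.3, 0.8, 1.7])
    fitter = StanFitter(demo_code, name='demo')
    results = fitter.sample(n_iter=2000, n_chains=4, data=demo_data)
    print results.mu          # ParamHandler with summary stats for `mu`
    print results.mu.intvl95  # 95% central credible interval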
| [
"[email protected]"
]
| |
ede3854aea68816b248a134c73f8b2aa365b8327 | e8bacf4e4443ea2b8459bf7975d1ff315746cc61 | /.venv/lib/python3.8/site-packages/pygments/lexers/ecl.py | 2aba635002b5d723c1eaa4eaa21f90ef9f3eec9e | [
"Apache-2.0"
]
| permissive | WhiteBuffaloTribe/Dragon-Token | 657589873de5a62be858f152808c5bc2edd1fd56 | d9b4d54268e03de1987522a779ed805137e9468f | refs/heads/main | 2023-07-11T06:13:33.525775 | 2021-08-20T21:52:05 | 2021-08-20T21:52:05 | 398,401,607 | 0 | 0 | Apache-2.0 | 2021-08-20T21:13:26 | 2021-08-20T21:13:26 | null | UTF-8 | Python | false | false | 6,270 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
"""
Lexer for the declarative big-data `ECL
<https://hpccsystems.com/training/documentation/ecl-language-reference/html>`_
language.
.. versionadded:: 1.5
"""
name = 'ECL'
aliases = ['ecl']
filenames = ['*.ecl']
mimetypes = ['application/x-ecl']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
include('statements'),
],
'whitespace': [
(r'\s+', Text),
(r'\/\/.*', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
],
'statements': [
include('types'),
include('keywords'),
include('functions'),
include('hash'),
(r'"', String, 'string'),
(r'\'', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
(r'0x[0-9a-f]+[lu]*', Number.Hex),
(r'0[0-7]+[lu]*', Number.Oct),
(r'\d+[lu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]+', Operator),
(r'[{}()\[\],.;]', Punctuation),
(r'[a-z_]\w*', Name),
],
'hash': [
(r'^#.*$', Comment.Preproc),
],
'types': [
(r'(RECORD|END)\D', Keyword.Declaration),
(r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
bygroups(Keyword.Type, Text)),
],
'keywords': [
(words((
'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
'WAIT', 'WHEN'), suffix=r'\b'),
Keyword.Reserved),
# These are classed differently, check later
(words((
'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE',
'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT',
'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL',
'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 'JOINED',
'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO',
'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE',
'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE',
'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP',
'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW',
'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM',
'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD',
'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'),
Keyword.Reserved),
],
'functions': [
(words((
'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE',
'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS',
'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE',
'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE',
'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID',
'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP',
'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE',
'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE',
'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED',
'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED',
'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF',
'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH',
'THISNODE', 'TOPN', 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE',
'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
Name.Function),
],
'string': [
(r'"', String, '#pop'),
(r'\'', String, '#pop'),
(r'[^"\']+', String),
],
}
def analyse_text(text):
"""This is very difficult to guess relative to other business languages.
<- in conjunction with BEGIN/END seems relatively rare though."""
result = 0
if '<-' in text:
result += 0.01
if 'BEGIN' in text:
result += 0.01
if 'END' in text:
result += 0.01
return result
| [
"[email protected]"
]
| |
35a4db3609ff41fec26d012ebfd1b23d77e6693a | 3bd8c98c260a783235bb9ab30bfcd645434bfeb0 | /custom_user/migrations/0001_initial.py | d2859f5c2f4b21901f0a48aefa633945a2c7f4c7 | []
| no_license | utkbansal/gharonda | 7006320e86afa5f892ee53c2c588f8e2489d3038 | a183ed542639d044130196ccf32ae83911fbe130 | refs/heads/master | 2021-01-10T02:09:45.159143 | 2015-09-26T08:06:55 | 2015-09-26T08:06:55 | 38,991,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('first_name', models.TextField()),
('last_name', models.TextField()),
('email', models.EmailField(unique=True, max_length=254)),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ('created_on',),
'db_table': 'users',
},
),
migrations.CreateModel(
name='AccessToken',
fields=[
('access_token', models.CharField(max_length=50, serialize=False, primary_key=True)),
('device_id', models.CharField(default=None, max_length=255)),
('device_type', models.CharField(default=None, max_length=10)),
('push_id', models.CharField(default=None, max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='BrokerProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('license_no', models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('address', models.TextField()),
],
),
migrations.CreateModel(
name='ContactNumber',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contact_no', models.IntegerField()),
('contact_type', models.CharField(max_length=255)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='brokerprofile',
name='company',
field=models.ForeignKey(to='custom_user.Company'),
),
migrations.AddField(
model_name='brokerprofile',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
32cdf1d579b17b1eb5c709ee08b58ddabea33509 | 331a072232cadac7ee40f139be010502e2048c54 | /test/integration_test/tools/lib/ofp/ofp_meter_stats_reply.py | a54325424c949cfedc4564ed7794fa4438d62ffe | [
"Apache-2.0"
]
| permissive | zewei/lagopus | ab3790c561ed00f5a7af5da2e18543600e84b886 | 98bfe2f007729191b91466270bc82e1288c2e7c3 | refs/heads/master | 2021-01-22T16:00:25.312867 | 2016-02-11T14:40:33 | 2016-02-11T14:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | import os
import sys
import copy
import logging
from checker import *
from ofp import register_ofp_creators
from ofp import OfpBase
from ofp_meter_stats import SCE_METER_STATS
from ofp_meter_stats import OfpMeterStatsCreator
# YAML:
# meter_stats_reply:
# flags: 0
# body:
# - meter_stats:
# meter_id: 0
# flow_count: 0
# packet_in_count: 0
# byte_in_count: 0
# duration_sec: 0
# duration_nsec: 0
# band_stats:
# - band_stats:
# packet_band_count: 0
# byte_band_count: 0
SCE_METER_STATS_REPLY = "meter_stats_reply"
SCE_METER_STATS_BODY = "body"
@register_ofp_creators(SCE_METER_STATS_REPLY)
class OfpMeterStatsReplyCreator(OfpBase):
@classmethod
def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
# MeterStatsReply.
kws = copy.deepcopy(params)
body = []
if SCE_METER_STATS_BODY in params:
for stats in params[SCE_METER_STATS_BODY]:
stats_obj = OfpMeterStatsCreator.create(test_case_obj, dp,
ofproto, ofp_parser,
stats[SCE_METER_STATS])
body.append(stats_obj)
kws[SCE_METER_STATS_BODY] = body
# create MeterStatsReply.
msg = ofp_parser.OFPMeterStatsReply(dp, **kws)
msg.type = ofproto.OFPMP_METER
msg._set_targets(["version", "msg_type",
"body", "flags"])
return msg
| [
"[email protected]"
]
| |
a98e88df142505b55c6660fa7b7217ee02afd1bd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2840/60793/261082.py | 2efd81729769b2a2f3cb0be53ea8f9c20c7524ef | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | def lucky_num_count(num: int) -> int:
count = 0
num_ls = [int(x) for x in str(num)]
for i in num_ls:
if i == 4 or i == 7:
count += 1
return count
k = list(map(int, input().split(" ")))[1]
ls = list(map(int, input().split(" ")))
result = 0
for a in ls:
if lucky_num_count(a) <= k:
result += 1
print(result)
| [
"[email protected]"
]
| |
427ac42f326854d80209ad753a2bcf9e0f0e48fe | d63c4b9e05638d6abb68333edf43936134b97570 | /tests/fixtures/runners.py | a849986b7ea6682c9f0e215a67235098dc85438d | [
"Apache-2.0",
"Python-2.0"
]
| permissive | SwissDataScienceCenter/renku-python | 316dc83646e9014803dff268438d34e844ba0b54 | e0ff587f507d049eeeb873e8488ba8bb10ac1a15 | refs/heads/develop | 2023-08-31T20:33:09.342385 | 2023-08-24T08:15:46 | 2023-08-24T08:15:46 | 100,947,017 | 30 | 25 | Apache-2.0 | 2023-09-12T21:52:34 | 2017-08-21T11:49:21 | Python | UTF-8 | Python | false | false | 8,429 | py | #
# Copyright 2021 Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku common configurations."""
import contextlib
import os.path
import subprocess
import sys
import time
from pathlib import Path
from typing import IO, Any, Mapping, Optional, Sequence, Union, cast
import click
import pytest
from click.testing import CliRunner, Result
class OutputStreamProxy:
"""A proxy class to allow reading from stdout/stderr objects."""
def __init__(self, stream):
self._stream = stream
self._buffer: bytes = b""
def __getattr__(self, name):
return getattr(self._stream, name)
def __setattr__(self, name, value):
if name == "_stream":
super().__setattr__(name, value)
else:
setattr(self._stream, name, value)
def write(self, value: str):
"""Write to the output stream."""
# NOTE: Disabled the write if stream is a TTY to avoid cluttering the screen during tests.
if not self._stream.isatty():
self._stream.write(value)
byte_value = value.encode("utf-8")
self._buffer += byte_value
return len(byte_value)
def getvalue(self) -> bytes:
"""Return everything that has been written to the stream."""
return self._buffer
class RenkuResult(Result):
"""Holds the captured result of an invoked RenkuRunner."""
@property
def output(self) -> str:
"""The (standard) output as unicode string."""
separator = "\n" if self.stdout and self.stderr else ""
return f"{self.stdout}{separator}{self.stderr}"
class RenkuRunner(CliRunner):
"""Custom CliRunner that allows passing stdout and stderr to the ``invoke`` method."""
def __init__(self, mix_stderr: bool = False):
# NOTE: Always separate stdout and stderr
super().__init__(mix_stderr=mix_stderr)
self._stderr_was_set = False
@contextlib.contextmanager
def isolation(self, input=None, env=None, color: bool = False):
"""See ``click.testing.CliRunner::isolation``."""
# Preserve original stdout and stderr
stdout = OutputStreamProxy(sys.stdout) # type: ignore
stderr = stdout if self.mix_stderr else OutputStreamProxy(sys.stderr) # type: ignore
# NOTE: CliRunner.isolation replaces original stdout and stderr with BytesIO so that it can read program
# outputs from them. This causes Renku CLI to create custom terminal (since stdout and stderr are not tty)
# and therefore, tests fail because nothing is printed to the outputs. We use a proxy around the original
# stderr and stdout so that we can read from them without a need for BytesIO objects.
with super().isolation(input=input, env=env, color=color):
with contextlib.redirect_stdout(stdout), contextlib.redirect_stderr(stderr): # type: ignore
yield stdout, stderr
def invoke( # type: ignore
self,
cli: click.BaseCommand,
args: Optional[Union[Path, str, Sequence[Union[Path, str]]]] = None,
input: Optional[Union[str, bytes, IO]] = None,
env: Optional[Mapping[str, Optional[str]]] = None,
catch_exceptions: bool = True,
color: bool = False,
stdin: Optional[Union[str, Path, IO]] = None,
stdout: Optional[Union[str, Path, IO]] = None,
stderr: Optional[Union[str, Path, IO]] = None,
replace_argv: bool = True,
**extra: Any,
) -> Result: # type: ignore
"""See ``click.testing.CliRunner::invoke``."""
from renku.core.util.contexts import Isolation
from renku.core.util.util import to_string
assert not input or not stdin, "Cannot set both ``stdin`` and ``input``"
# NOTE: Set correct argv when running tests to have a correct commit message
if replace_argv:
argv = [] if not args else [args] if isinstance(args, (Path, str)) else list(args)
if cli.name != "cli" and cli.name is not None:
argv.insert(0, cli.name)
set_argv(args=argv)
if stderr is not None:
self.mix_stderr = False
self._stderr_was_set = True
if isinstance(args, Path):
args = str(args)
elif args is not None and not isinstance(args, str):
args = [to_string(a) for a in args]
if isinstance(stdin, Path):
stdin = str(stdin)
with Isolation(stdout=stdout, stderr=stderr):
result = super().invoke(
cli=cli,
args=cast(Optional[Union[str, Sequence[str]]], args),
input=stdin or input,
env=env,
catch_exceptions=catch_exceptions,
color=color,
**extra,
)
if self.mix_stderr or self._stderr_was_set:
return result
return RenkuResult(
runner=result.runner,
stdout_bytes=result.stdout_bytes,
stderr_bytes=result.stderr_bytes,
return_value=result.return_value,
exit_code=result.exit_code,
exception=result.exception,
exc_info=result.exc_info,
)
def set_argv(args: Optional[Union[Path, str, Sequence[Union[Path, str]]]]) -> None:
"""Set proper argv to be used in the commit message in tests; also, make paths shorter by using relative paths."""
def to_relative(path):
if not path or not isinstance(path, (str, Path)) or not os.path.abspath(path):
return path
return os.path.relpath(path)
def convert_args():
"""Create proper argv for commit message."""
if not args:
return []
elif isinstance(args, (str, Path)):
return [to_relative(args)]
return [to_relative(a) for a in args]
sys.argv[:] = convert_args()
@pytest.fixture()
def run_shell():
"""Create a shell cmd runner."""
def run_(cmd, return_ps=None, sleep_for=None, work_dir=None):
"""Spawn subprocess and execute shell command.
Args:
cmd(str): The command to run.
return_ps: Return process object.
sleep_for: After executing command sleep for n seconds.
work_dir: The directory where the command should be executed from
Returns:
Process object or tuple (stdout, stderr).
"""
set_argv(args=cmd)
ps = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=work_dir
)
if return_ps:
return ps
output = ps.communicate()
if sleep_for:
time.sleep(sleep_for)
return output
return run_
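# Illustrative usage sketch (editor's addition, not part of the original
# fixtures; the command and assertion below are assumptions):
#
#     def test_git_status(run_shell, tmp_path):
#         stdout, _ = run_shell("git init && git status", work_dir=str(tmp_path))
#         assert b"No commits yet" in stdout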
@pytest.fixture()
def runner():
"""Create a runner on isolated filesystem."""
return RenkuRunner()
@pytest.fixture()
def run(runner, capsys):
"""Return a callable runner."""
from renku.core.util.contexts import Isolation
from renku.ui.cli import cli
def generate(args=("update", "--all"), cwd=None, **streams):
"""Generate an output."""
with capsys.disabled(), Isolation(cwd=cwd, **streams):
set_argv(args=args)
try:
cli.main(args=args, prog_name=runner.get_default_prog_name(cli))
except SystemExit as e:
return 0 if e.code is None else e.code
except Exception:
raise
else:
return 0
return generate
@pytest.fixture()
def isolated_runner():
"""Create a runner on isolated filesystem."""
runner = RenkuRunner()
with runner.isolated_filesystem():
yield runner
| [
"[email protected]"
]
| |
016e01530920eee745ba36888f79657ff27cb21d | 1f8812be38ff5dfc2bf8488e757077ebae1791be | /apps/askfm/migrations/0004_question_anonymous.py | 1478e6682af18fbd9cfb3e792902816f02d557ee | [
"MIT"
]
| permissive | Morsa11/AskFmClone | d51e28a2568a2678af488fcbda63c2b1a23943e3 | 50ded5126926989627b7aa0fb445da5a8a4a5d68 | refs/heads/master | 2020-04-25T21:46:03.899930 | 2016-12-13T07:51:57 | 2016-12-13T07:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('askfm', '0003_auto_20160823_0038'),
]
operations = [
migrations.AddField(
model_name='question',
name='anonymous',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
]
| |
9398a3090703eb99a86009ee5f9c25b5465dcd51 | 209aae9f40657d48461bed5e081c4f235f86090a | /2019/day2-2.py | 6027822c550d9b2f5a284bd7e34efa848f918fef | []
| no_license | scheidguy/Advent_of_Code | 6e791132157179928e1415f49467ad221ef1e258 | fbc09e4d26502b9a77e0c8d2840b11ec85a3c478 | refs/heads/main | 2023-03-05T12:34:15.343642 | 2021-02-20T00:27:58 | 2021-02-20T00:27:58 | 329,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py |
import copy
f = open('day2-1_input.txt')
# f = open('day2-1_debug.txt')
text = f.readlines()
f.close()
program = [int(i) for i in text[0].strip().split(',')]
target = 19690720
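# Comment added for clarity: the nested loops below brute-force the part-two search --
# every (noun, verb) pair in 0..99 is written into a fresh copy of the program, the
# Intcode interpreter runs until opcode 99, and the search stops once position 0
# holds the target value.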
P = copy.deepcopy(program)
for noun in range(100):
for verb in range(100):
program = copy.deepcopy(P)
program[1] = noun
program[2] = verb
pos = 0
while program[pos] != 99:
first = program[program[pos+1]]
second = program[program[pos+2]]
ind = program[pos+3]
if program[pos] == 1:
program[ind] = first + second
elif program[pos] == 2:
program[ind] = first * second
            else:
                print('ERROR')
                break
pos += 4
if program[0] == target: break
if program[0] == target: break
print(100*noun + verb)
| [
"[email protected]"
]
| |
ab23504080ede563743c2867277a27dba9d1b2c4 | 065acd70109d206c4021954e68c960a631a6c5e3 | /shot_detector/filters/dsl/filter_condition_features.py | 0f001a19c2e2cb1545711dbabebd2c822dfea2c6 | []
| permissive | w495/python-video-shot-detector | bf2e3cc8175687c73cd01cf89441efc349f58d4d | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | refs/heads/master | 2022-12-12T02:29:24.771610 | 2017-05-15T00:38:22 | 2017-05-15T00:38:22 | 37,352,923 | 20 | 3 | BSD-3-Clause | 2022-11-22T01:15:45 | 2015-06-13T01:33:27 | Python | UTF-8 | Python | false | false | 1,779 | py | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from shot_detector.filters.base.base_plain_filter import BasePlainFilter
class FilterConditionFeatures(BasePlainFilter):
"""
    Applies a transformation to the features that satisfy a condition.
    The main active method is `filter_feature_item`.
    To apply it you should pass a `condition` callable and an `apply` callable;
    features for which `condition` holds are passed through `apply`,
    all other features are returned unchanged.
"""
__logger = logging.getLogger(__name__)
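    # Illustrative sketch (not part of the original module): with
    # condition=lambda value: value > 0 and apply=lambda value: value * 2,
    # filter_feature_item(3, condition=condition, apply=apply) returns 6,
    # while filter_feature_item(-3, condition=condition, apply=apply)
    # returns -3 unchanged.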
def filter_features(self,
features,
condition=None,
apply=None,
**kwargs):
"""
        :param features: iterable of feature values to filter.
        :param condition: callable that returns True for features to transform.
        :param apply: callable applied to features satisfying the condition.
        :param kwargs: unused; kept for interface compatibility.
        :return: generator over the (possibly transformed) features.
"""
        for feature in features:
            if condition and condition(feature):
                yield apply(feature)
            else:
                yield feature
def filter_feature_item(self,
feature,
condition=None,
apply=None,
**_):
"""
        :param feature: a single feature value.
        :param condition: callable that returns True if the feature should be transformed.
        :param apply: callable applied to the feature when the condition holds.
        :return: the transformed feature, or the original feature unchanged.
"""
if condition and condition(feature):
feature = apply(feature)
return feature
# noinspection PyUnusedLocal
@staticmethod
def _apply_filter_operator(first,
second,
operator=None,
**_):
if first is False:
return second
| [
"[email protected]"
]
| |
a84213e3bd78eee8d8b8945938126d1d8f14b79c | eff4996954a41bc8edc98056be896bf5e5e7f5bd | /基礎編/19.continue/test.py | dff737b10034560f6ab2e5bf41b64566c6e95dcd | []
| no_license | tschs123/python-izm | 77675bf61e273bd51bb4ba6e3bf129150c68daef | 99d7f45a91f32b024b18fb9fc0e852f496f37891 | refs/heads/master | 2021-03-22T19:42:49.171446 | 2020-03-15T11:37:10 | 2020-03-15T11:37:10 | 247,395,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | #continueの基礎
for num in range(100):
if num % 10:
continue
print(num) | [
"[email protected]"
]
| |
374f2018883157cf160b2f2a8718c53e847003ed | 52151d0ae89622ffd5dcecdb626feb1f44e53761 | /lists/views.py | dfd1e1823872187eff8e4f38334b68962bfe4e13 | []
| no_license | jms7446/python-tdd | e77ef943fc50c0e8f9f6adb89cf0d2b47b022eb7 | 0fe47ecc0c66d302d361af39b7dc84f4915a411e | refs/heads/master | 2022-05-11T01:19:47.230300 | 2019-05-26T15:21:36 | 2019-05-26T15:21:36 | 95,457,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from django.shortcuts import render, redirect
from django.http import HttpRequest
from django.core.exceptions import ValidationError
from lists.models import Item, List
from lists.forms import ItemForm, ExistingListItemForm
def home_page(request: HttpRequest):
return render(request, 'home.html', context={'form': ItemForm()})
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ExistingListItemForm(for_list=list_)
if request.method == 'POST':
form = ExistingListItemForm(for_list=list_, data=request.POST)
if form.is_valid():
form.save()
return redirect(list_)
return render(request, 'list.html', {'list': list_, 'form': form})
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
form.set_list(list_)
form.save()
return redirect(list_)
else:
return render(request, 'home.html', context={'form': form})
| [
"[email protected]"
]
| |
c4f62f947dcc44df833367e426cdf7e6301a8eb5 | af7bc5841fd980c09da27c69dbd0cee3a9eb402a | /shop/migrations/0016_auto_20201117_2137.py | 57c8594ed7da9785c2eafce2f33406abeeea1bf4 | []
| no_license | homutovan/Django-diplom | 35c78f39f5fcdfeec7005e039242c7f4e6b19cef | 72f9f2dd49d2c760cee8cfe2609b278f8688cacc | refs/heads/master | 2023-01-14T10:54:22.498027 | 2020-11-20T20:51:48 | 2020-11-20T20:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.2.10 on 2020-11-17 21:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0015_auto_20201116_2341'),
]
operations = [
migrations.AlterField(
model_name='order',
name='amount_goods',
field=models.IntegerField(default=0, verbose_name='Количество товара'),
),
]
| [
"[email protected]"
]
| |
f3201f180930a559c2e0b5616789f4d32b47e9f5 | 1032ebbc585d0f9de33247ba6f30e2ffc8916aee | /slidescript/antlr3/dfa.py | bd923fbc85c5fca9c9724e0ea0ba9ebfffb724de | []
| no_license | mdornseif/Slidescript | 98db95cd55bc9838836b786cca1a4db18bb62375 | 60cc24049b75222edd4046afa08f6d1252709b33 | refs/heads/master | 2021-12-29T17:44:54.845035 | 2010-04-12T09:58:54 | 2010-04-12T09:58:54 | 544,207 | 0 | 0 | null | 2021-12-17T19:45:45 | 2010-03-03T08:02:10 | Python | UTF-8 | Python | false | false | 7,655 | py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from slidescript.antlr3.constants import EOF
from slidescript.antlr3.exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
"""@brief A DFA implemented as a set of transition tables.
Any state that has a semantic predicate edge is special; those states
are generated with if-then-else structures in a specialStateTransition()
which is generated by cyclicDFA template.
"""
def __init__(
self,
recognizer, decisionNumber,
eot, eof, min, max, accept, special, transition
):
## Which recognizer encloses this DFA? Needed to check backtracking
self.recognizer = recognizer
self.decisionNumber = decisionNumber
self.eot = eot
self.eof = eof
self.min = min
self.max = max
self.accept = accept
self.special = special
self.transition = transition
def predict(self, input):
"""
From the input stream, predict what alternative will succeed
using this DFA (representing the covering regular approximation
to the underlying CFL). Return an alternative number 1..n. Throw
an exception upon error.
"""
mark = input.mark()
s = 0 # we always start at s0
try:
for _ in xrange(50000):
#print "***Current state = %d" % s
specialState = self.special[s]
if specialState >= 0:
#print "is special"
s = self.specialStateTransition(specialState, input)
if s == -1:
self.noViableAlt(s, input)
return 0
input.consume()
continue
if self.accept[s] >= 1:
#print "accept state for alt %d" % self.accept[s]
return self.accept[s]
# look for a normal char transition
c = input.LA(1)
#print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
#print "range = %d..%d" % (self.min[s], self.max[s])
if c >= self.min[s] and c <= self.max[s]:
# move to next state
snext = self.transition[s][c-self.min[s]]
#print "in range, next state = %d" % snext
if snext < 0:
#print "not a normal transition"
# was in range but not a normal transition
# must check EOT, which is like the else clause.
# eot[s]>=0 indicates that an EOT edge goes to another
# state.
if self.eot[s] >= 0: # EOT Transition to accept state?
#print "EOT trans to accept state %d" % self.eot[s]
s = self.eot[s]
input.consume()
# TODO: I had this as return accept[eot[s]]
# which assumed here that the EOT edge always
# went to an accept...faster to do this, but
# what about predicated edges coming from EOT
# target?
continue
#print "no viable alt"
self.noViableAlt(s, input)
return 0
s = snext
input.consume()
continue
if self.eot[s] >= 0:
#print "EOT to %d" % self.eot[s]
s = self.eot[s]
input.consume()
continue
# EOF Transition to accept state?
if c == EOF and self.eof[s] >= 0:
#print "EOF Transition to accept state %d" \
# % self.accept[self.eof[s]]
return self.accept[self.eof[s]]
# not in range and not EOF/EOT, must be invalid symbol
self.noViableAlt(s, input)
return 0
else:
raise RuntimeError("DFA bang!")
finally:
input.rewind(mark)
def noViableAlt(self, s, input):
if self.recognizer._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException(
self.getDescription(),
self.decisionNumber,
s,
input
)
self.error(nvae)
raise nvae
def error(self, nvae):
"""A hook for debugging interface"""
pass
def specialStateTransition(self, s, input):
return -1
def getDescription(self):
return "n/a"
## def specialTransition(self, state, symbol):
## return 0
def unpack(cls, string):
"""@brief Unpack the runlength encoded table data.
Terence implemented packed table initializers, because Java has a
size restriction on .class files and the lookup tables can grow
pretty large. The generated JavaLexer.java of the Java.g example
would be about 15MB with uncompressed array initializers.
Python does not have any size restrictions, but the compilation of
such large source files seems to be pretty memory hungry. The memory
consumption of the python process grew to >1.5GB when importing a
        15MB lexer, eating all my swap space, and I was too impatient to see
        if it could finish at all. With packed initializers that are unpacked
at import time of the lexer module, everything works like a charm.
"""
ret = []
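        # Illustrative example (not from the original source): the packed string
        # u"\u0002\u0005\u0001\uffff" decodes to [5, 5, -1] -- each (count, value)
        # character pair is expanded, with 0xFFFF standing in for -1.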
for i in range(len(string) / 2):
(n, v) = ord(string[i*2]), ord(string[i*2+1])
# Is there a bitwise operation to do this?
if v == 0xFFFF:
v = -1
ret += [v] * n
return ret
unpack = classmethod(unpack)
| [
"[email protected]"
]
| |
ad3583d44f92802f1bb6808f3ca8639686fdc5b6 | cf7d96bdd34205ede987f0985dfc9e3ab415ee06 | /reporting_module/sca_report/__init__.py | 16cf67d1b2d109191fe342f65c1b5d5bbaf2e891 | []
| no_license | hendrasaputra0501/btxjalan | afc93467d54a6f20ef6ac46f7359e964ad5d42a0 | d02bc085ad03efc982460d77f7af1eb5641db729 | refs/heads/master | 2020-12-30T11:02:05.416120 | 2017-07-31T01:34:08 | 2017-07-31T01:34:08 | 98,836,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | import sca_report | [
"[email protected]"
]
| |
274b0189eedce0785051ebff12043aba0b2e200d | 80593bc3dd02e80381b801f96820b28e82d9641c | /lib/deprecated/softphone2.py | c7616fd09893b6e457ce124a6636d965e353d5e8 | []
| no_license | mccrorey48/mtaf_private | 39045c1a4b5288b9b9340e29b419590c3beba6bf | 0c65aaedca5189a377a78776f52773eac5645bfa | refs/heads/master | 2023-04-11T08:22:47.455990 | 2018-04-30T18:20:14 | 2018-04-30T18:20:14 | 105,019,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,833 | py | # softphone class that uses simple_pj
import random
import re
from time import time, sleep
import lib.logging_esi as logging_esi
from lib.wrappers import Trace
import lib.softphone.simple_pj as pj
from lib.softphone.wav_audio import create_wav_file
from lib.user_exception import UserException as Ux, UserTimeoutException as Tx
log = logging_esi.get_logger('esi.softphone2')
class Softphone:
lib = None
pbfile = None
dst_uri = None
rec_id = None
rec_slot = None
@Trace(log)
def __init__(self, uri, proxy, password, null_snd=True, dns_list=None, tcp=False,
pbfile=None, rec=True, quiet=True):
self.uri = uri
self.pbfile = pbfile
if not self.lib:
Softphone.lib = pj.PjsuaLib()
self.lib.start(null_snd=null_snd, dns_list=dns_list, tcp=tcp)
if self.pbfile:
create_wav_file(self.pbfile, quiet)
m = re.match('sip:([^@]+)@(.+)', self.uri)
if m:
self.lib.add_account(m.group(1), m.group(2), proxy, password)
self.account_info = pj.account_infos[self.uri]
@Trace(log)
def wait_for_call_status(self, desired_status, timeout=30):
# possible desired_status values: 'call', 'idle', 'early', 'hold'
start = time()
while time() - start < timeout:
if self.account_info.call_status == desired_status:
return time() - start
sleep(0.1)
if self.account_info.call_status == 'call' and desired_status == 'early':
self.teardown_call()
raise Ux('wait for call status "early" terminated call because status was "call"')
else:
raise Tx('wait for call status "%s" timed out after %s seconds' % (desired_status, timeout))
@Trace(log)
def make_call(self, dst_uri):
self.dst_uri = dst_uri
if self.account_info.reg_status != 200:
raise Ux("Can't set up call, registration status (src) %s" % self.account_info.reg_status)
log.debug("%s calling %s" % (self.uri, self.dst_uri))
# print self.dst_uri
self.account_info.call = self.account_info.account.make_call_to_softphone(self.dst_uri)
self.account_info.call.set_callback(pj.MyCallCallback(self.account_info))
@Trace(log)
def end_call(self):
if not self.account_info.call:
raise Ux("end_call(): %s not in call" % self.uri)
log.debug("%s ending call to %s" % (self.uri, self.dst_uri))
self.account_info.call.hangup()
@Trace(log)
def leave_msg(self, length=None):
if not self.account_info.call:
raise Ux("leave_msg(): %s not in call" % self.uri)
sleep(10)
self.account_info.call.dial_dtmf('2')
if length is None:
random.seed(time())
length = random.randrange(10, 30, 1)
sleep(length)
def teardown_call(self):
if self.account_info.call:
self.account_info.call.hangup()
log.debug("%s hanging up" % self.uri)
log.debug("calling wait_for_call_status(%s, 'end', 15)" % self.uri)
self.wait_for_call_status('disconnected', 15)
@Trace(log)
def dial_dtmf(self, dtmf_string):
if self.account_info.call:
for c in list(dtmf_string):
log.debug('%s:send dtmf %s' % (self.uri, c))
self.account_info.call.dial_dtmf(c)
sleep(0.3)
@Trace(log)
def set_monitor_on(self):
pass
@Trace(log)
def set_monitor_off(self):
pass
@Trace(log)
def connect_media(self):
if self.rec_id is None:
raise Ux("connect_media: no media exists")
self.rec_slot = self.lib.recorder_get_slot(self.rec_id)
my_uri = self.call.info().account.info().uri
# self.media_call_slot is set to the call's conference slot when connecting media,
# and set to None when disconnecting, so if it is not None, this is a reconnect
if self.media_call_slot is not None:
# if self.media_call_slot is not None but is not the current call's conference slot,
# it isn't a reconnect, it's a structural program error
if self.media_call_slot != self.call.info().conf_slot:
raise Ux("connect_media: call at slot %d media already connected to call slot %d"
% (self.call.info().conf_slot, self.media_call_slot))
log.debug("%s: disconnecting call slot %d from recorder %s at slot %d"
% (my_uri, self.media_call_slot, self.rec_id, self.rec_slot))
            self.lib.conf_disconnect(self.media_call_slot, self.rec_slot)
if self.player_id is not None:
                self.pb_slot = self.lib.player_get_slot(self.player_id)
log.debug("%s: disconnecting player %s at slot %d to call slot %d"
% (my_uri, self.player_id, self.pb_slot, self.media_call_slot))
                self.lib.conf_disconnect(self.pb_slot, self.media_call_slot)
self.media_call_slot = None
log.debug("%s: connecting call slot %d to recorder %s at slot %d"
% (my_uri, self.call.info().conf_slot, self.rec_id, self.rec_slot))
        self.lib.conf_connect(self.call.info().conf_slot, self.rec_slot)
# if there is a player ID then the player was created during create_media and we can connect it, too
if self.player_id is not None:
            self.pb_slot = self.lib.player_get_slot(self.player_id)
log.debug("%s: connecting player %s at slot %d to call slot %d"
% (my_uri, self.player_id, self.pb_slot, self.call.info().conf_slot))
            self.lib.conf_connect(self.pb_slot, self.call.info().conf_slot)
self.media_call_slot = self.call.info().conf_slot
| [
"[email protected]"
]
| |
bda426d73a162db938d51aa0011a0c12b47e5d89 | 97caa124ffa5da9819c39a16c734165176d90349 | /projects/ideas/api/nba/nba_players.py | adde586a9f8bcb7246108819c3a5de18ab7919ce | [
"Apache-2.0"
]
| permissive | YAtOff/python0 | dd684731065321fd52d475fd2b2105db59f5c19c | b5af5004131d64dd52d42746eddb72b6c43a13c7 | refs/heads/master | 2021-01-18T21:19:11.990434 | 2019-05-29T20:14:23 | 2019-05-29T20:14:23 | 44,601,010 | 6 | 7 | Apache-2.0 | 2019-10-31T22:45:21 | 2015-10-20T11:13:11 | Jupyter Notebook | UTF-8 | Python | false | false | 715 | py | """
NBA API
https://pypi.org/project/nba-api/
Before you start, install:
pip install nba_api
https://github.com/swar/nba_api
"""
from nba_api.stats.static import players
from nba_api.stats.endpoints import commonplayerinfo
name = ""
while name != "exit":
name = input("Player name: ")
result = players.find_players_by_full_name(name)
if result:
player = result[0]
player_info = commonplayerinfo.CommonPlayerInfo(player_id=player["id"])
table = player_info.common_player_info.get_dict()
        for field, value in zip(table["headers"], table["data"][0]):
            print(field, value)
else:
print("No player found! Try again.")
| [
"[email protected]"
]
| |
2564ea2977644b8d4ec91ec350761e03c7cfff6f | f9e3a0fb511470561d3d94bc984dafaee06000cb | /9780596009250/PP3E-Examples-1.2/Examples/PP3E/System/Filetools/site-forward.py | 43c772418aad4c045863a31bed676eaa7d153913 | [
"LicenseRef-scancode-oreilly-notice"
]
| permissive | Sorath93/Programming-Python-book | 359b6fff4e17b44b9842662f484bbafb490cfd3d | ebe4c93e265edd4ae135491bd2f96904d08a911c | refs/heads/master | 2022-12-03T01:49:07.815439 | 2020-08-16T22:19:38 | 2020-08-16T22:19:38 | 287,775,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | ###########################################################################
# Create forward link pages for relocating a web site.
# Generates one page for every existing site file; upload the generated
# files to your old web site. Performance note: the first 2 str.replace
# calls could be moved out of the for loop, but this runs in < 1 second
# on my Win98 machine for 150 site files. Lib note: the os.listdir call
# can be replaced with: sitefiles = glob.glob(sitefilesdir + os.sep + '*')
# but then the file/directory names must be split up manually with:
# dirname, filename = os.path.split(sitefile);
###########################################################################
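# Illustrative template snippet (the real template.html is not shown here): a forward
# page such as '<a href="http://$server$/$home$/$file$">This page has moved</a>' has
# its $server$, $home$, and $file$ placeholders filled in by the loop below.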
import os
servername = 'starship.python.net' # where site is relocating to
homedir = '~lutz/home' # where site will be rooted
sitefilesdir = 'public_html' # where site files live locally
uploaddir = 'isp-forward' # where to store forward files
templatename = 'template.html' # template for generated pages
try:
os.mkdir(uploaddir) # make upload dir if needed
except OSError: pass
template = open(templatename).read() # load or import template text
sitefiles = os.listdir(sitefilesdir) # filenames, no directory prefix
count = 0
for filename in sitefiles:
fwdname = os.path.join(uploaddir, filename) # or + os.sep + filename
print 'creating', filename, 'as', fwdname
filetext = template.replace('$server$', servername) # insert text
filetext = filetext.replace('$home$', homedir) # and write
filetext = filetext.replace('$file$', filename) # file varies
open(fwdname, 'w').write(filetext)
count += 1
print 'Last file =>\n', filetext
print 'Done:', count, 'forward files created.'
| [
"[email protected]"
]
| |
ef153fa9651dace4d24ab5d1475eee7afaf808cb | 6cd690fb01e100f440289ea8fe7342bb58d37e78 | /tests/elemental/combat_elemental_tests.py | 1e175b4d862b30ee622ca4db805356341ff523d9 | []
| no_license | Hammerlord/Monbot | 6db8308ae492d7cfbb6f1bdff909105129924269 | fde8177d9170dddd958a89068a560008259d6e24 | refs/heads/master | 2020-03-07T16:43:20.019123 | 2019-08-29T03:08:33 | 2019-08-29T03:08:33 | 127,591,188 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,556 | py | import unittest
from src.elemental.ability.abilities.wait import Wait
from src.elemental.ability.ability import Ability
from src.elemental.combat_elemental import CombatElemental
from src.elemental.elemental import Elemental
from tests.elemental.elemental_builder import CombatElementalBuilder, ElementalBuilder
class CombatElementalTests(unittest.TestCase):
"""
Tests for CombatElemental, the wrapper class generated when an Elemental enters combat.
"""
def setUp(self):
self.elemental = self.get_elemental()
self.combat_elemental = CombatElementalBuilder().with_elemental(self.elemental).build()
def tearDown(self):
self.combat_elemental = None
self.elemental = None
def get_elemental(self) -> Elemental:
return ElementalBuilder() \
.with_current_hp(5) \
.with_max_hp(50) \
.build()
def get_combat_elemental(self) -> CombatElemental:
return CombatElementalBuilder().with_elemental(self.get_elemental()).build()
def test_starting_mana(self):
error = "CombatElemental didn't have the correct amount of starting mana"
combat_elemental_mana = self.combat_elemental.current_mana
expected_mana = self.elemental.starting_mana
self.assertEqual(combat_elemental_mana, expected_mana, error)
def test_starting_hp(self):
error = "CombatElemental's HP didn't refer to its Elemental's HP"
combat_elemental_hp = self.combat_elemental.current_hp
expected_hp = self.elemental.current_hp
self.assertEqual(combat_elemental_hp, expected_hp, error)
def test_defend_charges(self):
error = "CombatElemental's Defend charges didn't refer to its Elemental's"
combat_elemental_charges = self.combat_elemental.defend_charges
min_charges = 2 # All Elementals have at least two Defend charges
self.assertGreaterEqual(combat_elemental_charges, min_charges, error)
def test_defend_available(self):
error = "CombatElemental didn't have defend available as an ability"
abilities = self.combat_elemental.abilities
has_defend = any([ability for ability in abilities if ability.name == "Defend"])
self.assertTrue(has_defend, error)
def test_defend_unavailable(self):
error = "Defend was available even though there were no defend charges"
self.combat_elemental.update_defend_charges(-self.combat_elemental.defend_charges)
abilities = self.combat_elemental.available_abilities
has_defend = any([ability for ability in abilities if ability.name == "Defend"])
self.assertFalse(has_defend, error)
def test_has_abilities(self):
error = "CombatElemental doesn't have Abilities"
abilities = self.combat_elemental.abilities
self.assertGreater(len(abilities), 0, error)
self.assertIsInstance(abilities[0], Ability, error)
def test_bide_available(self):
error = "Bide wasn't available when there were no other usable abilities"
self.combat_elemental._abilities = []
self.assertIsInstance(self.combat_elemental.available_abilities[0], Wait, error)
def test_bide_unavailable(self):
error = "Bide shouldn't be available if anything else is available"
is_bide_available = any([ability for ability in self.combat_elemental.available_abilities
if ability.name == Wait().name])
self.assertFalse(is_bide_available, error)
def test_take_damage(self):
error = "Reference Elemental didn't take damage when CombatElemental took damage"
prev_hp = self.elemental.current_hp
self.combat_elemental.receive_damage(2, self.get_combat_elemental())
current_hp = self.elemental.current_hp
expected_hp = prev_hp - 2
self.assertEqual(current_hp, expected_hp, error)
def test_heal(self):
error = "Reference Elemental didn't heal when CombatElemental healed"
prev_hp = self.elemental.current_hp
self.combat_elemental.heal(5)
current_hp = self.elemental.current_hp
expected_hp = prev_hp + 5
self.assertEqual(current_hp, expected_hp, error)
def test_stat_change(self):
error = "Reference Elemental's stats incorrectly changed when CombatElemental's stats changed"
# TODO
def test_overkill(self):
error = "Elemental's HP didn't set to 0 on overkill"
self.combat_elemental.receive_damage(200, self.get_combat_elemental())
current_hp = self.elemental.current_hp
expected_hp = 0
self.assertEqual(current_hp, expected_hp, error)
def test_overheal(self):
error = "Elemental's HP didn't set to max HP on overheal"
self.combat_elemental.heal(100)
current_hp = self.elemental.current_hp
expected_hp = 50
self.assertEqual(current_hp, expected_hp, error)
def test_knockout_flag(self):
error = "CombatElemental wasn't flagged as knocked out at 0 HP"
self.combat_elemental.receive_damage(12, self.get_combat_elemental())
knocked_out = self.combat_elemental.is_knocked_out
self.assertIs(knocked_out, True, error)
def test_gain_mana(self):
error = "CombatElemental didn't gain mana on turn start"
mana_before_turn = self.combat_elemental.current_mana
self.combat_elemental.start_turn()
mana_after_turn = self.combat_elemental.current_mana
self.assertGreater(mana_after_turn, mana_before_turn, error)
| [
"[email protected]"
]
| |
30a1e31371ef290579d9c7c19f8771ad60bf07c6 | 6e631bd7f138abb9f7eb0d936a8615287248b697 | /Home/DaysBetween.py | 76d67e4c8f9b064e75254e0764b99006c53ced5b | []
| no_license | ankiwoong/Check_Io | 24494390a851fad91f173c5e81a4eedfad7cfe6e | f417dbf1c1cce316ca25d51d645e228e7b03bf9c | refs/heads/master | 2022-06-03T14:59:04.571112 | 2020-04-25T11:29:55 | 2020-04-25T11:29:55 | 254,514,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | '''
Days Between
We have prepared a set of Editor's Choice Solutions. You will see them first after you solve the mission. In order to see all other solutions you should change the filter.
How old are you in a number of days? It's easy to calculate - just subtract your birthday from today. We could make this a real challenge though and count the difference between any dates.
You are given two dates as an array with three numbers - a year, month and day. For example: 19 April 1982 will be (1982, 4, 19). You should find the difference in days between the given dates. For example between today and tomorrow = 1 day. The difference will always be either a positive number or zero, so don't forget about the absolute value.
Input:
Two dates as tuples of integers.
Output:
The difference between the dates in days as an integer.
Example:
days_diff((1982, 4, 19), (1982, 4, 22)) == 3
days_diff((2014, 1, 1), (2014, 8, 27)) == 238
How it is used: Python has batteries included, so in this mission you’ll need to learn how to use completed modules so that you don't have to invent the bicycle all over again.
Precondition:
Dates between 1 january 1 and 31 december 9999. Dates are correct.
def days_diff(a, b):
# your code here
return None
if __name__ == '__main__':
print("Example:")
print(days_diff((1982, 4, 19), (1982, 4, 22)))
# These "asserts" are used for self-checking and not for an auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
print("Coding complete? Click 'Check' to earn cool rewards!")
'''
from datetime import datetime
def days_diff(date1, date2):
    # Use datetime to build full date objects from the year, month, and day values.
    # Each year / month / day is pulled out of the input tuple by index.
    # The resulting value renders as year-month-day 00:00:00.
    date1 = datetime(year=date1[0], month=date1[1], day=date1[2])
    date2 = datetime(year=date2[0], month=date2[1], day=date2[2])
    # Subtract date1 from date2, so take the absolute value and return the difference in days.
    return abs((date2 - date1).days)
if __name__ == '__main__':
print("Example:")
print(days_diff((1982, 4, 19), (1982, 4, 22)))
# These "asserts" are used for self-checking and not for an auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"[email protected]"
]
| |
de53426f1c73e86d0c2bf22d218d124d89f7947c | f9033131dc4d66ede2c5c22fcaa4a0be5b682152 | /BinaryTrees/Tasks/eolymp(3326).py | 504d4e87e8e265aa065e0b26f1c53ee7288a6ca3 | []
| no_license | Invalid-coder/Data-Structures-and-algorithms | 9bd755ce3d4eb11e605480db53302096c9874364 | 42c6eb8656e85b76f1c0043dcddc9c526ae12ba1 | refs/heads/main | 2023-04-29T08:40:34.661184 | 2021-05-19T10:57:37 | 2021-05-19T10:57:37 | 301,458,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | class BinaryTree:
def __init__(self, key):
self.key = key
self.leftChild = None
self.rightChild = None
def hasLeft(self):
return not self.leftChild is None
def hasRight(self):
return not self.rightChild is None
def setLeft(self, key):
self.leftChild = BinaryTree(key)
def setRight(self, key):
self.rightChild = BinaryTree(key)
def insert(self, key):
node = self
while True:
if node.key == key:
break
elif node.key > key:
if node.hasLeft():
node = node.leftChild
else:
node.setLeft(key)
break
elif node.key < key:
if node.hasRight():
node = node.rightChild
else:
node.setRight(key)
break
def isSameTree(self, tree):
ans = True
def _isSameTree(node1, node2):
nonlocal ans
if node1.hasLeft() and node2.hasLeft():
_isSameTree(node1.leftChild, node2.leftChild)
else:
if node1.hasLeft() and not node2.hasLeft():
ans = False
elif node2.hasLeft() and not node1.hasLeft():
ans = False
if node1.hasRight() and node2.hasRight():
_isSameTree(node1.rightChild, node2.rightChild)
else:
if node1.hasRight() and not node2.hasRight():
ans = False
elif node2.hasRight() and not node1.hasRight():
ans = False
_isSameTree(self, tree)
return ans
def createTree(nodes):
tree = BinaryTree(nodes[0])
i = 1
while i < len(nodes):
tree.insert(nodes[i])
i += 1
return tree
def findSequences(tree, n, m):
sequences = []
def _findSequences(sequence):
if len(sequence) == n:
tree1 = createTree(sequence)
if tree.isSameTree(tree1):
if not sequence in sequences:
sequences.append(sequence)
return
for i in range(1, m + 1):
if not i in sequence:
next_seq = sequence[:]
next_seq.append(i)
_findSequences(next_seq)
_findSequences([])
return len(sequences)
if __name__ == '__main__':
t = int(input())
for _ in range(t):
n, m = map(int, input().split())
nodes = tuple(map(int, input().split()))
tree = BinaryTree(nodes[0])
i = 1
while i < len(nodes):
tree.insert(nodes[i])
i += 1
print(findSequences(tree, n, m))
| [
"[email protected]"
]
| |
60e71440b4fa46560d11572b5594307fa09e7b55 | e7af5a3e76e674be0a85628067fa494348d45123 | /Python-for-Finance-Second-Edition-master/Chapter12/c12_28_basic_income_best.py | 360b70b1ba061b5ab73780ecb6b715663f4dfd9d | [
"MIT"
]
| permissive | SeyedShobeiri/Work | 8321ead6f11de8297fa18d70a450602f700f26fb | f758e758106fbd53236a7fadae42e4ec6a4e8244 | refs/heads/master | 2022-07-25T02:33:25.852521 | 2020-05-17T16:11:27 | 2020-05-17T16:11:27 | 264,706,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py |
"""
Name : c12_28_basic_income_best.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : [email protected]
[email protected]
original : https://gist.github.com/stucchio/7447067
"""
from pylab import *
from scipy.stats import *
#input area
million=1e6 # unit of million
billion=1e9 # unit of billion
trillion=1e12 # unit of trillion
tiny=1e-7 # a small number
hourlyPay = 7.5 # hourly wage
workingHoursPerWeek=40 # working hour per week
workingWeeksPerYear=50 # working weeks per year
nAdult = 227*million # number of adult
laborForce = 154*million # labor force
disabledAdults = 21*million # disability
nSimulations = 1024*32 # number of simulations
#
basicIncome = hourlyPay*workingHoursPerWeek*workingWeeksPerYear
# define a few helper functions
def geniusEffect(nNonWorkers):
nGenious = binom(nNonWorkers,tiny).rvs()
return nGenious* billion
#
def costBasicIncome():
salaryCost= nAdult * basicIncome
unitAdmCost = norm(250,75)
nonWorkerMultiplier = uniform(-0.10, 0.15).rvs()
nonWorker0=nAdult-laborForce-disabledAdults
nNonWorker = nonWorker0*(1+nonWorkerMultiplier)
marginalWorkerHourlyProductivity = norm(10,1)
admCost = nAdult * unitAdmCost.rvs()
unitBenefitNonWorker=40*52*marginalWorkerHourlyProductivity.rvs()
benefitNonWorkers = 1 * (nNonWorker*unitBenefitNonWorker)
geniusBenefit=geniusEffect(nNonWorker)
totalCost=salaryCost + admCost - benefitNonWorkers-geniusBenefit
return totalCost
#
def costBasicJob():
unitAdmCost4disabled= norm(500,150).rvs()
unitAdmCost4worker = norm(5000, 1500).rvs()
nonWorkerMultiplier = uniform(-0.20, 0.25).rvs()
hourlyProductivity = uniform(0.0, hourlyPay).rvs()
cost4disabled=disabledAdults * (basicIncome + unitAdmCost4disabled)
nBasicWorkers=((nAdult-disabledAdults-laborForce)*(1+nonWorkerMultiplier))
annualCost=workingHoursPerWeek*workingWeeksPerYear*hourlyProductivity
cost4workers=nBasicWorkers * (basicIncome+unitAdmCost4worker-annualCost)
return cost4disabled + cost4workers
#
# take a long time here!!!
N = nSimulations
costBI = zeros(shape=(N,),dtype=float)
costBJ = zeros(shape=(N,),dtype=float)
for k in range(N):
costBI[k] = costBasicIncome()
costBJ[k] = costBasicJob()
#
def myPlot(data,myTitle,key):
subplot(key)
width = 4e12
height=50*N/1024
title(myTitle)
#xlabel("Cost (Trillion = 1e12)")
hist(data, bins=50)
axis([0,width,0,height])
#
myPlot(costBI,"Basic Income",211)
myPlot(costBJ,"Basic Job",212)
show()
| [
"[email protected]"
]
| |
1cf9bc616d68317e6e54b595f5fa04659ec0aa69 | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/third_party/2/kazoo/client.pyi | 683596f0fc6c615216bdead3db0ebdda388f94a5 | [
"MIT",
"Apache-2.0"
]
| permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 3,879 | pyi | from typing import Any
string_types = ... # type: Any
bytes_types = ... # type: Any
LOST_STATES = ... # type: Any
ENVI_VERSION = ... # type: Any
ENVI_VERSION_KEY = ... # type: Any
log = ... # type: Any
class KazooClient:
logger = ... # type: Any
handler = ... # type: Any
auth_data = ... # type: Any
default_acl = ... # type: Any
randomize_hosts = ... # type: Any
hosts = ... # type: Any
chroot = ... # type: Any
state = ... # type: Any
state_listeners = ... # type: Any
read_only = ... # type: Any
retry = ... # type: Any
Barrier = ... # type: Any
Counter = ... # type: Any
DoubleBarrier = ... # type: Any
ChildrenWatch = ... # type: Any
DataWatch = ... # type: Any
Election = ... # type: Any
NonBlockingLease = ... # type: Any
MultiNonBlockingLease = ... # type: Any
Lock = ... # type: Any
Party = ... # type: Any
Queue = ... # type: Any
LockingQueue = ... # type: Any
SetPartitioner = ... # type: Any
Semaphore = ... # type: Any
ShallowParty = ... # type: Any
def __init__(self, hosts=..., timeout=..., client_id=..., handler=..., default_acl=..., auth_data=..., read_only=..., randomize_hosts=..., connection_retry=..., command_retry=..., logger=..., **kwargs) -> None: ...
@property
def client_state(self): ...
@property
def client_id(self): ...
@property
def connected(self): ...
def set_hosts(self, hosts, randomize_hosts=...): ...
def add_listener(self, listener): ...
def remove_listener(self, listener): ...
def start(self, timeout=...): ...
def start_async(self): ...
def stop(self): ...
def restart(self): ...
def close(self): ...
def command(self, cmd=...): ...
def server_version(self, retries=...): ...
def add_auth(self, scheme, credential): ...
def add_auth_async(self, scheme, credential): ...
def unchroot(self, path): ...
def sync_async(self, path): ...
def sync(self, path): ...
def create(self, path, value=..., acl=..., ephemeral=..., sequence=..., makepath=...): ...
def create_async(self, path, value=..., acl=..., ephemeral=..., sequence=..., makepath=...): ...
def ensure_path(self, path, acl=...): ...
def ensure_path_async(self, path, acl=...): ...
def exists(self, path, watch=...): ...
def exists_async(self, path, watch=...): ...
def get(self, path, watch=...): ...
def get_async(self, path, watch=...): ...
def get_children(self, path, watch=..., include_data=...): ...
def get_children_async(self, path, watch=..., include_data=...): ...
def get_acls(self, path): ...
def get_acls_async(self, path): ...
def set_acls(self, path, acls, version=...): ...
def set_acls_async(self, path, acls, version=...): ...
def set(self, path, value, version=...): ...
def set_async(self, path, value, version=...): ...
def transaction(self): ...
def delete(self, path, version=..., recursive=...): ...
def delete_async(self, path, version=...): ...
def reconfig(self, joining, leaving, new_members, from_config=...): ...
def reconfig_async(self, joining, leaving, new_members, from_config): ...
class TransactionRequest:
client = ... # type: Any
operations = ... # type: Any
committed = ... # type: Any
def __init__(self, client) -> None: ...
def create(self, path, value=..., acl=..., ephemeral=..., sequence=...): ...
def delete(self, path, version=...): ...
def set_data(self, path, value, version=...): ...
def check(self, path, version): ...
def commit_async(self): ...
def commit(self): ...
def __enter__(self): ...
def __exit__(self, exc_type, exc_value, exc_tb): ...
class KazooState:
...
| [
"[email protected]"
]
| |
83ebf0a66825f6e61ab543b4e72c1939cbe90293 | 57e148ea3ebc4a7476a661ce4332fdc15912934d | /cf 606 div 2 C.py | ee6cc6c8cbec456b68807eceb9a6cee0e076571c | []
| no_license | FahimSifnatul/online_problem_solving_with_FahimSifnatul_python_version | 20f99a59dda8083ac4cf220b0cd4b45b34262fa3 | 6e1e54b78ba5d64ba4bb5edee507277fe2c1a186 | refs/heads/master | 2022-12-24T10:57:06.212206 | 2020-10-07T05:29:54 | 2020-10-07T05:29:54 | 265,504,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from sys import stdin,stdout
ans,pos = [], 0
for i in range(int(input())):
s = stdin.readline()
cnt, j, l = 0, 0, len(s)-2
while j <= l-2:
tmp = s[j] + s[j+1] + s[j+2]
if j <= l-4 and tmp+s[j+3]+s[j+4] == 'twone':
ans.append(str(j+3))
ans.append(' ')
cnt += 1
j += 5
elif tmp in ['one','two']:
ans.append(str(j+2))
ans.append(' ')
cnt += 1
j += 3
else:
j += 1
ans.append('\n')
ans.insert(pos, str(cnt))
ans.insert(pos+1, '\n')
pos = len(ans)
stdout.write(''.join(ans))
| [
"[email protected]"
]
| |
b33d369d83f2a16e6a3d0e37e3be017bafa2bca5 | 432a38514b27aba3b3b5c3ddba0b2804ebff8222 | /drf_api/settings.py | 3e407d2deb300ae3ef3b49c88bd883ff912a0825 | []
| no_license | mdAshrafuddin/drf_api | 36b35f579fd7805bca2277389e5f3b1117b0a5dc | a3b95dd014a36c61f87b7caf304a31e2ad646f32 | refs/heads/main | 2023-02-21T02:00:45.577214 | 2021-01-21T13:43:23 | 2021-01-21T13:43:23 | 331,640,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | """
Django settings for drf_api project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5y0$_a#+6&^7f^)+3or@)aly_^3kpaxds90!pg9j7t!_rcv3^@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication'
]
} | [
"[email protected]"
]
| |
45de7de2ca65f57cb6a7a2c82425aa4a63f7f879 | 2127cabeeda296f7a6b692982872d91e8bdd3016 | /tests/test_schema_priority.py | 15535c1c3ad072aac439c0948434e442010679ea | [
"Apache-2.0"
]
| permissive | nomilkinmyhome/dataclass_factory | 26059993af95509e386793c42fd743d6f08e1079 | 7bcbd395acd5c61806ae36042067a7f9882cec28 | refs/heads/master | 2022-11-18T21:51:40.308764 | 2020-03-26T08:51:08 | 2020-03-26T08:51:08 | 279,984,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | from dataclasses import dataclass
from unittest import TestCase
from dataclass_factory import Factory, Schema
@dataclass
class Data:
a: str = ""
b: str = ""
c_: str = ""
_d: str = ""
class TestFactory(TestCase):
def test_only_mapping(self):
factory = Factory(
schemas={
Data: Schema(
only=("b",),
name_mapping={"a": "A"},
only_mapped=True,
),
}
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_only_exclude(self):
factory = Factory(
schemas={
Data: Schema(
only=("a", "b",),
exclude=("a",)
),
}
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_trailing_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"c_": "c_"},
trim_trailing_underscore=True,
),
}
)
data = Data("AA", "BB", "CC")
serial = {"a": "AA", "b": "BB", "c_": "CC"}
self.assertEqual(factory.dump(data), serial)
self.assertEqual(factory.load(serial, Data), data)
def test_internal_only(self):
factory = Factory(
schemas={
Data: Schema(
only=("_d",),
skip_internal=True,
),
}
)
data = Data("AA", "BB", "CC", "DD")
serial = {"_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(_d="DD")
self.assertEqual(factory.load(serial, Data), data2)
def test_internal_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"_d": "_d"},
skip_internal=True,
),
}
)
data = Data("AA", "BB", "CC", "DD")
serial = {"a": "AA", "b": "BB", "c": "CC", "_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(a="XXX", _d="DD")
self.assertEqual(factory.load(serial, Data), data2)
| [
"[email protected]"
]
| |
72fd3453bb6ac3f150bff9dbcaa458288016d216 | 8379cb63b570eb29c2d2e52b37960ea350fe6be3 | /datasets/create_bond_dataset.py | 953ce6f39ef64c0eccda9e81138d9b55caacd945 | []
| no_license | biotite-dev/biotite-util | 3ad622cee28a556ef37c2abf7dabee8f4ae91bfb | 3c5bcce9411c6f1bd5c12df91b1c091c5eff84ab | refs/heads/master | 2022-10-13T21:49:25.040376 | 2022-09-18T15:51:42 | 2022-09-18T15:51:42 | 152,449,276 | 1 | 1 | null | 2022-09-18T15:51:43 | 2018-10-10T15:46:49 | Python | UTF-8 | Python | false | false | 2,164 | py | import argparse
import msgpack
import biotite.structure as struc
import biotite.structure.io.pdbx as pdbx
BOND_ORDERS = {
("SING", "N") : struc.BondType.SINGLE,
("DOUB", "N") : struc.BondType.DOUBLE,
("TRIP", "N") : struc.BondType.TRIPLE,
("QUAD", "N") : struc.BondType.QUADRUPLE,
("SING", "Y") : struc.BondType.AROMATIC_SINGLE,
("DOUB", "Y") : struc.BondType.AROMATIC_DOUBLE,
("TRIP", "Y") : struc.BondType.AROMATIC_TRIPLE,
}
def create_bond_dict(components_pdbx_file_path, msgpack_file_path):
pdbx_file = pdbx.PDBxFile()
pdbx_file.read(components_pdbx_file_path)
components = pdbx_file.get_block_names()
bond_dict = {}
for i, component in enumerate(components):
print(f"{component:3} {int(i/len(components)*100):>3d}%", end="\r")
cif_bonds = pdbx_file.get_category(
"chem_comp_bond", block=component, expect_looped=True
)
if cif_bonds is None:
# No bond info for this compound
continue
else:
group_bonds = {}
for atom1, atom2, order, aromatic_flag in zip(
cif_bonds["atom_id_1"], cif_bonds["atom_id_2"],
cif_bonds["value_order"], cif_bonds["pdbx_aromatic_flag"]
):
bond_type = BOND_ORDERS[order, aromatic_flag]
group_bonds[(atom1, atom2)] = bond_type
bond_dict[component] = group_bonds
with open(msgpack_file_path, "wb") as msgpack_file:
msgpack.dump(bond_dict, msgpack_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a dataset, that contains the information which "
"atoms are connected in a given residue. "
"The information is based on a 'components.cif' file."
)
parser.add_argument(
"infile",
help="The path to the 'components.cif' file to be read."
)
parser.add_argument(
"outfile",
help="The path to the file, where the output MessagePack file should "
"be placed."
)
args = parser.parse_args()
create_bond_dict(args.infile, args.outfile) | [
"[email protected]"
]
| |
83156090bb8b3b0a4cc1a33ac9e451f5d4f13a09 | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/100+/euler131.py | 4d34b9b3dbb1e9573a034fa5863f2228130191e4 | []
| no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from fractions import gcd
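# Note added for clarity: Project Euler 131 reduces to counting primes that are a
# difference of consecutive cubes, p = m**3 - (m-1)**3, below one million, which is
# exactly what euler131() below enumerates.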
def isPrime(n):
for i in range(2, int(n**0.5)+1):
if n % i == 0: return False
return True
def euler131():
m = 2
n = m-1
ps = 0
while m**3 - n**3 < 1000000:
p = m**3 - n**3
if isPrime(p): ps += 1
m += 1
n = m-1
return ps
if __name__ == "__main__":
print euler131() | [
"[email protected]"
]
| |
6bcbd7cb96b17a945779fd33d2772b582faa191c | 2ba8378d2028305c2582a2d5d16a91527d207040 | /Soilder.py | 1b6e9968b690cf80f1a63808b6568dd247d9e2ac | []
| no_license | giridhararao/guvi | 41bf15e7dbd8ca3494f2e7ada5b42737e80fefe8 | e67e245a2b31463f39087430bce0f7cf5bc92b4a | refs/heads/master | 2020-03-22T06:04:27.296180 | 2019-03-30T06:52:05 | 2019-03-30T06:52:05 | 139,610,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | def factors1(n) :
L = []
i = 2
cnt = 0
while n >1 :
while n%i == 0 :
cnt += 1
n //= i
i += 1
return cnt
n = int(input())
L = [input().split() for i in range(0,n)]
for i in range(0,n) :
a = 1
n, k = L[i]
n, k = int(n), int(k)
for i in range(k+1,n+1) :
a = a*i
out = factors1(a)
print(out)
| [
"[email protected]"
]
| |
ed430e4a8ec7376b2596b58d7285585e9507bec0 | 609a4bb18ffd8e93ef28da6762266d852c9aca54 | /src/h02_bert_embeddings/bert_per_word.py | 9343c3f703ec8633f3d6dd09f9e3c73e45675912 | [
"MIT"
]
| permissive | tpimentelms/lexical-ambiguity-in-context | 0fe9a6835451bc2d5abcba65654e7049109ded67 | 5277b9e0f1a846b5fe93eeba1cf37de2d48cfc62 | refs/heads/main | 2023-05-31T03:25:34.499572 | 2021-06-02T15:04:37 | 2021-06-02T15:04:37 | 373,110,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import torch
import torch.nn as nn
from transformers import BertModel
from utils import constants
class BertPerWordModel(nn.Module):
# pylint: disable=arguments-differ
def __init__(self, bert_option):
super().__init__()
self.bert = self.get_bert(bert_option)
@staticmethod
def get_bert(bert_option):
model = BertModel.from_pretrained(bert_option)
return model
def forward(self, x, attention_mask, mappings):
output, _ = self.bert(x, attention_mask=attention_mask)
return self.from_bpe_to_word(output, mappings)
def from_bpe_to_word(self, output, mappings):
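        # Added for clarity: collapse subword (BPE) vectors into one vector per original
        # token by mean-pooling; mappings[i, j] holds how many BPE pieces token j of
        # sentence i spans, and output[:, 1:-1] drops the first and last special-token
        # positions.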
batch_size = output.size(0)
longest_token_sent = mappings.size(1)
hidden_states = output[:, 1:-1]
embedding_size = output.size(-1)
hidden_states_per_token = torch.zeros(
(batch_size, longest_token_sent, embedding_size)).to(device=constants.device)
mask_start = torch.zeros(batch_size).long().to(device=constants.device)
for mask_pos in range(0, longest_token_sent):
mask_sizes = mappings[:, mask_pos]
hidden_states_per_token[:, mask_pos] = \
self.sum_bpe_embeddings(hidden_states, mask_start, mask_sizes)
mask_start += mask_sizes
return hidden_states_per_token
@staticmethod
def sum_bpe_embeddings(hidden_states, mask_start, mask_sizes):
mask_idxs = []
for i, (sent_start, sent_size) in enumerate(zip(mask_start, mask_sizes)):
mask_idxs += [(i, sent_start.item() + x) for x in range(sent_size)]
mask_idxs = list(zip(*mask_idxs))
hidden_states_temp = \
torch.zeros_like(hidden_states).float().to(device=constants.device)
hidden_states_temp[mask_idxs] = hidden_states[mask_idxs]
embedding_size = hidden_states.size(-1)
return hidden_states_temp.sum(dim=1) / \
mask_sizes.unsqueeze(-1).repeat(1, embedding_size).float()
| [
"[email protected]"
]
| |
0479a264837198ef41a9305938f1a57efdcd97d3 | 75e8d0da60b0e9456058eee70ada47ed11e953a2 | /584A.py | db56edd5936777f92ff2e128d8c3d63cc31d75f5 | []
| no_license | smirnoffmg/codeforces | 87aa12596b4927e5b5620369a5d4fb52330c51f7 | 1b0e7feb051c7b7c5c4e46351e122a050d1561ac | refs/heads/master | 2021-10-11T10:09:24.019018 | 2019-01-24T13:54:35 | 2019-01-24T13:54:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # -*- coding: utf-8 -*-
n, t = map(int, raw_input().split(' '))
if t == 10:
if n > 1:
print(10 ** (n - 1))
else:
print(-1)
else:
f = 10 ** (n - 1) % t
print(10 ** (n - 1) + (t-f))
| [
"[email protected]"
]
| |
a29184b368126daa37dbeb89a73cfc13478bb9a9 | c3dc08fe8319c9d71f10473d80b055ac8132530e | /challenge-173/roger-bell-west/python/ch-1.py | 3ee4659182a9b33fabe21f977355cec1a0bd03bc | []
| no_license | southpawgeek/perlweeklychallenge-club | d4b70d9d8e4314c4dfc4cf7a60ddf457bcaa7a1e | 63fb76188e132564e50feefd2d9d5b8491568948 | refs/heads/master | 2023-01-08T19:43:56.982828 | 2022-12-26T07:13:05 | 2022-12-26T07:13:05 | 241,471,631 | 1 | 0 | null | 2020-02-18T21:30:34 | 2020-02-18T21:30:33 | null | UTF-8 | Python | false | false | 809 | py | #! /usr/bin/python3
import unittest
def esthetic(n0, base):
n = n0
pdigit = 0
ch = False
while n > 0:
digit = n % base
if ch and abs(digit - pdigit) != 1:
return False
ch = True
pdigit = digit
n //= base
return True
def esthetic10(n):
return esthetic(n, 10)
class TestEsthetic(unittest.TestCase):
def test_ex1(self):
self.assertEqual(esthetic10(5456),True,'example 1')
def test_ex2(self):
        self.assertEqual(esthetic10(120),False,'example 2')
    def test_ex3(self):
        self.assertEqual(esthetic10(12),True,'example 3')
    def test_ex4(self):
        self.assertEqual(esthetic10(5654),True,'example 4')
    def test_ex5(self):
        self.assertEqual(esthetic10(890),False,'example 5')
unittest.main()
| [
"[email protected]"
]
| |
d8f05b966238a7358ae208514b580ac1cdfb8039 | 0792f5f7432ef3320c16e717671726289d1db3be | /filetest.py | 7c1f8dad9c01c68c7b9df6e8a0cdd1053bf2b2b9 | []
| no_license | hujiangyi/autoupgrade | 47a28ee9751d555a11d7697105b17af1f2d6c13c | 1f6cae3a264d4ce639283bda10df97e216d1fa40 | refs/heads/master | 2020-04-13T04:53:07.185244 | 2019-05-17T06:20:06 | 2019-05-17T06:20:06 | 162,974,411 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 jay <[email protected]>
#
cmdfile = open('./collectDataCmd.txt', "r")
for line in cmdfile.readlines():
print line.strip() | [
"[email protected]"
]
| |
4330e3d6164c5d652c3158e70a747de0759ed0b0 | b7cd8c2db6d7dae81c5b15203640994c3022b51f | /tigger/cluda/ocl.py | 9550e84e5568cba5bff546c6e1f448af8f5f4112 | [
"MIT"
]
| permissive | tnorth/reikna_old | 1dcc66e302f25a14108be35089f899a7277d8c94 | 0e77d785742151ed17c2637545ff28948d859e33 | refs/heads/master | 2020-12-25T05:30:16.434871 | 2013-02-08T17:08:00 | 2013-02-08T17:08:00 | 6,692,032 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,102 | py | from logging import error
import sys
import numpy
import pyopencl as cl
import pyopencl.array as clarray
import tigger.cluda as cluda
import tigger.cluda.dtypes as dtypes
from tigger.helpers import wrap_in_tuple, product
from tigger.cluda.kernel import render_prelude, render_template_source
from tigger.cluda.vsize import VirtualSizes, render_stub_vsize_funcs
API_ID = cluda.API_OCL
def get_platforms():
return cl.get_platforms()
class Context:
@classmethod
def create(cls, device=None, **kwds):
# cl.create_some_context() creates multiple-device context,
# and we do not want that (yet)
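        # Typical use (illustrative): ctx = Context.create() walks the available
        # platforms, picks the first device whose max work-group size is greater
        # than 1, and wraps it in a single-device CLUDA context.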
def find_suitable_device():
platforms = get_platforms()
target_device = None
for platform in platforms:
devices = platform.get_devices()
for device in devices:
params = DeviceParameters(device)
if params.max_work_group_size > 1:
return device
return None
if device is None:
device = find_suitable_device()
if device is None:
raise RuntimeError("Cannot find suitable OpenCL device to create CLUDA context")
ctx = cl.Context(devices=[device])
kwds['owns_context'] = True
return cls(ctx, **kwds)
def __init__(self, context, queue=None, fast_math=True, async=True, owns_context=False):
self.api = cluda.api(API_ID)
self._fast_math = fast_math
self._context = context
self._async = async
self.device_params = DeviceParameters(context.get_info(cl.context_info.DEVICES)[0])
self._device = self._context.devices[0]
self._queue = self.create_queue() if queue is None else queue
self._released = False if owns_context else True
def override_device_params(self, **kwds):
for kwd in kwds:
if hasattr(self.device_params, kwd):
setattr(self.device_params, kwd, kwds[kwd])
else:
raise ValueError("Device parameter " + str(kwd) + " does not exist")
def create_queue(self):
return cl.CommandQueue(self._context)
def supports_dtype(self, dtype):
if dtypes.is_double(dtype):
extensions = self._context.devices[0].extensions
return "cl_khr_fp64" in extensions or "cl_amd_fp64" in extensions
else:
return True
def allocate(self, shape, dtype):
return clarray.Array(self._queue, shape, dtype=dtype)
def empty_like(self, arr):
return self.allocate(arr.shape, arr.dtype)
def to_device(self, arr, dest=None):
if dest is None:
arr_device = self.empty_like(arr)
else:
arr_device = dest
arr_device.set(arr, queue=self._queue, async=self._async)
if dest is None:
return arr_device
def from_device(self, arr, dest=None, async=False):
arr_cpu = arr.get(queue=self._queue, ary=dest, async=async)
if dest is None:
return arr_cpu
def copy_array(self, arr, dest=None, src_offset=0, dest_offset=0, size=None):
if dest is None:
arr_device = self.empty_like(arr)
else:
arr_device = dest
itemsize = arr.dtype.itemsize
nbytes = arr.nbytes if size is None else itemsize * size
src_offset *= itemsize
dest_offset *= itemsize
cl.enqueue_copy(self._queue,
arr_device.data, arr.data,
byte_count=nbytes, src_offset=src_offset, dest_offset=dest_offset)
self._synchronize()
if dest is None:
return arr_device
def synchronize(self):
self._queue.finish()
def _synchronize(self):
if not self._async:
self.synchronize()
def release(self):
if not self._released:
del self._device
del self._queue
del self._context
self._released = True
def __del__(self):
self.release()
def _compile(self, src):
options = "-cl-mad-enable -cl-fast-relaxed-math" if self._fast_math else ""
try:
module = cl.Program(self._context, src).build(options=options)
except:
listing = "\n".join([str(i+1) + ":" + l for i, l in enumerate(src.split('\n'))])
error("Failed to compile:\n" + listing)
raise
return module
def compile(self, template_src, render_kwds=None):
return Module(self, template_src, render_kwds=render_kwds)
def compile_static(self, template_src, name, global_size,
local_size=None, render_kwds=None):
return StaticKernel(self, template_src, name, global_size,
local_size=local_size, render_kwds=render_kwds)
class DeviceParameters:
def __init__(self, device):
if device.platform.name == 'Apple' and device.type == cl.device_type.CPU:
# Apple is being funny again.
# On OSX 10.8.0 it reports the maximum block size as 1024, when it is really 128.
# Moreover, if local_barrier() is used in the kernel, it becomes 1
self.max_work_group_size = 1
self.max_work_item_sizes = [1, 1, 1]
else:
self.max_work_group_size = device.max_work_group_size
self.max_work_item_sizes = device.max_work_item_sizes
self.max_num_groups = [sys.maxsize, sys.maxsize, sys.maxsize]
if device.type == cl.device_type.CPU:
# For CPU both values do not make much sense
self.local_mem_banks = self.max_work_group_size
self.warp_size = 1
elif "cl_nv_device_attribute_query" in device.extensions:
# If NV extensions are available, use them to query info
self.local_mem_banks = 16 if device.compute_capability_major_nv < 2 else 32
self.warp_size = device.warp_size_nv
elif device.vendor == 'NVIDIA':
# nVidia device, but no extensions.
# Must be APPLE OpenCL implementation.
self.local_mem_banks = 16
self.warp_size = 16
else:
# AMD card.
# Do not know how to query this info, so settle for most probable values.
self.local_mem_banks = 32
# An alternative is to query CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE
# for some arbitrary kernel.
self.warp_size = 64
self.min_mem_coalesce_width = {4: 16, 8: 16, 16: 8}
self.local_mem_size = device.local_mem_size
class Module:
def __init__(self, ctx, src, render_kwds=None):
self._ctx = ctx
if render_kwds is None:
render_kwds = {}
prelude = render_prelude(self._ctx)
src = render_template_source(src, **render_kwds)
# Casting source code to ASCII explicitly
# New versions of Mako produce Unicode output by default,
# and it makes OpenCL compiler unhappy
self.source = str(prelude + src)
self._module = ctx._compile(self.source)
def __getattr__(self, name):
return Kernel(self._ctx, getattr(self._module, name))
class Kernel:
def __init__(self, ctx, kernel):
self._ctx = ctx
self._kernel = kernel
self._max_work_group_size = kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE, self._ctx._device)
def prepare(self, global_size, local_size=None):
if local_size is None:
self._local_size = None
else:
self._local_size = wrap_in_tuple(local_size)
self._global_size = wrap_in_tuple(global_size)
def prepared_call(self, *args):
# Unlike PyCuda, PyOpenCL does not allow passing array objects as is
args = [x.data if isinstance(x, clarray.Array) else x for x in args]
self._kernel(self._ctx._queue, self._global_size, self._local_size, *args)
self._ctx._synchronize()
def __call__(self, *args, **kwds):
if 'global_size' in kwds:
prep_args = (kwds.pop('global_size'),)
else:
prep_args = tuple()
self.prepare(*prep_args, **kwds)
self.prepared_call(*args)
class StaticKernel:
def __init__(self, ctx, src, name, global_size, local_size=None, render_kwds=None):
self._ctx = ctx
if render_kwds is None:
render_kwds = {}
prelude = render_prelude(self._ctx)
stub_vsize_funcs = render_stub_vsize_funcs()
src = render_template_source(src, **render_kwds)
# We need the first approximation of the maximum thread number for a kernel.
# Stub virtual size functions instead of real ones will not change it (hopefully).
stub_module = ctx._compile(str(prelude + stub_vsize_funcs + src))
stub_kernel = getattr(stub_module, name)
max_work_group_size = stub_kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE, self._ctx._device)
vs = VirtualSizes(ctx.device_params, max_work_group_size, global_size, local_size)
static_prelude = vs.render_vsize_funcs()
self._global_size, self._local_size = vs.get_call_sizes()
# Casting source code to ASCII explicitly
# New versions of Mako produce Unicode output by default,
# and it makes OpenCL compiler unhappy
self.source = str(prelude + static_prelude + src)
self._module = ctx._compile(self.source)
self._kernel = getattr(self._module, name)
self.max_work_group_size = self._kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE, self._ctx._device)
if self.max_work_group_size < product(self._local_size):
raise cluda.OutOfResourcesError("Not enough registers/local memory for this local size")
def __call__(self, *args):
args = [x.data if isinstance(x, clarray.Array) else x for x in args]
self._kernel(self._ctx._queue, self._global_size, self._local_size, *args)
self._ctx._synchronize()
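# Minimal usage sketch appended for illustration (not part of the original
# module): it assumes a working OpenCL platform/device is available, and the
# kernel source, array size and kernel name below are arbitrary examples.
if __name__ == '__main__':
    ctx = Context.create()
    a = ctx.to_device(numpy.arange(16, dtype=numpy.float32))
    b = ctx.empty_like(a)
    module = ctx.compile("""
    __kernel void copy_array(__global float *dest, __global const float *src)
    {
        int idx = get_global_id(0);
        dest[idx] = src[idx];
    }
    """)
    # kernels are fetched by attribute name and called with a global_size
    module.copy_array(b, a, global_size=16)
    print(ctx.from_device(b))
    ctx.release()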
| [
"[email protected]"
]
| |
2888b0395bdc3f6679dfc6bde5a66fad4551756e | 339901caa0cbb3bd2762ad83bb9f847c01b0df39 | /rice_RILs_mPing_scripts/Construction_of_recombination_bin_and_linkage_map/scripts/genotype/Tab2SNP.py | ec1d59a1b3f5eef7fce9a54aabae2c80828dece8 | []
| no_license | stajichlab/Dynamic_rice_publications | e592e83a4842eff7354e06e5368e6f7590b472ee | 93ac8732d64b7ab4831a0b0b9b1593efc5814805 | refs/heads/master | 2020-03-24T05:10:56.815367 | 2020-02-11T07:26:17 | 2020-02-11T07:26:17 | 142,477,743 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | #!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
import glob
from Bio import SeqIO
import subprocess
import multiprocessing as mp
import gzip
def usage():
test="name"
message='''
python Tab2SNP.py --input RILs_ALL_bam_correct
Convert genotype.tab to Maq.p1.map.pileup.SNP.
'''
print message
#/rhome/cjinfeng/HEG4_cjinfeng/RILs/QTL_pipe/bin/RILs_ALL_275line_core/NB.RILs.dbSNP.SNPs.Markers
#SNP_id Allele
#0100021547A A
def read_parents(infile):
data = defaultdict(lambda : str())
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2 and not line.startswith(r'SNP_id'):
unit = re.split(r'\t',line)
data[unit[0]] = unit[1]
return data
##CHROM POS REF RIL103_0_GAGTGG_FC1213L5
#Chr1 31071 A A/A
##0100031071A GN278 G
def convert_tab2SNP(infile, markers, outfile):
#outfile = re.sub(r'.genotype.tab', r'.Maq.p1.map.pileup.SNP', infile)
ofile = open (outfile, 'w')
with gzip.open (infile, 'r') as filehd:
headers = re.split(r'\t', filehd.readline())
rils = re.split(r'_', headers[-1])
ril = re.sub(r'RIL', r'GN', rils[0])
for line in filehd:
line = line.rstrip()
if len(line) > 2 and not line.startswith(r'#'):
unit = re.split(r'\t',line)
#pos = int(unit[0][2:-1])
chrs = re.sub(r'Chr', r'', unit[0])
pos = '%02d%08d%s' %(int(chrs), int(unit[1]), unit[2])
if unit[3][0] == unit[3][2] and not unit[3][0] == '.':
if markers.has_key(pos):
print >> ofile, '%s\t%s\t%s' %(pos, ril, unit[3][0])
ofile.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
markers = read_parents('NB.RILs.dbSNP.SNPs.Markers')
snp_files = glob.glob('%s/*.genotype.tab.gz' %(args.input))
#convert_tab2SNP('RILs_ALL_bam_correct/GN87.genotype.tab', markers)
for tab in sorted(snp_files):
snp = re.sub(r'.genotype.tab.gz', r'.Maq.p1.map.pileup.SNP', tab)
if not os.path.exists(snp):
print '%s to %s' %(tab, snp)
convert_tab2SNP(tab, markers, snp)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
e41a51d9e86953535c20916340c517b758128b50 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/tests/regressiontests/custom_managers_regress/tests.py | d5e38b14778ea6d5bf1050b8bc1042a5abb637f3 | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/regressiontests/custom_managers_regress/tests.py | [
"[email protected]"
]
| |
45689ce9ef1065b6e18e0fd34e1d78d680c0cb51 | 162eed4191aef4431f94a0db1ad4185b6daa6f67 | /supervised_learning/0x00-binary_classification/21-deep_neural_network.py | f28ab914fda9b74ef9fc8a9f068b6e0624e1b5b5 | []
| no_license | giovannyortegon/holbertonschool-machine_learning | d6897bfb492f9d266302930927416637be3c172d | 8cd5e0f837a5c0facbf73647dcc9c6a3b1b1b9e0 | refs/heads/master | 2022-12-31T15:34:20.489690 | 2020-10-24T03:37:01 | 2020-10-24T03:37:01 | 279,656,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,321 | py | #!/usr/bin/env python3
""" deep neural network """
import numpy as np
class DeepNeuralNetwork:
""" DeepNeuralNetwork - defines a deep neural network
"""
@staticmethod
def he_et_al(nx, layers):
""" The weights initialized using the He et al """
if type(layers) is not list or len(layers) == 0:
raise TypeError("layers must be a list of positive integers")
weights = dict()
for i in range(len(layers)):
if type(layers[i]) is not int:
raise TypeError("layers must be a list of positive integers")
layer = layers[i - 1] if i > 0 else nx
W1 = np.random.randn(layers[i], layer)
W2 = np.sqrt(2 / layer)
weights.update({'W' + str(i + 1): W1 * W2,
'b' + str(i + 1): np.zeros((layers[i], 1))
})
return weights
def __init__(self, nx, layers):
""" DeepNeuralNetwork - public instance attributes
Args:
nx is the number of input features
layers is a list representing the number of nodes
L: The number of layers in the neural network.
cache: A dictionary to hold all intermediary values of the network.
weights: A dictionary to hold all weights
and biased of the network.
"""
if type(nx) is not int:
raise TypeError("nx must be an integer")
elif nx < 1:
raise ValueError("nx must be a positive integer")
else:
self.nx = nx
if type(layers) is not list or len(layers) == 0:
raise TypeError("layers must be a list of positive integers")
else:
self.layers = layers
arrprob = np.array(self.layers)
lenarr = arrprob[arrprob >= 1].shape[0]
if len(self.layers) != lenarr:
raise TypeError("layers must be a list of positive integers")
self.__L = len(self.layers)
self.__cache = {}
self.__weights = self.he_et_al(nx, layers)
@property
def L(self):
""" L - number of layers
Args:
__L number of layers
Return:
return __L Private instance
"""
return self.__L
@property
def cache(self):
""" cache - A dictionary to hold all intermediary valuess
Args:
__cache A dictionary to hold all intermediary values
Return:
Return __cache Private instance
"""
return self.__cache
@property
def weights(self):
""" weights - A dictionary to hold all weights and biased
Args:
__weights A dictionary to hold all weights and biased
Return:
Return __weights Private instance
"""
return self.__weights
def forward_prop(self, X):
""" forward_prop - Calculates the forward propagation
of the neural network.
Args:
X contains the input data.
Return:
Returns the output of the neural network and the cache
"""
self.__cache["A0"] = X
for i in range(self.__L):
w = "W" + str(i + 1)
b = "b" + str(i + 1)
a = "A" + str(i + 1)
z = np.matmul(self.__weights[w], self.__cache["A" + str(i)]) + \
self.__weights[b]
self.__cache[a] = 1 / (1 + np.exp(-z))
return self.__cache[a], self.__cache
def cost(self, Y, A):
""" cost - Calculates the cost of the model using
logistic regression
Args:
Y contains the correct labels for the input data
A containing the activated output of the neuron
for each example.
Return:
Returns the cost
"""
m = Y.shape[1]
logprobs1 = np.multiply(np.log(A), Y)
logprobs2 = np.multiply(np.log(1.0000001 - A), (1 - Y))
cost = -(1 / m) * np.sum(logprobs1 + logprobs2)
return cost
def evaluate(self, X, Y):
""" evaluate - Evaluates the neuron’s predictions
Args:
X - (nx, m) that contains the input data.
Y - (1, m) contains the correct labels for the input data.
Return
Returns the neuron’s prediction and the cost of the network.
"""
A, _ = self.forward_prop(X)
cost = self.cost(Y, A)
return np.where(A >= 0.5, 1, 0), cost
def gradient_descent(self, Y, cache, alpha=0.05):
"""
"""
m = Y.shape[1]
weights = self.__weights.copy()
for i in range(self.__L, 0, -1):
W = weights.get('W' + str(i))
W1 = weights.get('W' + str(i + 1))
A = self.__cache['A' + str(i)]
A1 = self.__cache['A' + str(i - 1)]
b = weights['b' + str(i)]
if i == self.__L:
dZ = A - Y
else:
dZ = np.matmul(W1.T, dZ1) * (A * (1 - A))
dW = (1 / m) * np.matmul(dZ, A1.T)
db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
dZ1 = dZ
self.__weights['W' + str(i)] = W - (dW * alpha)
self.__weights['b' + str(i)] = b - (db * alpha)
| [
"[email protected]"
]
| |
0ce1310744f3da08e0fa94833be91e0a0a8e6cbf | 0172fee2851e3d02b855a53d8b63b262d169e6a5 | /ptsites/sites/pttime.py | 75c6b76556862fd4b682da3f9345ccdd374cb347 | [
"MIT"
]
| permissive | Tuohai-Li/flexget_qbittorrent_mod | 3f3f9df45680d27853f44bee6421ceb750d9d01e | 81e3bb473f82dce759da3795b2e89bfc7717d3bb | refs/heads/master | 2023-09-02T17:46:42.077469 | 2021-11-17T04:06:53 | 2021-11-17T04:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | from ..schema.nexusphp import AttendanceHR
from ..schema.site_base import Work, SignState
from ..utils.net_utils import NetUtils
class MainClass(AttendanceHR):
URL = 'https://www.pttime.org/'
USER_CLASSES = {
'downloaded': [3221225472000, 16106127360000],
'share_ratio': [3.05, 4.55],
'days': [112, 364]
}
def build_workflow(self, entry, config):
return [
Work(
url='/attendance.php',
method='get',
succeed_regex=[
'这是你的第.*?次签到,已连续签到.*天,本次签到获得.*个魔力值。',
'获得魔力值:\\d+',
'你今天已经签到过了,请勿重复刷新。'],
check_state=('final', SignState.SUCCEED),
is_base_content=True
)
]
def build_selector(self):
selector = super(MainClass, self).build_selector()
NetUtils.dict_merge(selector, {
'detail_sources': {
'default': {
'elements': {
'bar': '#info_block',
'table': '#outer table.main:last-child'
}
}
}
})
return selector
def get_nexusphp_message(self, entry, config):
super(MainClass, self).get_nexusphp_message(entry, config, unread_elements_selector='td > i[alt*="Unread"]')
| [
"[email protected]"
]
| |
c82569cd70f74e9a2395eb42b18ea0e8e570ef28 | 0693cce8efbeca806f4551c22dce60d5f392c5c9 | /contentful_management/content_type_resource_proxy.py | ee4655778b24c6a08f53eb004c1fabac287e1758 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | timwis/contentful-management.py | 2dc4b7389ca2136ee2a12b89812b18ef2a347e67 | d71a0e18205d1de821b41c7225e8244e786be7f3 | refs/heads/master | 2021-06-28T12:04:58.130393 | 2017-08-10T16:30:09 | 2017-08-10T16:32:50 | 103,517,328 | 0 | 0 | null | 2017-09-14T10:04:48 | 2017-09-14T10:04:48 | null | UTF-8 | Python | false | false | 1,557 | py | """
contentful_management.content_type_resource_proxy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the ContentTypeResourceProxy class.
:copyright: (c) 2017 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class ContentTypeResourceProxy(object):
"""Base class for content type related resource proxies."""
def __init__(self, client, space_id, content_type_id):
self.proxy = self._resource_proxy_class()(client, space_id, content_type_id)
def __repr__(self):
return "<{0} space_id='{1}' content_type_id='{2}'>".format(
self.__class__.__name__,
self.proxy.space_id,
self.proxy.content_type_id
)
def _resource_proxy_class(self):
raise Exception("Must implement")
def all(self, query=None):
"""
Gets all resources related to the current content type.
"""
return self.proxy.all(query)
def find(self, resource_id, query=None):
"""
Finds a single resource by ID related to the current content type.
"""
return self.proxy.find(resource_id, query)
def create(self, resource_id=None, attributes=None):
"""
Creates a resource with a given ID (optional) and attributes for the current content type.
"""
return self.proxy.create(resource_id=resource_id, attributes=attributes)
def delete(self, resource_id):
"""
Deletes a resource by ID.
"""
return self.proxy.delete(resource_id)
| [
"[email protected]"
]
| |
16742bddb05036e20ed2557be42711b55d39a3d1 | 567e89b21aca23db5f14032889fdd1cb7c7700f7 | /test19.py | 47f860f38a388291628ba60b577d81da05f51c43 | []
| no_license | MarcPartensky/Python-2018 | 7ab83d42eb28b34bed88fc6fb77892e62094dd8d | 27d2a57a6b6d6cdaa883fd2ce55e1c5eefd13ccc | refs/heads/master | 2020-04-17T13:12:41.448439 | 2019-01-19T23:55:05 | 2019-01-19T23:55:05 | 166,605,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | import sys, select, os
i = 0
while True:
print(input(""))
| [
"[email protected]"
]
| |
1033b438cc562e7a9ed44ead113b464f37380f81 | 1e70fa970f3c0f7d71273b5aaf97e2dfdaf249ec | /axi/planner.py | 62a712bebc75653c3622814abd414afbfcadfb36 | [
"MIT"
]
| permissive | sgentle/axi | fbbd423560078878f4fdcc2e5bb6354d04077634 | cc4551e990713aa28fd6a3e10634b825041f6e3e | refs/heads/master | 2021-04-06T15:32:58.904837 | 2018-03-08T11:30:37 | 2018-03-08T11:30:37 | 124,370,678 | 1 | 0 | null | 2018-03-08T09:45:20 | 2018-03-08T09:45:20 | null | UTF-8 | Python | false | false | 7,163 | py | from __future__ import division
from bisect import bisect
from collections import namedtuple
from math import sqrt, hypot
# a planner computes a motion profile for a list of (x, y) points
class Planner(object):
def __init__(self, acceleration, max_velocity, corner_factor):
self.acceleration = acceleration
self.max_velocity = max_velocity
self.corner_factor = corner_factor
def plan(self, points):
return constant_acceleration_plan(
points, self.acceleration, self.max_velocity, self.corner_factor)
def plan_all(self, paths):
return [self.plan(path) for path in paths]
# a plan is a motion profile generated by the planner
class Plan(object):
def __init__(self, blocks):
self.blocks = blocks
self.ts = [] # start time of each block
self.ss = [] # start distance of each block
t = 0
s = 0
for b in blocks:
self.ts.append(t)
self.ss.append(s)
t += b.t
s += b.s
self.t = t # total time
self.s = s # total duration
def instant(self, t):
t = max(0, min(self.t, t)) # clamp t
i = bisect(self.ts, t) - 1 # find block for t
return self.blocks[i].instant(t - self.ts[i], self.ts[i], self.ss[i])
# a block is a constant acceleration for a duration of time
class Block(object):
def __init__(self, a, t, vi, p1, p2):
self.a = a
self.t = t
self.vi = vi
self.p1 = p1
self.p2 = p2
self.s = p1.distance(p2)
def instant(self, t, dt=0, ds=0):
t = max(0, min(self.t, t)) # clamp t
a = self.a
v = self.vi + self.a * t
s = self.vi * t + self.a * t * t / 2
s = max(0, min(self.s, s)) # clamp s
p = self.p1.lerps(self.p2, s)
return Instant(t + dt, p, s + ds, v, a)
# an instant gives position, velocity, etc. at a single point in time
Instant = namedtuple('Instant', ['t', 'p', 's', 'v', 'a'])
# a = acceleration
# v = velocity
# s = distance
# t = time
# i = initial
# f = final
# vf = vi + a * t
# s = (vf + vi) / 2 * t
# s = vi * t + a * t * t / 2
# vf * vf = vi * vi + 2 * a * s
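# worked example of the relations above (illustrative numbers only):
# starting from rest (vi = 0) with a = 2 for t = 3 gives vf = 0 + 2*3 = 6 and
# s = 0*3 + 2*3*3/2 = 9; consistency check: vf*vf = 36 = vi*vi + 2*a*s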
EPS = 1e-9
_Point = namedtuple('Point', ['x', 'y'])
class Point(_Point):
def length(self):
return hypot(self.x, self.y)
def normalize(self):
d = self.length()
if d == 0:
return Point(0, 0)
return Point(self.x / d, self.y / d)
def distance(self, other):
return hypot(self.x - other.x, self.y - other.y)
def add(self, other):
return Point(self.x + other.x, self.y + other.y)
def sub(self, other):
return Point(self.x - other.x, self.y - other.y)
def mul(self, factor):
return Point(self.x * factor, self.y * factor)
def dot(self, other):
return self.x * other.x + self.y * other.y
def lerps(self, other, s):
v = other.sub(self).normalize()
return self.add(v.mul(s))
Triangle = namedtuple('Triangle',
['s1', 's2', 't1', 't2', 'vmax', 'p1', 'p2', 'p3'])
def triangle(s, vi, vf, a, p1, p3):
# compute a triangular profile: accelerating, decelerating
s1 = (2 * a * s + vf * vf - vi * vi) / (4 * a)
s2 = s - s1
vmax = (vi * vi + 2 * a * s1) ** 0.5
t1 = (vmax - vi) / a
t2 = (vf - vmax) / -a
p2 = p1.lerps(p3, s1)
return Triangle(s1, s2, t1, t2, vmax, p1, p2, p3)
Trapezoid = namedtuple('Trapezoid',
['s1', 's2', 's3', 't1', 't2', 't3', 'p1', 'p2', 'p3', 'p4'])
def trapezoid(s, vi, vmax, vf, a, p1, p4):
# compute a trapezoidal profile: accelerating, cruising, decelerating
t1 = (vmax - vi) / a
s1 = (vmax + vi) / 2 * t1
t3 = (vf - vmax) / -a
s3 = (vf + vmax) / 2 * t3
s2 = s - s1 - s3
t2 = s2 / vmax
p2 = p1.lerps(p4, s1)
p3 = p1.lerps(p4, s - s3)
return Trapezoid(s1, s2, s3, t1, t2, t3, p1, p2, p3, p4)
def corner_velocity(s1, s2, vmax, a, delta):
# compute a maximum velocity at the corner of two segments
# https://onehossshay.wordpress.com/2011/09/24/improving_grbl_cornering_algorithm/
cosine = -s1.vector.dot(s2.vector)
if abs(cosine - 1) < EPS:
return 0
sine = sqrt((1 - cosine) / 2)
if abs(sine - 1) < EPS:
return vmax
v = sqrt((a * delta * sine) / (1 - sine))
return min(v, vmax)
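# illustrative case for corner_velocity (numbers for intuition only): for a
# right-angle corner the segment vectors are perpendicular, so cosine = 0,
# sine = sqrt(1/2) ~ 0.707, and the corner speed works out to
# sqrt(a * delta * 0.707 / 0.293) ~ 1.55 * sqrt(a * delta), capped at vmax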
class Segment(object):
# a segment is a line segment between two points, which will be broken
# up into blocks by the planner
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.length = p1.distance(p2)
self.vector = p2.sub(p1).normalize()
self.max_entry_velocity = 0
self.entry_velocity = 0
self.blocks = []
def constant_acceleration_plan(points, a, vmax, cf):
# make sure points are Point objects
points = [Point(x, y) for x, y in points]
# create segments for each consecutive pair of points
segments = [Segment(p1, p2) for p1, p2 in zip(points, points[1:])]
# compute a max_entry_velocity for each segment
# based on the angle formed by the two segments at the vertex
for s1, s2 in zip(segments, segments[1:]):
v = corner_velocity(s1, s2, vmax, a, cf)
s2.max_entry_velocity = v
# add a dummy segment at the end to force a final velocity of zero
segments.append(Segment(points[-1], points[-1]))
# loop over segments
i = 0
while i < len(segments) - 1:
# pull out some variables
segment = segments[i]
next_segment = segments[i + 1]
s = segment.length
vi = segment.entry_velocity
vexit = next_segment.max_entry_velocity
p1 = segment.p1
p2 = segment.p2
# determine which profile to use for this segment
m = triangle(s, vi, vexit, a, p1, p2)
if m.s1 < -EPS:
# too fast! update max_entry_velocity and backtrack
segment.max_entry_velocity = sqrt(vexit * vexit + 2 * a * s)
i -= 1
elif m.s2 < 0:
# accelerate
vf = sqrt(vi * vi + 2 * a * s)
t = (vf - vi) / a
segment.blocks = [
Block(a, t, vi, p1, p2),
]
next_segment.entry_velocity = vf
i += 1
elif m.vmax > vmax:
# accelerate, cruise, decelerate
z = trapezoid(s, vi, vmax, vexit, a, p1, p2)
segment.blocks = [
Block(a, z.t1, vi, z.p1, z.p2),
Block(0, z.t2, vmax, z.p2, z.p3),
Block(-a, z.t3, vmax, z.p3, z.p4),
]
next_segment.entry_velocity = vexit
i += 1
else:
# accelerate, decelerate
segment.blocks = [
Block(a, m.t1, vi, m.p1, m.p2),
Block(-a, m.t2, m.vmax, m.p2, m.p3),
]
next_segment.entry_velocity = vexit
i += 1
# concatenate all of the blocks
blocks = []
for segment in segments:
blocks.extend(segment.blocks)
# filter out zero-duration blocks and return
blocks = [b for b in blocks if b.t > EPS]
return Plan(blocks)
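# Minimal usage sketch (not part of the original module): the acceleration,
# velocity limit, corner factor and waypoints below are arbitrary examples.
if __name__ == '__main__':
    planner = Planner(acceleration=8, max_velocity=4, corner_factor=0.001)
    plan = planner.plan([(0, 0), (10, 0), (10, 10)])
    for k in range(5):
        t = plan.t * k / 4
        moment = plan.instant(t)
        print('t=%.3f  v=%.3f  p=(%.2f, %.2f)' % (t, moment.v, moment.p.x, moment.p.y))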
| [
"[email protected]"
]
| |
be0ace15d0a17f5c8094cd0c651183e8263917e7 | c6cbc2adf3acd58c8892874d0172b9844129595f | /web_flask/3-python_route.py | a85c50ff586ccdb64ea212a5e68027cc3b9e586f | []
| no_license | Joldiazch/AirBnB_clone_v2 | 4dac319e03f94ba677e5a3e17801958223c78552 | a9c2d54991e2e956fe27c89ece0ecc3400b045c1 | refs/heads/master | 2021-05-25T21:12:54.056693 | 2020-04-22T23:52:46 | 2020-04-22T23:52:46 | 253,921,574 | 1 | 0 | null | 2020-04-07T21:59:16 | 2020-04-07T21:59:16 | null | UTF-8 | Python | false | false | 824 | py | #!/usr/bin/python3
""" that starts a Flask web application """
# import flask
from flask import Flask
app = Flask(__name__)
# strict_slashes allow that this route work with /my_route and /my_route/
@app.route('/', strict_slashes=False)
def root():
""" return Hello HBNB """
return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb():
""" return HBNB """
return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def show_text(text):
    """ return C followed by the value of text (underscores become spaces) """
    text = text.replace('_', ' ')
    return 'C {}'.format(text)
@app.route('/python', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def show_python_text(text='is cool'):
    """ return Python followed by the value of text (defaults to 'is cool') """
    text = text.replace('_', ' ')
    return 'Python {}'.format(text)
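# Example requests this app would answer once running (values illustrative):
#   GET http://0.0.0.0:5000/                 -> "Hello HBNB!"
#   GET http://0.0.0.0:5000/c/is_fun         -> "C is fun"
#   GET http://0.0.0.0:5000/python           -> "Python is cool"
#   GET http://0.0.0.0:5000/python/is_magic  -> "Python is magic"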
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5000)
| [
"[email protected]"
]
| |
4f824041f4d0d7bd13c488ef541522624434f08a | 90bc7032cda25da6541a976f37e5b9f491b70bd0 | /nrlbio/chimirna/lfc2cdf.py | 9c5e2d701dd00f6abf6a59d897007688e137812b | []
| no_license | afilipch/nrlbio | da89bb262e9b900b5b71cf14612ace9630263e61 | d05258e4b1e352130999a23e4d2b9717b8f834c9 | refs/heads/master | 2020-04-12T01:21:19.231931 | 2017-03-17T16:16:08 | 2017-03-17T16:16:08 | 52,795,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | #! /usr/bin/python
'''produces table of LFC CDF for tables of genes'''
import sys;
import copy;
import os;
import argparse;
from collections import *;
import itertools
from scipy.stats import ks_2samp, mannwhitneyu
parser = argparse.ArgumentParser(description='Script outputs presence of certain binding modes in the interactions');
parser.add_argument('path', metavar = 'N', nargs = '+', type = str, help = "path to gene names tables");
parser.add_argument('--lfc', nargs = '?', type = str, help = "path to the table which connects gene names to LFC")
parser.add_argument('-o', '--output', nargs = '?', type = str, help = "name of the output")
args = parser.parse_args();
def get_genes(path):
genes = set();
f = open(path);
for l in f:
genes.add(l.strip());
f.close()
return genes;
gene_list = [];
for path in args.path:
gene_list.append(get_genes(path));
lfc = {};
f = open(args.lfc);
for l in f:
a = l.strip().split("\t");
try:
lfc[a[0]] = float(a[1]);
except:
pass;
f.close();
lfc_list =[lfc.values()];
for genes in gene_list:
tlfc = [];
for g in genes:
try:
tlfc.append(lfc[g]);
except:
pass;
#print sys.stderr.write("%s\n" % g);
lfc_list.append(copy.copy(tlfc));
##output
o = open(args.output, 'w')
for i in range(len(lfc)):
a = []
for el in lfc_list:
try:
v = str(el[i])
except:
v = " "
a.append(v);
o.write("\t".join(a) + "\n")
o.close()
for i in range(len(lfc_list)):
print ('set n%d\tlength %d' % (i, len(lfc_list[i])))
#>>> output p-values:
for k1, k2 in itertools.combinations(list(range(len(lfc_list))), 2):
print "differnce between gene set %d and gene set %d" % (k1+1,k2+1)
print sys.stderr.write("KS statistics \t%1.3f\tp value\t%.2e\n" % ks_2samp(lfc_list[k1], lfc_list[k2]));
print
#print sys.stderr.write("KS statistics \t%1.3f\tp value\t%.2e\n" % ks_2samp(lfc_list[1] + lfc_list[2], lfc_list[0] ));
| [
"afilipch@a58d7b47-41b6-4d21-bc94-2c2c30e24b6a"
]
| afilipch@a58d7b47-41b6-4d21-bc94-2c2c30e24b6a |
9f38fdd55389eccffedf6e73cd9bedac9c27be08 | 9120120ee0b52f24c627759b0901afbc55347529 | /pygis_src/ch06_spatialite/sec3_access_sqlite_via_python/test_3_import_shapefile_x_x.py | 959744afe35441f3c6f3152ce4654d40af5fc49d | []
| no_license | xiaomozi/book_python_gis | d999b17833abe746a7be2683595f48b54071cd59 | 754fa10c17a20506146d8f409e035e4d4869ad3e | refs/heads/master | 2020-03-11T18:36:40.739267 | 2018-01-04T16:31:23 | 2018-01-04T16:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
import os; import sqlite3 as sqlite
dbfile = 'xx_shapedb.sqlite'
if os.path.exists(dbfile): os.remove(dbfile)
db = sqlite.connect(dbfile)
db.enable_load_extension(True)
db.execute('SELECT load_extension("mod_spatialite.so.7")')
cursor = db.cursor()
cursor.execute('SELECT InitSpatialMetaData();')
cursor.execute("DROP TABLE IF EXISTS gshhs")
cursor.execute("CREATE TABLE gshhs (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT, " +
"level INTEGER)")
cursor.execute("CREATE INDEX gshhs_level on gshhs(level)")
cursor.execute("SELECT AddGeometryColumn('gshhs', 'geom', " +
"4326, 'POLYGON', 2)")
cursor.execute("SELECT CreateSpatialIndex('gshhs', 'geom')")
db.commit()
sql_tpl = "INSERT INTO gshhs (level, geom) VALUES (2, GeomFromText('{0}', 4326))"
import ogr
fName = '/gdata/GSHHS_l/GSHHS_l_L2.shp'
shapefile = ogr.Open(fName)
layer = shapefile.GetLayer(0)
for i in range(layer.GetFeatureCount()):
feature = layer.GetFeature(i)
geometry = feature.GetGeometryRef()
wkt = geometry.ExportToWkt()
cursor.execute( sql_tpl.format(wkt))
db.commit()
| [
"[email protected]"
]
| |
9de3642c57a0d6237c06147471395720fc4207a4 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/scaleform/daapi/view/meta/moduleinfometa.py | d7d29dd3d8f30586b5f883e94565df3dd822e3ea | []
| no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 967 | py | # 2015.11.10 21:27:57 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ModuleInfoMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ModuleInfoMeta(AbstractWindowView):
def onCancelClick(self):
self._printOverrideError('onCancelClick')
def onActionButtonClick(self):
self._printOverrideError('onActionButtonClick')
def as_setModuleInfoS(self, moduleInfo):
if self._isDAAPIInited():
return self.flashObject.as_setModuleInfo(moduleInfo)
def as_setActionButtonS(self, data):
if self._isDAAPIInited():
return self.flashObject.as_setActionButton(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\moduleinfometa.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:27:57 Střední Evropa (běžný čas)
| [
"[email protected]"
]
| |
5bac41f8f6ba96763a05d3c22ca3bc5063f102a4 | fff80d8049aa19dacc01e48a21032fa74f069441 | /Chapter_15_Generating_Data/mpl_squares_correct.py | 6eb1554d6bb2d9a12e7bcb4d1c83c1173e396f52 | [
"MIT"
]
| permissive | charliealpha094/Project_Data_Visualization | a77d5e8290de0fa416394e188e349bf198499ff1 | ccd55db58927dbbcfd57ab750fe7b21754c2b2dc | refs/heads/master | 2022-11-29T13:24:52.725995 | 2020-08-09T09:20:02 | 2020-08-09T09:20:02 | 285,671,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #Done by Carlos Amaral (19/07/2020)
#Styling and correcting
import matplotlib.pyplot as plt
input_values = [1,2,3,4,5]
squares = [1, 4, 9, 16, 25]
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(input_values, squares, linewidth = 3)
#Set chart title and label axes.
ax.set_title("Square Numbers", fontsize = 24)
ax.set_xlabel("Value", fontsize = 14)
ax.set_ylabel("Square of Value", fontsize = 14)
#Set size tick labels.
ax.tick_params(axis = 'both', labelsize = 14)
plt.show() | [
"[email protected]"
]
| |
335642bb7e305c8a7e2f0448c2a1ec8d75c1a15b | 60dd6073a3284e24092620e430fd05be3157f48e | /tiago_public_ws/build/pal_gripper/pal_gripper_controller_configuration/catkin_generated/pkg.installspace.context.pc.py | 72d62140668fd30c806c4d71575a513fdfde1439 | []
| no_license | SakshayMahna/Programming-Robots-with-ROS | e94d4ec5973f76d49c81406f0de43795bb673c1e | 203d97463d07722fbe73bdc007d930b2ae3905f1 | refs/heads/master | 2020-07-11T07:28:00.547774 | 2019-10-19T08:05:26 | 2019-10-19T08:05:26 | 204,474,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_gripper_controller_configuration"
PROJECT_SPACE_DIR = "/media/root/BuntuDrive/Programming-Robots-with-ROS/tiago_public_ws/install"
PROJECT_VERSION = "1.0.2"
| [
"[email protected]"
]
| |
999b8d39e8434c5a1188b45fd7c163f18721d4d2 | db3662b35d48cc2b34b48350db603bb08fd47e81 | /tf_api/arithmetic_experiment.py | eb4d6e474336695b88f01debfe787147f2456e1b | []
| no_license | JuneXia/handml | 70b4c172940e0e10b2775ec0dad462f2b08f47bf | 35c23568065178e48347b440851ad5a38db5f93e | refs/heads/master | 2020-04-02T01:59:23.073289 | 2020-02-11T02:57:16 | 2020-02-11T02:57:16 | 153,885,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # 各种算术运算实验
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__1':
# ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8],dtype = tf.int32)
# indices = tf.constant([4, 3, 1, 7],dtype = tf.int32)
# updates = tf.constant([9, 10, 11, 12],dtype = tf.int32)
ref = tf.Variable([1, 2, 3, 4], dtype=tf.int32)
indices = tf.constant([1, 3], dtype=tf.int32)
updates = tf.constant([1, 3], dtype=tf.int32)
sub = tf.scatter_sub(ref, indices, updates)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
arr = sess.run(sub)
print(arr)
if __name__ == '__main__':
learning_rate = 0.1
decay_rate = 0.6
global_steps = 1000
decay_steps = 100
# global_ = tf.Variable(tf.constant(0))
global_ = tf.Variable(0)
c = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=True)
d = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=False)
T_C = []
F_D = []
with tf.Session() as sess:
for i in range(global_steps):
T_c = sess.run(c, feed_dict={global_: i})
T_C.append(T_c)
F_d = sess.run(d, feed_dict={global_: i})
F_D.append(F_d)
plt.figure(1)
plt.plot(range(global_steps), F_D, 'r-')
plt.plot(range(global_steps), T_C, 'b-')
plt.show()
| [
"[email protected]"
]
| |
1cec5040d91a46de4c6181b2f40f9673101e9b6d | f8d2521a88e465eed01adc3981c7a173d5c2554b | /etc/educational/round0001-0025/round0003/a1.py | 852c5741db4bf0680c354c4cf3bf35dd431c8deb | []
| no_license | clarinet758/codeforces | b2a8a349bba40e7761a8ce50dd5ff9a57477b60d | d79870c47bdb109547891a0d076dd173d6d647cf | refs/heads/main | 2021-12-15T05:46:51.000160 | 2021-12-01T12:01:33 | 2021-12-01T12:01:33 | 41,968,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import sys
import io
import re
import math
import itertools
import collections
import bisect
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
mod=1000000007
#mod=1777777777
pi=3.141592653589
IS=float('inf')
xy=[(1,0),(-1,0),(0,1),(0,-1)]
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]
def niten(a,b): return abs(a-b) if a>=0 and b>=0 else a+abs(b) if a>=0 else abs(a)+b if b>=0 else abs(abs(a)-abs(b))
def gcd(a,b): return a if b==0 else gcd(b,a%b)
def lcm(a,b): return a*b/gcd(a,b)
def euclid_dis(x1,y1,x2,y2): return ((x1-x2)**2+(y1-y2)**2)**0.5
def choco(xa,ya,xb,yb,xc,yc,xd,yd): return 1 if abs((yb-ya)*(yd-yc)+(xb-xa)*(xd-xc))<1.e-10 else 0
n=int(raw_input())
m=int(raw_input())
a=[]
for i in range(n):
a.append(int(raw_input()))
a.sort()
ans=chk=0
for i,j in enumerate(a[::-1]):
ans+=j
if ans>=m:
print i+1
break
exit()
n,k=map(int,raw_input().split())
l=map(int,raw_input().split())
#end = time.clock()
#print end - start
| [
"[email protected]"
]
| |
18534c36016112cf39c8feb7b8f6c6f3149600d6 | f5ffd566166948c4202eb1e66bef44cf55a70033 | /openapi_client/model/single_group_obj.py | 488b753fff9c74b25461ac8e906c70079b56882d | []
| no_license | skyportal/skyportal_client | ed025ac6d23589238a9c133d712d4f113bbcb1c9 | 15514e4dfb16313e442d06f69f8477b4f0757eaa | refs/heads/master | 2023-02-10T02:54:20.757570 | 2021-01-05T02:18:03 | 2021-01-05T02:18:03 | 326,860,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,667 | py | """
Fritz: SkyPortal API
SkyPortal provides an API to access most of its underlying functionality. To use it, you will need an API token. This can be generated via the web application from your profile page or, if you are an admin, you may use the system provisioned token stored inside of `.tokens.yaml`. ### Accessing the SkyPortal API Once you have a token, you may access SkyPortal programmatically as follows. #### Python ```python import requests token = 'ea70a5f0-b321-43c6-96a1-b2de225e0339' def api(method, endpoint, data=None): headers = {'Authorization': f'token {token}'} response = requests.request(method, endpoint, json=data, headers=headers) return response response = api('GET', 'http://localhost:5000/api/sysinfo') print(f'HTTP code: {response.status_code}, {response.reason}') if response.status_code in (200, 400): print(f'JSON response: {response.json()}') ``` #### Command line (curl) ```shell curl -s -H 'Authorization: token ea70a5f0-b321-43c6-96a1-b2de225e0339' http://localhost:5000/api/sysinfo ``` ### Response In the above examples, the SkyPortal server is located at `http://localhost:5000`. In case of success, the HTTP response is 200: ``` HTTP code: 200, OK JSON response: {'status': 'success', 'data': {}, 'version': '0.9.dev0+git20200819.84c453a'} ``` On failure, it is 400; the JSON response has `status=\"error\"` with the reason for the failure given in `message`: ```js { \"status\": \"error\", \"message\": \"Invalid API endpoint\", \"data\": {}, \"version\": \"0.9.1\" } ``` # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 0.9.dev0+git20201221.76627dd
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.group_obj import GroupObj
globals()['GroupObj'] = GroupObj
class SingleGroupObj(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('status',): {
'SUCCESS': "success",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'status': (str,), # noqa: E501
'message': (str,), # noqa: E501
'data': (GroupObj,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'status': 'status', # noqa: E501
'message': 'message', # noqa: E501
'data': 'data', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SingleGroupObj - a model defined in OpenAPI
Args:
Keyword Args:
status (str): defaults to "success", must be one of ["success", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
message (str): [optional] # noqa: E501
data (GroupObj): [optional] # noqa: E501
"""
status = kwargs.get('status', "success")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.status = status
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"[email protected]"
]
| |
365149b42675dd45c83444b739330f8d68f7586e | d6de6311ab2794cd3cce36ae0d1e591330941e8a | /2019/day09/part1_and_2.py | 623945fb25630ccddae81196e6974a98fb25a7e6 | []
| no_license | candyer/Advent-of-Code | a5346cffb4b9b1f45691c7f1b8d45bfd480b3fc0 | d5c6758c5b1feb66c4afb8ee773085a3751d8b37 | refs/heads/master | 2022-03-27T23:07:42.712877 | 2019-12-12T23:11:54 | 2019-12-12T23:11:54 | 112,863,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | import sys
from typing import List, Tuple, Dict
from collections import defaultdict
def breakDown(num: int) -> Tuple[int, List[int]]:
modes = list(map(int, str(num)[:-2][::-1]))
modes.extend([0] * (3 - len(modes)))
return (num % 100, modes)
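# Example of the decoding above: breakDown(1002) returns (2, [0, 1, 0]), i.e.
# opcode 2 with the first parameter in position mode, the second in immediate
# mode and the third (unused here) defaulting to position mode.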
def solve(d: Dict[int, int], inpt: int) -> int:
relative_base = 0
output = 0
i = 0
while True:
opcode, modes = breakDown(d[i])
if opcode == 99:
return output
a = i + 1 if modes[0] == 1 else d[i + 1] if modes[0] == 0 else d[i + 1] + relative_base
b = i + 2 if modes[1] == 1 else d[i + 2] if modes[1] == 0 else d[i + 2] + relative_base
c = d[i + 3] if modes[2] == 0 else d[i + 3] + relative_base
############################################################
if opcode == 1:
d[c] = d[a] + d[b]
i += 4
elif opcode == 2:
d[c] = d[a] * d[b]
i += 4
elif opcode == 3:
d[a] = inpt
i += 2
elif opcode == 4:
i += 2
output = d[a]
elif opcode == 5:
if d[a]:
i = d[b]
else:
i += 3
elif opcode == 6:
if d[a]:
i += 3
else:
i = d[b]
elif opcode == 7:
if d[a] < d[b]:
d[c] = 1
else:
d[c] = 0
i += 4
elif opcode == 8:
if d[a] == d[b]:
d[c] = 1
else:
d[c] = 0
i += 4
elif opcode == 9:
relative_base += d[a]
i += 2
if __name__ == '__main__':
for line in sys.stdin:
d = defaultdict(int)
i = 0
for num in map(int, line.split(',')):
d[i] = num
i += 1
print('part1 result: {}'.format(solve(d, 1)))
print('part2 result: {}'.format(solve(d, 2)))
| [
"[email protected]"
]
| |
f8532b1c94622cc29700d629fee455cb052c8cc0 | 1aaaca67031d81eabb07e9e9fb1a4fcae9de7462 | /dictionary.py | 090cb29f273b4decfd6bba42bd4c69c4ede70104 | []
| no_license | supriyo-pal/Joy-Of-Computing-Using-Python-All-programms | 9e08bdf4c2a88cc360c0cb296b217230f0ae0b2c | bd450dfdbc879e0b200d03fa9106ece09456fa8c | refs/heads/main | 2023-01-21T07:28:02.312421 | 2020-12-02T09:18:08 | 2020-12-02T09:18:08 | 317,161,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 10:24:41 2020
@author: Supriyo
"""
#dictionary is represented by {}
#example of dictionary {key:value}
conv_factor={'dollar':60,'euro': 80 }
print(conv_factor)
print(conv_factor["dollar"])
print(conv_factor["euro"]) | [
"[email protected]"
]
| |
5c77f798c033cb12c4cf21c9e04ca72aa05a8927 | ead94ab55e0dc5ff04964a23b16cc02ab3622188 | /src/commands/pendingsubscr.py | e90bca3836fe62c655cb330efdf290ad4acc1fdf | []
| no_license | caifti/openstack-security-integrations | 88d92d6fcfb2a5a438f3a7e98f2e738d7434476f | bddd51675fe0ad4123f23520f3fdc6a793bf7bbc | refs/heads/master | 2023-03-11T02:49:38.442233 | 2020-12-16T07:39:12 | 2020-12-16T07:39:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | # Copyright (c) 2014 INFN - "Istituto Nazionale di Fisica Nucleare" - Italy
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.db import transaction
from django.conf import settings
from django.core.management.base import CommandError
from openstack_auth_shib.models import PrjRequest
from openstack_auth_shib.models import RegRequest
from openstack_auth_shib.models import EMail
from openstack_auth_shib.models import PrjRole
from openstack_auth_shib.models import PSTATUS_PENDING
from openstack_auth_shib.notifications import notifyUser
from openstack_auth_shib.notifications import SUBSCR_REMINDER
from horizon.management.commands.cronscript_utils import CloudVenetoCommand
LOG = logging.getLogger("pendingsubscr")
class Command(CloudVenetoCommand):
def handle(self, *args, **options):
super(Command, self).handle(options)
admin_table = dict()
mail_table = dict()
req_table = dict()
try:
with transaction.atomic():
for prj_req in PrjRequest.objects.filter(flowstatus=PSTATUS_PENDING):
prjname = prj_req.project.projectname
if not req_table.has_key(prjname):
req_table[prjname] = list()
req_table[prjname].append(prj_req.registration.username)
for prjname in req_table.keys():
for p_role in PrjRole.objects.filter(project__projectname=prjname):
user_name = p_role.registration.username
user_id = p_role.registration.userid
user_tuple = (user_name, user_id)
if not admin_table.has_key(user_tuple):
admin_table[user_tuple] = list()
admin_table[user_tuple].append(p_role.project.projectname)
if not mail_table.has_key(user_name):
tmpres = EMail.objects.filter(registration__username=user_name)
if len(tmpres):
mail_table[user_name] = tmpres[0].email
for user_tuple in admin_table:
for prj_name in admin_table[user_tuple]:
try:
noti_params = {
'pendingreqs' : req_table[prj_name],
'project' : prj_name
}
notifyUser(mail_table[user_tuple[0]], SUBSCR_REMINDER, noti_params,
dst_user_id=user_tuple[1])
except:
LOG.error("Cannot notify pending subscription: %s" % user_tuple[0],
exc_info=True)
except:
LOG.error("Cannot notify pending subscritions: system error", exc_info=True)
raise CommandError("Cannot notify pending subscritions")
| [
"[email protected]"
]
| |
8570f0b581dbd4ad8bbf7a06e2040630abddadc0 | 9784a90cac667e8e0aaba0ca599b4255b215ec67 | /chainer_/datasets/svhn_cls_dataset.py | 41da30bcecc81e26b5d6dc68f2b4f4fc67cabb8f | [
"MIT"
]
| permissive | osmr/imgclsmob | d2f48f01ca541b20119871393eca383001a96019 | f2993d3ce73a2f7ddba05da3891defb08547d504 | refs/heads/master | 2022-07-09T14:24:37.591824 | 2021-12-14T10:15:31 | 2021-12-14T10:15:31 | 140,285,687 | 3,017 | 624 | MIT | 2022-07-04T15:18:37 | 2018-07-09T12:57:46 | Python | UTF-8 | Python | false | false | 1,587 | py | """
SVHN classification dataset.
"""
import os
from chainer.dataset import DatasetMixin
from chainer.datasets.svhn import get_svhn
from .cifar10_cls_dataset import CIFAR10MetaInfo
class SVHN(DatasetMixin):
"""
SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/.
Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0`.
Parameters:
----------
root : str, default '~/.chainer/datasets/svhn'
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".chainer", "datasets", "svhn"),
mode="train",
transform=None):
assert (root is not None)
self.transform = transform
train_ds, test_ds = get_svhn()
self.base = train_ds if mode == "train" else test_ds
def __len__(self):
return len(self.base)
def get_example(self, i):
image, label = self.base[i]
image = self.transform(image)
return image, label
class SVHNMetaInfo(CIFAR10MetaInfo):
def __init__(self):
super(SVHNMetaInfo, self).__init__()
self.label = "SVHN"
self.root_dir_name = "svhn"
self.dataset_class = SVHN
self.num_training_samples = 73257
| [
"[email protected]"
]
| |
2473d2a63db36b6ae3c726be1d617079d514ab75 | 7bbfc7dc130f31c2e78b1feb6ff48888e4985c08 | /static/main.py | c06dd9b40934e457fb3e2162b3acb5e0dc47e753 | []
| no_license | swappt/CodeIslands | 7ce80e6297c2280ec24b8f0d773ffc0242761d52 | ef5419eb668cbbc22a290ff00a5933dfe94c0653 | refs/heads/master | 2021-09-06T07:10:35.849987 | 2018-02-03T16:10:32 | 2018-02-03T16:10:32 | 105,559,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,054 | py | print('loading main.py')
from browser import document, alert, window, timer
from math import sin, cos, tan, radians
from random import choice
result = window.gebi('result')
start = window.gebi('start')
win = window.gebi('win')
challenges = window.gebi('challenges')
display = window.gebi('display')
map_ = window.gebi('map')
cloud_canvas = window.gebi('clouds_canvas')
cloud_ctx = cloud_canvas.getContext('2d')
ctx = display.getContext('2d')
map_ctx = map_.getContext('2d')
class Turtle(object):
angles = [62.64,-62.64,-117.36,117.36]
def __init__(self):
self.x = 0
self.y = 0
self.angle = 0
self.disp_x = 0
self.disp_y = 0
self.origin_x, self.origin_y = window.data['player_origin']
self.collision_map = window.data['collisions']
def collision(self):
x_pos = round(self.origin_x + self.disp_x)
y_pos = round(self.origin_y + self.disp_y)
try:
if self.collision_map[y_pos][x_pos]:
return 1
except IndexError:
return 1
return 0
def forward(self,dist):
self.x += sin(radians(self.angles[self.angle]))*dist*(display.height/(2*iso_scale))/(cos(radians(62.64)))
self.y += cos(radians(self.angles[self.angle]))*dist*(display.height/(2*iso_scale))/(cos(radians(62.64)))
if self.angle == 0:
self.disp_x += dist
elif self.angle == 1:
self.disp_y += dist
elif self.angle == 2:
self.disp_x -= dist
elif self.angle == 3:
self.disp_y -= dist
else:
print('Issue in displacement tracking!')
def backward(self,dist):
self.x -= sin(radians(self.angles[self.angle]))*dist*(display.height/(2*iso_scale))/(cos(radians(62.64)))
self.y -= cos(radians(self.angles[self.angle]))*dist*(display.height/(2*iso_scale))/(cos(radians(62.64)))
if self.angle == 0:
self.disp_x -= dist
elif self.angle == 1:
self.disp_y -= dist
elif self.angle == 2:
self.disp_x += dist
elif self.angle == 3:
self.disp_y += dist
else:
print('Issue in displacement tracking!')
def left(self,rot):
tval = self.angle + int(rot)
self.angle = tval % 4
def right(self,rot):
tval = self.angle - int(rot)
self.angle = tval % 4
class Cloud(object):
x = 0
y = 0
speed = 0
src = window.gebi(choice(['cloud1', 'cloud2']))
turtle = Turtle()
cmd = {
'FORWARD' : turtle.forward,
'BACKWARD' : turtle.backward,
'LEFT' : turtle.left,
'RIGHT' : turtle.right,
'FOR ' : None,
'ENDFOR' : None
}
run_buffer = []
used_commands = []
runtime = None
def run():
global runtime
global run_buffer
global used_commands
variables = {}
window.gebi('error').style.width = '0vw'
window.gebi('error').style.opacity = 0
try:
window.clearInterval(runtime)
except:
pass
display.width = display.width
turtle.x = display.width / 2
turtle.y = display.height / 2
turtle.disp_x = 0
turtle.disp_y = 0
turtle.angle = 0
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(turtle.x + dis_center_x,turtle.y + dis_center_y)
ctx.stroke()
lines = window.editor.getValue().split('\n')
lines = list(filter(lambda x: bool(x), lines))
used_commands = []
run_buffer = []
for_buffer = []
current_buffer = run_buffer
for line in lines:
line = line.strip()
if '=' in line:
pre, post = line.split('=')
pre = pre.strip()
if pre[0] in '0123456789':
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": Declarations cannot begin with numericals'.format(line)
window.hide_error()
return
post = post.strip()
for char in pre:
if char not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_':
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": "{}" not allowed in variable declarations'.format(line, char)
window.hide_error()
return
variables[pre] = post
else:
for command, action in cmd.items():
if line.startswith(command):
used_commands.append(command)
if command in ['FORWARD', 'BACKWARD']:
try:
arg = int(line.split(' ',1)[1])
except ValueError:
try:
arg = int(variables[line.split(' ',1)[1]])
except ValueError:
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": {} only accepts integer values (whole numbers)'.format(line, command)
window.hide_error()
return
except KeyError:
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": variable "{}" not defined'.format(line, line.split(' ')[1])
window.hide_error()
return
for i in range(5*arg):
current_buffer.append(command + ' 0.2')
elif command in ['LEFT','RIGHT']:
current_buffer.append(command + ' 1')
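                    # FOR/ENDFOR are expanded at parse time: FOR pushes a buffer whose first
                    # element is the repeat count, ENDFOR pops it and replays its contents
                    # that many times into the enclosing buffer, so nothing loops at runtime.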
elif command in ['FOR ']:
try:
arg = int(line.split(' ',1)[1])
except ValueError:
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": {} only accepts integer values (whole numbers)'.format(line, command)
window.hide_error()
return
if len(for_buffer) > 0:
for_buffer.append([arg])
current_buffer = for_buffer[-1]
else:
for_buffer.append(arg)
current_buffer = for_buffer
elif command in ['ENDFOR']:
if current_buffer == for_buffer[-1]:
cur = for_buffer.pop(-1)
counts = cur.pop(0)
apval = cur*counts
try:
for item in apval:
for_buffer[-1].append(item)
current_buffer = for_buffer[-1]
except:
for item in apval:
for_buffer.append(item)
current_buffer = for_buffer
elif current_buffer == for_buffer:
counts = for_buffer.pop(0)
apval = for_buffer*counts
for item in apval:
run_buffer.append(item)
current_buffer = run_buffer
break
else:
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Error in line "{}": Unrecognised command-word'.format(line)
window.hide_error()
break
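    # Play the buffered micro-steps back on a 35 ms timer so the move animates
    # instead of jumping straight to the final position.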
runtime = window.setInterval(run_action,35)
def run_action():
if len(run_buffer) == 0:
print('exec finished')
window.clearInterval(runtime)
post_run()
return
a = run_buffer.pop(0)
arg = float(a.split(' ',1)[1])
cmd[a.split(' ')[0]](arg)
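    # Reassigning the canvas width is the usual trick to clear it before redrawing.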
display.width = display.width
imgs = ['front_r', 'front', 'back_l', 'back']
ctx.drawImage(window.gebi(imgs[turtle.angle]), turtle.x - 50, turtle.y - 100, 100, 100)
if turtle.collision():
window.clearInterval(runtime)
window.gebi('error').style.width = '42vw'
window.gebi('error').style.opacity = 1
window.gebi('error').innerHTML = 'Rodrick can\'t move there!'
window.hide_error()
def post_run():
if [int(turtle.disp_x), int(turtle.disp_y)] == window.data['displacement']:
completed = []
challenge_text = []
for typ, target in dict(window.data['targets']).items():
if typ == 'LINES':
if len(used_commands) <= target:
challenge_text.append('Use {} or less lines ⭐'.format(target))
completed.append([typ, target])
else:
challenge_text.append('Use {} or less lines'.format(target))
elif typ == '!USE':
for command in target:
if command in used_commands:
challenge_text.append('Don\'t use {} anywhere in your code'.format(', '.join(target)))
break
else:
challenge_text.append('Don\'t use {} anywhere in your code ⭐'.format(', '.join(target)))
completed.append([typ, target])
elif typ == 'USE':
for command in target:
if command in used_commands:
challenge_text.append('Apply a {} somewhere in your code ⭐'.format(', '.join(target)))
completed.append([typ, target])
break
else:
challenge_text.append('Apply a {} somewhere in your code'.format(', '.join(target)))
window.on_win()
challenges.innerHTML = '<br>'.join(challenge_text) + '<br><hr>'
keysdown = []
def keysDown(event):
keysdown.append(event.keyCode)
if 13 in keysdown and 16 in keysdown:
event.preventDefault()
run()
elif 17 in keysdown and 83 in keysdown:
event.preventDefault()
window.save(window.editor.getValue().replace('\n','\\'))
def keysUp(event):
try:
keysdown.remove(event.keyCode)
except:
pass
clouds = []
for i in range(4):
c = Cloud()
c.x = result.width * window.Math.random()
c.y = result.height * 0.5 * window.Math.random() + result.height * 0.04
c.speed = 2.2 * window.Math.random() + 1
clouds.append(c)
def clouds_render():
cloud_canvas.width = result.width
cloud_canvas.height = result.height
cloud_w = cloud_canvas.width / 5.5
cloud_h = cloud_w / 2
for cloud in clouds:
cloud.x -= cloud.speed
cloud_ctx.drawImage(cloud.src, cloud.x, cloud.y, cloud_w, cloud_h)
if cloud.x < -cloud_w:
cloud.x = result.width
cloud.y = result.height * 0.5 * window.Math.random() + result.height * 0.04
cloud.speed = 2 * window.Math.random() + 0.5
iso_scale = window.data['gridSize']
median_cell = ()
dis_center_x = 0
dis_center_y = 0
def resize_window(event):
display.width = result.width
display.height = result.height
map_.width = result.width
map_.height = result.height
cell_w = (display.height/iso_scale)*tan(radians(62.64))
cell_h = display.height/iso_scale
median_cell = (display.width/2,display.height/2)
## READING MAP DATA ##
for i,j,src,c in window.data['map']:
map_ctx.beginPath()
if c[0] == 'simpleFill':
img = window.gebi(src)
# ANTIALIAS #
oc = document.createElement('canvas')
oc2 = document.createElement('canvas')
octx = oc.getContext('2d')
octx2 = oc2.getContext('2d')
oc.width = img.width
oc.height = img.height
oc2.width = img.width
oc2.height = img.height
octx.drawImage(img, 0, 0, oc.width, oc.height)
octx2.drawImage(oc, 0, 0, oc.width * 0.5, oc.height * 0.5)
oc.width = oc.width
octx.drawImage(oc2, 0, 0, oc.width * 0.5, oc.height * 0.5)
oc2.width = oc2.width
octx2.drawImage(oc, 0, 0, oc.width * 0.5, oc.height * 0.5)
map_ctx.drawImage(
oc2, 0, 0, oc.width * 0.125, oc.height * 0.125,
(median_cell[0] + i*cell_w) - c[2][0] * cell_w,
(median_cell[1] + j*cell_h) - c[2][1] * cell_h,
c[1][0]*cell_w+c[2][0]*cell_w,
c[1][1]*cell_h+c[2][1]*cell_h
)
map_ctx.fill()
elif c == 'floor':
map_ctx.moveTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h - cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w + cell_w/2,median_cell[1] + j*cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h + cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
pattern = ctx.createPattern(window.gebi(src), 'repeat')
map_ctx.fillStyle = pattern
map_ctx.fill()
elif c == 'wall_l':
map_ctx.moveTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h - cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h - cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h + cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
pattern = map_ctx.createPattern(window.gebi(src), 'repeat')
map_ctx.fillStyle = pattern
map_ctx.fill()
map_ctx.beginPath()
map_ctx.globalAlpha = 0.5
map_ctx.moveTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h - cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h - cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h + cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h)
pattern = map_ctx.createPattern(window.gebi(src), 'repeat')
map_ctx.fillStyle = '#000000'
map_ctx.fill()
map_ctx.globalAlpha = 1
elif c == 'wall_r':
map_ctx.moveTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h - cell_h)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h - cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w - cell_w/2,median_cell[1] + j*cell_h + cell_h/2)
map_ctx.lineTo(median_cell[0] + i*cell_w,median_cell[1] + j*cell_h)
pattern = map_ctx.createPattern(window.gebi(src), 'repeat')
map_ctx.fillStyle = pattern
map_ctx.fill()
start.onclick = run
window.onresize = resize_window
window.setInterval(clouds_render,55)
document.onkeydown = keysDown
document.onkeyup = keysUp
def load():
resize_window(None)
window.hide_load()
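# NOTE: the call below runs load() immediately and stores its return value (None)
# in document.onload; it does not register load as an onload handler.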
document.onload = load()
print('main.py loaded')
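# Illustrative only (not part of the original file): a small program the run()
# parser above accepts, assuming it is typed into the window.editor pane.
# The variable name and values are made up for the example.
#
#   steps = 2
#   FOR 4
#       FORWARD steps
#       LEFT 1
#   ENDFOR
#
# This walks Rodrick around a square with sides of `steps` cells, provided the
# collision map allows every step.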
c738e7dd0a22c2c6d399f18ba00ba42343053ea5 | 90cad1df7b7d424feb8e71ff3d77e772d446afdf | /reebill/payment_dao.py | 7a26623a1f93df8ab5c4f36c04a68d01f167cc14 | []
| no_license | razagilani/billing | acb8044c22b4075250c583f599baafe3e09abc2e | fd2b20019eeedf0fcc781e5d81ff240be90c0b37 | refs/heads/master | 2021-05-01T14:46:32.138870 | 2016-03-09T18:55:09 | 2016-03-09T18:55:09 | 79,589,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,111 | py | from datetime import datetime
from sqlalchemy import and_
from core.model import Session, UtilityAccount
from reebill.exceptions import IssuedBillError
from reebill.reebill_model import ReeBillCustomer, Payment
class PaymentDAO(object):
'''CRUD for Payment objects. Some of these methods are used only in tests
and should be removed.
'''
def create_payment(self, account, date_applied, description,
credit, date_received=None):
'''Adds a new payment, returns the new Payment object. By default,
'date_received' is the current datetime in UTC when this method is
called; only override this for testing purposes.'''
# NOTE a default value for 'date_received' can't be specified as a
# default argument in the method signature because it would only get
# evaluated once at the time this module was imported, which means its
# value would be the same every time this method is called.
if date_received is None:
date_received = datetime.utcnow()
session = Session()
utility_account = session.query(UtilityAccount) \
.filter(UtilityAccount.account==account).one()
reebill_customer = session.query(ReeBillCustomer) \
.filter(ReeBillCustomer.utility_account==utility_account) \
.one()
new_payment = Payment(reebill_customer, date_received, date_applied,
description, credit)
session.add(new_payment)
session.flush()
return new_payment
def delete_payment(self, id):
'''Deletes the payment with the given id.'''
session = Session()
payment = session.query(Payment).filter(Payment.id == id).one()
if payment.reebill_id is not None:
raise IssuedBillError('payments cannot be deleted after they are'
'applied to an issued reebill')
session.delete(payment)
def find_payment(self, account, periodbegin, periodend):
'''Returns a list of payment objects whose date_applied is in
[periodbegin, period_end).'''
# periodbegin and periodend must be non-overlapping between bills. This
# is in direct opposition to the reebill period concept, which is a
# period that covers all services for a given reebill and thus overlap
# between bills. Therefore, a non overlapping period could be just the
# first utility service on the reebill. If the periods overlap,
# payments will be applied more than once. See 11093293
session = Session()
utility_account = session.query(UtilityAccount) \
.filter(UtilityAccount.account==account).one()
reebill_customer = session.query(ReeBillCustomer) \
.filter(ReeBillCustomer.utility_account==utility_account) \
.one()
payments = session.query(Payment) \
.filter(Payment.reebill_customer == reebill_customer) \
.filter(and_(Payment.date_applied >= periodbegin,
Payment.date_applied < periodend)).all()
return payments
def get_total_payment_since(self, account, start, end=None):
        '''Return a list of the account's payments applied on or after 'start'
        and before 'end' (now, UTC, by default). If 'start' is None, the
        beginning of the interval extends to the beginning of time.
        '''
        assert start is None or isinstance(start, datetime)
if end is None:
end=datetime.utcnow()
session = Session()
reebill_customer = session.query(ReeBillCustomer).join(
UtilityAccount).filter_by(account=account).one()
payments = session.query(Payment) \
.filter(Payment.reebill_customer==reebill_customer) \
.filter(Payment.date_applied < end)
if start is not None:
payments = payments.filter(Payment.date_applied >= start)
return payments.all()
def get_payments(self, account):
'''Returns list of all payments for the given account ordered by
date_received.'''
session = Session()
payments = session.query(Payment).join(ReeBillCustomer) \
.join(UtilityAccount) \
.filter(UtilityAccount.account == account).order_by(
Payment.date_received).all()
return payments
def get_payments_for_reebill_id(self, reebill_id):
session = Session()
payments = session.query(Payment) \
.filter(Payment.reebill_id == reebill_id).order_by(
Payment.date_received).all()
return payments
def update_payment(self, id, date_applied, description, credit):
session = Session()
payment = session.query(Payment).filter_by(id=id).one()
if payment.reebill_id is not None:
raise IssuedBillError('payments cannot be changed after they are'
'applied to an issued reebill')
payment.date_applied = date_applied
payment.description = description
payment.credit = credit
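# Illustrative usage sketch (not part of the original module). It assumes the
# SQLAlchemy Session is already configured and that utility account '10003'
# exists with an associated ReeBillCustomer; the account number and amounts are
# placeholders.
#
#   from datetime import datetime
#   dao = PaymentDAO()
#   payment = dao.create_payment('10003', datetime(2016, 1, 1), 'check', 150.0)
#   dao.update_payment(payment.id, datetime(2016, 1, 15), 'corrected check', 175.0)
#   january = dao.find_payment('10003', datetime(2016, 1, 1), datetime(2016, 2, 1))
#   total = sum(p.credit for p in january)
#   dao.delete_payment(payment.id)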
19e03a975d6e84656aa59f9380034203aaaba1c3 | d3fa8ded9d393ba9b03388ba7f05fc559cf31d1e | /Codes/antman/agent/framework/controllers/file.py | e53710b676cb8afb754414194c3a23579657a3a0 | []
| no_license | lengxu/YouYun | e20c4d8f553ccb245e96de177a67f776666e986f | b0ad8fd0b0e70dd2445cecb9ae7b00f7e0a20815 | refs/heads/master | 2020-09-13T22:30:49.642980 | 2017-11-27T03:13:34 | 2017-11-27T03:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,515 | py | # coding: utf-8
import nfs
import os
import logging
import time
from tornado import web, gen
from tornado.locks import Semaphore
from tornado.httpclient import AsyncHTTPClient
from framework import settings
from framework.config import config
MAX_BODY_SIZE = 4 * 1024.0 * 1024.0 * 1024.0 # 4GB
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
AsyncHTTPClient.configure(None, max_body_size=MAX_BODY_SIZE)
logger = logging.getLogger('default')
semaphore = Semaphore(config.get('file_service_semaphore', 5))
class FileHandler(web.RequestHandler):
@gen.coroutine
def get(self):
self.file_name = self.get_argument('filename') # type: str
self.space_dir = nfs.join(settings.REPO_DIR,
settings.REPO_ANT_SPACENAME)
if not nfs.exists(self.space_dir):
nfs.makedirs(self.space_dir)
self.file_path = nfs.join(self.space_dir, self.file_name)
lock_file_name = nfs.extsep + self.file_name + nfs.extsep + 'lock'
self.lock_file = nfs.join(self.space_dir, lock_file_name)
logger.info('#%d Request file: %s', id(self.request), self.file_name)
if nfs.exists(self.lock_file):
yield self.wait_for_file_complete()
else:
is_cache_hit = yield self.try_to_return_file_cache()
if is_cache_hit:
return
logger.info('#%d File cache missed: %s',
id(self.request), self.file_path)
nfs.touch(self.lock_file)
yield self.request_file_from_upstream()
@gen.coroutine
def try_to_return_file_cache(self):
is_cache_hit = False
if nfs.exists(self.file_path):
flag = yield self.check_file_mtime()
if flag:
logger.info('#%d File cache hit: %s',
id(self.request), self.file_path)
                self.write(self.file_path)  # return the local cache file path directly
is_cache_hit = True
else:
logger.info('#{} The cache file is too old and need to '
'download the new file'.format(id(self.request)))
nfs.remove(self.file_path)
raise gen.Return(is_cache_hit)
@gen.coroutine
def check_file_mtime(self):
is_match = False
try:
http_client = AsyncHTTPClient()
sep = '' if config['upstream'].endswith('/') else '/'
url = '{upstream}{sep}file/{filename}'.format(
upstream=config['upstream'], sep=sep, filename=self.file_name)
response = yield http_client.fetch(
url, method="HEAD", validate_cert=False)
m_time = response.headers.get('Last-Modified', None)
if m_time:
m_time = time.mktime(time.strptime(m_time, GMT_FORMAT))
file_m_time = os.stat(self.file_path).st_mtime
if m_time and file_m_time and m_time == file_m_time:
is_match = True
else:
logger.error('#{} The m_time from server is {}, the m_time '
'from cache is {} !'.format(
id(self.request), m_time, file_m_time))
except Exception as e:
logger.error('#{} Get Last-Modified from server error: {}'
.format(id(self.request), e))
raise gen.Return(is_match)
@gen.coroutine
def wait_for_file_complete(self):
logger.info('#%d File lock exists, waiting for complete: %s',
id(self.request), self.file_path)
lock_watch_interval = config.get('file_service_lock_watch_interval',
5.0)
current_timeout = 0.0
request_timeout = config.get('file_service_request_timeout', 3600.0)
while current_timeout < request_timeout:
yield gen.sleep(lock_watch_interval)
current_timeout += lock_watch_interval
if not nfs.exists(self.lock_file) and nfs.exists(self.file_path):
                self.write(self.file_path)  # cache file is complete; return its local path
return
else:
logger.info('#%d Waiting for file complete: %s',
id(self.request), self.file_path)
        # timed out while waiting for the file cache to complete
        self.send_error(504, message='Waiting for file complete timeout')
def on_file_chunk(self, chunk):
if self.temp_file and not self.temp_file.closed:
self.temp_file.write(chunk)
@gen.coroutine
def request_file_from_upstream(self):
        # no local cache and no lock file: request the file from the upstream server
try:
            yield semaphore.acquire()  # download critical section, keeps AsyncHTTPClient from being exhausted
self.temp_file = open(self.file_path, 'wb')
http_client = AsyncHTTPClient()
sep = '' if config['upstream'].endswith('/') else '/'
url = '{upstream}{sep}file/{filename}'.format(
upstream=config['upstream'], sep=sep, filename=self.file_name)
response = yield http_client.fetch(
url,
validate_cert=False,
streaming_callback=self.on_file_chunk,
connect_timeout=config.get('file_service_connect_timeout',
3600.0),
request_timeout=config.get('file_service_request_timeout',
3600.0))
self.generate_response(response)
except Exception as exc:
logger.error(
'#%d Error while fetching %s: %s',
id(self.request),
self.file_name,
exc,
exc_info=True)
self.send_error(500, message=exc)
finally:
yield semaphore.release()
self.close_file_resource()
def generate_response(self, response):
if response.code == 200:
logger.info('#%d Complete, change file last-modified',
id(self.request))
if self.temp_file and not self.temp_file.closed:
self.temp_file.close()
m_time = response.headers.get('Last-Modified', None)
m_time = time.mktime(time.strptime(m_time, GMT_FORMAT)) \
if m_time else time.time()
            # set the file's mtime to the server's Last-Modified so later requests can tell whether it changed
os.utime(self.file_path, (int(time.time()), int(m_time)))
self.write(self.file_path)
else:
logger.error('#%d Non-200 file response from upstream: %d',
id(self.request), response.code)
self.send_error(
500,
message='Non-200 file response from upstream:{}'
.format(response.code))
def close_file_resource(self):
try:
if self.temp_file and not self.temp_file.closed:
self.temp_file.close()
if nfs.exists(self.lock_file):
nfs.remove(self.lock_file)
except Exception as exc:
logger.error(
'#%d Error while closing resource (%s): %s',
id(self.request),
self.file_path,
exc,
exc_info=True)
            self.send_error(500, message=exc)  # FIXME: may be called after the request has already finished
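# Minimal wiring sketch (not part of the original project): how a handler like
# FileHandler is typically mounted in a Tornado application. The '/file' route
# and port below are assumptions for illustration only.
#
#   from tornado import ioloop, web
#
#   def make_app():
#       return web.Application([(r'/file', FileHandler)])
#
#   if __name__ == '__main__':
#       make_app().listen(8888)
#       ioloop.IOLoop.current().start()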
1dcce5aa67012ed2cdbf673010edcbb66619eacf | b58d01d25e1fe4d157834c3bd88d38bef5b07eff | /phy/cluster/manual/clustering.py | 82439ac5b85a7d51fc4e1c17f6c2daf35d79389f | []
| no_license | apeyrache/phy | 75f01f876c95d44dc08048eb87db09c5c3c19798 | 0c9b1c71666bb66d7856fab78decc7c2934c9027 | refs/heads/master | 2021-01-17T11:23:10.793863 | 2015-03-10T13:05:28 | 2015-03-10T13:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,041 | py | # -*- coding: utf-8 -*-
"""Clustering structure."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from collections import namedtuple, defaultdict, OrderedDict
from copy import deepcopy
import numpy as np
from ...ext.six import iterkeys, itervalues, iteritems
from ...utils.array import _as_array, _is_array_like
from ._utils import _unique, _spikes_in_clusters, _spikes_per_cluster
from ._update_info import UpdateInfo
from ._history import History
#------------------------------------------------------------------------------
# Clustering class
#------------------------------------------------------------------------------
def _extend_spikes(spike_ids, spike_clusters):
"""Return all spikes belonging to the clusters containing the specified
spikes."""
# We find the spikes belonging to modified clusters.
    # What are the old clusters that are modified by the assignment?
old_spike_clusters = spike_clusters[spike_ids]
unique_clusters = _unique(old_spike_clusters)
# Now we take all spikes from these clusters.
changed_spike_ids = _spikes_in_clusters(spike_clusters, unique_clusters)
# These are the new spikes that need to be reassigned.
extended_spike_ids = np.setdiff1d(changed_spike_ids, spike_ids,
assume_unique=True)
return extended_spike_ids
def _concatenate_spike_clusters(*pairs):
"""Concatenate a list of pairs (spike_ids, spike_clusters)."""
pairs = [(_as_array(x), _as_array(y)) for (x, y) in pairs]
concat = np.vstack(np.hstack((x[:, None], y[:, None]))
for x, y in pairs)
reorder = np.argsort(concat[:, 0])
concat = concat[reorder, :]
return concat[:, 0].astype(np.int64), concat[:, 1].astype(np.int64)
def _extend_assignement(spike_ids, old_spike_clusters, spike_clusters_rel):
# 1. Add spikes that belong to modified clusters.
# 2. Find new cluster ids for all changed clusters.
old_spike_clusters = _as_array(old_spike_clusters)
spike_ids = _as_array(spike_ids)
assert isinstance(spike_clusters_rel, (list, np.ndarray))
spike_clusters_rel = _as_array(spike_clusters_rel)
assert spike_clusters_rel.min() >= 0
# We renumber the new cluster indices.
new_cluster_id = old_spike_clusters.max() + 1
new_spike_clusters = (spike_clusters_rel +
(new_cluster_id - spike_clusters_rel.min()))
# We find the spikes belonging to modified clusters.
extended_spike_ids = _extend_spikes(spike_ids, old_spike_clusters)
if len(extended_spike_ids) == 0:
return spike_ids, new_spike_clusters
# We take their clusters.
extended_spike_clusters = old_spike_clusters[extended_spike_ids]
# Generate new cluster numbers.
k = new_spike_clusters.max() + 1
extended_spike_clusters += (k - extended_spike_clusters.min())
# Finally, we concatenate spike_ids and extended_spike_ids.
return _concatenate_spike_clusters((spike_ids,
new_spike_clusters),
(extended_spike_ids,
extended_spike_clusters))
def _assign_update_info(spike_ids,
old_spike_clusters, old_spikes_per_cluster,
new_spike_clusters, new_spikes_per_cluster):
old_clusters = _unique(old_spike_clusters)
new_clusters = _unique(new_spike_clusters)
descendants = list(set(zip(old_spike_clusters,
new_spike_clusters)))
update_info = UpdateInfo(description='assign',
spikes=spike_ids,
added=list(new_clusters),
deleted=list(old_clusters),
descendants=descendants,
old_spikes_per_cluster=old_spikes_per_cluster,
new_spikes_per_cluster=new_spikes_per_cluster,
)
return update_info
class Clustering(object):
"""Object representing a mapping from spike to cluster ids."""
def __init__(self, spike_clusters):
self._undo_stack = History(base_item=(None, None))
# Spike -> cluster mapping.
self._spike_clusters = _as_array(spike_clusters)
self._n_spikes = len(self._spike_clusters)
self._spike_ids = np.arange(self._n_spikes).astype(np.int64)
# Create the spikes per cluster structure.
self._update_all_spikes_per_cluster()
        # Keep a copy of the original spike clusters assignment.
self._spike_clusters_base = self._spike_clusters.copy()
def reset(self):
self._spike_clusters = self._spike_clusters_base
self._update_all_spikes_per_cluster()
@property
def spike_clusters(self):
"""Mapping spike to cluster ids."""
return self._spike_clusters
@property
def spikes_per_cluster(self):
"""Dictionary {cluster: array_of_spikes}."""
return self._spikes_per_cluster
@property
def cluster_ids(self):
"""Labels of all non-empty clusters, sorted by id."""
return sorted(self._spikes_per_cluster)
@property
def cluster_counts(self):
"""Number of spikes in each cluster."""
return {cluster: len(self._spikes_per_cluster[cluster])
for cluster in self.cluster_ids}
def new_cluster_id(self):
"""Return a new cluster id."""
return int(np.max(self.cluster_ids)) + 1
@property
def n_clusters(self):
"""Number of different clusters."""
return len(self.cluster_ids)
@property
def n_spikes(self):
return self._n_spikes
@property
def spike_ids(self):
return self._spike_ids
def spikes_in_clusters(self, clusters):
"""Return the spikes belonging to a set of clusters."""
return _spikes_in_clusters(self.spike_clusters, clusters)
# Actions
#--------------------------------------------------------------------------
def merge(self, cluster_ids, to=None):
"""Merge several clusters to a new cluster."""
if not _is_array_like(cluster_ids):
raise ValueError("The first argument should be a list or "
"an array.")
cluster_ids = sorted(cluster_ids)
if not set(cluster_ids) <= set(self.cluster_ids):
raise ValueError("Some clusters do not exist.")
# Find the new cluster number.
if to is None:
to = self.new_cluster_id()
if to < self.new_cluster_id():
raise ValueError("The new cluster numbers should be higher than "
"{0}.".format(self.new_cluster_id()))
# NOTE: we could have called self.assign() here, but we don't.
# We circumvent self.assign() for performance reasons.
# assign() is a relatively costly operation, whereas merging is a much
# cheaper operation.
# Find all spikes in the specified clusters.
spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)
# Create the UpdateInfo instance here.
descendants = [(cluster, to) for cluster in cluster_ids]
old_spc = {k: self._spikes_per_cluster[k] for k in cluster_ids}
new_spc = {to: spike_ids}
up = UpdateInfo(description='merge',
spikes=spike_ids,
added=[to],
deleted=cluster_ids,
descendants=descendants,
old_spikes_per_cluster=old_spc,
new_spikes_per_cluster=new_spc,
)
# Update the spikes_per_cluster structure directly.
self._spikes_per_cluster[to] = spike_ids
for cluster in cluster_ids:
del self._spikes_per_cluster[cluster]
# Assign the clusters.
self.spike_clusters[spike_ids] = to
# Add to stack.
self._undo_stack.add((spike_ids, [to]))
return up
def _update_all_spikes_per_cluster(self):
self._spikes_per_cluster = _spikes_per_cluster(self._spike_ids,
self._spike_clusters)
def _do_assign(self, spike_ids, new_spike_clusters):
"""Make spike-cluster assignements after the spike selection has
been extended to full clusters."""
# Ensure spike_clusters has the right shape.
spike_ids = _as_array(spike_ids)
if len(new_spike_clusters) == 1 and len(spike_ids) > 1:
new_spike_clusters = (np.ones(len(spike_ids), dtype=np.int64) *
new_spike_clusters[0])
old_spike_clusters = self._spike_clusters[spike_ids]
assert len(spike_ids) == len(old_spike_clusters)
assert len(new_spike_clusters) == len(spike_ids)
# Update the spikes per cluster structure.
clusters = _unique(old_spike_clusters)
old_spikes_per_cluster = {cluster: self._spikes_per_cluster[cluster]
for cluster in clusters}
new_spikes_per_cluster = _spikes_per_cluster(spike_ids,
new_spike_clusters)
self._spikes_per_cluster.update(new_spikes_per_cluster)
# All old clusters are deleted.
for cluster in clusters:
del self._spikes_per_cluster[cluster]
# We return the UpdateInfo structure.
up = _assign_update_info(spike_ids,
old_spike_clusters, old_spikes_per_cluster,
new_spike_clusters, new_spikes_per_cluster)
        # We make the assignments.
self._spike_clusters[spike_ids] = new_spike_clusters
return up
def assign(self, spike_ids, spike_clusters_rel=0):
"""Assign clusters to a number of spikes.
NOTE: spike_clusters_rel contains relative indices. They don't
correspond to final cluster ids: self.assign() handles the final
assignements to ensure that no cluster ends up modified. A cluster
can only be born, stay unchanged, or die.
"""
assert not isinstance(spike_ids, slice)
# Ensure 'spike_clusters_rel' is an array-like.
if not hasattr(spike_clusters_rel, '__len__'):
spike_clusters_rel = spike_clusters_rel * np.ones(len(spike_ids),
dtype=np.int64)
spike_ids = _as_array(spike_ids)
assert len(spike_ids) == len(spike_clusters_rel)
assert spike_ids.min() >= 0
assert spike_ids.max() < self._n_spikes
        # Normalize the spike-cluster assignment such that
        # there are only new or dead clusters, not modified clusters.
        # This implies that spikes not explicitly selected, but that
# belong to clusters affected by the operation, will be assigned
# to brand new clusters.
spike_ids, cluster_ids = _extend_assignement(spike_ids,
self._spike_clusters,
spike_clusters_rel)
up = self._do_assign(spike_ids, cluster_ids)
        # Add the assignment to the undo stack.
self._undo_stack.add((spike_ids, cluster_ids))
return up
def split(self, spike_ids):
"""Split a number of spikes into a new cluster."""
# self.assign() accepts relative numbers as second argument.
return self.assign(spike_ids, 0)
def undo(self):
"""Undo the last cluster assignement operation."""
self._undo_stack.back()
# Retrieve the initial spike_cluster structure.
spike_clusters_new = self._spike_clusters_base.copy()
# Loop over the history (except the last item because we undo).
for spike_ids, cluster_ids in self._undo_stack:
# We update the spike clusters accordingly.
if spike_ids is not None:
spike_clusters_new[spike_ids] = cluster_ids
# What are the spikes affected by the last changes?
changed = np.nonzero(self._spike_clusters !=
spike_clusters_new)[0]
clusters_changed = spike_clusters_new[changed]
return self._do_assign(changed,
clusters_changed)
def redo(self):
"""Redo the last cluster assignement operation."""
# Go forward in the stack, and retrieve the new assignement.
item = self._undo_stack.forward()
if item is None:
# No redo has been performed: abort.
return
spike_ids, cluster_ids = item
assert spike_ids is not None
# We apply the new assignement.
return self._do_assign(spike_ids,
cluster_ids)
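# Illustrative example (not part of the original module); it assumes the phy
# package is importable so that the relative imports above resolve.
#
#   import numpy as np
#   from phy.cluster.manual.clustering import Clustering
#
#   clustering = Clustering(np.array([0, 1, 1, 2, 2]))
#   clustering.cluster_ids            # [0, 1, 2]
#   up = clustering.merge([1, 2])     # clusters 1 and 2 become new cluster 3
#   up.added, up.deleted              # [3], [1, 2]
#   clustering.split([0])             # spike 0 moves to a brand new cluster
#   clustering.undo()                 # back to the state right after the merge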
b7e9a84dfee84fe3a63a89a7b9e557c566d47949 | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/eve/client/script/ui/structure/structureSettings/schedule/__init__.py | 2527ee24c593c6d6a1cfdf69e8d5bb20d7008db8 | []
| no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\structure\structureSettings\schedule\__init__.py
__author__ = 'bara'
a88b50f072fc292e98b1924ee430d4fb78ab9eb7 | dc50eb6176b4f5609519e912bc5379cea3fac9d2 | /Learn/spider/21DaysOfDistributedSpider/ch06/jianshu_spider/jianshu_spider/start.py | 93c5f64fabc256f15c10cbb8676d97c8fddd221c | []
| no_license | shuxiangguo/Python | 890c09a028e660206a8b3a8c7ca094a6f642095d | 089b2795e1db113dea6333d8dee6803071921cab | refs/heads/master | 2020-04-06T10:28:17.851981 | 2018-12-20T04:39:03 | 2018-12-20T04:39:03 | 157,381,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # encoding: utf-8
"""
@author: shuxiangguo
@file: start.py
@time: 2018-11-29 17:26:57
"""
from scrapy import cmdline
cmdline.execute("scrapy crawl js".split())
d82c316008ae3777ac502b60b15b2a6e27e8e845 | 7986ec6498e3f93967fa9bfe2b6a9d4056138293 | /Protheus_WebApp/Modules/SIGAPCP/MATA660TESTCASE.py | 16968c8e2418ee95143f196717d2106ec59cdfd2 | [
"MIT"
]
| permissive | HelenaAdrignoli/tir-script-samples | 7d08973e30385551ef13df15e4410ac484554303 | bb4f4ab3a49f723216c93f66a4395e5aa328b846 | refs/heads/master | 2023-02-21T11:26:28.247316 | 2020-04-28T16:37:26 | 2020-04-28T16:37:26 | 257,304,757 | 0 | 0 | MIT | 2020-04-20T14:22:21 | 2020-04-20T14:22:20 | null | UTF-8 | Python | false | false | 1,304 | py | from tir import Webapp
import unittest
class MATA660(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAPCP','26/04/2019','T1','D MG 01 ','10')
inst.oHelper.Program('MATA660')
def test_MATA660_001(self):
self.oHelper.SetButton('Outras Ações', 'Incluir')
self.oHelper.SetBranch('D MG 01')
self.oHelper.SetValue('H9_RECURSO','MT6601')
self.oHelper.SetValue('H9_MOTIVO','QUEBRA DE EQUIPAMENTO')
self.oHelper.SetValue('H9_DTINI','25/04/2019')
self.oHelper.SetValue('H9_DTFIM','27/04/2019')
self.oHelper.SetValue('H9_HRINI','10:00')
self.oHelper.SetValue('H9_HRFIM','15:00')
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Cancelar')
self.oHelper.SetButton('Visualizar')
self.oHelper.CheckResult('H9_RECURSO','MT6601')
self.oHelper.CheckResult('H9_CCUSTO','PCP000001')
self.oHelper.CheckResult('H9_MOTIVO','QUEBRA DE EQUIPAMENTO')
self.oHelper.CheckResult('H9_DTINI','25/04/2019')
self.oHelper.CheckResult('H9_DTFIM','27/04/2019')
self.oHelper.CheckResult('H9_HRINI','10:00')
self.oHelper.CheckResult('H9_HRFIM','15:00')
self.oHelper.SetButton('Cancelar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
    unittest.main()
b260ab5265f53119dfc20bd10fab69410b969e8d | 39a5908ff24b9a4d9b5e9a90f76ba248ec47fd39 | /mymultispider/mymultispider/spiders/myspd2.py | c1944c6bf3acf97d9f71130cedffe87a9a2df2b9 | []
| no_license | terroristhouse/crawler | 281b10ccc2490b4f1a86eae7ae819cf408f15bd8 | 3c501da46deef73b80e381d6d3c45cc049702d14 | refs/heads/master | 2020-12-27T03:28:20.586755 | 2020-09-08T03:43:56 | 2020-09-08T03:43:56 | 284,569,509 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
import scrapy
from mymultispider.items import Myspd2spiderItem
class Myspd2Spider(scrapy.Spider):
name = 'myspd2'
allowed_domains = ['sina.com.cn']
start_urls = ['http://sina.com.cn/']
# custom_settings = {
# 'ITEM_PIPELINES': {'mymultispider.pipelines.Myspd2spiderPipeline': 300},
# }
def parse(self, response):
print('myspd2')
item = Myspd2spiderItem()
item['name'] = 'myspd2的pipelines'
        yield item
17f36f2e6c5b6dc04263fd98c3913c332d50c9a7 | 7cd30248342dc83e0b49409bed4b3df378b629b1 | /sampling_image_15channels.py | d5d3bc787979ea380c94ef835d399663a0d72d43 | []
| no_license | minhnd3796/NGUYENDUCMINH_CODE | d34fc5cb0c9ba4108faf500170a8bea5bdef1d04 | 9fb27777ca0d40018c7154f7be19b420cf391471 | refs/heads/master | 2021-04-06T20:47:34.958473 | 2018-06-12T08:36:33 | 2018-06-12T08:36:33 | 125,452,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,190 | py | import os
import numpy as np
import scipy.misc as misc
base_dir_train = "../ISPRS_semantic_labeling_Vaihingen/train_15channels"
base_dir_validate = "../ISPRS_semantic_labeling_Vaihingen/validate_15channels"
base_dir_annotations = "../ISPRS_semantic_labeling_Vaihingen/annotations"
base_dir_top = "../ISPRS_semantic_labeling_Vaihingen/top"
base_dir_ndsm = "../ISPRS_semantic_labeling_Vaihingen/ndsm"
base_dir_dsm = "../ISPRS_semantic_labeling_Vaihingen/dsm"
base_dir_ndvi= "../ISPRS_semantic_labeling_Vaihingen/ndvi"
base_dir_L= "../ISPRS_semantic_labeling_Vaihingen/L"
base_dir_A= "../ISPRS_semantic_labeling_Vaihingen/A"
base_dir_B= "../ISPRS_semantic_labeling_Vaihingen/B"
base_dir_ele= "../ISPRS_semantic_labeling_Vaihingen/ele"
base_dir_azi= "../ISPRS_semantic_labeling_Vaihingen/azi"
base_dir_sat= "../ISPRS_semantic_labeling_Vaihingen/sat"
base_dir_entpy= "../ISPRS_semantic_labeling_Vaihingen/entpy"
base_dir_entpy2= "../ISPRS_semantic_labeling_Vaihingen/entpy2"
base_dir_texton= "../ISPRS_semantic_labeling_Vaihingen/texton"
base_dir_train_validate_gt = "../ISPRS_semantic_labeling_Vaihingen/train_validate_gt_15channels"
image_size = 224
num_cropping_per_image = 3333
validate_image=['top_mosaic_09cm_area11.png']
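# create_training_dataset() draws `num_cropping_per_image` random 224x224 windows
# from every annotated tile except the validation tile, stacks the top image
# (3 channels) with the twelve auxiliary rasters (nDSM, DSM, A, azi, B, ele,
# entpy, entpy2, L, ndvi, sat, texton) into a 15-channel float16 array saved as
# .npy, and writes the matching ground-truth crop as a PNG.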
def create_training_dataset():
for filename in os.listdir(base_dir_annotations):
if filename in validate_image:
continue
top_image = misc.imread(os.path.join(base_dir_top,os.path.splitext(filename)[0]+".tif"))
annotation_image = misc.imread(os.path.join(base_dir_annotations, filename))
dsm_image_name= filename.replace('top_mosaic','dsm').replace('png','tif').replace('area','matching_area')
dsm_image= misc.imread(base_dir_dsm+"/"+dsm_image_name)
ndsm_image_name= dsm_image_name.replace('.tif','')+"_normalized.jpg"
ndsm_image= misc.imread(base_dir_ndsm+"/"+ndsm_image_name)
A_image_name = "A"+ndsm_image_name.replace('dsm_09cm_matching_area','').replace('_normalized.jpg','.tif')
A_image = misc.imread(base_dir_A + "/"+ A_image_name)
azi_image_name = A_image_name.replace('A','azi')
azi_image = misc.imread(base_dir_azi+"/"+azi_image_name)
B_image_name = A_image_name.replace('A', 'B')
B_image = misc.imread(base_dir_B + "/" + B_image_name)
ele_image_name = A_image_name.replace('A', 'ele')
ele_image = misc.imread(base_dir_ele + "/" + ele_image_name)
entpy_image_name = A_image_name.replace('A', 'entpy')
entpy_image = misc.imread(base_dir_entpy + "/" + entpy_image_name)
entpy2_image_name = A_image_name.replace('A', 'entpy2')
entpy2_image = misc.imread(base_dir_entpy2 + "/" + entpy2_image_name)
L_image_name = A_image_name.replace('A', 'L')
L_image = misc.imread(base_dir_L + "/" + L_image_name)
ndvi_image_name = A_image_name.replace('A', 'ndvi')
ndvi_image = misc.imread(base_dir_ndvi + "/" + ndvi_image_name)
sat_image_name = A_image_name.replace('A', 'sat')
sat_image = misc.imread(base_dir_sat + "/" + sat_image_name)
texton_image_name = A_image_name.replace('A', 'texton')
texton_image = misc.imread(base_dir_texton + "/" + texton_image_name)
width= np.shape(top_image)[1]
height= np.shape(top_image)[0]
for i in range(num_cropping_per_image):
x = int(np.random.uniform(0, height - image_size + 1))
y = int(np.random.uniform(0, width - image_size + 1))
print((x,y))
top_image_cropped= top_image[x:x + image_size, y:y + image_size, :]
ndsm_image_cropped= ndsm_image[x:x + image_size, y:y + image_size]
ndsm_image_cropped= np.expand_dims(ndsm_image_cropped,axis=2)
dsm_image_cropped= dsm_image[x:x + image_size, y:y + image_size]
dsm_image_cropped= np.expand_dims(dsm_image_cropped,axis=2)
A_image_cropped = A_image[x:x + image_size, y:y + image_size]
A_image_cropped = np.expand_dims(A_image_cropped, axis=2)
azi_image_cropped = azi_image[x:x + image_size, y:y + image_size]
azi_image_cropped = np.expand_dims(azi_image_cropped, axis=2)
B_image_cropped = B_image[x:x + image_size, y:y + image_size]
B_image_cropped = np.expand_dims(B_image_cropped, axis=2)
ele_image_cropped = ele_image[x:x + image_size, y:y + image_size]
ele_image_cropped = np.expand_dims(ele_image_cropped, axis=2)
entpy_image_cropped = entpy_image[x:x + image_size, y:y + image_size]
entpy_image_cropped = np.expand_dims(entpy_image_cropped, axis=2)
entpy2_image_cropped = entpy2_image[x:x + image_size, y:y + image_size]
entpy2_image_cropped = np.expand_dims(entpy2_image_cropped, axis=2)
L_image_cropped = L_image[x:x + image_size, y:y + image_size]
L_image_cropped = np.expand_dims(L_image_cropped, axis=2)
ndvi_image_cropped = ndvi_image[x:x + image_size, y:y + image_size]
ndvi_image_cropped = np.expand_dims(ndvi_image_cropped, axis=2)
sat_image_cropped = sat_image[x:x + image_size, y:y + image_size]
sat_image_cropped = np.expand_dims(sat_image_cropped, axis=2)
texton_image_cropped = texton_image[x:x + image_size, y:y + image_size]
texton_image_cropped = np.expand_dims(texton_image_cropped, axis=2)
array_to_save= np.concatenate((top_image_cropped,ndsm_image_cropped,dsm_image_cropped, A_image_cropped,
azi_image_cropped, B_image_cropped, ele_image_cropped, entpy_image_cropped, entpy2_image_cropped,
L_image_cropped, ndvi_image_cropped, sat_image_cropped, texton_image_cropped),axis=2).astype(dtype=np.float16)
np.save(os.path.join(base_dir_train, os.path.splitext(filename)[0] + "_" + str(i)+".npy"),array_to_save)
annotation_image_cropped= annotation_image[x:x + image_size, y:y + image_size]
misc.imsave(os.path.join(base_dir_train_validate_gt, os.path.splitext(filename)[0] + "_" + str(i) + ".png"), annotation_image_cropped)
return None
def create_validation_dataset():
for filename in validate_image:
top_image = misc.imread(os.path.join(base_dir_top, os.path.splitext(filename)[0] + ".tif"))
annotation_image = misc.imread(os.path.join(base_dir_annotations, filename))
dsm_image_name = filename.replace('top_mosaic', 'dsm').replace('png', 'tif').replace('area','matching_area')
dsm_image = misc.imread(base_dir_dsm + "/" + dsm_image_name)
ndsm_image_name = dsm_image_name.replace('.tif', '') + "_normalized.jpg"
ndsm_image = misc.imread(base_dir_ndsm + "/" + ndsm_image_name)
width = np.shape(top_image)[1]
height = np.shape(top_image)[0]
for i in range(num_cropping_per_image):
x = int(np.random.uniform(0, height - image_size + 1))
y = int(np.random.uniform(0, width - image_size + 1))
print((x, y))
top_image_cropped = top_image[x:x + image_size, y:y + image_size, :]
ndsm_image_cropped = ndsm_image[x:x + image_size, y:y + image_size]
ndsm_image_cropped = np.expand_dims(ndsm_image_cropped, axis=2)
dsm_image_cropped = dsm_image[x:x + image_size, y:y + image_size]
dsm_image_cropped = np.expand_dims(dsm_image_cropped, axis=2)
array_to_save = np.concatenate((top_image_cropped, ndsm_image_cropped, dsm_image_cropped), axis=2).astype(dtype=np.float16)
np.save(os.path.join(base_dir_validate, os.path.splitext(filename)[0] + "_" + str(i) + ".npy"), array_to_save)
# misc.imsave(os.path.join(base_dir_train, os.path.splitext(filename)[0] + "_" + str(i) + ".tif"), top_image_cropped)
annotation_image_cropped = annotation_image[x:x + image_size, y:y + image_size]
misc.imsave(os.path.join(base_dir_train_validate_gt, os.path.splitext(filename)[0] + "_" + str(i) + ".png"),
annotation_image_cropped)
return None
if __name__=="__main__":
create_training_dataset()
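# Rough size check (added note, not in the original script): each saved patch is
# 224 x 224 x 15 float16 values = 1,505,280 bytes (about 1.4 MiB), so the default
# 3333 crops per tile produce on the order of 5 GB of .npy files per source image.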
| [
"[email protected]"
]
| |
1ad8e42d8d9f216254a022a78cade94d1ffdf242 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20191209/example_metashape/conf2.py | 24069b8dfc3798d4637b5f160b6a247571a22e1b | []
| no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,089 | py | from __future__ import annotations
import typing as t
import dataclasses
@dataclasses.dataclass
class Toplevel:
site_name: str = "Material for MkDocs"
class Theme:
name: str = "material"
language: str = "en"
class Palette:
primary: str = "indigo"
accent: str = "indigo"
palette: t.Type["Toplevel.Theme.Palette"] = Palette
class Font:
text: str = "Roboto"
code: str = "Roboto Mono"
font: t.Type["Toplevel.Theme.Font"] = Font
theme: t.Type["Toplevel.Theme"] = Theme
# hmm
#
# diff --git a/metashape/analyze/typeinfo.py b/metashape/analyze/typeinfo.py
# index ede8971..9074fc5 100644
# --- a/metashape/analyze/typeinfo.py
# +++ b/metashape/analyze/typeinfo.py
# @@ -196,6 +196,8 @@ def typeinfo(
# is_optional=is_optional,
# )
# else:
# + inner = typing_inspect.get_args(typ)[0]
# + return typeinfo(inner)
# raise ValueError(f"unsuported type %{typ}")
# supertypes = []
| [
"[email protected]"
]
| |
d7c44772d25a6fdde5ccc1ecb583a18f77d7c22e | 2c926b4847a44c7f831d47ed0160751d3248e8f4 | /venv/lib/python3.8/site-packages/hubspot/crm/objects/models/batch_input_simple_public_object_input.py | 20216ea23651ee62851a64e4df5154661908c214 | []
| no_license | Women-in-Tech-Society/WITS_Site | c42cd2c9abe1b5515b80be82dc876a6c3842e42a | 5dbf22f5ee5a36358f6f279af4c13d86d31653c5 | refs/heads/main | 2023-05-11T02:34:05.531902 | 2021-06-01T01:05:12 | 2021-06-01T01:05:12 | 278,658,100 | 0 | 5 | null | 2022-11-22T18:41:35 | 2020-07-10T14:43:28 | Python | UTF-8 | Python | false | false | 4,993 | py | # coding: utf-8
"""
CRM Objects
CRM objects such as companies, contacts, deals, line items, products, tickets, and quotes are standard objects in HubSpot’s CRM. These core building blocks support custom properties, store critical information, and play a central role in the HubSpot application. ## Supported Object Types This API provides access to collections of CRM objects, which return a map of property names to values. Each object type has its own set of default properties, which can be found by exploring the [CRM Object Properties API](https://developers.hubspot.com/docs/methods/crm-properties/crm-properties-overview). |Object Type |Properties returned by default | |--|--| | `companies` | `name`, `domain` | | `contacts` | `firstname`, `lastname`, `email` | | `deals` | `dealname`, `amount`, `closedate`, `pipeline`, `dealstage` | | `products` | `name`, `description`, `price` | | `tickets` | `content`, `hs_pipeline`, `hs_pipeline_stage`, `hs_ticket_category`, `hs_ticket_priority`, `subject` | Find a list of all properties for an object type using the [CRM Object Properties](https://developers.hubspot.com/docs/methods/crm-properties/get-properties) API. e.g. `GET https://api.hubapi.com/properties/v2/companies/properties`. Change the properties returned in the response using the `properties` array in the request body. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.objects.configuration import Configuration
class BatchInputSimplePublicObjectInput(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"inputs": "list[SimplePublicObjectInput]"}
attribute_map = {"inputs": "inputs"}
def __init__(self, inputs=None, local_vars_configuration=None): # noqa: E501
"""BatchInputSimplePublicObjectInput - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._inputs = None
self.discriminator = None
self.inputs = inputs
@property
def inputs(self):
"""Gets the inputs of this BatchInputSimplePublicObjectInput. # noqa: E501
:return: The inputs of this BatchInputSimplePublicObjectInput. # noqa: E501
:rtype: list[SimplePublicObjectInput]
"""
return self._inputs
@inputs.setter
def inputs(self, inputs):
"""Sets the inputs of this BatchInputSimplePublicObjectInput.
:param inputs: The inputs of this BatchInputSimplePublicObjectInput. # noqa: E501
:type: list[SimplePublicObjectInput]
"""
if (
self.local_vars_configuration.client_side_validation and inputs is None
): # noqa: E501
raise ValueError(
"Invalid value for `inputs`, must not be `None`"
) # noqa: E501
self._inputs = inputs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchInputSimplePublicObjectInput):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BatchInputSimplePublicObjectInput):
return True
return self.to_dict() != other.to_dict()
c3c267c38c57dffabbb56aeea85b6750efebb684 | 57d964ebf76d2462f21017ec68e124b6728d8ccb | /setup.py | e663edcf79392ac1efe703d50ba0012944b55225 | [
"MIT"
]
| permissive | chaeminlim/netron | a2983ada9d803549a3266e1a5922894b39226b26 | d75991128647a636425c746205b0a28a21d40e07 | refs/heads/master | 2021-01-15T03:54:05.742616 | 2020-03-05T10:45:53 | 2020-03-05T10:45:53 | 242,869,800 | 0 | 0 | MIT | 2020-02-24T23:56:50 | 2020-02-24T23:56:49 | null | UTF-8 | Python | false | false | 6,854 | py | #!/usr/bin/env python
import distutils
import io
import json
import os
import setuptools
import setuptools.command.build_py
import distutils.command.build
node_dependencies = [
( 'netron', [
'node_modules/d3/dist/d3.min.js',
'node_modules/dagre/dist/dagre.min.js',
'node_modules/handlebars/dist/handlebars.min.js',
'node_modules/marked/marked.min.js',
'node_modules/pako/dist/pako.min.js',
'node_modules/long/dist/long.js',
'node_modules/protobufjs/dist/protobuf.min.js',
'node_modules/protobufjs/ext/prototxt/prototxt.js',
'node_modules/flatbuffers/js/flatbuffers.js' ] )
]
class build(distutils.command.build.build):
user_options = distutils.command.build.build.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.version = None
def finalize_options(self):
distutils.command.build.build.finalize_options(self)
def run(self):
build_py.version = bool(self.version)
return distutils.command.build.build.run(self)
class build_py(setuptools.command.build_py.build_py):
user_options = setuptools.command.build_py.build_py.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
setuptools.command.build_py.build_py.initialize_options(self)
self.version = None
def finalize_options(self):
setuptools.command.build_py.build_py.finalize_options(self)
def run(self):
setuptools.command.build_py.build_py.run(self)
for target, files in node_dependencies:
target = os.path.join(self.build_lib, target)
if not os.path.exists(target):
os.makedirs(target)
for file in files:
self.copy_file(file, target)
def build_module(self, module, module_file, package):
setuptools.command.build_py.build_py.build_module(self, module, module_file, package)
if build_py.version and module == '__version__':
package = package.split('.')
outfile = self.get_module_outfile(self.build_lib, package, module)
with open(outfile, 'w+') as f:
f.write("__version__ = '" + package_version() + "'\n")
def package_version():
folder = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(folder, 'package.json')) as package_file:
package_manifest = json.load(package_file)
return package_manifest['version']
setuptools.setup(
name="netron",
version=package_version(),
description="Viewer for neural network, deep learning and machine learning models",
long_description='Netron is a viewer for neural network, deep learning and machine learning models.\n\n' +
'Netron supports **ONNX** (`.onnx`, `.pb`), **Keras** (`.h5`, `.keras`), **Core ML** (`.mlmodel`), **Caffe** (`.caffemodel`, `.prototxt`), **Caffe2** (`predict_net.pb`), **Darknet** (`.cfg`), **MXNet** (`.model`, `-symbol.json`), ncnn (`.param`) and **TensorFlow Lite** (`.tflite`). Netron has experimental support for **TorchScript** (`.pt`, `.pth`), **PyTorch** (`.pt`, `.pth`), **Torch** (`.t7`), **ArmNN** (`.armnn`), **BigDL** (`.bigdl`, `.model`), **Chainer** (`.npz`, `.h5`), **CNTK** (`.model`, `.cntk`), **Deeplearning4j** (`.zip`), **PaddlePaddle** (`__model__`), **MediaPipe** (`.pbtxt`), **ML.NET** (`.zip`), MNN (`.mnn`), **OpenVINO** (`.xml`), **scikit-learn** (`.pkl`), **TensorFlow.js** (`model.json`, `.pb`) and **TensorFlow** (`.pb`, `.meta`, `.pbtxt`, `.ckpt`, `.index`).',
keywords=[
        'onnx', 'keras', 'tensorflow', 'tflite', 'coreml', 'mxnet', 'caffe', 'caffe2', 'torchscript', 'pytorch', 'ncnn', 'mnn', 'openvino', 'darknet', 'paddlepaddle', 'chainer',
'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
'visualizer', 'viewer'
],
license="MIT",
cmdclass={
'build': build,
'build_py': build_py
},
package_dir={
'netron': 'src'
},
packages=[
'netron'
],
package_data={
'netron': [
'favicon.ico', 'icon.png',
'base.js',
'numpy.js', 'pickle.js', 'hdf5.js', 'bson.js',
'zip.js', 'tar.js', 'gzip.js',
'armnn.js', 'armnn-metadata.json', 'armnn-schema.js',
'bigdl.js', 'bigdl-metadata.json', 'bigdl-proto.js',
'caffe.js', 'caffe-metadata.json', 'caffe-proto.js',
'caffe2.js', 'caffe2-metadata.json', 'caffe2-proto.js',
'chainer.js',
'cntk.js', 'cntk-metadata.json', 'cntk-proto.js',
'coreml.js', 'coreml-metadata.json', 'coreml-proto.js',
'darknet.js', 'darknet-metadata.json',
'dl4j.js', 'dl4j-metadata.json',
'flux.js', 'flux-metadata.json',
'keras.js', 'keras-metadata.json',
'mediapipe.js',
'mlnet.js', 'mlnet-metadata.json',
'mnn.js', 'mnn-metadata.json', 'mnn-schema.js',
'mxnet.js', 'mxnet-metadata.json',
'ncnn.js', 'ncnn-metadata.json',
'onnx.js', 'onnx-metadata.json', 'onnx-proto.js',
'openvino.js', 'openvino-metadata.json', 'openvino-parser.js',
'paddle.js', 'paddle-metadata.json', 'paddle-proto.js',
'pytorch.js', 'pytorch-metadata.json', 'python.js',
'sklearn.js', 'sklearn-metadata.json',
'tf.js', 'tf-metadata.json', 'tf-proto.js',
'tflite.js', 'tflite-metadata.json', 'tflite-schema.js',
'torch.js', 'torch-metadata.json',
'index.html', 'index.js',
'view-grapher.css', 'view-grapher.js',
'view-sidebar.css', 'view-sidebar.js',
'view.js',
'server.py'
]
},
install_requires=[],
author='Lutz Roeder',
author_email='[email protected]',
url='https://github.com/lutzroeder/netron',
entry_points={
'console_scripts': [ 'netron = netron:main' ]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization'
]
)
ded0b78eb650a9a7b1f02e68ec43f07f81a9da48 | 2b020a49e5c2bff241fd1a99fc31531ea2b6f8c1 | /pyLeetCode/S11_1_Container_With_Most_Water.py | 6e8aa6b416c2cb0808fe6774a90a5b807468c13c | []
| no_license | yangze01/algorithm | 7855461430dc0a5abcc8f1a94fda9318a0653e3e | 44968c3fd2ce02bd9ab18d02b487401a0d72c1a8 | refs/heads/master | 2021-01-11T11:09:54.668345 | 2018-04-08T15:04:11 | 2018-04-08T15:04:11 | 78,757,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
height = [1,2,3,4,5,6,7]
class Solution(object):
def maxArea(self,height):
max_val = 0
for i in range(0,len(height)-1):
for j in range(i+1,len(height)):
tmp = (j-i)*min(height[i],height[j])
if(tmp>max_val):
max_val = tmp
return max_val
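# The nested loops above are O(n^2). A common alternative (added here for
# illustration, not part of the original solution) is the two-pointer scan,
# which finds the same answer in O(n): always move the pointer at the shorter
# line inward, because keeping it can never produce a larger area.
class SolutionTwoPointer(object):
    def maxArea(self, height):
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            best = max(best, (right - left) * min(height[left], height[right]))
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best
# Example: SolutionTwoPointer().maxArea([1, 2, 3, 4, 5, 6, 7]) returns 12, the
# same value as the brute force above.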
if __name__ == "__main__":
solution = Solution()
print(solution.maxArea(height))
5330811aaf49a7feff3c2159e41445feaa2201d3 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /scripts_from_Astrocyte/scripts/mridataSort.py | 2ec9b124d72314d9b7234cf11aa592e10cf844cc | [
"BSD-2-Clause"
]
| permissive | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,926 | py | import os, shutil, csv
src = '/media/katie/storage/PanicPTSD/data/raw_data'
dst = '/media/katie/storage/PanicPTSD/data-neworg/Panic/'
lst = '/media/katie/storage/PanicPTSD/data/PanicSubjListPHI.csv'
lst = open(lst)
def copy_and_rename(sub, exam, time):
for folder in os.listdir(src):
if folder == exam:
for root, dirs, files in os.walk(src + "/" + folder):
print dirs
anatcount = 0
simoncount = 0
affectcount = 0
for d in dirs:
if 'spgr' in d or 'SPGR' in d:
anatcount += 1
print anatcount
print "anat ", d
newname = exam + "-Anat-" + str(anatcount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'anatomical', time, newname)
shutil.copytree(srcdir, dstdir)
                        recstring = os.path.join(dstdir, newname + '-record.txt')
                        record = open(recstring, 'w')
                        record.write(srcdir)
                        record.close()
if 'simon' in d or 'SIMON' in d or 'rapid' in d or 'RAPID' in d:
simoncount += 1
print "simon ", d
newname = exam + "-Simon-" + str(simoncount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'functional/simon/', time, newname)
shutil.copytree(srcdir, dstdir)
                        recstring = os.path.join(dstdir, newname + '-record.txt')
                        record = open(recstring, 'w')
                        record.write(srcdir)
                        record.close()
if 'affect' in d or 'AFFECT' in d:
affectcount += 1
print "affect ", d
newname = exam + "-Affect-" + str(affectcount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'functional/affect/', time, newname)
shutil.copytree(srcdir, dstdir)
                        recstring = os.path.join(dstdir, newname + '-record.txt')
                        record = open(recstring, 'w')
                        record.write(srcdir)
                        record.close()
def makedir(dirlist):
newpath = ''
newdir = ''
for d in dirlist:
newpath = os.path.join(newpath, d)
for d in dirlist[0:-1]:
newdir = os.path.join(newdir, d)
endpath = dirlist[-1]
if endpath in os.listdir(newdir):
pass
else:
os.mkdir(newpath)
exam1 = 'zzzzz'
exam2 = 'zzzzz'
exam3 = 'zzzzz'
exam4 = 'zzzzz'
for subj in csv.DictReader(lst, dialect='excel', delimiter='\t'):
keys = [key for key in subj.keys()]
print keys
if subj['Study No.'] == '1010' or subj['Study No.'] == '1029':
subj_id = subj['Study No.'] + '-' + subj['PPID']
makedir([dst, subj_id])
makedir([dst, subj_id, 'anatomical'])
makedir([dst, subj_id, 'functional'])
makedir([dst, subj_id, 'functional', 'affect'])
makedir([dst, subj_id, 'functional', 'simon'])
if subj['Exam 1']:
exam1 = subj['Exam 1']
copy_and_rename(subj_id, exam1, 'Time1')
if subj['Exam 2']:
exam2 = subj['Exam 2']
copy_and_rename(subj_id, exam2, 'Time2')
if subj['Exam 3']:
exam3 = subj['Exam 3']
copy_and_rename(subj_id, exam3, 'Time3')
if subj['Exam 4']:
            exam4 = subj['Exam 4']
            copy_and_rename(subj_id, exam4, 'Time4')
| [
"[email protected]"
]
| |
f6316387c1470593c9210fdc9d875d597262e49b | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/openmax_dl/dl/dl.gyp | d43f546805dcdbd4d725192e3e9f3ecd3cc8e5a8 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
]
| permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 10,916 | gyp | # Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'variables' : {
# Override this value to build with small float FFT tables
'big_float_fft%' : 1,
},
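  # (Assumed usage) The trailing '%' marks big_float_fft as a default, so callers
  # can override it (for example by adding big_float_fft=0 to GYP_DEFINES before
  # regenerating the build files) to get the small float FFT tables instead.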
'targets': [
{
'target_name': 'openmax_dl',
'type': 'static_library',
'include_dirs': [
'../',
],
'direct_dependent_settings': {
'include_dirs': [
'../',
],
},
'sources': [
'api/omxtypes.h',
'sp/api/omxSP.h',
'sp/src/armSP_FFT_F32TwiddleTable.c',
],
'conditions' : [
['big_float_fft == 1', {
'defines': [
'BIG_FFT_TABLE',
],
}],
['target_arch=="arm" or target_arch=="arm64"', {
'sources':[
# Common files that are used by both arm and arm64 code.
'api/arm/armOMX.h',
'api/arm/omxtypes_s.h',
'sp/api/armSP.h',
'sp/src/arm/armSP_FFT_S32TwiddleTable.c',
'sp/src/arm/omxSP_FFTGetBufSize_C_FC32.c',
'sp/src/arm/omxSP_FFTGetBufSize_C_SC32.c',
'sp/src/arm/omxSP_FFTGetBufSize_R_F32.c',
'sp/src/arm/omxSP_FFTGetBufSize_R_S32.c',
'sp/src/arm/omxSP_FFTInit_C_FC32.c',
'sp/src/arm/omxSP_FFTInit_R_F32.c',
],
}],
['target_arch=="arm"', {
'cflags!': [
'-mfpu=vfpv3-d16',
],
'cflags': [
# We enable Neon instructions even with arm_neon==0, to support
# runtime detection.
'-mfpu=neon',
],
'dependencies': [
'openmax_dl_armv7',
],
'sources': [
# Common files that are used by both the NEON and non-NEON code.
'api/armCOMM_s.h',
'sp/src/arm/omxSP_FFTGetBufSize_C_SC16.c',
'sp/src/arm/omxSP_FFTGetBufSize_R_S16.c',
'sp/src/arm/omxSP_FFTGetBufSize_R_S16S32.c',
'sp/src/arm/omxSP_FFTInit_C_SC16.c',
'sp/src/arm/omxSP_FFTInit_C_SC32.c',
'sp/src/arm/omxSP_FFTInit_R_S16.c',
'sp/src/arm/omxSP_FFTInit_R_S16S32.c',
'sp/src/arm/omxSP_FFTInit_R_S32.c',
# Complex 32-bit fixed-point FFT.
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix2_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix4_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix4_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix2_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix4_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC32_Radix8_fs_unsafe_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CToC_SC32_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_CToC_SC32_Sfs_s.S',
# Real 32-bit fixed-point FFT
'sp/src/arm/neon/armSP_FFTInv_CCSToR_S32_preTwiddleRadix2_unsafe_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_RToCCS_S32_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CCSToR_S32_Sfs_s.S',
# Complex 16-bit fixed-point FFT
'sp/src/arm/neon/armSP_FFTInv_CCSToR_S16_preTwiddleRadix2_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix2_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix2_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix2_ps_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix2_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix4_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix4_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix4_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_SC16_Radix8_fs_unsafe_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_CToC_SC16_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CToC_SC16_Sfs_s.S',
# Real 16-bit fixed-point FFT
'sp/src/arm/neon/omxSP_FFTFwd_RToCCS_S16_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CCSToR_S16_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_RToCCS_S16S32_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CCSToR_S32S16_Sfs_s.S',
# Complex floating-point FFT
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix2_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix4_fs_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix4_ls_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix2_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix4_unsafe_s.S',
'sp/src/arm/neon/armSP_FFT_CToC_FC32_Radix8_fs_unsafe_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CToC_FC32_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_CToC_FC32_Sfs_s.S',
# Real floating-point FFT
'sp/src/arm/neon/armSP_FFTInv_CCSToR_F32_preTwiddleRadix2_unsafe_s.S',
'sp/src/arm/neon/omxSP_FFTFwd_RToCCS_F32_Sfs_s.S',
'sp/src/arm/neon/omxSP_FFTInv_CCSToR_F32_Sfs_s.S',
],
}],
['target_arch=="ia32" or target_arch=="x64"', {
'cflags': [
'-msse2',
],
'sources': [
# Real 32-bit floating-point FFT.
'sp/api/x86SP.h',
'sp/src/x86/omxSP_FFTFwd_RToCCS_F32_Sfs.c',
'sp/src/x86/omxSP_FFTGetBufSize_R_F32.c',
'sp/src/x86/omxSP_FFTInit_R_F32.c',
'sp/src/x86/omxSP_FFTInv_CCSToR_F32_Sfs.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix2_fs.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix2_ls.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix2_ls_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix2_ms.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_fs.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_fs_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_ls.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_ls_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_ms.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Fwd_Radix4_ms_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix2_fs.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix2_ls.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix2_ls_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix2_ms.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_fs.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_fs_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_ls.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_ls_sse.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_ms.c',
'sp/src/x86/x86SP_FFT_CToC_FC32_Inv_Radix4_ms_sse.c',
'sp/src/x86/x86SP_FFT_F32_radix2_kernel.c',
'sp/src/x86/x86SP_FFT_F32_radix4_kernel.c',
'sp/src/x86/x86SP_SSE_Math.h',
],
}],
['target_arch=="arm64"', {
'sources':[
'api/arm/arm64COMM_s.h',
# Complex floating-point FFT
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix2_fs_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix2_ls_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix2_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix4_fs_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix4_ls_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix4_s.S',
'sp/src/arm/arm64/armSP_FFT_CToC_FC32_Radix8_fs_s.S',
'sp/src/arm/arm64/omxSP_FFTInv_CToC_FC32.c',
'sp/src/arm/arm64/omxSP_FFTFwd_CToC_FC32.c',
# Real floating-point FFT
'sp/src/arm/arm64/armSP_FFTInv_CCSToR_F32_preTwiddleRadix2_s.S',
'sp/src/arm/arm64/omxSP_FFTFwd_RToCCS_F32.c',
'sp/src/arm/arm64/ComplexToRealFixup.S',
'sp/src/arm/arm64/omxSP_FFTInv_CCSToR_F32.c',
],
}],
['target_arch=="mipsel"', {
'cflags': [
'-std=c99',
],
'sources!': [
'sp/src/armSP_FFT_F32TwiddleTable.c',
],
'sources': [
'sp/api/mipsSP.h',
'sp/src/mips/mips_FFTFwd_RToCCS_F32_complex.c',
'sp/src/mips/mips_FFTFwd_RToCCS_F32_real.c',
'sp/src/mips/mips_FFTInv_CCSToR_F32_complex.c',
'sp/src/mips/mips_FFTInv_CCSToR_F32_real.c',
'sp/src/mips/omxSP_FFT_F32TwiddleTable.c',
'sp/src/mips/omxSP_FFTFwd_RToCCS_F32_Sfs.c',
'sp/src/mips/omxSP_FFTGetBufSize_R_F32.c',
'sp/src/mips/omxSP_FFTInit_R_F32.c',
'sp/src/mips/omxSP_FFTInv_CCSToR_F32_Sfs.c',
],
}],
],
},
],
'conditions': [
['target_arch=="arm"', {
'targets': [
{
# Non-NEON implementation of FFT. This library is NOT
# standalone. Applications must link with openmax_dl.
'target_name': 'openmax_dl_armv7',
'type': 'static_library',
'includes': [
'../../../build/android/cpufeatures.gypi',
],
'include_dirs': [
'../',
],
'cflags!': [
'-mfpu=neon',
],
'link_settings' : {
'libraries': [
# To get the __android_log_print routine
'-llog',
],
},
'sources': [
# Detection routine
'sp/src/arm/detect.c',
# Complex floating-point FFT
'sp/src/arm/armv7/armSP_FFT_CToC_FC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/armv7/armSP_FFT_CToC_FC32_Radix2_fs_unsafe_s.S',
'sp/src/arm/armv7/armSP_FFT_CToC_FC32_Radix4_fs_unsafe_s.S',
'sp/src/arm/armv7/armSP_FFT_CToC_FC32_Radix4_unsafe_s.S',
'sp/src/arm/armv7/armSP_FFT_CToC_FC32_Radix8_fs_unsafe_s.S',
'sp/src/arm/armv7/omxSP_FFTInv_CToC_FC32_Sfs_s.S',
'sp/src/arm/armv7/omxSP_FFTFwd_CToC_FC32_Sfs_s.S',
# Real floating-point FFT
'sp/src/arm/armv7/armSP_FFTInv_CCSToR_F32_preTwiddleRadix2_unsafe_s.S',
'sp/src/arm/armv7/omxSP_FFTFwd_RToCCS_F32_Sfs_s.S',
'sp/src/arm/armv7/omxSP_FFTInv_CCSToR_F32_Sfs_s.S',
],
},
],
}],
],
}
| [
"[email protected]"
]
| |
124e013441d004398f64e0732e3bf47043367432 | ce6271f3dc32cf374e4dde5e4666e80242e83fde | /grow/partials/partial_test.py | c9db7c2b496316826deeb59f55989568296bcb5e | [
"MIT"
]
| permissive | kmcnellis/grow | 26ab42e051906a1aaa28e52aae585b5ed5c497a9 | 4787f5a01681ef0800e9b4388a56cdbc48209368 | refs/heads/master | 2020-04-18T09:44:35.950251 | 2019-01-24T22:05:06 | 2019-01-24T22:05:06 | 167,445,373 | 1 | 0 | MIT | 2019-01-24T22:07:44 | 2019-01-24T22:07:44 | null | UTF-8 | Python | false | false | 1,289 | py | """Test the pod partial."""
import unittest
from grow import storage
from grow.pods import pods
from grow.testing import testing
class PartialTestCase(unittest.TestCase):
"""Tests for partials."""
def setUp(self):
dir_path = testing.create_test_pod_dir()
self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
def test_editor_config(self):
"""Test that editor configuration is read correctly."""
partials = self.pod.partials
partial = partials.get_partial('hero')
expected = {
'label': 'Hero',
'editor': {
'fields': [
{
'type': 'text',
'key': 'title',
'label': 'Hero Title'
}, {
'type': 'text',
'key': 'subtitle',
'label': 'Hero Subtitle'
},
{
'type': 'markdown',
'key': 'description',
'label': 'Description'
},
],
},
}
self.assertEquals(expected, partial.editor_config)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
972103bb17ed3d5a13dc49f753c7d97fdf963e30 | 705fa27fb794898a3ee52a4af8446d7ef8ea13f4 | /tests/constants/route_parser.py | 4d3a6dc8de7d6c0fb40031e61128aa7085a0f51e | [
"MIT"
]
| permissive | Mause/pytransperth | 1612063a0c9276ca9b0ae7399b2e9d15598c5dc3 | 411c6a38b8451dc917927bdc4fdb70aeb9acd52b | refs/heads/master | 2022-07-20T00:24:39.252527 | 2014-06-19T07:23:43 | 2014-06-19T07:23:43 | 16,773,465 | 0 | 0 | MIT | 2022-07-06T19:27:24 | 2014-02-12T16:25:48 | Python | UTF-8 | Python | false | false | 1,628 | py | import os
from lxml.html import builder as E
from lxml.etree import HTML
PATH = os.path.dirname(__file__)
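# lxml fixture fragments approximating the Transperth route HTML that the
# parser tests consume: a header row, per-mode step tables (bus/train/walk),
# a fare-link image, duration markup, and full route pages loaded from disk.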
with open(os.path.join(PATH, 'header.html')) as fh:
HEADER = HTML(fh.read()).find('body/tr')
STEPS = E.HTML(
E.TD(
E.DIV(
E.TABLE('STEP1'),
E.TABLE('STEP2'),
E.TABLE('EXCLUDED')
)
)
)
STEP_BUS = E.HTML(
E.TR(
E.TD(
E.IMG(alt="bus")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_TRAIN = E.HTML(
E.TR(
E.TD(
E.IMG(alt="train")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_WALK = E.HTML(
E.TR(
E.TD(
E.IMG(alt="walk")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_INVALID = E.HTML(
E.TR(
E.TD(
E.IMG(alt="invalid")
)
)
)
with open(os.path.join(PATH, 'misc.html')) as fh:
MISC = HTML(fh.read()).xpath('//html/body/tr')[0]
IMG = E.IMG(
onclick="getFares('11/11/1111', 1111)"
)
LINKS = E.HTML(
E.DIV(
E.IMG('ONE'),
E.IMG('TWO')
)
)
DURATION = E.HTML(
E.SPAN(
E.SPAN('IGNORED'),
E.SPAN('11:11 hrs')
)
)
with open(os.path.join(PATH, 'routes.html')) as fh:
ROUTES = fh.read()
| [
"[email protected]"
]
| |
d4d41250ca36686c6ae73595c77ed8af59c24f08 | bc86ba9fcbb19711e1af3891120a505d0da95a63 | /backend/mobile_12_dev_5928/urls.py | a8f3bd6fdb90b26e0566b75e6e456cc98ff17aec | []
| no_license | crowdbotics-apps/mobile-12-dev-5928 | 8c0281d1ee1ad08ad1cc47e6f0bfeafb630fb219 | d5a28035de34631d834f49931ada0c7b5c2b5324 | refs/heads/master | 2022-10-25T19:05:11.263550 | 2020-06-12T12:37:36 | 2020-06-12T12:37:36 | 271,712,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | """mobile_12_dev_5928 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "mobile 12"
admin.site.site_title = "mobile 12 Admin Portal"
admin.site.index_title = "mobile 12 Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="mobile 12 API",
default_version="v1",
description="API documentation for mobile 12 App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
]
| |
2be54c9eb80269c4b94a171aa565464b596c6fc1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/240230d6f4f34654aed81c439a0398cd.py | 6166c2b7d7b980ccf4335bc4439fcae7d5daf5a5 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 491 | py | def hey(statement):
if is_silence(statement):
return 'Fine. Be that way!'
if is_yelling(statement):
return 'Whoa, chill out!'
if is_question(statement):
return 'Sure.'
else:
return 'Whatever.'
def is_silence(statement):
if statement.isspace() or not statement:
return True
def is_yelling(statement):
if statement.isupper():
return True
def is_question(statement):
if statement.endswith('?'):
return True
| [
"[email protected]"
]
| |
940ee3c9cce97e80e85a1dcc48bf6688e73046d2 | bd649f51496a24a55a2327e658f31d6e03e2f602 | /InvTL/lm_py/py/apigen/project.py | 0f9fe198fd2c4884e281894e55ad3d2e3a5b2efb | [
"MIT"
]
| permissive | mickg10/DARLAB | 6507530231f749e8fc1647f3a9bec22a20bebe46 | 0cd8d094fcaf60a48a3b32f15e836fcb48d93e74 | refs/heads/master | 2020-04-15T20:39:23.403215 | 2019-01-10T06:54:50 | 2019-01-10T06:54:50 | 16,510,433 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | """ this contains the code that actually builds the pages using layout.py
building the docs happens in two passes: the first one takes care of
collecting contents and navigation items, the second builds the actual
HTML
"""
import py
from layout import LayoutPage
class Project(py.__.doc.confrest.Project):
""" a full project
this takes care of storing information on the first pass, and building
pages + indexes on the second
"""
def __init__(self):
self.content_items = {}
def add_item(self, path, content):
""" add a single item (page)
path is a (relative) path to the object, used for building links
and navigation
content is an instance of some py.xml.html item
"""
assert path not in self.content_items, 'duplicate path %s' % (path,)
self.content_items[path] = content
def build(self, outputpath):
""" convert the tree to actual HTML
uses the LayoutPage class below for each page and takes care of
building index documents for the root and each sub directory
"""
opath = py.path.local(outputpath)
opath.ensure(dir=True)
paths = self.content_items.keys()
paths.sort()
for path in paths:
# build the page using the LayoutPage class
page = self.Page(self, path, stylesheeturl=self.stylesheet)
page.contentspace.append(self.content_items[path])
ipath = opath.join(path)
if not ipath.dirpath().check():
# XXX create index.html(?)
ipath.ensure(file=True)
ipath.write(page.unicode().encode(self.encoding))
def process(self, txtpath):
""" this allows using the project from confrest """
# XXX not interesting yet, but who knows later (because of the
# cool nav)
if __name__ == '__main__':
# XXX just to have an idea of how to use this...
proj = Project()
here = py.path.local('.')
for fpath in here.visit():
if fpath.check(file=True):
proj.add_item(fpath, convert_to_html_somehow(fpath))
proj.build()
| [
"[email protected]"
]
| |
dd9000bb994165c09b867fb4b448862822153bc1 | b891f38eb12eeafdbcec9deee2320acfaac3a7ad | /0x01-python-if_else_loops_functions/5-print_comb2.py | 54df4340c244496ec2e94b9a6613cb44bdfb060f | []
| no_license | davixcky/holbertonschool-higher_level_programming | bb112af3e18994a46584ac3e78385e46c3d918f6 | fe4cd0e95ee976b93bd47c85c2bc810049f568fa | refs/heads/master | 2023-01-11T00:41:03.145968 | 2020-09-22T22:55:53 | 2020-09-22T22:55:53 | 259,390,611 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | #!/usr/bin/python3
for number in range(0, 99):
print("{:02d}, ".format(number), end='')
print("99")
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.