| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string (nullable ⌀) | 22 classes |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1–1 |
| author_id | string | length 1–132 |
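A minimal sketch of consuming records with this schema through the Hugging Face `datasets` library; the dataset path below is a placeholder, not the real dataset name:

    from datasets import load_dataset

    # Stream rows instead of downloading the full dump (content can reach 10.2M bytes).
    ds = load_dataset("user/python-source-dump", split="train", streaming=True)
    for row in ds:
        # Each record pairs provenance (repo_name, revision_id, license_type, ...)
        # with the raw file text in `content`.
        if not row["is_vendor"] and not row["is_generated"]:
            print(row["repo_name"], row["path"], row["length_bytes"])
            break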
a9b5c37d490d72e9375bcbdc7923a7dac1af0d0c | 01ac9e40052a468dd472a296df0003c4e629e2c9 | /news_all/spiders_old/tsw_all.py | c97cd4d9b74d039fbc585c3c4ee884ec2f287e8e | [] | no_license | Pintrue/news_all | b5cee16584ed92e6574edd825b574214df65d917 | eb8c32c79bdacd8e2f76b88f27871c3cd0118006 | refs/heads/master | 2022-03-23T13:34:10.354029 | 2019-11-22T07:40:50 | 2019-11-22T07:40:50 | 223,058,997 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
import re
class TswSpider(NewsRCSpider):
"""天山网"""
name = 'tsw'
mystart_urls = {
        'http://news.ts.cn/tp/index.shtml': 1301253,  # Tianshan Net (天山网) news center - photo news - left-hand list
}
rules = (
#http://news.ts.cn/system/2019/06/03/035719250.shtml
Rule(LinkExtractor(allow=(r'news.ts.cn.*?/%s/\d{2}/\d+.shtml' % datetime.today().strftime('%Y/%m'), ),
), callback='parse_item',
follow=False),
)
def parse_item(self, response):
xp = response.xpath
try:
title = xp("//h1[@class='active-title']/text()").extract_first()
source = xp("//p[@class='active-info2']")[0]
content_div = xp("//div[@class='hy-active']")[0]
pubtime = source.re(r'\d{2,4}年\d{1,2}月\d{1,2}日')[0]
# pubtime = xp("//div[@class='Remark']/span/text()").extract_first().split('|')[0]
            origin_name_div = xp('//p[@class="active-info2"]/text()').extract_first('')
            origin_name = re.findall(".*来源:(.*).*", origin_name_div)[0]
        except Exception:
return self.produce_debugitem(response, "xpath error")
content, media, _, _ = self.content_clean(content_div)
return self.produce_item(
response=response,
title=title,
# self.get_page_title(response).split('_')[0]
pubtime=pubtime,
origin_name=origin_name,
content=content,
media=media
)
| [
"[email protected]"
] | |
70797a92621d0c27c582ee280246e49b977d773d | 8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc | /toontown/hood/DGHoodAI.py | e4f84d08dd39839a3336652b6e2f10b55714760e | [] | no_license | RegDogg/ttr-2014-dev | eb0d9da3e91b9504b83804c27e1a00d87a0b7220 | 8a392ea4697cf15bd83accd01dcf26d0f87557eb | refs/heads/master | 2023-07-13T02:40:56.171517 | 2021-07-12T00:31:28 | 2021-07-12T00:31:28 | 372,103,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | from toontown.toonbase import ToontownGlobals
from toontown.safezone.DistributedDGFlowerAI import DistributedDGFlowerAI
from SZHoodAI import SZHoodAI
from toontown.toon import NPCToons
from toontown.safezone import ButterflyGlobals
from toontown.safezone.DistributedButterflyAI import DistributedButterflyAI
class DGHoodAI(SZHoodAI):
HOOD = ToontownGlobals.DaisyGardens
def createZone(self):
SZHoodAI.createZone(self)
self.butterflies = []
self.spawnObjects()
self.flower = DistributedDGFlowerAI(self.air)
self.flower.generateWithRequired(self.HOOD)
self.createButterflies()
def createButterflies(self):
playground = ButterflyGlobals.DG
for area in range(ButterflyGlobals.NUM_BUTTERFLY_AREAS[playground]):
for b in range(ButterflyGlobals.NUM_BUTTERFLIES[playground]):
butterfly = DistributedButterflyAI(self.air)
butterfly.setArea(playground, area)
butterfly.setState(0, 0, 0, 1, 1)
butterfly.generateWithRequired(self.HOOD)
self.butterflies.append(butterfly)
| [
"[email protected]"
] | |
af25fa5ee55d2f7965a59473f1165b20d44c87ed | 9c4f3a2d6d95f2f9a96d4a33f258c9dbbd73bbb3 | /raiden/storage/utils.py | 0ef042bd43e5384ae288246965fb7494900f8dcb | [
"MIT"
] | permissive | copra2005/raiden | b70d4a9c20fca19bc984aa7546da3b54ff22eea7 | 2afd6a0039107bb9bbe1d619b9ebfedc1373b566 | refs/heads/master | 2020-03-28T05:11:21.533314 | 2018-09-06T14:12:51 | 2018-09-06T20:53:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | DB_CREATE_SETTINGS = '''
CREATE TABLE IF NOT EXISTS settings (
name VARCHAR[24] NOT NULL PRIMARY KEY,
value TEXT
);
'''
DB_CREATE_STATE_CHANGES = '''
CREATE TABLE IF NOT EXISTS state_changes (
identifier INTEGER PRIMARY KEY AUTOINCREMENT,
data BINARY
);
'''
DB_CREATE_SNAPSHOT = '''
CREATE TABLE IF NOT EXISTS state_snapshot (
identifier INTEGER PRIMARY KEY,
statechange_id INTEGER,
data BINARY,
FOREIGN KEY(statechange_id) REFERENCES state_changes(identifier)
);
'''
DB_CREATE_STATE_EVENTS = '''
CREATE TABLE IF NOT EXISTS state_events (
identifier INTEGER PRIMARY KEY,
source_statechange_id INTEGER NOT NULL,
data BINARY,
FOREIGN KEY(source_statechange_id) REFERENCES state_changes(identifier)
);
'''
DB_SCRIPT_CREATE_TABLES = """
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
{}{}{}{}
COMMIT;
PRAGMA foreign_keys=on;
""".format(
DB_CREATE_SETTINGS,
DB_CREATE_STATE_CHANGES,
DB_CREATE_SNAPSHOT,
DB_CREATE_STATE_EVENTS,
)
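# Usage sketch (not part of the original module): the combined script can be
# applied to a SQLite connection in one call, e.g.
#
#     import sqlite3
#     conn = sqlite3.connect("raiden.db")   # path is illustrative
#     conn.executescript(DB_SCRIPT_CREATE_TABLES)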
| [
"[email protected]"
] | |
ec41eba4d760cb80e779607fef62ddb85cf78059 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/Tree/LC_124_Binary_Tree_Maximum_Path_Sum.py | 6a10be30d8460e31e6ec0ca49ac7de282569f3cf | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | """
Given a non-empty binary tree, find the maximum path sum.
For this problem, a path is defined as any sequence of nodes from some starting node to any node in the tree along the parent-child connections. The path must contain at least one node and does not need to go through the root.
Example 1:
Input: [1,2,3]
1
/ \
2 3
Output: 6
Example 2:
Input: [-10,9,20,null,null,15,7]
-10
/ \
9 20
/ \
15 7
Output: 42
Similar to Problem 687
https://leetcode.com/problems/binary-tree-maximum-path-sum/discuss/171807/Python-or-DFS-tm
Algorithm
Divide and conquer down to the leaves; on the way back up, return the larger of the
two child gains plus the current root.val:
cur = max(left, right) + root.val
This covers the paths that run from a node down a single side, i.e. root.val + left
and root.val + right.
The remaining case is a path of the form left + root.val + right; we compare it
against the global maximum whenever we update that variable. Values bubbled up from
the leaves are clamped at zero so negative subtrees are dropped: return cur if cur > 0 else 0
Now everything is ready to write down an algorithm.
1. Initialize max_sum as the smallest possible integer and call max_gain(node = root).
2. Implement max_gain(node) with a check to continue the old path/to start a new path:
- Base case : if node is null, the max gain is 0.
- Call max_gain recursively for the node children to compute max gain from the left and right subtrees : left_gain = max(max_gain(node.left), 0) and
right_gain = max(max_gain(node.right), 0).
- Now check to continue the old path or to start a new path. To start a new path would cost price_newpath = node.val + left_gain + right_gain. Update max_sum if it's better to start a new path.
- For the recursion return the max gain the node and one/zero of its subtrees could add to the current path : node.val + max(left_gain, right_gain).
Bottom-up divide and conquer:
At each node, three types of path can form:
1st: the node alone
2nd: left - node - right
3rd: left/right - node
After comparing them to update the maximum, we return the sum of the 1st or 3rd
path to the upper level (the 2nd cannot be extended upward).
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxPathSum(self, root: TreeNode) -> int:
if not root:
return
self.res = float('-inf')
self.helper(root)
return self.res
def helper(self, root):
if not root:
return 0
left = max(self.helper(root.left), 0)
right = max(self.helper(root.right), 0)
self.res = max(self.res, root.val + left + right)
return max(left, right) + root.val
| [
"[email protected]"
] | |
010427dfae0fe07018165b13f4ff05de9eb8ea7c | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j19059-4438/sdB_galex_j19059-4438_coadd.py | 21693045bdacdeb4c6ec6e927c32a01761f2fbab | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[286.482375,-44.643983], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j19059-4438/sdB_galex_j19059-4438_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j19059-4438/sdB_galex_j19059-4438_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
2dd11b0a111f15630d14d50fd54317a69dd5160c | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/dn5/M-69.py | 5f487e7ea6ba72a5ea5819aa9b5f57e667ecd5d4 | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,698 | py | import unittest
def unikati(s):
seznam_unik = []
for currUnik in s:
        if currUnik not in seznam_unik:
seznam_unik.append(currUnik)
return seznam_unik
def avtor(tvit):
ime = ""
for currChar in tvit:
if currChar != ':':
ime += currChar
else:
break
return ime
def vsi_avtorji(tviti):
avtorji = []
for tvit in tviti:
avtorji.append(avtor(tvit))
return unikati(avtorji)
def izloci_besedo(beseda):
for currChar in beseda:
if not currChar.isalnum():
beseda = beseda[1:]
else:
break
for currChar in reversed(beseda):
if not currChar.isalnum():
beseda = beseda[:-1]
else:
break
return beseda
def se_zacne_z(tvit, c):
besede = []
for currBeseda in tvit.split():
        if currBeseda[0] == c:
besede.append(izloci_besedo(currBeseda))
return besede
def zberi_se_zacne_z(tviti, c):
besede = []
for tvit in tviti:
besede.extend(se_zacne_z(tvit, c))
return unikati(besede)
def vse_afne(tviti):
return zberi_se_zacne_z(tviti, '@')
def vsi_hashtagi(tviti):
return zberi_se_zacne_z(tviti, '#')
def vse_osebe(tviti):
osebe = []
for tvit in tviti:
osebe.append(avtor(tvit))
osebe.extend(vse_afne(tviti))
osebe.sort()
return unikati(osebe)
def custva(tviti, hashtagi):
osebe = []
for tvit in tviti:
for hashtag in hashtagi:
            if hashtag in tvit:
osebe.append(avtor(tvit))
osebe.sort()
return unikati(osebe)
def se_poznata(tviti, oseba1, oseba2):
for tvit in tviti:
        if (oseba1 in se_zacne_z(tvit, '@') and avtor(tvit) == oseba2) or (oseba2 in se_zacne_z(tvit, '@') and avtor(tvit) == oseba1):
return True
return False
class TestTviti(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_unikat(self):
self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
self.assertEqual(unikati([1]), [1])
self.assertEqual(unikati([]), [])
self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
def test_avtor(self):
self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
self.assertEqual(avtor("ana: malo krajse ime"), "ana")
self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
def test_vsi_avtorji(self):
self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
def test_izloci_besedo(self):
self.assertEqual(izloci_besedo("@ana"), "ana")
self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
self.assertEqual(izloci_besedo("ana"), "ana")
self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
def test_vse_na_crko(self):
self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
def test_zberi_na_crko(self):
self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_afne(self):
self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
def test_vsi_hashtagi(self):
self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_osebe(self):
self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_custva(self):
self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
self.assertEqual(custva(self.tviti, ["meh"]), [])
def test_se_poznata(self):
self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
5180350f76117971d49278960829f3e352d29cdc | 79c2fe6cc0af8c9267901d891b85e226d4b6bc51 | /pontoon/base/tests/test_models.py | 4fc934d4f2c75d624a3863fa9c7ab422901d7b78 | [
"BSD-3-Clause"
] | permissive | SandraShklyaeva/pontoon | d1f1eab458bbab75e54b112e60d077d34b27699f | 577a3275a0cf4888ecc30622ba4449fe706d5fd6 | refs/heads/master | 2021-01-14T12:36:21.461908 | 2015-09-16T08:33:11 | 2015-09-16T08:33:11 | 31,669,331 | 0 | 0 | null | 2015-03-26T06:12:32 | 2015-03-04T17:14:22 | Python | UTF-8 | Python | false | false | 1,916 | py | from django_nose.tools import assert_equal
from pontoon.base.models import Translation
from pontoon.base.tests import (
TranslationFactory,
UserFactory,
TestCase
)
from pontoon.base.utils import aware_datetime
class TranslationQuerySetTests(TestCase):
def setUp(self):
self.user0, self.user1 = UserFactory.create_batch(2)
def _translation(self, user, submitted, approved):
return TranslationFactory.create(
date=aware_datetime(*submitted),
user=user,
approved_date=aware_datetime(*approved) if approved else None,
approved_user=user
)
def test_latest_activity_translated(self):
"""
If latest activity in Translation QuerySet is translation submission,
return submission date and user.
"""
latest_submission = self._translation(self.user0, submitted=(1970, 1, 3), approved=None)
latest_approval = self._translation(self.user1, submitted=(1970, 1, 1), approved=(1970, 1, 2))
assert_equal(Translation.objects.all().latest_activity(), {
'date': latest_submission.date,
'user': latest_submission.user
})
def test_latest_activity_approved(self):
"""
If latest activity in Translation QuerySet is translation approval,
return approval date and user.
"""
latest_submission = self._translation(self.user0, submitted=(1970, 1, 2), approved=(1970, 1, 2))
latest_approval = self._translation(self.user1, submitted=(1970, 1, 1), approved=(1970, 1, 3))
assert_equal(Translation.objects.all().latest_activity(), {
'date': latest_approval.date,
'user': latest_approval.user
})
def test_latest_activity_none(self):
"""If empty Translation QuerySet, return None."""
assert_equal(Translation.objects.none().latest_activity(), None)
| [
"[email protected]"
] | |
8a301610df2c32e3dbc459193320ddcef3aa07c1 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtWidgets/QWizard.py | e64a26ec442693c681c085d3e68237a30edacb0c | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,907 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from F:\Python\Python36\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
from .QDialog import QDialog
class QWizard(QDialog):
""" QWizard(parent: QWidget = None, flags: Union[Qt.WindowFlags, Qt.WindowType] = Qt.WindowFlags()) """
def actionEvent(self, *args, **kwargs): # real signature unknown
pass
def addPage(self, QWizardPage): # real signature unknown; restored from __doc__
""" addPage(self, QWizardPage) -> int """
return 0
def back(self): # real signature unknown; restored from __doc__
""" back(self) """
pass
def button(self, QWizard_WizardButton): # real signature unknown; restored from __doc__
""" button(self, QWizard.WizardButton) -> QAbstractButton """
return QAbstractButton
def buttonText(self, QWizard_WizardButton): # real signature unknown; restored from __doc__
""" buttonText(self, QWizard.WizardButton) -> str """
return ""
def changeEvent(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def cleanupPage(self, p_int): # real signature unknown; restored from __doc__
""" cleanupPage(self, int) """
pass
def closeEvent(self, *args, **kwargs): # real signature unknown
pass
def connectNotify(self, *args, **kwargs): # real signature unknown
pass
def contextMenuEvent(self, *args, **kwargs): # real signature unknown
pass
def create(self, *args, **kwargs): # real signature unknown
pass
def currentId(self): # real signature unknown; restored from __doc__
""" currentId(self) -> int """
return 0
def currentIdChanged(self, p_int): # real signature unknown; restored from __doc__
""" currentIdChanged(self, int) [signal] """
pass
def currentPage(self): # real signature unknown; restored from __doc__
""" currentPage(self) -> QWizardPage """
return QWizardPage
def customButtonClicked(self, p_int): # real signature unknown; restored from __doc__
""" customButtonClicked(self, int) [signal] """
pass
def customEvent(self, *args, **kwargs): # real signature unknown
pass
def destroy(self, *args, **kwargs): # real signature unknown
pass
def disconnectNotify(self, *args, **kwargs): # real signature unknown
pass
def done(self, p_int): # real signature unknown; restored from __doc__
""" done(self, int) """
pass
def dragEnterEvent(self, *args, **kwargs): # real signature unknown
pass
def dragLeaveEvent(self, *args, **kwargs): # real signature unknown
pass
def dragMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def dropEvent(self, *args, **kwargs): # real signature unknown
pass
def enterEvent(self, *args, **kwargs): # real signature unknown
pass
def event(self, QEvent): # real signature unknown; restored from __doc__
""" event(self, QEvent) -> bool """
return False
def eventFilter(self, *args, **kwargs): # real signature unknown
pass
def field(self, p_str): # real signature unknown; restored from __doc__
""" field(self, str) -> Any """
pass
def focusInEvent(self, *args, **kwargs): # real signature unknown
pass
def focusNextChild(self, *args, **kwargs): # real signature unknown
pass
def focusNextPrevChild(self, *args, **kwargs): # real signature unknown
pass
def focusOutEvent(self, *args, **kwargs): # real signature unknown
pass
def focusPreviousChild(self, *args, **kwargs): # real signature unknown
pass
def hasVisitedPage(self, p_int): # real signature unknown; restored from __doc__
""" hasVisitedPage(self, int) -> bool """
return False
def helpRequested(self): # real signature unknown; restored from __doc__
""" helpRequested(self) [signal] """
pass
def hideEvent(self, *args, **kwargs): # real signature unknown
pass
def initializePage(self, p_int): # real signature unknown; restored from __doc__
""" initializePage(self, int) """
pass
def initPainter(self, *args, **kwargs): # real signature unknown
pass
def inputMethodEvent(self, *args, **kwargs): # real signature unknown
pass
def isSignalConnected(self, *args, **kwargs): # real signature unknown
pass
def keyPressEvent(self, *args, **kwargs): # real signature unknown
pass
def keyReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def leaveEvent(self, *args, **kwargs): # real signature unknown
pass
def metric(self, *args, **kwargs): # real signature unknown
pass
def mouseDoubleClickEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def mousePressEvent(self, *args, **kwargs): # real signature unknown
pass
def mouseReleaseEvent(self, *args, **kwargs): # real signature unknown
pass
def moveEvent(self, *args, **kwargs): # real signature unknown
pass
def nativeEvent(self, *args, **kwargs): # real signature unknown
pass
def next(self): # real signature unknown; restored from __doc__
""" next(self) """
pass
def nextId(self): # real signature unknown; restored from __doc__
""" nextId(self) -> int """
return 0
def options(self): # real signature unknown; restored from __doc__
""" options(self) -> QWizard.WizardOptions """
pass
def page(self, p_int): # real signature unknown; restored from __doc__
""" page(self, int) -> QWizardPage """
return QWizardPage
def pageAdded(self, p_int): # real signature unknown; restored from __doc__
""" pageAdded(self, int) [signal] """
pass
def pageIds(self): # real signature unknown; restored from __doc__
""" pageIds(self) -> List[int] """
return []
def pageRemoved(self, p_int): # real signature unknown; restored from __doc__
""" pageRemoved(self, int) [signal] """
pass
def paintEvent(self, QPaintEvent): # real signature unknown; restored from __doc__
""" paintEvent(self, QPaintEvent) """
pass
def pixmap(self, QWizard_WizardPixmap): # real signature unknown; restored from __doc__
""" pixmap(self, QWizard.WizardPixmap) -> QPixmap """
pass
def receivers(self, *args, **kwargs): # real signature unknown
pass
def removePage(self, p_int): # real signature unknown; restored from __doc__
""" removePage(self, int) """
pass
def resizeEvent(self, QResizeEvent): # real signature unknown; restored from __doc__
""" resizeEvent(self, QResizeEvent) """
pass
def restart(self): # real signature unknown; restored from __doc__
""" restart(self) """
pass
def sender(self, *args, **kwargs): # real signature unknown
pass
def senderSignalIndex(self, *args, **kwargs): # real signature unknown
pass
def setButton(self, QWizard_WizardButton, QAbstractButton): # real signature unknown; restored from __doc__
""" setButton(self, QWizard.WizardButton, QAbstractButton) """
pass
def setButtonLayout(self, Iterable, QWizard_WizardButton=None): # real signature unknown; restored from __doc__
""" setButtonLayout(self, Iterable[QWizard.WizardButton]) """
pass
def setButtonText(self, QWizard_WizardButton, p_str): # real signature unknown; restored from __doc__
""" setButtonText(self, QWizard.WizardButton, str) """
pass
def setDefaultProperty(self, p_str, p_str_1, PYQT_SIGNAL): # real signature unknown; restored from __doc__
""" setDefaultProperty(self, str, str, PYQT_SIGNAL) """
pass
def setField(self, p_str, Any): # real signature unknown; restored from __doc__
""" setField(self, str, Any) """
pass
def setOption(self, QWizard_WizardOption, on=True): # real signature unknown; restored from __doc__
""" setOption(self, QWizard.WizardOption, on: bool = True) """
pass
def setOptions(self, Union, QWizard_WizardOptions=None, QWizard_WizardOption=None): # real signature unknown; restored from __doc__
""" setOptions(self, Union[QWizard.WizardOptions, QWizard.WizardOption]) """
pass
def setPage(self, p_int, QWizardPage): # real signature unknown; restored from __doc__
""" setPage(self, int, QWizardPage) """
pass
def setPixmap(self, QWizard_WizardPixmap, QPixmap): # real signature unknown; restored from __doc__
""" setPixmap(self, QWizard.WizardPixmap, QPixmap) """
pass
def setSideWidget(self, QWidget): # real signature unknown; restored from __doc__
""" setSideWidget(self, QWidget) """
pass
def setStartId(self, p_int): # real signature unknown; restored from __doc__
""" setStartId(self, int) """
pass
def setSubTitleFormat(self, Qt_TextFormat): # real signature unknown; restored from __doc__
""" setSubTitleFormat(self, Qt.TextFormat) """
pass
def setTitleFormat(self, Qt_TextFormat): # real signature unknown; restored from __doc__
""" setTitleFormat(self, Qt.TextFormat) """
pass
def setVisible(self, bool): # real signature unknown; restored from __doc__
""" setVisible(self, bool) """
pass
def setWizardStyle(self, QWizard_WizardStyle): # real signature unknown; restored from __doc__
""" setWizardStyle(self, QWizard.WizardStyle) """
pass
def sharedPainter(self, *args, **kwargs): # real signature unknown
pass
def showEvent(self, *args, **kwargs): # real signature unknown
pass
def sideWidget(self): # real signature unknown; restored from __doc__
""" sideWidget(self) -> QWidget """
return QWidget
def sizeHint(self): # real signature unknown; restored from __doc__
""" sizeHint(self) -> QSize """
pass
def startId(self): # real signature unknown; restored from __doc__
""" startId(self) -> int """
return 0
def subTitleFormat(self): # real signature unknown; restored from __doc__
""" subTitleFormat(self) -> Qt.TextFormat """
pass
def tabletEvent(self, *args, **kwargs): # real signature unknown
pass
def testOption(self, QWizard_WizardOption): # real signature unknown; restored from __doc__
""" testOption(self, QWizard.WizardOption) -> bool """
return False
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def titleFormat(self): # real signature unknown; restored from __doc__
""" titleFormat(self) -> Qt.TextFormat """
pass
def updateMicroFocus(self, *args, **kwargs): # real signature unknown
pass
def validateCurrentPage(self): # real signature unknown; restored from __doc__
""" validateCurrentPage(self) -> bool """
return False
def visitedPages(self): # real signature unknown; restored from __doc__
""" visitedPages(self) -> List[int] """
return []
def wheelEvent(self, *args, **kwargs): # real signature unknown
pass
def wizardStyle(self): # real signature unknown; restored from __doc__
""" wizardStyle(self) -> QWizard.WizardStyle """
pass
    def __init__(self, parent=None, flags=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__; default added to flags so the stub stays syntactically valid
pass
AeroStyle = 3
BackButton = 0
BackgroundPixmap = 3
BannerPixmap = 2
CancelButton = 4
CancelButtonOnLeft = 1024
ClassicStyle = 0
CommitButton = 2
CustomButton1 = 6
CustomButton2 = 7
CustomButton3 = 8
DisabledBackButtonOnLastPage = 64
ExtendedWatermarkPixmap = 4
FinishButton = 3
HaveCustomButton1 = 8192
HaveCustomButton2 = 16384
HaveCustomButton3 = 32768
HaveFinishButtonOnEarlyPages = 256
HaveHelpButton = 2048
HaveNextButtonOnLastPage = 128
HelpButton = 5
HelpButtonOnRight = 4096
IgnoreSubTitles = 2
IndependentPages = 1
LogoPixmap = 1
MacStyle = 2
ModernStyle = 1
NextButton = 1
NoBackButtonOnLastPage = 32
NoBackButtonOnStartPage = 16
NoCancelButton = 512
NoCancelButtonOnLastPage = 65536
NoDefaultButton = 8
Stretch = 9
WatermarkPixmap = 0
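# Usage sketch (illustrative; not part of the generated stub). QWizardPage
# comes from the same PyQt5.QtWidgets module:
#
#     wizard = QWizard()
#     page = QWizardPage()
#     page.setTitle("Introduction")
#     wizard.addPage(page)
#     wizard.show()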
| [
"[email protected]"
] | |
375a11d1c77c7c9b5e0a4ecfe2d2dece4756c57a | 9878df8dcc9443267197e31f24a628e115c87949 | /swagger_client/models/create_org_option.py | fa8e956d06cf5b747f1f317a21355174bd33dc79 | [] | no_license | mirandacong/gitea_python_client | 79fff8b3bb73f160abb059fe2f470b185017e844 | 79e2ae5253a20635aa019e176c17f8797d418f01 | refs/heads/master | 2020-04-02T00:19:07.392521 | 2018-10-20T05:02:55 | 2018-10-20T05:02:55 | 153,798,708 | 0 | 0 | null | 2018-10-20T05:02:56 | 2018-10-19T14:49:15 | Python | UTF-8 | Python | false | false | 5,763 | py | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreateOrgOption(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'full_name': 'str',
'location': 'str',
'username': 'str',
'website': 'str'
}
attribute_map = {
'description': 'description',
'full_name': 'full_name',
'location': 'location',
'username': 'username',
'website': 'website'
}
def __init__(self, description=None, full_name=None, location=None, username=None, website=None): # noqa: E501
"""CreateOrgOption - a model defined in Swagger""" # noqa: E501
self._description = None
self._full_name = None
self._location = None
self._username = None
self._website = None
self.discriminator = None
if description is not None:
self.description = description
if full_name is not None:
self.full_name = full_name
if location is not None:
self.location = location
self.username = username
if website is not None:
self.website = website
@property
def description(self):
"""Gets the description of this CreateOrgOption. # noqa: E501
:return: The description of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateOrgOption.
:param description: The description of this CreateOrgOption. # noqa: E501
:type: str
"""
self._description = description
@property
def full_name(self):
"""Gets the full_name of this CreateOrgOption. # noqa: E501
:return: The full_name of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this CreateOrgOption.
:param full_name: The full_name of this CreateOrgOption. # noqa: E501
:type: str
"""
self._full_name = full_name
@property
def location(self):
"""Gets the location of this CreateOrgOption. # noqa: E501
:return: The location of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this CreateOrgOption.
:param location: The location of this CreateOrgOption. # noqa: E501
:type: str
"""
self._location = location
@property
def username(self):
"""Gets the username of this CreateOrgOption. # noqa: E501
:return: The username of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CreateOrgOption.
:param username: The username of this CreateOrgOption. # noqa: E501
:type: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
@property
def website(self):
"""Gets the website of this CreateOrgOption. # noqa: E501
:return: The website of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._website
@website.setter
def website(self, website):
"""Sets the website of this CreateOrgOption.
:param website: The website of this CreateOrgOption. # noqa: E501
:type: str
"""
self._website = website
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateOrgOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
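# Usage sketch (illustrative only; field values are made up):
#
#     opt = CreateOrgOption(username="my-org", description="Example org")
#     payload = opt.to_dict()   # plain dict, ready for JSON serialization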
| [
"[email protected]"
] | |
b21196c3211c716a022386906d536cb16977c5de | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Directory-And-File-Modules/OS-Path-Module/Mount-Point-Test.py | 139a4a9945b10f4204de1fa19e4fb52106b19c06 | [] | no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py |
import os
dir = "/proc"
print(os.path.ismount(dir))
| [
"[email protected]"
] | |
0c543552664fd3afe39399fb3f895ca72021e56d | bc6508a1dde1e61a8b2f61e70044c074aeeb4406 | /whoiser/servers/BB.py | b33a96c32341105b898e149e693478ed66fbf271 | [] | no_license | krikulis/whoiser | 7eca72260dc061a91c7630901557264b80c5263e | 27af46d6ffcf2bacc5e5b837883ab5fab7ac9b40 | refs/heads/master | 2021-01-10T19:10:53.915622 | 2012-06-24T23:50:28 | 2012-06-24T23:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py |
from servers.generic import GenericWhoisQuery
class WhoisQuery(GenericWhoisQuery):
def query(self, query):
raise NotImplementedError(u"TLD BB has no Whois server available")
| [
"[email protected]"
] | |
2b102cafd639c68ae78c3d889a58a70e39bcd682 | 285de86d006f3ef53b94156d7fcbddb498f10a60 | /backend/users/migrations/0002_auto_20201125_0526.py | d21f4d5402f99799cba6e502d6e82dd81cb1bcc6 | [] | no_license | crowdbotics-apps/aia-store-22965 | 1e457444cd854adfb35e59edff4ab1f06b8c13b7 | 28162af82dc569d4f4e9a12a14d41787cb1f1a72 | refs/heads/master | 2023-01-19T09:18:50.195872 | 2020-11-25T05:27:32 | 2020-11-25T05:27:32 | 315,837,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Generated by Django 2.2.17 on 2020-11-25 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
] | |
3c90e8613ee6cecb065601ac20d4e343478c900d | b6aa9768dbac327943e0220df1c56ce38adc6de1 | /657_robot-return-to-origin.py | 56b67347d37bd1865161ddde5c165b4adcf916f8 | [] | no_license | Khrystynka/LeetCodeProblems | f86e4c1e46f70f874924de137ec5efb2f2518766 | 917bd000c2a055dfa2633440a61ca4ae2b665fe3 | refs/heads/master | 2021-03-17T00:51:10.102494 | 2020-09-28T06:31:03 | 2020-09-28T06:31:03 | 246,954,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # Problem Title: Robot Return to Origin
class Solution(object):
def judgeCircle(self, moves):
"""
:type moves: str
:rtype: bool
"""
pos = [0, 0]
for move in moves:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] += 1
elif move == "D":
pos[1] -= 1
return pos == [0, 0]
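# Usage sketch (added for illustration): "UD" returns to the origin,
# "LL" does not.
#
#     assert Solution().judgeCircle("UD") is True
#     assert Solution().judgeCircle("LL") is False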
| [
"[email protected]"
] | |
96398f9563c3ce4191380a21668c526c3207b238 | b822eae07a1f02365b3b9d0e16cd0dc9c48f01c2 | /qiskit_aqua/dynamics/__init__.py | aa40b2bd30c0ac9d2fa895a91357b0c63738c2bd | [
"Apache-2.0"
] | permissive | takehuge/aqua | ff4b38c10f976223ffef4047c3a209df2d20de28 | ec0fb71899d9e37f96f82eaf79c7d6581df6c9a7 | refs/heads/master | 2020-03-24T10:45:52.106507 | 2018-07-27T11:37:24 | 2018-07-27T11:37:24 | 142,666,813 | 1 | 0 | Apache-2.0 | 2018-07-28T10:01:44 | 2018-07-28T10:01:44 | null | UTF-8 | Python | false | false | 730 | py | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# for core development
# from .dynamics import Dynamics
| [
"[email protected]"
] | |
855af8ae5099cd64f0c255047fc3c691da29855d | 29a78032c3b2fdd4722f6c054ab20a5a8cea627c | /board/user_manager/urls.py | 0d466d9611d59ef7cd61565d71636d4999a3960e | [] | no_license | jungting20/pythonpro | 838ea188f846b6e1a90f1a7c429f02464b1b0927 | 455dd23132023cb472bab5e8d9ba4a881331db54 | refs/heads/master | 2021-06-27T16:20:54.768172 | 2017-09-16T08:38:19 | 2017-09-16T08:38:19 | 103,737,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from django.conf.urls import url
from user_manager.views import login, login_validate, join_page
# The view functions were originally defined right here; that block was
# selected and moved into views.py using Alt+Shift+V.
urlpatterns = [
url(r'^login/$', login),
url(r'^login/validate/$', login_validate),
url(r'^join/$', join_page)
] | [
"[email protected]"
] | |
8e53c8d3b292b9d2807cb3998f91fe0c511c0f5a | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/rapid7_insightidr/komand_rapid7_insightidr/actions/update_investigation/schema.py | e6b2c8a39c249b0cb740677586dc4812051c1fcc | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 6,172 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Allows to update existing investigation by ID or RRN"
class Input:
DISPOSITION = "disposition"
EMAIL = "email"
ID = "id"
PRIORITY = "priority"
STATUS = "status"
TITLE = "title"
class Output:
INVESTIGATION = "investigation"
class UpdateInvestigationInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"disposition": {
"type": "string",
"title": "Disposition",
"description": "Investigation's disposition",
"enum": [
"",
"BENIGN",
"MALICIOUS",
"NOT_APPLICABLE"
],
"order": 5
},
"email": {
"type": "string",
"title": "Email",
"description": "A user's email address for investigation to be assigned",
"order": 6
},
"id": {
"type": "string",
"title": "ID or RRN",
"description": "The identifier of investigation to be update (ID or RRN)",
"order": 1
},
"priority": {
"type": "string",
"title": "Priority",
"description": "Investigation's priority",
"enum": [
"",
"UNSPECIFIED",
"LOW",
"MEDIUM",
"HIGH",
"CRITICAL"
],
"order": 4
},
"status": {
"type": "string",
"title": "Status",
"description": "Investigation's status",
"enum": [
"",
"OPEN",
"INVESTIGATING",
"CLOSED"
],
"order": 3
},
"title": {
"type": "string",
"title": "Title",
"description": "Investigation's title",
"order": 2
}
},
"required": [
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class UpdateInvestigationOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"investigation": {
"$ref": "#/definitions/investigation",
"title": "Investigation",
"description": "The body of the specified investigation",
"order": 1
}
},
"required": [
"investigation"
],
"definitions": {
"assignee": {
"type": "object",
"title": "assignee",
"properties": {
"email": {
"type": "string",
"title": "Email",
"description": "The email of the assigned user",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "The name of the assigned user",
"order": 2
}
}
},
"investigation": {
"type": "object",
"title": "investigation",
"properties": {
"assignee": {
"$ref": "#/definitions/assignee",
"title": "Assignee",
"description": "The user assigned to this investigation, if any",
"order": 1
},
"created_time": {
"type": "string",
"title": "Created Time",
"description": "The time the investigation was created as an ISO formatted timestamp",
"order": 2
},
"disposition": {
"type": "string",
"title": "Disposition",
"description": "The disposition of this investigation, where possible values are BENIGN, MALICIOUS, NOT_APPLICABLE, and UNSPECIFIED",
"order": 3
},
"first_alert_time": {
"type": "string",
"title": "First Alert Time",
"description": "The create time of the first alert belonging to this investigation",
"order": 4
},
"last_accessed": {
"type": "string",
"title": "Last Accessed",
"description": "The time investigation was last viewed or modified",
"order": 5
},
"latest_alert_time": {
"type": "string",
"title": "Latest Alert Time",
"description": "The create time of the most recent alert belonging to this investigation",
"order": 6
},
"organization_id": {
"type": "string",
"title": "Organization ID",
"description": "The id of the organization that owns this investigation",
"order": 7
},
"priority": {
"type": "string",
"title": "Priority",
"description": "The investigations priority, where possible values are CRITICAL, HIGH, MEDIUM, LOW, and UNKNOWN",
"order": 8
},
"rrn": {
"type": "string",
"title": "RRN",
"description": "The RRN of the investigation",
"order": 9
},
"source": {
"type": "string",
"title": "Source",
"description": "The source of this investigation",
"order": 10
},
"status": {
"type": "string",
"title": "Status",
"description": "The status of the investigation",
"order": 11
},
"title": {
"type": "string",
"title": "Title",
"description": "Investigation title",
"order": 12
}
},
"required": [
"created_time",
"disposition",
"last_accessed",
"organization_id",
"priority",
"rrn",
"source",
"status",
"title"
],
"definitions": {
"assignee": {
"type": "object",
"title": "assignee",
"properties": {
"email": {
"type": "string",
"title": "Email",
"description": "The email of the assigned user",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "The name of the assigned user",
"order": 2
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"[email protected]"
] | |
eb56d0121a289f5fa16456acf5a76c39e991c74b | 47128c6ff1277eedf851670d33f7a288fdfe2246 | /function/function_callback2.py | 32ed7de75eaffb118c21f481c714ce8fb6757336 | [] | no_license | chati757/python-learning-space | 5de7f11a931cf95bc076473da543331b773c07fb | bc33749254d12a47523007fa9a32668b8dc12a24 | refs/heads/master | 2023-08-13T19:19:52.271788 | 2023-07-26T14:09:58 | 2023-07-26T14:09:58 | 83,208,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | #!/usr/bin/env python
from __future__ import print_function
def fibonacci(cb):
values = []
while(True):
if len(values) < 2:
values.append(1)
else:
values = [values[-1], values[-1] + values[-2]]
r = cb(values[-1])
        if r[0]:
            return r[1]
def check_17(v):
if v % 17 == 0:
return (True, v)
if v > 10000:
return (True, None)
return (False,)
if __name__ == '__main__':
res = fibonacci(check_17)
if (res != None):
print(res) | [
"[email protected]"
] | |
5f9fb75b20926dfae9b4822da73744706878fe88 | 4f0cd2618cd7856e5ef51d1ad177fa572ccaea6b | /CircuitPython_Templates/storage_neopixel_code/code.py | a7c727c5021e3e139f92b578e5c700017f5e6a04 | [
"MIT"
] | permissive | profharris/Adafruit_Learning_System_Guides | ecd213d34ffb7fa227e085ef3c763c802406d30e | 1e64c043be80451443fcae3f8952c6fd0cb1a52e | refs/heads/main | 2023-07-06T22:17:02.568765 | 2021-08-06T18:44:30 | 2021-08-06T18:44:30 | 394,449,146 | 1 | 0 | MIT | 2021-08-09T21:54:29 | 2021-08-09T21:54:28 | null | UTF-8 | Python | false | false | 1,439 | py | """
CircuitPython Essentials Storage CP Filesystem code.py file
For use with boards that have a built-in NeoPixel or NeoPixels, but no little red LED.
It will use only one pixel as an indicator, even if there is more than one NeoPixel.
"""
import time
import board
import microcontroller
import neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1)
try:
with open("/temperature.txt", "a") as temp_log:
while True:
# The microcontroller temperature in Celsius. Include the
# math to do the C to F conversion here, if desired.
temperature = microcontroller.cpu.temperature
# Write the temperature to the temperature.txt file every 10 seconds.
temp_log.write('{0:.2f}\n'.format(temperature))
temp_log.flush()
# Blink the NeoPixel on every write...
pixel.fill((255, 0, 0))
time.sleep(1) # ...for one second.
pixel.fill((0, 0, 0)) # Then turn it off...
time.sleep(9) # ...for the other 9 seconds.
except OSError as e: # When the filesystem is NOT writable by CircuitPython...
delay = 0.5 # ...blink the NeoPixel every half second.
if e.args[0] == 28: # If the file system is full...
delay = 0.15 # ...blink the NeoPixel every 0.15 seconds!
while True:
pixel.fill((255, 0, 0))
time.sleep(delay)
pixel.fill((0, 0, 0))
time.sleep(delay)
| [
"[email protected]"
] | |
9de73ca502dfd47d31b65500e037cbf5e1d5abde | 68d38b305b81e0216fa9f6769fe47e34784c77f2 | /alascrapy/spiders/amazon_it_csv.py | f2a5930639aa4176890b9ef1d5c13a6528aae1f8 | [] | no_license | ADJet1437/ScrapyProject | 2a6ed472c7c331e31eaecff26f9b38b283ffe9c2 | db52844411f6dac1e8bd113cc32a814bd2ea3632 | refs/heads/master | 2022-11-10T05:02:54.871344 | 2020-02-06T08:01:17 | 2020-02-06T08:01:17 | 237,448,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # -*- coding: utf8 -*-
__author__ = 'leonardo'
from alascrapy.spiders.base_spiders.amazon import AmazonCSV
class AmazonITCsv(AmazonCSV):
name = 'amazon_it_csv'
country_code = 'it'
asin_kind = 'amazon_it_id'
endpoint = "webservices.amazon.it"
start_urls = ['http://alatest.com']
schema = {'asin': 0,
'name': 4,
'image': [5, 6, 7],
'url': [23, 28],
'manufacturer': 1,
'price': [19, 24],
'mpn': 17,
'ean': 9,
'salesrank': 12,
'nodes': [{'node': 13,
'node_path': 15},
{'node': 14,
'node_path': 16}]}
| [
"[email protected]"
] | |
3259148744fc149b8b65f565643198102619c09e | 501615c82801733e69c7447ab9fd68d3883ed947 | /hotfix/.svn/pristine/32/3259148744fc149b8b65f565643198102619c09e.svn-base | d6cfb334426ef58e6247a4decc8bdf34ec7beb71 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | # -*- coding: utf-8 -*-
"""
@version: 2016/5/17 0017
@author: zhangyunrui
@contact: [email protected]
@file: views.py
@time: 2016/5/17 0017 10:48
@note: views visible only to the educational-affairs (eduadmin) side
"""
from django.shortcuts import render
from mz_common.decorators import eduadmin_required
from mz_usercenter.base.context import get_usercenter_context
from mz_usercenter.eduadmin.interface import EduAdminOverview
@eduadmin_required
def view_index(request):
"""
    Educational-affairs dashboard
:param request:
:return:
"""
user_id = request.user.id
edu_info = EduAdminOverview.get_info(user_id)
return render(request, 'mz_usercenter/eduadmin/homepage.html', locals(),
context_instance=get_usercenter_context(request))
| [
"[email protected]"
] | ||
bd52c4b7c823672296c8c50e673e389127a6ee32 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_framebuf.py | 3c7bd7b3a766945df0c001cddf4560066893c949 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,681 | py | # SPDX-FileCopyrightText: <text> 2018 Kattni Rembor, Melissa LeBlanc-Williams
# and Tony DiCola, for Adafruit Industries.
# Original file created by Damien P. George </text>
#
# SPDX-License-Identifier: MIT
"""
`adafruit_framebuf`
====================================================
CircuitPython pure-python framebuf module, based on the micropython framebuf module.
Implementation Notes
--------------------
**Hardware:**
* `Adafruit SSD1306 OLED displays <https://www.adafruit.com/?q=ssd1306>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
__version__ = "1.4.6"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_framebuf.git"
import os
import struct
# Framebuf format constants:
MVLSB = 0 # Single bit displays (like SSD1306 OLED)
RGB565 = 1 # 16-bit color displays
GS4_HMSB = 2 # Unimplemented!
MHMSB = 3 # Single bit displays like the Sharp Memory
RGB888 = 4 # Neopixels and Dotstars
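# Usage sketch (illustrative, mirroring the SSD1306 case named above): a
# 128x32 single-bit display needs 128 * 32 // 8 = 512 bytes in MVLSB layout.
#
#     import adafruit_framebuf
#     buf = bytearray(128 * 32 // 8)
#     fb = adafruit_framebuf.FrameBuffer(buf, 128, 32, adafruit_framebuf.MVLSB)
#     fb.fill(0)
#     fb.line(0, 0, 127, 31, 1)
#     fb.pixel(10, 2, 1)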
class MHMSBFormat:
"""MHMSBFormat"""
@staticmethod
def set_pixel(framebuf, x, y, color):
"""Set a given pixel to a color."""
index = (y * framebuf.stride + x) // 8
offset = 7 - x & 0x07
framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (
(color != 0) << offset
)
@staticmethod
def get_pixel(framebuf, x, y):
"""Get the color of a given pixel"""
index = (y * framebuf.stride + x) // 8
offset = 7 - x & 0x07
return (framebuf.buf[index] >> offset) & 0x01
@staticmethod
def fill(framebuf, color):
"""completely fill/clear the buffer with a color"""
if color:
fill = 0xFF
else:
fill = 0x00
for i in range(len(framebuf.buf)):
framebuf.buf[i] = fill
@staticmethod
def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
for _x in range(x, x + width):
offset = 7 - _x & 0x07
for _y in range(y, y + height):
index = (_y * framebuf.stride + _x) // 8
framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (
(color != 0) << offset
)
class MVLSBFormat:
"""MVLSBFormat"""
@staticmethod
def set_pixel(framebuf, x, y, color):
"""Set a given pixel to a color."""
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (
(color != 0) << offset
)
@staticmethod
def get_pixel(framebuf, x, y):
"""Get the color of a given pixel"""
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
return (framebuf.buf[index] >> offset) & 0x01
@staticmethod
def fill(framebuf, color):
"""completely fill/clear the buffer with a color"""
if color:
fill = 0xFF
else:
fill = 0x00
for i in range(len(framebuf.buf)):
framebuf.buf[i] = fill
@staticmethod
def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
while height > 0:
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
for w_w in range(width):
framebuf.buf[index + w_w] = (
framebuf.buf[index + w_w] & ~(0x01 << offset)
) | ((color != 0) << offset)
y += 1
height -= 1
class RGB888Format:
"""RGB888Format"""
@staticmethod
def set_pixel(framebuf, x, y, color):
"""Set a given pixel to a color."""
index = (y * framebuf.stride + x) * 3
if isinstance(color, tuple):
framebuf.buf[index : index + 3] = bytes(color)
else:
framebuf.buf[index : index + 3] = bytes(
((color >> 16) & 255, (color >> 8) & 255, color & 255)
)
@staticmethod
def get_pixel(framebuf, x, y):
"""Get the color of a given pixel"""
index = (y * framebuf.stride + x) * 3
return (
(framebuf.buf[index] << 16)
| (framebuf.buf[index + 1] << 8)
| framebuf.buf[index + 2]
)
@staticmethod
def fill(framebuf, color):
"""completely fill/clear the buffer with a color"""
fill = (color >> 16) & 255, (color >> 8) & 255, color & 255
for i in range(0, len(framebuf.buf), 3):
framebuf.buf[i : i + 3] = bytes(fill)
@staticmethod
def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
fill = (color >> 16) & 255, (color >> 8) & 255, color & 255
for _x in range(x, x + width):
for _y in range(y, y + height):
index = (_y * framebuf.stride + _x) * 3
framebuf.buf[index : index + 3] = bytes(fill)
class FrameBuffer:
"""FrameBuffer object.
:param buf: An object with a buffer protocol which must be large enough to contain every
pixel defined by the width, height and format of the FrameBuffer.
:param width: The width of the FrameBuffer in pixel
:param height: The height of the FrameBuffer in pixel
:param buf_format: Specifies the type of pixel used in the FrameBuffer; permissible values
are listed under Constants below. These set the number of bits used to
encode a color value and the layout of these bits in ``buf``. Where a
color value c is passed to a method, c is a small integer with an encoding
that is dependent on the format of the FrameBuffer.
:param stride: The number of pixels between each horizontal line of pixels in the
FrameBuffer. This defaults to ``width`` but may need adjustments when
implementing a FrameBuffer within another larger FrameBuffer or screen. The
``buf`` size must accommodate an increased step size.
"""
def __init__(self, buf, width, height, buf_format=MVLSB, stride=None):
# pylint: disable=too-many-arguments
self.buf = buf
self.width = width
self.height = height
self.stride = stride
self._font = None
if self.stride is None:
self.stride = width
if buf_format == MVLSB:
self.format = MVLSBFormat()
elif buf_format == MHMSB:
self.format = MHMSBFormat()
elif buf_format == RGB888:
self.format = RGB888Format()
else:
raise ValueError("invalid format")
self._rotation = 0
@property
def rotation(self):
"""The rotation setting of the display, can be one of (0, 1, 2, 3)"""
return self._rotation
@rotation.setter
def rotation(self, val):
        if val not in (0, 1, 2, 3):
raise RuntimeError("Bad rotation setting")
self._rotation = val
def fill(self, color):
"""Fill the entire FrameBuffer with the specified color."""
self.format.fill(self, color)
def fill_rect(self, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments, too-many-boolean-expressions
self.rect(x, y, width, height, color, fill=True)
def pixel(self, x, y, color=None):
"""If ``color`` is not given, get the color value of the specified pixel. If ``color`` is
given, set the specified pixel to the given color."""
if self.rotation == 1:
x, y = y, x
x = self.width - x - 1
if self.rotation == 2:
x = self.width - x - 1
y = self.height - y - 1
if self.rotation == 3:
x, y = y, x
y = self.height - y - 1
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return None
if color is None:
return self.format.get_pixel(self, x, y)
self.format.set_pixel(self, x, y, color)
return None
def hline(self, x, y, width, color):
"""Draw a horizontal line up to a given length."""
self.rect(x, y, width, 1, color, fill=True)
def vline(self, x, y, height, color):
"""Draw a vertical line up to a given length."""
self.rect(x, y, 1, height, color, fill=True)
def circle(self, center_x, center_y, radius, color):
"""Draw a circle at the given midpoint location, radius and color.
        The ``circle`` method draws only a 1 pixel outline."""
x = radius - 1
y = 0
d_x = 1
d_y = 1
err = d_x - (radius << 1)
while x >= y:
self.pixel(center_x + x, center_y + y, color)
self.pixel(center_x + y, center_y + x, color)
self.pixel(center_x - y, center_y + x, color)
self.pixel(center_x - x, center_y + y, color)
self.pixel(center_x - x, center_y - y, color)
self.pixel(center_x - y, center_y - x, color)
self.pixel(center_x + y, center_y - x, color)
self.pixel(center_x + x, center_y - y, color)
if err <= 0:
y += 1
err += d_y
d_y += 2
if err > 0:
x -= 1
d_x += 2
err += d_x - (radius << 1)
def rect(self, x, y, width, height, color, *, fill=False):
"""Draw a rectangle at the given location, size and color. The ```rect``` method draws only
a 1 pixel outline."""
# pylint: disable=too-many-arguments
if self.rotation == 1:
x, y = y, x
width, height = height, width
x = self.width - x - width
if self.rotation == 2:
x = self.width - x - width
y = self.height - y - height
if self.rotation == 3:
x, y = y, x
width, height = height, width
y = self.height - y - height
# pylint: disable=too-many-boolean-expressions
if (
width < 1
or height < 1
or (x + width) <= 0
or (y + height) <= 0
or y >= self.height
or x >= self.width
):
return
x_end = min(self.width - 1, x + width - 1)
y_end = min(self.height - 1, y + height - 1)
x = max(x, 0)
y = max(y, 0)
if fill:
self.format.fill_rect(self, x, y, x_end - x + 1, y_end - y + 1, color)
else:
self.format.fill_rect(self, x, y, x_end - x + 1, 1, color)
self.format.fill_rect(self, x, y, 1, y_end - y + 1, color)
self.format.fill_rect(self, x, y_end, x_end - x + 1, 1, color)
self.format.fill_rect(self, x_end, y, 1, y_end - y + 1, color)
def line(self, x_0, y_0, x_1, y_1, color):
# pylint: disable=too-many-arguments
"""Bresenham's line algorithm"""
d_x = abs(x_1 - x_0)
d_y = abs(y_1 - y_0)
x, y = x_0, y_0
s_x = -1 if x_0 > x_1 else 1
s_y = -1 if y_0 > y_1 else 1
if d_x > d_y:
err = d_x / 2.0
while x != x_1:
self.pixel(x, y, color)
err -= d_y
if err < 0:
y += s_y
err += d_x
x += s_x
else:
err = d_y / 2.0
while y != y_1:
self.pixel(x, y, color)
err -= d_x
if err < 0:
x += s_x
err += d_y
y += s_y
self.pixel(x, y, color)
def blit(self):
"""blit is not yet implemented"""
raise NotImplementedError()
def scroll(self, delta_x, delta_y):
"""shifts framebuf in x and y direction"""
if delta_x < 0:
shift_x = 0
xend = self.width + delta_x
dt_x = 1
else:
shift_x = self.width - 1
xend = delta_x - 1
dt_x = -1
if delta_y < 0:
y = 0
yend = self.height + delta_y
dt_y = 1
else:
y = self.height - 1
yend = delta_y - 1
dt_y = -1
while y != yend:
x = shift_x
while x != xend:
self.format.set_pixel(
self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)
)
x += dt_x
y += dt_y
# pylint: disable=too-many-arguments
def text(self, string, x, y, color, *, font_name="font5x8.bin", size=1):
"""Place text on the screen in variables sizes. Breaks on \n to next line.
Does not break on line going off screen.
"""
# determine our effective width/height, taking rotation into account
frame_width = self.width
frame_height = self.height
if self.rotation == 1 or self.rotation == 3:
frame_width, frame_height = frame_height, frame_width
for chunk in string.split("\n"):
if not self._font or self._font.font_name != font_name:
# load the font!
self._font = BitmapFont(font_name)
width = self._font.font_width
height = self._font.font_height
for i, char in enumerate(chunk):
char_x = x + (i * (width + 1)) * size
if (
char_x + (width * size) > 0
and char_x < frame_width
and y + (height * size) > 0
and y < frame_height
):
self._font.draw_char(char, char_x, y, self, color, size=size)
y += height * size
# pylint: enable=too-many-arguments
def image(self, img):
"""Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size."""
# determine our effective width/height, taking rotation into account
width = self.width
height = self.height
if self.rotation == 1 or self.rotation == 3:
width, height = height, width
if isinstance(self.format, RGB888Format) and img.mode != "RGB":
raise ValueError("Image must be in mode RGB.")
if isinstance(self.format, (MHMSBFormat, MVLSBFormat)) and img.mode != "1":
raise ValueError("Image must be in mode 1.")
imwidth, imheight = img.size
if imwidth != width or imheight != height:
raise ValueError(
"Image must be same dimensions as display ({0}x{1}).".format(
width, height
)
)
# Grab all the pixels from the image, faster than getpixel.
pixels = img.load()
# Clear buffer
for i in range(len(self.buf)):
self.buf[i] = 0
# Iterate through the pixels
for x in range(width): # yes this double loop is slow,
for y in range(height): # but these displays are small!
if img.mode == "RGB":
self.pixel(x, y, pixels[(x, y)])
elif pixels[(x, y)]:
self.pixel(x, y, 1) # only write if pixel is true
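
# --- Usage sketch (not part of the original library) -------------------------
# A minimal example of the FrameBuffer API above, assuming a 128x32 monochrome
# display: MVLSB packing needs width * height // 8 bytes. Wrapped in a helper
# function so importing this module stays side-effect free.
def _framebuffer_usage_sketch():
    buf = bytearray(128 * 32 // 8)
    fb = FrameBuffer(buf, 128, 32, MVLSB)
    fb.fill(0)  # clear to background
    fb.rect(0, 0, 128, 32, 1)  # one-pixel border
    fb.line(0, 0, 127, 31, 1)  # diagonal line
    fb.circle(64, 16, 10, 1)  # centered circle outline
    return fb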
# MicroPython basic bitmap font renderer.
# Author: Tony DiCola
# License: MIT License (https://opensource.org/licenses/MIT)
class BitmapFont:
"""A helper class to read binary font tiles and 'seek' through them as a
    file to display in a framebuffer. We use file access so we don't waste 1KB
of RAM on a font!"""
def __init__(self, font_name="font5x8.bin"):
# Specify the drawing area width and height, and the pixel function to
# call when drawing pixels (should take an x and y param at least).
# Optionally specify font_name to override the font file to use (default
# is font5x8.bin). The font format is a binary file with the following
# format:
# - 1 unsigned byte: font character width in pixels
# - 1 unsigned byte: font character height in pixels
# - x bytes: font data, in ASCII order covering all 255 characters.
# Each character should have a byte for each pixel column of
# data (i.e. a 5x8 font has 5 bytes per character).
self.font_name = font_name
# Open the font file and grab the character width and height values.
# Note that only fonts up to 8 pixels tall are currently supported.
try:
self._font = open(self.font_name, "rb")
self.font_width, self.font_height = struct.unpack("BB", self._font.read(2))
# simple font file validation check based on expected file size
if 2 + 256 * self.font_width != os.stat(font_name)[6]:
raise RuntimeError("Invalid font file: " + font_name)
except OSError:
print("Could not find font file", font_name)
raise
except OverflowError:
# os.stat can throw this on boards without long int support
# just hope the font file is valid and press on
pass
def deinit(self):
"""Close the font file as cleanup."""
self._font.close()
def __enter__(self):
"""Initialize/open the font file"""
self.__init__()
return self
def __exit__(self, exception_type, exception_value, traceback):
"""cleanup on exit"""
self.deinit()
def draw_char(
self, char, x, y, framebuffer, color, size=1
): # pylint: disable=too-many-arguments
"""Draw one character at position (x,y) to a framebuffer in a given color"""
size = max(size, 1)
# Don't draw the character if it will be clipped off the visible area.
# if x < -self.font_width or x >= framebuffer.width or \
# y < -self.font_height or y >= framebuffer.height:
# return
# Go through each column of the character.
for char_x in range(self.font_width):
# Grab the byte for the current column of font data.
self._font.seek(2 + (ord(char) * self.font_width) + char_x)
try:
line = struct.unpack("B", self._font.read(1))[0]
except RuntimeError:
                continue  # character may be missing from the font; go to next
# Go through each row in the column byte.
for char_y in range(self.font_height):
# Draw a pixel for each bit that's flipped on.
if (line >> char_y) & 0x1:
framebuffer.fill_rect(
x + char_x * size, y + char_y * size, size, size, color
)
def width(self, text):
"""Return the pixel width of the specified text message."""
return len(text) * (self.font_width + 1)
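
# Offset illustration for the font layout documented in BitmapFont.__init__:
# after the two header bytes, each character occupies ``font_width`` column
# bytes, so data for character ``c`` starts at 2 + ord(c) * font_width. This
# mirrors the seek() in draw_char above; the helper exists for illustration.
def _font_data_offset(char, font_width=5):
    return 2 + ord(char) * font_width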
class FrameBuffer1(FrameBuffer): # pylint: disable=abstract-method
"""FrameBuffer1 object. Inherits from FrameBuffer."""
| [
"[email protected]"
] | |
45c2ef41ea4cb46acafc9a71ea9a5b4744b680b5 | 9ebeb33e168798d41b54a8ab474b00c160de43a2 | /orders/migrations/0002_auto_20200822_0401.py | 4701bcd93bd83faa6a3cc16743f6a5882c6e3e11 | [] | no_license | danielspring-crypto/tritrade | 0c1f961138b9e4892d53ece98b54094be0e4c4b9 | 6fc7c644c1657a7744703cd144be7fbb5320397c | refs/heads/master | 2022-12-04T13:21:07.761942 | 2020-08-28T00:02:36 | 2020-08-28T00:02:36 | 290,908,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | # Generated by Django 3.1 on 2020-08-22 04:01
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coupons', '0001_initial'),
('orders', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='order',
name='coupon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='coupons.coupon'),
),
migrations.AddField(
model_name='order',
name='discount',
field=models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
]
| [
"[email protected]"
] | |
5e245a3eafcd6474ff09b5de1fc61d585c86d74f | 74d66269984cec1527dbfb5aa9772775e2198ad6 | /machin/parallel/server/param_server.py | be87a1d7ca8918fc9d2cbd753130e994dcf7e28f | [
"MIT"
] | permissive | Silas-Asamoah/machin | 34dfa0717ba266a70e7ae11d60bf0055f222b2b3 | af1b5d825e27a98deab7130eedbe1c2505dacf9d | refs/heads/master | 2023-01-28T21:17:42.690424 | 2020-12-05T09:18:36 | 2020-12-05T09:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,172 | py | from typing import Any, Union, List
from random import choice
from copy import deepcopy
from queue import Queue
from threading import Event
import enum
import torch as t
import torch.nn as nn
from machin.parallel.thread import Thread
from machin.parallel.distributed import RpcGroup
from machin.utils.prepare import prep_load_state_dict
from .ordered_server import (
OrderedServerBase,
OrderedServerSimple,
OrderedServerSimpleImpl
)
class PushPullModelServer:
def __init__(self,
model_name: str,
o_server: OrderedServerBase = None):
"""
Create an accessor to the services provided by
:class:`PushPullModelServerImpl`
Args:
model_name: Name of the managed model in the ordered server,
                only needed if ``server`` needs such an identifier. The default
ordered server does not require this.
o_server: Ordered server accessor.
"""
self.model_name = model_name
self.o_server = o_server
def push(self, model: nn.Module, pull_on_fail=True):
"""
        Try to push a model to the ordered server; if the push fails, the
        newest model will be pulled automatically and its parameters will be
        assigned to ``model``. Gradients will not be cleared.
Args:
model: Model to push.
pull_on_fail: Pull the newest parameters if push failed.
"""
if not hasattr(model, "pp_version"):
model.pp_version = 0
copied_model_params = deepcopy(model.state_dict())
for k, v in copied_model_params.items():
copied_model_params[k] = v.to("cpu")
if not self.o_server.push(
self.model_name, copied_model_params,
version=model.pp_version + 1, prev_version=model.pp_version
):
if pull_on_fail:
result = self.o_server.pull(self.model_name)
if result is None: # pragma: no cover
raise RuntimeError("Pull failed, this should not happen.")
st_dict, version = result
prep_load_state_dict(model, st_dict)
model.pp_version = version
return False
else:
model.pp_version += 1
return True
def pull(self, model: nn.Module):
"""
Pull the newest state dict of your model and update its parameters
and ``pp_version``. Gradients will not be cleared.
Args:
model: Model to pull.
"""
result = self.o_server.pull(self.model_name)
if result is None: # pragma: no cover
return False
st_dict, version = result
if not hasattr(model, "pp_version") or model.pp_version < version:
prep_load_state_dict(model, st_dict)
model.pp_version = version
return True
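
# A minimal usage sketch (the server name "my_model_server" is illustrative):
# one member process hosts a PushPullModelServerImpl (defined below) under
# that name, after which any member can pair with it and synchronize a model.
def _push_pull_model_usage_sketch(group, local_model):
    accessor = group.get_paired("my_model_server").to_here()
    accessor.pull(local_model)  # bring local parameters up to date
    # ... run a training step on local_model here ...
    accessor.push(local_model)  # publish; pulls the newest version on clash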
class PushPullModelServerImpl:
"""
A simple parameter server, which synchronize model parameters
by pushing and pulling all parameters and maintaining a strict
ordered version chain.
Warning:
Only one model is supported.
"""
def __init__(self,
server_name: str,
group: RpcGroup,
model_name: str = "model",
o_server: OrderedServerBase = None):
"""
This init function must be only invoked on the runner process,
and the runner process must be a member process of ``group``.
Args:
server_name: Name of this server, used to registered
the server as a paired class of ``group``.
            group: RpcGroup on which the default server
                :class:`.OrderedServerSimple` is created; mutually exclusive
                with ``o_server``.
model_name: Name of the managed model in the ordered server,
                only needed if ``server`` needs such an identifier. The default
ordered server does not require this.
o_server: Custom ordered server accessor.
"""
self.server_name = server_name
self.group = group
self.model_name = model_name
        # actual running server, implemented by OrderedServerSimpleImpl
self._o_server_impl = None
if o_server is None:
self._o_server_impl = OrderedServerSimpleImpl(
server_name + "_o_server", group
)
self.o_server = group.get_paired(server_name + "_o_server")\
.to_here()
else: # pragma: no cover
self.o_server = o_server
# pair an accessor to group
self.group.pair(server_name,
PushPullModelServer(self.model_name, self.o_server))
class ReduceType(enum.Enum):
REDUCE_PRIMARY = 0
REDUCE_SECONDARY = 1
class PushPullGradServer:
def __init__(self,
server_name: str,
group: RpcGroup,
model_name: str,
secondary_reducers: List[str],
o_server: OrderedServerBase):
self.group = group
self.model_name = model_name
self.o_server = o_server
self.secondary_services = [server_name +
"/" + m + "/_push_service"
for m in secondary_reducers]
def push(self, model: nn.Module):
"""
Push the gradients of your model, then pull the newest parameters.
Its gradients will be cleared.
Args:
model: Model to push.
"""
# extract gradients from the model
grad_dict = {}
for k, v in model.named_parameters():
if not hasattr(v, "grad") or \
not t.is_tensor(v.grad): # pragma: no cover
raise RuntimeError("Parameter {} doesn't have gradient "
"to push!".format(k))
grad_dict[k] = deepcopy(v.grad).to("cpu")
self.group.registered_sync(
choice(self.secondary_services),
args=(grad_dict, ReduceType.REDUCE_SECONDARY)
)
self.pull(model)
def pull(self, model: nn.Module):
"""
Pull the newest model. Its gradients will be cleared.
Args:
model: Model to push.
"""
model.zero_grad()
params = self.o_server.pull(self.model_name)
if params is not None:
            # params could be None if the primary reducer hasn't performed
            # a single reduction operation yet
prep_load_state_dict(model, params[0])
class PushPullGradServerImpl:
"""
A simple parameter server, which synchronize model parameters
by pushing gradients and pulling back new parameters, no strict
order is guaranteed.
Warning:
        ``DistributedDataParallel`` is not supported, since we cannot
        load a state dictionary after creation.
"""
REDUCE_MASTER = 0
REDUCE_SLAVE = 1
def __init__(self,
server_name: str,
group: RpcGroup,
model_name: str = "model",
primary_reducer: str = None,
secondary_reducers: List[str] = None,
o_server: OrderedServerBase = None,
reduce_method: str = "sum",
reduce_device: Union[t.device, str] = "cpu",
reduce_batch_size: int = 4,
max_queue_size: int = 64):
"""
Note:
        You should initialize ``PushPullGradServerImpl`` on all members of
        ``secondary_reducers`` and on ``primary_reducer``. All of them
        should be members of the ``group``.
Note:
Internally the primary reducer will push updated versions
to the ordered server.
Hint:
Reduction is performed in a tree fashion:
1. In the first step, clients will push new gradients to a
random secondary reducer, and the secondary reducer will perform
the first reduction pass, then secondary reducers will push
their results to the primary reducer.
2. In the second step, the primary reducer will reduce results
from the secondary reducer to get the final reduced gradient
dictionary (has the same structure as state_dict), and assign
gradients to its **managed model**, and perform the
optimization.
3. In the final step, the primary reducer will push the final
model to the model server group, then clients can pull the
newest model.
Args:
server_name: Name of this server, used to registered
the server as a paired class of ``group``.
group: Server group.
model_name: Name of the managed model in the ordered server,
                only needed if ``server`` needs such an identifier. The default
ordered server does not require this.
primary_reducer: Name of the process serving as the primary reducer,
which collects reduced gradients from secondary reducers and
perform the final reduction.
secondary_reducers: Name of the process serving as secondary
reducers.
o_server: Custom ordered server accessor. By default, the ordered
server is a :class:`.OrderedServerSimple` hosted on the primary
reducer.
reduce_method: "mean" or "sum"
reduce_device: Device to perform reduction, by default it is "cpu".
reduce_batch_size: Size of a single reduction batch, server will
wait until the number of requests in the reduction queue have
reached this size.
max_queue_size: Maximum reduction request queue size.
"""
self.server_name = server_name
self.group = group
self.model_name = model_name
if primary_reducer is None:
primary_reducer = group.get_group_members()[0]
assert group.is_member(primary_reducer)
assert group.is_member()
        # actual running server, implemented by OrderedServerSimpleImpl
self._o_server_impl = None
self.o_server = None
if o_server is None:
if group.get_cur_name() == primary_reducer:
self._o_server_impl = OrderedServerSimpleImpl(
server_name + "_o_server", group
)
self.o_server = OrderedServerSimple(server_name + "_o_server",
group)
else: # pragma: no cover
self.o_server = o_server
if secondary_reducers is None:
secondary_reducers = group.get_group_members()
self.primary_reducer = primary_reducer
self.primary_service = (server_name +
"/" + primary_reducer +
"/_push_service")
self.secondary_reducers = secondary_reducers
self.secondary_services = [server_name +
"/" + m + "/_push_service"
for m in secondary_reducers]
# register secondary reducer service
self.group.register(server_name + "/" + group.get_cur_name() +
"/_push_service", self._push_service)
# pair an accessor to group
if self.group.get_cur_name() == self.primary_reducer:
self.group.pair(
self.server_name,
PushPullGradServer(self.server_name, self.group,
self.model_name,
self.secondary_reducers,
self.o_server)
)
# prepare to start the reduction sub-thread
assert reduce_method in ("mean", "sum")
assert max_queue_size > 1
assert reduce_batch_size > 1
assert max_queue_size > reduce_batch_size
self.started = False
self.reduce_method = reduce_method
self.reduce_batch_size = reduce_batch_size
self.reduce_device = reduce_device
self.max_queue_size = max_queue_size
self.model = None # type: Union[nn.Module, None]
self.optimizer = None
# do not set max_queue_size here, will raise queue.Full
self.master_queue = Queue()
self.secondary_queue = Queue()
self.work_event = Event()
self.stop_event = Event()
self.reduce_task = Thread(target=self._task_reduce_grad)
self.reduce_task.daemon = True
def start(self):
if not self.started:
self.reduce_task.start()
self.started = True
def stop(self):
if self.started:
self.stop_event.set()
self.reduce_task.join()
self.stop_event.clear()
def watch(self):
self.reduce_task.watch()
def manage_model(self, model: nn.Module, optimizer: Any):
"""
Let the main reducer manage your model. Must be called before start.
Warning:
Make sure that the managed model is different from the model
you use in your algorithms such as A3C!
Args:
model: Model to manage.
optimizer: Optimizer of your model. you should initialize it first:
>>> optimizer(model.parameters(), lr=1e-3)
Raises:
``RuntimeError`` if current rpc role is not the main reducer.
"""
if self.group.get_cur_name() == self.primary_reducer:
self.model = model
self.optimizer = optimizer
self.model.pp_version = 0
else: # pragma: no cover
raise RuntimeError("Current worker is not the reduce master, and"
"cannot manage the model.")
def _push_service(self, grad_dict, level): # pragma: no cover
# Append reduce requests to queue.
if level == ReduceType.REDUCE_SECONDARY:
self.secondary_queue.put_nowait(grad_dict)
self.work_event.set()
self.work_event.clear()
elif level == ReduceType.REDUCE_PRIMARY:
self.master_queue.put_nowait(grad_dict)
self.work_event.set()
self.work_event.clear()
else: # pragma: no cover
raise ValueError("Unknown push level: {}".format(level))
def _task_reduce_grad(self):
while True:
# Wait until one queue has reached target batch size
while (self.master_queue.qsize() < self.reduce_batch_size and
self.secondary_queue.qsize() < self.reduce_batch_size):
self.work_event.wait(timeout=1e-1)
if self.stop_event.is_set():
return
# discard oldest messages
while self.master_queue.qsize() > self.max_queue_size:
self.master_queue.get()
while self.secondary_queue.qsize() > self.max_queue_size:
self.secondary_queue.get()
if self.master_queue.qsize() >= self.reduce_batch_size:
# Perform reduction on the master reduction queue
# Only the master reducer will execute this branch
grad_dict = self._reduce_batch(self.master_queue,
self.reduce_batch_size,
self.reduce_method,
self.reduce_device)
# Assign gradients to the managed model and
# perform optimization.
if self.model is not None and self.optimizer is not None:
self.optimizer.zero_grad()
with t.no_grad():
for k, v in self.model.named_parameters():
v.grad = grad_dict[k].to(v.device)
self.optimizer.step()
self.o_server.push(self.model_name,
self.model.to("cpu").state_dict(),
self.model.pp_version + 1,
self.model.pp_version)
self.model.pp_version += 1
if self.secondary_queue.qsize() >= self.reduce_batch_size:
# Perform reduction on the secondary reduction queue
# All processes(including master) in the reduction
# group will execute this branch.
grad_dict = self._reduce_batch(self.secondary_queue,
self.reduce_batch_size,
self.reduce_method,
self.reduce_device)
# Push reduced results to the master queue.
self.group.registered_sync(
self.primary_service,
args=(grad_dict, ReduceType.REDUCE_PRIMARY)
)
@staticmethod
def _reduce_batch(queue, batch_size, reduce_method, reduce_device):
"""
Perform batched gradient reduction
Returns:
Reduced gradient dictionary.
"""
batch = []
while len(batch) < batch_size:
batch.append(queue.get())
grad_dict = {}
for grad in batch:
for k, v in grad.items():
if k not in grad_dict:
grad_dict[k] = [v.to(reduce_device)]
else:
grad_dict[k].append(v.to(reduce_device))
for k, v in grad_dict.items():
# Stack parameter tensors in dim 0 and reduce.
if reduce_method == "sum":
grad_dict[k] = t.sum(t.stack(v, dim=0), dim=0, keepdim=False)
elif reduce_method == "mean":
grad_dict[k] = t.mean(t.stack(v, dim=0), dim=0, keepdim=False)
else: # pragma: no cover
raise RuntimeError("Unknown reduce method.")
return grad_dict
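
# A hedged end-to-end sketch of the tree reduction flow described in the
# PushPullGradServerImpl docstring. Names other than the classes above are
# illustrative, and ``optimizer`` is assumed to be initialized with the
# managed model's parameters already, as manage_model() requires.
def _push_pull_grad_usage_sketch(group, managed_model, optimizer, worker_model):
    server = PushPullGradServerImpl("my_grad_server", group)
    if group.get_cur_name() == server.primary_reducer:
        server.manage_model(managed_model, optimizer)
    server.start()
    accessor = group.get_paired("my_grad_server").to_here()
    accessor.pull(worker_model)  # fetch the newest parameters
    # ... backward pass fills worker_model's gradients here ...
    accessor.push(worker_model)  # push gradients, then pull updated weights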
| [
"[email protected]"
] | |
eff6edbbee741710b0632dba047bfdf05bcd4856 | a0883db90ffd673650af8ffab53c158f4cd21b32 | /venv/Lib/site-packages/win32comext/axscript/test/leakTest.py | c228f447182c3ff5f84da194484524df3b8d8f67 | [] | no_license | deshudiosh/PyMs | 3bda141378cbc0b847f19f70fe461625feed5a4b | c06749db6e7e53f96686d07f9d2b44b2f1290832 | refs/heads/master | 2020-03-26T22:11:11.574421 | 2018-08-24T16:52:48 | 2018-08-24T16:52:48 | 145,438,274 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,562 | py | import sys
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
import pythoncom
from win32com.server import util, connect
import win32com.server.policy
class MySite(axsite.AXSite):
def OnScriptError(self, error):
exc = error.GetExceptionInfo()
context, line, char = error.GetSourcePosition()
print(" >Exception:", exc[1])
try:
st = error.GetSourceLineText()
except pythoncom.com_error:
st = None
if st is None: st = ""
text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
for line in text.splitlines():
print(" >" + line)
class MyCollection(util.Collection):
def _NewEnum(self):
print("Making new Enumerator")
return util.Collection._NewEnum(self)
class Test:
_public_methods_ = [ 'echo' ]
_public_attrs_ = ['collection', 'verbose']
def __init__(self):
self.verbose = 0
self.collection = util.wrap( MyCollection( [1,'Two',3] ))
self.last = ""
# self._connect_server_ = TestConnectServer(self)
def echo(self, *args):
self.last = ''.join(map(str, args))
if self.verbose:
for arg in args:
print(arg, end=' ')
print()
# self._connect_server_.Broadcast(last)
#### Connections currently won't work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
_connect_interfaces_ = [IID_ITestEvents]
# The single public method that the client can call on us
# (ie, as a normal COM server, this exposes just this single method.
def __init__(self, object):
self.object = object
def Broadcast(self,arg):
# Simply broadcast a notification.
self._BroadcastNotify(self.NotifyDoneIt, (arg,))
def NotifyDoneIt(self, interface, arg):
interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.py.echo arg1
end sub
sub testcollection
test.py.verbose = 1
for each item in test.py.collection
test.py.echo "Collection item is", item
next
end sub
"""
if sys.version_info < (3,):
PyScript = """print "PyScript is being parsed..."\n"""
else:
PyScript = """print("PyScript is being parsed...")\n"""
PyScript += """\
prop = "Property Value"
def hello(arg1):
test.py.echo(arg1)
pass
def testcollection():
test.py.verbose = 1
# test.py.collection[1] = "New one"
for item in test.py.collection:
test.py.echo("Collection item is", item)
pass
"""
ErrScript = """\
bad code for everyone!
"""
def TestEngine(engineName, code, bShouldWork = 1):
echoer = Test()
model = {
'test.py' : util.wrap(echoer),
}
site = MySite(model)
engine = site._AddEngine(engineName)
engine.AddCode(code, axscript.SCRIPTTEXT_ISPERSISTENT)
try:
engine.Start()
finally:
if not bShouldWork:
engine.Close()
return
doTestEngine(engine, echoer)
# re-transition the engine back to the UNINITIALIZED state, a-la ASP.
engine.eScript.SetScriptState(axscript.SCRIPTSTATE_UNINITIALIZED)
engine.eScript.SetScriptSite(util.wrap(site))
print("restarting")
engine.Start()
# all done!
engine.Close()
def doTestEngine(engine, echoer):
# Now call into the scripts IDispatch
from win32com.client.dynamic import Dispatch
ob = Dispatch(engine.GetScriptDispatch())
try:
ob.hello("Goober")
except pythoncom.com_error as exc:
print("***** Calling 'hello' failed", exc)
return
if echoer.last != "Goober":
print("***** Function call didnt set value correctly", repr(echoer.last))
if str(ob.prop) != "Property Value":
print("***** Property Value not correct - ", repr(ob.prop))
ob.testcollection()
# Now make sure my engines can evaluate stuff.
result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
if result != 2:
print("Engine could not evaluate '1+1' - said the result was", result)
def dotestall():
for i in range(10):
TestEngine("Python", PyScript)
print(sys.gettotalrefcount())
## print "Testing Exceptions"
## try:
## TestEngine("Python", ErrScript, 0)
## except pythoncom.com_error:
## pass
def testall():
dotestall()
pythoncom.CoUninitialize()
print("AXScript Host worked correctly - %d/%d COM objects left alive." % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount()))
if __name__ == '__main__':
testall()
| [
"[email protected]"
] | |
d673b62e680b4d86249bac3164dfec8faef49055 | 3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9 | /SimG4CMS/ShowerLibraryProducer/python/__init__.py | 5f4692dfe95ccd32b24af996341d7813c9d43441 | [] | no_license | sextonkennedy/cmssw-ib | c2e85b5ffa1269505597025e55db4ffee896a6c3 | e04f4c26752e0775bd3cffd3a936b288ee7b0268 | HEAD | 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/SimG4CMS/ShowerLibraryProducer/',1)[0])+'/cfipython/slc6_amd64_gcc480/SimG4CMS/ShowerLibraryProducer')
| [
"[email protected]"
] | |
dce37f9b796d24bea56707b6c9e337138d39b7c8 | 22767a6d0d42b040846e024fb8f2276df89e832d | /LiDar_read3.py | f4efcdbf937325390d6a951eb4ef82d9f3266cfc | [] | no_license | hhs732/snowforest_modeling | a42bb7387ac02c864c1cc8ca88e165a40e0ba4db | 4c40d854b8c45a3614c44a33798800e232b4109a | refs/heads/master | 2020-03-21T16:36:00.416250 | 2019-07-03T22:06:46 | 2019-07-03T22:06:46 | 138,780,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,166 | py | import laspy as ls
import numpy as np
import scipy
from scipy.spatial.kdtree import KDTree
import matplotlib.pyplot as plt
class K_Means:
    def __init__(self, numOfClusters=2, init_centroids=None):
        self.numOfClusters = numOfClusters
        self.centroids = {}
        if init_centroids is None:
            # guard: the original code indexed init_centroids unconditionally
            raise ValueError("init_centroids must provide one centroid per cluster")
        for i in range(self.numOfClusters):
            self.centroids[i] = init_centroids[i]
def fit(self,data):
self.classifications = {}
for i in range(self.numOfClusters):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
def predict(self,data):
distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
#%%
infile = ls.file.File(r"lidardata\sagehen_testveg.las", mode="r")
infileGrd = ls.file.File(r"lidardata\sagehen_testGrd.las", mode="r")
# Grab all of the points from the file.
point_records = infile.points
# Grab just the X dimension from the file, and scale it.
def scaled_x_dimension(las_file):
x_dimension = las_file.X
scale = las_file.header.scale[0]
offset = las_file.header.offset[0]
return(x_dimension*scale + offset)
scaled_x = scaled_x_dimension(infile)
#%%
# Find out what the point format looks like.
pointformat = infile.point_format
for spec in infile.point_format:
print(spec.name)
#Lets take a look at the header also.
headerformat = infile.header.header_format
for spec in headerformat:
print(spec.name)
#%%
# Grab the scaled x, y, and z dimensions and stick them together in an nx3 numpy array
coords = np.vstack((infile.x, infile.y, infile.z)).T
coordsGrd = np.vstack((infileGrd.x, infileGrd.y, infileGrd.z)).T
#%% calculating the nearest neighbors of a set of points, you might want to use a highly optimized package like FLANN
dataset = np.vstack([infile.X, infile.Y, infile.Z]).T
datasetGrd = np.vstack([infileGrd.X, infileGrd.Y, infileGrd.Z]).T
#%% we're interested only in the last return from each pulse in order to do ground detection.
#We can easily figure out which points are the last return by finding out for which points return_num is equal to num_returns.
# Grab the return_num and num_returns dimensions
num_returns = infile.num_returns
return_num = infile.return_num
ground_points = infile.points[num_returns == return_num]
print("%i points out of %i were ground points." % (len(ground_points),len(infile)))
num_returnsG = infileGrd.num_returns
return_numG = infileGrd.return_num
ground_pointsGrd = infileGrd.points[num_returnsG == return_numG]
#%%
# unpack the structured last-return records into a plain (n, 3) XYZ array
groundPoints_ls = ground_points.tolist()
#groundPoints_arr = np.array(groundPoints_ls)
groundPoints_arr = []
for i in range(len(groundPoints_ls)):
    GPlist = np.array(groundPoints_ls[i])
    groundPoints_arr.append(GPlist[0, 0:3])
groundPoints_arr = np.array(groundPoints_arr)
#%%
#from mpl_toolkits.mplot3d import Axes3D
#fig = plt.figure(figsize=(20,15))
#ax = Axes3D(fig)
#ax.scatter(dataset[:, 0], dataset[:, 1], dataset[:, 2])
#plt.savefig('3DallPoints.png')
#%% implementing K-means manually
#Number of clusters
k = np.size(groundPoints_arr[:,0])
# Number of training data
n = np.size(dataset[:,0])
# Number of features in the data
#c = dataset.shape[1]
centers = groundPoints_arr.copy()
clusters = np.zeros(n)
distances = np.zeros((n,k))
# Measure the distance to every center
for i in range(k):
distances[:,i] = np.linalg.norm(dataset - centers[i], axis=1)
# Assign all training data to closest center
clusters = np.argmin(distances, axis = 1)
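#%% alternative sketch: the KDTree imported above can assign each LiDAR point
# to its nearest ground point in one query, avoiding the dense n-by-k distance
# matrix built above (kept commented out since it duplicates that result):
#tree = KDTree(centers)
#_, clusters_kd = tree.query(dataset)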
#%% new method (class) for K-means
centroids=groundPoints_arr.copy()
# instantiate a class
clf = K_Means(numOfClusters=k,init_centroids=centroids)
# fit kmean class to data
clf.fit(dataset)
# get classification
classes = clf.classifications
#%% DEM file (.tif) reading
#import gzip
#with gzip.open("lidardata\sagehen_demveg.tin.gz", 'rb') as f:
# for line in f:
# print(line)
from osgeo import gdal
demfile = gdal.Open(r"lidardata\output.tin.tif", gdal.GA_ReadOnly)
lyr = gdal.GDALDEMProcessingOptions_swigregister(demfile)
print("Driver: {}/{}".format(demfile.GetDriver().ShortName,demfile.GetDriver().LongName))
print("Size is {} x {} x {}".format(demfile.RasterXSize,demfile.RasterYSize,demfile.RasterCount))
print("Projection is {}".format(demfile.GetProjection()))
geotransform = demfile.GetGeoTransform()
if geotransform:
print("Origin = ({}, {})".format(geotransform[0], geotransform[3]))
print("Pixel Size = ({}, {})".format(geotransform[1], geotransform[5]))
band = demfile.GetRasterBand(1)
print("Band Type={}".format(gdal.GetDataTypeName(band.DataType)))
min0 = band.GetMinimum()
max0 = band.GetMaximum()
if min0 is None or max0 is None:
    # GetMinimum()/GetMaximum() return None when no statistics are cached
    (min0, max0) = band.ComputeRasterMinMax(True)
print("Min={:.3f}, Max={:.3f}".format(min0, max0))
if band.GetOverviewCount() > 0:
print("Band has {} overviews".format(band.GetOverviewCount()))
if band.GetRasterColorTable():
print("Band has a color table with {} entries".format(band.GetRasterColorTable().GetCount()))
scanline = band.ReadRaster(xoff=0, yoff=0, xsize=band.XSize, ysize=1,
buf_xsize=band.XSize, buf_ysize=1,
buf_type=gdal.GDT_Float32)
import struct
tuple_of_floats = struct.unpack('f' * band.XSize, scanline)
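# Sketch of a simpler alternative: GDAL can hand back the whole band as a 2-D
# numpy array instead of unpacking raw scanlines with struct:
#elevation = band.ReadAsArray() # shape (RasterYSize, RasterXSize)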
#Y
#Z
#intensity
#flag_byte
#raw_classification
#scan_angle_rank
#user_data
#pt_src_id
#gps_time
#file_sig ???????????????????
#file_source_id
#global_encoding
#proj_id_1 ??????????????
#proj_id_2 ????????????????
#proj_id_3 ?????????
#proj_id_4 ???????????/
#version_major
#version_minor
#system_id
#software_id
#created_day
#created_year
#header_size
#data_offset
#num_variable_len_recs
#data_format_id
#data_record_length
#point_records_count
#point_return_count
#x_scale
#y_scale
#z_scale
#x_offset
#y_offset
#z_offset
#x_max
#x_min
#y_max
#y_min
#z_max
#z_min
| [
"[email protected]"
] | |
f7cd7280a8fe0e76ea694356b8e664387c4b0dd8 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/edcps/apis/DescribeInstanceRaidRequest.py | ffe9791f476342b0cfcad38c975a790eaf25c446 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,552 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeInstanceRaidRequest(JDCloudRequest):
"""
    Query the RAID information installed on a single distributed cloud physical server, including the system-disk RAID and the data-disk RAID
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeInstanceRaidRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}:describeInstanceRaid', 'GET', header, version)
self.parameters = parameters
class DescribeInstanceRaidParameters(object):
def __init__(self, regionId, instanceId, ):
"""
        :param regionId: Region ID; the describeEdCPSRegions API lists the
            regions supported by distributed cloud physical servers
        :param instanceId: ID of the distributed cloud physical server
"""
self.regionId = regionId
self.instanceId = instanceId
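
# Minimal usage sketch (region and instance values below are placeholders):
#
#     parameters = DescribeInstanceRaidParameters("cn-north-1", "my-instance-id")
#     request = DescribeInstanceRaidRequest(parameters)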
| [
"[email protected]"
] | |
b64a06b58deef6f2abcc090b1cd40042604dfc0f | e315715504e54e2c4af04abec0e179d233a003de | /example.py | 67dd869c62199db4639d2f4c85dd184b6b038086 | [
"MIT"
] | permissive | CaliDog/tachikoma | d80ce280c33a6bf39644fb7eb6bc3af0186b4db6 | 3955ff97c14ba9747c42324cb6a2955419c6e71e | refs/heads/master | 2021-09-22T09:49:54.030468 | 2018-09-07T16:38:03 | 2018-09-07T16:38:03 | 108,040,909 | 21 | 3 | null | 2018-09-07T16:10:15 | 2017-10-23T21:20:50 | Python | UTF-8 | Python | false | false | 511 | py | import tachikoma
from tachikoma import analyzers
from tachikoma import generators
from tachikoma import emitters
pipeline = tachikoma.Pipeline(
generators={
"slack": generators.SlackGenerator(),
"aws.iam": generators.AWSACMGenerator(),
"aws.acm": generators.AWSIAMGenerator(),
},
analyzers={
"aws.*": analyzers.AllAWSAnalyzer(),
"slack": analyzers.SlackAnalyzer()
},
emitters={
"aws.*": emitters.SlackEmitter()
},
)
pipeline.execute()
| [
"[email protected]"
] | |
85160f261e1ed99c59d0e5085f797298064e8998 | dcbef06d5a00f07756339b9e62c684dec2fee425 | /nuitka/tools/specialize/SpecializeC.py | 0dd61843d7d8de62e585d9e669d6677290478921 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Nuitka/Nuitka | f9543d8d95bfa0b81d4e60af0dfad99fb72893a4 | d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2 | refs/heads/develop | 2023-08-28T14:00:32.861328 | 2023-08-27T09:16:45 | 2023-08-27T09:16:45 | 9,626,741 | 8,573 | 599 | Apache-2.0 | 2023-09-13T02:49:41 | 2013-04-23T15:40:33 | Python | UTF-8 | Python | false | false | 39,625 | py | # Copyright 2023, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This tool is generating code variants for helper codes from Jinja templates.
"""
import nuitka.Options
nuitka.Options.is_full_compat = False
# isort:start
import os
import nuitka.specs.BuiltinBytesOperationSpecs
import nuitka.specs.BuiltinDictOperationSpecs
import nuitka.specs.BuiltinListOperationSpecs
import nuitka.specs.BuiltinStrOperationSpecs
import nuitka.specs.BuiltinUnicodeOperationSpecs
from nuitka.code_generation.BinaryOperationHelperDefinitions import (
getSpecializedBinaryOperations,
parseTypesFromHelper,
)
from nuitka.code_generation.c_types.CTypePyObjectPointers import (
make_list_constant_direct_threshold,
make_list_constant_hinted_threshold,
)
from nuitka.code_generation.CallCodes import (
getQuickCallCode,
getQuickMethodCallCode,
getQuickMethodDescriptorCallCode,
getQuickMixedCallCode,
getTemplateCodeDeclaredFunction,
max_quick_call,
)
from nuitka.code_generation.ComparisonHelperDefinitions import (
getSpecializedComparisonOperations,
)
from nuitka.code_generation.ImportCodes import getImportModuleHardCodeName
from nuitka.nodes.ImportNodes import (
hard_modules,
hard_modules_non_stdlib,
hard_modules_version,
)
from nuitka.nodes.shapes.BuiltinTypeShapes import (
tshape_bool,
tshape_bytes,
tshape_dict,
tshape_int,
tshape_list,
tshape_none,
tshape_str,
tshape_tuple,
)
from nuitka.utils.Jinja2 import getTemplateC
from .Common import (
formatArgs,
getMethodVariations,
python2_dict_methods,
python2_list_methods,
python2_str_methods,
python2_unicode_methods,
python3_bytes_methods,
python3_dict_methods,
python3_list_methods,
python3_str_methods,
withFileOpenedAndAutoFormatted,
writeLine,
)
from .CTypeDescriptions import (
bytes_desc,
c_bool_desc,
c_digit_desc,
c_float_desc,
c_long_desc,
dict_desc,
float_desc,
int_desc,
list_desc,
long_desc,
n_bool_desc,
object_desc,
set_desc,
str_desc,
tuple_desc,
unicode_desc,
)
def getDoExtensionUsingTemplateC(template_name):
return getTemplateC(
package_name="nuitka.code_generation",
template_subdir="templates_c",
template_name=template_name,
extensions=("jinja2.ext.do",),
)
class AlternativeTypeBase(object):
# TODO: Base class for alternative types
pass
class AlternativeIntOrClong(AlternativeTypeBase):
# TODO: Base class for alternative type int or clong.
pass
types = (
int_desc,
str_desc,
unicode_desc,
float_desc,
tuple_desc,
list_desc,
set_desc,
dict_desc,
bytes_desc,
long_desc,
c_long_desc,
c_digit_desc,
c_float_desc,
c_bool_desc,
n_bool_desc,
object_desc,
)
def findTypeFromCodeName(code_name):
    for candidate in types:
        if candidate.getHelperCodeName() == code_name:
            return candidate

    return None
op_slot_codes = set()
# Reverse operation mapping.
reversed_args_compare_op_codes = {
"LE": "GE",
"LT": "GT",
"EQ": "EQ",
"NE": "NE",
"GT": "LT",
"GE": "LE",
}
def makeCompareSlotCode(operator, op_code, target, left, right, emit):
# Many variations to consider, pylint: disable=too-many-branches
key = operator, op_code, target, left, right
if key in op_slot_codes:
return
int_types_family = (int_desc, c_long_desc)
long_types_family = (int_desc, long_desc, c_long_desc, c_digit_desc)
float_types_family = (int_desc, long_desc, float_desc, c_long_desc, c_float_desc)
if left in int_types_family and right in int_types_family:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonInt.c.j2")
elif left in long_types_family and right in long_types_family:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonLong.c.j2")
elif left in float_types_family and right in float_types_family:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonFloat.c.j2")
elif left == int_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonInt.c.j2")
elif left == long_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonLong.c.j2")
elif left == float_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonFloat.c.j2")
elif left == tuple_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonTuple.c.j2")
elif left == list_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonList.c.j2")
# elif left == set_desc:
# template = env.get_template("HelperOperationComparisonSet.c.j2")
elif left == bytes_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonBytes.c.j2")
elif left == str_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonStr.c.j2")
elif left == unicode_desc:
template = getDoExtensionUsingTemplateC("HelperOperationComparisonUnicode.c.j2")
else:
return
assert left is not int_desc or right is not int_desc or target is not n_bool_desc
code = template.render(
operand=operator, # TODO: rename
target=target,
left=left,
right=right,
op_code=op_code,
reversed_args_op_code=reversed_args_compare_op_codes[op_code],
name=template.name,
long_desc=long_desc,
c_long_desc=c_long_desc,
c_digit_desc=c_digit_desc,
)
emit(code)
op_slot_codes.add(key)
mul_repeats = set()
def makeMulRepeatCode(target, left, right, emit):
key = right, left
if key in mul_repeats:
return
template = getDoExtensionUsingTemplateC("HelperOperationMulRepeatSlot.c.j2")
code = template.render(target=target, left=left, right=right)
emit(code)
mul_repeats.add(key)
def _getNbSlotFromOperand(operand, op_code):
# pylint: disable=too-many-branches,too-many-return-statements
if operand == "+":
return "nb_add"
elif operand == "*":
return "nb_multiply"
elif operand == "-":
return "nb_subtract"
elif operand == "//":
return "nb_floor_divide"
elif operand == "/":
if op_code == "TRUEDIV":
return "nb_true_divide"
else:
return "nb_divide"
elif operand == "%":
return "nb_remainder"
elif operand == "**":
return "nb_power"
elif operand == "<<":
return "nb_lshift"
elif operand == ">>":
return "nb_rshift"
elif operand == "|":
return "nb_or"
elif operand == "&":
return "nb_and"
elif operand == "^":
return "nb_xor"
elif operand == "@":
return "nb_matrix_multiply"
elif operand == "divmod":
return "nb_divmod"
else:
assert False, operand
def _getNbInplaceSlotFromOperand(operand, op_code):
if operand == "divmod":
return None
nb_slot = _getNbSlotFromOperand(operand, op_code)
return nb_slot.replace("nb_", "nb_inplace_")
def _parseTypesFromHelper(helper_name):
(
target_code,
left_code,
right_code,
) = parseTypesFromHelper(helper_name)
if target_code is not None:
target = findTypeFromCodeName(target_code)
else:
target = None
left = findTypeFromCodeName(left_code)
right = findTypeFromCodeName(right_code)
return target_code, target, left, right
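
# Illustrative example (the exact helper-name shape is an assumption based on
# how this file uses it): a helper named like
# "BINARY_OPERATION_ADD_OBJECT_LONG_CLONG" would yield target object_desc,
# left long_desc and right c_long_desc.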
def _parseRequirements(op_code, target, left, right, emit):
python_requirement = set()
# There is an obsolete Python2 operation too, making sure it's guarded in code.
if op_code == "OLDDIV":
python_requirement.add(int_desc.python_requirement)
if op_code == "MATMULT":
python_requirement.add("PYTHON_VERSION >= 0x350")
if target is not None and target.python_requirement:
python_requirement.add(target.python_requirement)
if left.python_requirement:
python_requirement.add(left.python_requirement)
if right.python_requirement:
python_requirement.add(right.python_requirement)
if python_requirement:
assert len(python_requirement) == 1, (target, left, right)
python_requirement = python_requirement.pop()
emit("#if %s" % python_requirement)
return python_requirement
def makeHelperOperations(
template, inplace, helpers_set, operator, op_code, emit_h, emit_c, emit
):
    # Complexity comes naturally, pylint: disable=too-many-locals
emit(
'/* C helpers for type %s "%s" (%s) operations */'
% ("in-place" if inplace else "specialized", operator, op_code)
)
emit()
for helper_name in helpers_set:
target_code, target, left, right = _parseTypesFromHelper(helper_name)
assert target is None or not inplace, helper_name
if target is None and not inplace:
assert False, target_code
python_requirement = _parseRequirements(op_code, target, left, right, emit)
emit(
'/* Code referring to "%s" corresponds to %s and "%s" to %s. */'
% (
left.getHelperCodeName(),
left.type_desc,
right.getHelperCodeName(),
right.type_desc,
)
)
if operator == "+":
sq_slot = "sq_concat"
elif operator == "*":
sq_slot = "sq_repeat"
else:
sq_slot = None
if inplace and sq_slot is not None:
sq_inplace_slot = sq_slot.replace("sq_", "sq_inplace_")
else:
sq_inplace_slot = None
code = template.render(
target=target,
left=left,
right=right,
op_code=op_code,
operator=operator,
nb_slot=_getNbSlotFromOperand(operator, op_code),
nb_inplace_slot=_getNbInplaceSlotFromOperand(operator, op_code)
if inplace
else None,
sq_slot=sq_slot,
sq_inplace_slot=sq_inplace_slot,
object_desc=object_desc,
int_desc=int_desc,
long_desc=long_desc,
float_desc=float_desc,
list_desc=list_desc,
tuple_desc=tuple_desc,
set_desc=set_desc,
str_desc=str_desc,
unicode_desc=unicode_desc,
bytes_desc=bytes_desc,
c_long_desc=c_long_desc,
c_digit_desc=c_digit_desc,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
if python_requirement:
emit("#endif")
emit()
def makeHelperComparisons(
template, helpers_set, operator, op_code, emit_h, emit_c, emit
):
# Details to look for, pylint: disable=too-many-locals
emit(
'/* C helpers for type specialized "%s" (%s) comparisons */'
% (operator, op_code)
)
emit()
for target in (object_desc, c_bool_desc):
python_requirement = _parseRequirements(
op_code, target, int_desc, int_desc, emit_c
)
makeCompareSlotCode(operator, op_code, target, int_desc, int_desc, emit_c)
if python_requirement:
emit_c("#endif")
for helper_name in helpers_set:
assert helper_name.split("_")[:2] == ["RICH", "COMPARE"], (helper_name,)
# Filter for the operation.
if helper_name.split("_")[2] != op_code:
continue
_target_code, target, left, right = _parseTypesFromHelper(helper_name)
assert target is not None, helper_name
assert left is not None, helper_name
assert right is not None, helper_name
python_requirement = _parseRequirements(op_code, target, left, right, emit)
(
code,
helper_target,
type_desc1,
type_desc2,
_operand1,
_operand2,
) = left.getTypeComparisonSpecializationHelper(
other=right,
op_code=op_code,
target=target,
operand1="operand1",
operand2="operand2",
)
if code:
makeCompareSlotCode(
operator, op_code, helper_target, type_desc1, type_desc2, emit_c
)
emit(
'/* Code referring to "%s" corresponds to %s and "%s" to %s. */'
% (
left.getHelperCodeName(),
left.type_desc,
right.getHelperCodeName(),
right.type_desc,
)
)
if not python_requirement:
is_py3_only = False
is_py2_only = False
elif python_requirement == "PYTHON_VERSION < 0x300":
is_py3_only = False
is_py2_only = True
elif python_requirement == "PYTHON_VERSION >= 0x300":
is_py3_only = True
is_py2_only = False
else:
assert False, python_requirement
code = template.render(
target=target,
left=left,
right=right,
op_code=op_code,
reversed_args_op_code=reversed_args_compare_op_codes[op_code],
operator=operator,
is_py3_only=is_py3_only,
is_py2_only=is_py2_only,
object_desc=object_desc,
int_desc=int_desc,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
if python_requirement:
emit("#endif")
emit()
def emitGenerationWarning(emit, template_name):
emit(
"/* WARNING, this code is GENERATED. Modify the template %s instead! */"
% template_name
)
def emitIDE(emit):
emit(
"""
/* This file is included from another C file, help IDEs to still parse it on its own. */
#ifdef __IDE_ONLY__
#include "nuitka/prelude.h"
#endif
"""
)
def makeHelpersComparisonOperation(operand, op_code):
specialized_cmp_helpers_set = getSpecializedComparisonOperations()
template = getDoExtensionUsingTemplateC("HelperOperationComparison.c.j2")
filename_c = "nuitka/build/static_src/HelpersComparison%s.c" % op_code.capitalize()
filename_h = "nuitka/build/include/nuitka/helper/comparisons_%s.h" % op_code.lower()
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
filename_utils = filename_c[:-2] + "Utils.c"
if os.path.exists(filename_utils):
emit_c('#include "%s"' % os.path.basename(filename_utils))
makeHelperComparisons(
template,
specialized_cmp_helpers_set,
operand,
op_code,
emit_h,
emit_c,
emit,
)
def makeHelpersBinaryOperation(operand, op_code):
specialized_op_helpers_set = getSpecializedBinaryOperations(op_code)
template = getDoExtensionUsingTemplateC("HelperOperationBinary.c.j2")
filename_c = (
"nuitka/build/static_src/HelpersOperationBinary%s.c" % op_code.capitalize()
)
filename_h = (
"nuitka/build/include/nuitka/helper/operations_binary_%s.h" % op_code.lower()
)
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
filename_utils = filename_c[:-2] + "Utils.c"
if os.path.exists(filename_utils):
emit_c('#include "%s"' % os.path.basename(filename_utils))
makeHelperOperations(
template,
False,
specialized_op_helpers_set,
operand,
op_code,
emit_h,
emit_c,
emit,
)
def makeHelpersInplaceOperation(operand, op_code):
specialized_op_helpers_set = getSpecializedBinaryOperations("I" + op_code)
template = getDoExtensionUsingTemplateC("HelperOperationInplace.c.j2")
filename_c = (
"nuitka/build/static_src/HelpersOperationInplace%s.c" % op_code.capitalize()
)
filename_h = (
"nuitka/build/include/nuitka/helper/operations_inplace_%s.h" % op_code.lower()
)
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
filename_utils = filename_c[:-2] + "Utils.c"
if os.path.exists(filename_utils):
emit_c('#include "%s"' % os.path.basename(filename_utils))
makeHelperOperations(
template,
True,
specialized_op_helpers_set,
operand,
op_code,
emit_h,
emit_c,
emit,
)
def makeHelpersImportHard():
filename_c = "nuitka/build/static_src/HelpersImportHard.c"
filename_h = "nuitka/build/include/nuitka/helper/import_hard.h"
template = getDoExtensionUsingTemplateC("HelperImportHard.c.j2")
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
for module_name in sorted(hard_modules):
makeHelperImportModuleHard(
template,
module_name,
emit_h,
emit_c,
emit,
)
def makeHelperImportModuleHard(template, module_name, emit_h, emit_c, emit):
    emit('/* C helper for hard import of module "%s". */' % module_name)
python_min_max_os_version = hard_modules_version.get(module_name)
if python_min_max_os_version is not None:
assert len(python_min_max_os_version) == 3, module_name
python_min_version, python_max_version, os_limit = python_min_max_os_version
parts = []
if python_min_version is not None:
parts.append("PYTHON_VERSION >= %s" % hex(python_min_version))
if python_max_version is not None:
parts.append("PYTHON_VERSION < %s" % hex(python_max_version))
if os_limit is not None:
parts.append({"win32": "defined(_WIN32)"}[os_limit])
python_requirement = " && ".join(parts)
else:
python_requirement = None
if python_requirement:
emit("#if %s" % python_requirement)
code = template.render(
module_name=module_name,
module_code_name=getImportModuleHardCodeName(module_name),
name=template.name,
target=object_desc,
is_stdlib=module_name not in hard_modules_non_stdlib,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
if python_requirement:
emit("#endif")
emit()
def makeHelperCalls():
filename_c = "nuitka/build/static_src/HelpersCallingGenerated.c"
filename_h = "nuitka/build/include/nuitka/helper/calling_generated.h"
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
assert args[0] != "extern "
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
template = getTemplateC(
"nuitka.code_generation", "CodeTemplateCallsPositional.c.j2"
)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
for args_count in range(max_quick_call + 1):
code = getQuickCallCode(args_count=args_count, has_tuple_arg=False)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
if args_count >= 1:
code = getQuickCallCode(args_count=args_count, has_tuple_arg=True)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
template = getTemplateC(
"nuitka.code_generation", "CodeTemplateCallsMixed.c.j2"
)
# Only keywords, but not positional arguments, via split args.
code = getQuickMixedCallCode(
args_count=0,
has_tuple_arg=False,
has_dict_values=True,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
for args_count in range(1, max_quick_call + 1):
for has_tuple_arg in (False, True):
for has_dict_values in (False, True):
# We do not do that.
if not has_dict_values and has_tuple_arg:
continue
code = getQuickMixedCallCode(
args_count=args_count,
has_tuple_arg=has_tuple_arg,
has_dict_values=has_dict_values,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
for args_count in range(1, 5):
code = getQuickMethodDescriptorCallCode(args_count=args_count)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
for args_count in range(max_quick_call + 1):
code = getQuickMethodCallCode(args_count=args_count)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
def makeHelperLists():
filename_c = "nuitka/build/static_src/HelpersListsGenerated.c"
filename_h = "nuitka/build/include/nuitka/helper/lists_generated.h"
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
assert args[0] != "extern "
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
template = getTemplateC(
"nuitka.code_generation", "CodeTemplateMakeListSmall.c.j2"
)
emitGenerationWarning(emit, template.name)
emitIDE(emit)
for args_count in range(1, make_list_constant_direct_threshold):
code = template.render(args_count=args_count)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
template = getTemplateC(
"nuitka.code_generation", "CodeTemplateMakeListHinted.c.j2"
)
for args_count in range(
make_list_constant_direct_threshold, make_list_constant_hinted_threshold
):
code = template.render(args_count=args_count)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
def _makeHelperBuiltinTypeAttributes(
type_prefix, type_name, python2_methods, python3_methods, emit_c, emit_h
):
# many cases to deal with, pylint: disable=too-many-branches
def getVarName(method_name):
return "%s_builtin_%s" % (type_prefix, method_name)
for method_name in sorted(set(python2_methods + python3_methods)):
is_public = method_name in (
"format",
"fromkeys",
)
if method_name in python2_methods and method_name not in python3_methods:
emit_c("#if PYTHON_VERSION < 0x300")
if is_public:
emit_h("#if PYTHON_VERSION < 0x300")
needs_endif = True
elif method_name not in python2_methods and method_name in python3_methods:
emit_c("#if PYTHON_VERSION >= 0x300")
if is_public:
emit_h("#if PYTHON_VERSION >= 0x300")
needs_endif = True
else:
needs_endif = False
if not is_public:
emit_c("static")
emit_c("PyObject *%s = NULL;" % getVarName(method_name))
if is_public:
emit_h("extern PyObject *%s;" % getVarName(method_name))
if needs_endif:
emit_c("#endif")
if is_public:
emit_h("#endif")
if not python3_methods:
emit_c("#if PYTHON_VERSION < 0x300")
if not python2_methods:
emit_c("#if PYTHON_VERSION >= 0x300")
emit_c("static void _init%sBuiltinMethods(void) {" % type_prefix.capitalize())
for method_name in sorted(set(python2_methods + python3_methods)):
if (
method_name in python2_methods
and method_name not in python3_methods
and python3_methods
):
emit_c("#if PYTHON_VERSION < 0x300")
needs_endif = True
elif (
method_name not in python2_methods
and method_name in python3_methods
and python2_methods
):
emit_c("#if PYTHON_VERSION >= 0x300")
needs_endif = True
else:
needs_endif = False
emit_c(
'%s = PyObject_GetAttrString((PyObject *)&%s, "%s");'
% (getVarName(method_name), type_name, method_name)
)
if needs_endif:
emit_c("#endif")
emit_c("}")
if not python2_methods or not python3_methods:
emit_c("#endif")
generate_builtin_type_operations = [
# TODO: For these, we would need an implementation for adding/deleting dictionary values. That
# has turned out to be too hard so far and these are very good friends, not doing hashing
# multiple times when reading and writing, so can't do it unless we add something for the
# Nuitka-Python eventually.
(
"tshape_dict",
dict_desc,
nuitka.specs.BuiltinDictOperationSpecs,
("pop", "popitem", "setdefault"),
),
(
"tshape_list",
list_desc,
nuitka.specs.BuiltinListOperationSpecs,
(
"pop",
# TODO: template doesn't do positional only yet.
# "sort",
),
),
# TODO: These are very complex things using "string lib" code in CPython,
# that we do not have easy access to, but we might one day for Nuitka-Python
# expose it for the static linking of it and then we could in fact call
# these directly.
(
"tshape_str",
str_desc,
nuitka.specs.BuiltinStrOperationSpecs,
(
"strip",
"rstrip",
"lstrip",
"partition",
"rpartition",
"find",
"rfind",
"index",
"rindex",
"capitalize",
"upper",
"lower",
"swapcase",
"title",
"isalnum",
"isalpha",
"isdigit",
"islower",
"isupper",
"isspace",
"istitle",
"split",
"rsplit",
"startswith",
"endswith",
"replace",
"encode",
"decode",
"count",
"expandtabs",
"translate",
"ljust",
"rjust",
"center",
"zfill",
"splitlines",
),
),
# TODO: This is using Python2 spec module for Python3 strings, that will be a problem down the
# road, when version specifics come in.
(
"tshape_unicode",
unicode_desc,
nuitka.specs.BuiltinUnicodeOperationSpecs,
(
"strip",
"rstrip",
"lstrip",
"find",
"rfind",
"index",
"rindex",
"capitalize",
"upper",
"lower",
"swapcase",
"title",
"isalnum",
"isalpha",
"isdigit",
"islower",
"isupper",
"isspace",
"istitle",
"split",
"rsplit",
"startswith",
"endswith",
"replace",
"encode",
"count",
"expandtabs",
"translate",
"ljust",
"rjust",
"center",
"zfill",
"splitlines",
),
),
(
"tshape_bytes",
bytes_desc,
nuitka.specs.BuiltinBytesOperationSpecs,
(
"capitalize",
"center",
"count",
"decode",
"endswith",
"expandtabs",
"find",
"index",
"isalnum",
"isalpha",
"isdigit",
"islower",
"isspace",
"istitle",
"isupper",
"join",
"ljust",
"lower",
"lstrip",
"partition",
"replace",
"rfind",
"rindex",
"rjust",
"rpartition",
"rsplit",
"rstrip",
"split",
"splitlines",
"startswith",
"strip",
"swapcase",
"title",
"translate",
"upper",
"zfill",
),
),
]
def makeDictCopyHelperCodes():
filename_c = "nuitka/build/static_src/HelpersDictionariesGenerated.c"
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
def emit(*args):
writeLine(output_c, *args)
emitIDE(emit)
template = getDoExtensionUsingTemplateC("HelperDictionaryCopy.c.j2")
emitGenerationWarning(emit, template.name)
code = template.render()
emit(code)
def _getCheckForShape(shape):
# Return driven for better debugging experience, pylint: disable=too-many-return-statements
if shape is tshape_str:
return "Nuitka_String_CheckExact"
elif shape is tshape_list:
return "PyList_CheckExact"
elif shape is tshape_tuple:
return "PyTuple_CheckExact"
elif shape is tshape_int:
# TODO: Not defined a version independent one yet in prelude.h
return None
elif shape is tshape_bool:
return "PyBool_Check"
elif shape is tshape_none:
# TODO: Not defined one in prelude.h yet
return None
elif shape is tshape_dict:
return "PyDict_CheckExact"
elif shape is tshape_bytes:
return "PyBytes_CheckExact"
else:
assert False, shape
def makeHelperBuiltinTypeMethods():
# Many details, pylint: disable=too-many-locals
filename_c = "nuitka/build/static_src/HelpersBuiltinTypeMethods.c"
filename_h = "nuitka/build/include/nuitka/helper/operations_builtin_types.h"
with withFileOpenedAndAutoFormatted(filename_c) as output_c:
with withFileOpenedAndAutoFormatted(filename_h) as output_h:
def emit_h(*args):
writeLine(output_h, *args)
def emit_c(*args):
writeLine(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
template = getDoExtensionUsingTemplateC("HelperBuiltinMethodOperation.c.j2")
emitGenerationWarning(emit, template.name)
emitIDE(emit)
# TODO: Isn't this creating more than necessary, we don't use all of them, e.g.
# not with lists and dicts.
_makeHelperBuiltinTypeAttributes(
"str", "PyString_Type", python2_str_methods, (), emit_c, emit_h
)
_makeHelperBuiltinTypeAttributes(
"bytes", "PyBytes_Type", (), python3_bytes_methods, emit_c, emit_h
)
_makeHelperBuiltinTypeAttributes(
"unicode",
"PyUnicode_Type",
python2_unicode_methods,
python3_str_methods,
emit_c,
emit_h,
)
_makeHelperBuiltinTypeAttributes(
"dict",
"PyDict_Type",
python2_dict_methods,
python3_dict_methods,
emit_c,
emit_h,
)
_makeHelperBuiltinTypeAttributes(
"list",
"PyList_Type",
python2_list_methods,
python3_list_methods,
emit_c,
emit_h,
)
for (
shape_name,
type_desc,
spec_module,
method_names,
) in generate_builtin_type_operations:
if type_desc.python_requirement:
emit("#if %s" % type_desc.python_requirement)
for method_name in sorted(method_names):
(
present,
arg_names,
_arg_tests,
arg_name_mapping,
arg_counts,
result_shape,
) = getMethodVariations(
spec_module=spec_module,
shape_name=shape_name,
method_name=method_name,
must_exist=True,
)
assert present, method_name
def formatArgumentDeclaration(arg_types, arg_names, starting):
return formatArgs(
[
arg_type.getVariableDecl(arg_name)
for arg_type, arg_name in zip(arg_types, arg_names)
],
starting=starting,
)
# Function is used immediately in same loop, pylint: disable=cell-var-from-loop
def replaceArgNameForC(arg_name):
if arg_name in arg_name_mapping:
arg_name = arg_name_mapping[arg_name]
if arg_name in ("default", "new", "delete"):
return arg_name + "_value"
else:
return arg_name
for arg_count in arg_counts:
variant_args = [
replaceArgNameForC(arg_name)
for arg_name in arg_names[:arg_count]
]
code = template.render(
object_desc=object_desc,
builtin_type=type_desc,
builtin_arg_name=type_desc.type_name,
method_name=method_name,
api_suffix=str(arg_count + 1)
if len(arg_counts) > 1
else "",
arg_names=variant_args,
arg_types=[object_desc] * len(variant_args),
formatArgumentDeclaration=formatArgumentDeclaration,
extra_check=_getCheckForShape(result_shape)
if result_shape is not None
else None,
zip=zip,
len=len,
name=template.name,
)
emit_c(code)
emit_h(getTemplateCodeDeclaredFunction(code))
if type_desc.python_requirement:
emit("#endif")
def main():
makeDictCopyHelperCodes()
# Cover many things once first, then cover all for quicker turnaround during development.
makeHelpersComparisonOperation("==", "EQ")
makeHelpersBinaryOperation("+", "ADD")
makeHelpersInplaceOperation("+", "ADD")
makeHelperBuiltinTypeMethods()
makeHelpersImportHard()
makeHelperCalls()
makeHelperLists()
makeHelpersBinaryOperation("-", "SUB")
makeHelpersBinaryOperation("*", "MULT")
makeHelpersBinaryOperation("%", "MOD")
makeHelpersBinaryOperation("|", "BITOR")
makeHelpersBinaryOperation("&", "BITAND")
makeHelpersBinaryOperation("^", "BITXOR")
makeHelpersBinaryOperation("<<", "LSHIFT")
makeHelpersBinaryOperation(">>", "RSHIFT")
makeHelpersBinaryOperation("//", "FLOORDIV")
makeHelpersBinaryOperation("/", "TRUEDIV")
makeHelpersBinaryOperation("/", "OLDDIV")
makeHelpersBinaryOperation("divmod", "DIVMOD")
makeHelpersBinaryOperation("**", "POW")
makeHelpersBinaryOperation("@", "MATMULT")
makeHelpersInplaceOperation("-", "SUB")
makeHelpersInplaceOperation("*", "MULT")
makeHelpersInplaceOperation("%", "MOD")
makeHelpersInplaceOperation("|", "BITOR")
makeHelpersInplaceOperation("&", "BITAND")
makeHelpersInplaceOperation("^", "BITXOR")
makeHelpersInplaceOperation("<<", "LSHIFT")
makeHelpersInplaceOperation(">>", "RSHIFT")
makeHelpersInplaceOperation("//", "FLOORDIV")
makeHelpersInplaceOperation("/", "TRUEDIV")
makeHelpersInplaceOperation("/", "OLDDIV")
makeHelpersInplaceOperation("**", "POW")
makeHelpersInplaceOperation("@", "MATMULT")
makeHelpersComparisonOperation("!=", "NE")
makeHelpersComparisonOperation("<=", "LE")
makeHelpersComparisonOperation(">=", "GE")
makeHelpersComparisonOperation(">", "GT")
makeHelpersComparisonOperation("<", "LT")
| [
"[email protected]"
] | |
e6b701c4e85ace8f8f42cbd77905813b1d824f87 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/insights/v20191017preview/get_private_link_scoped_resource.py | 0294eb716232b389cedb14f651bdf588d455d348 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,981 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetPrivateLinkScopedResourceResult',
'AwaitableGetPrivateLinkScopedResourceResult',
'get_private_link_scoped_resource',
]
@pulumi.output_type
class GetPrivateLinkScopedResourceResult:
"""
A private link scoped resource
"""
def __init__(__self__, linked_resource_id=None, name=None, provisioning_state=None, type=None):
if linked_resource_id and not isinstance(linked_resource_id, str):
raise TypeError("Expected argument 'linked_resource_id' to be a str")
pulumi.set(__self__, "linked_resource_id", linked_resource_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="linkedResourceId")
def linked_resource_id(self) -> Optional[str]:
"""
The resource id of the scoped Azure monitor resource.
"""
return pulumi.get(self, "linked_resource_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
State of the private endpoint connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateLinkScopedResourceResult(GetPrivateLinkScopedResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateLinkScopedResourceResult(
linked_resource_id=self.linked_resource_id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_link_scoped_resource(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
scope_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkScopedResourceResult:
"""
Use this data source to access information about an existing resource.
:param str name: The name of the scoped resource object.
:param str resource_group_name: The name of the resource group.
:param str scope_name: The name of the Azure Monitor PrivateLinkScope resource.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['scopeName'] = scope_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:insights/v20191017preview:getPrivateLinkScopedResource', __args__, opts=opts, typ=GetPrivateLinkScopedResourceResult).value
return AwaitableGetPrivateLinkScopedResourceResult(
linked_resource_id=__ret__.linked_resource_id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
| [
"[email protected]"
] | |
832eeb22d69bb86c317491a4cdcc67e51034ff48 | f40e5c91a18fc5c7e0b4d96fe964a493f924e958 | /supervised_learning/0x00-binary_classification/15-neural_network.py | 8741dca8460da62b04755c3d6e890d729cf38caa | [] | no_license | jgadelugo/holbertonschool-machine_learning | ab46f71477998371ca5e3623455d61fe334ab221 | e20b284d5f1841952104d7d9a0274cff80eb304d | refs/heads/master | 2023-02-01T03:52:43.723569 | 2020-12-10T19:28:57 | 2020-12-10T19:28:57 | 256,043,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,674 | py | #!/usr/bin/env python3
"""
Class defines a neural network with one hidden layer performing binary classification
"""
import numpy as np
import matplotlib.pyplot as plt
class NeuralNetwork:
""" class neuron"""
def __init__(self, nx, nodes):
""" initialize """
# nx is the number of input features
if not isinstance(nx, int):
raise TypeError("nx must be an integer")
elif nx < 1:
raise ValueError("nx must be a positive integer")
# is the number of nodes found in the hidden layer
if not isinstance(nodes, int):
raise TypeError("nodes must be an integer")
elif nodes < 1:
raise ValueError("nodes must be a positive integer")
# Hidden layer
# weights vector
self.__W1 = np.random.normal(size=(nodes, nx))
# bias
self.__b1 = np.zeros((nodes, 1))
# activated output
self.__A1 = 0
# Output neuron
# weights vector
self.__W2 = np.random.normal(size=(1, nodes))
# bias
self.__b2 = 0
# activated output(prediction)
self.__A2 = 0
@property
def W1(self):
""" weights vector for Hidden Layer """
return self.__W1
@property
def b1(self):
""" bias for Hidden Layer """
return self.__b1
@property
def A1(self):
""" activated output for Hidden Layer """
return self.__A1
@property
def W2(self):
""" Weight for Output Neuron """
return self.__W2
@property
def b2(self):
""" Bias for Output Neuron """
return self.__b2
@property
def A2(self):
""" Activated output(prediction) for Output Neuron"""
return self.__A2
def forward_prop(self, X):
""" Calculates the forward propagation of the neural network """
val1 = self.W1 @ X + self.b1
# sigmoid formula
self.__A1 = 1/(1 + np.exp(-val1))
val2 = self.W2 @ self.A1 + self.b2
# sigmoid formula
self.__A2 = 1/(1 + np.exp(-val2))
return (self.A1, self.A2)
def cost(self, Y, A):
"""Calculates the cost of the model using logistic regression"""
m = Y.shape[1]
ct = -(1 / m) * ((Y * (np.log(A))) + ((1 - Y) * np.log(1.0000001 - A)))
return ct.sum()
def evaluate(self, X, Y):
""" Evaluates the neurons predictions """
A1, A2 = self.forward_prop(X)
prob = np.where(A2 <= 0.5, 0, 1)
return (prob, self.cost(Y, A2))
def gradient_descent(self, X, Y, A1, A2, alpha=0.05):
""" Calculates one pass of gradient descent on the neuron """
dZ2 = A2 - Y
dW2 = (dZ2 @ A1.T) / X.shape[1]
db2 = np.sum(dZ2, axis=1, keepdims=True) / X.shape[1]
dZ1 = (self.W2.T @ dZ2) * (A1 - (A1 ** 2))
dW1 = (dZ1 @ X.T) / X.shape[1]
db1 = np.sum(dZ1, axis=1, keepdims=True) / X.shape[1]
self.__b1 = self.__b1 - alpha * db1
self.__W1 = self.__W1 - alpha * dW1
self.__b2 = self.__b2 - alpha * db2
self.__W2 = self.__W2 - alpha * dW2
def train(self, X, Y, iterations=5000, alpha=0.05, verbose=True,
graph=True, step=100):
""" method to train the neuron """
if isinstance(iterations, int) is False:
raise TypeError("iterations must be an integer")
if iterations <= 0:
raise ValueError("iterations must be a positive integer")
if isinstance(alpha, float) is False:
raise TypeError("alpha must be a float")
if alpha <= 0:
raise ValueError("alpha must be positive")
if verbose or graph:
if isinstance(step, int) is False:
raise TypeError("step must be an integer")
if step < 1 or step > iterations:
raise ValueError("step must be positive and <= iterations")
        costs = []
        steps = []
        for count in range(iterations + 1):
            A1, A2 = self.forward_prop(X)
            if verbose or graph:
                cost = self.cost(Y, A2)
                # Report every `step` iterations and on the final pass
                if count % step == 0 or count == iterations:
                    if verbose:
                        print("Cost after {} iterations: {}".format(count, cost))
                    if graph:
                        steps.append(count)
                        costs.append(cost)
            if count < iterations:
                self.gradient_descent(X, Y, A1, A2, alpha)
        if graph:
            plt.plot(steps, costs, 'b')
            plt.xlabel('iteration')
            plt.ylabel('cost')
            plt.title('Training Cost')
            plt.show()
        return self.evaluate(X, Y)
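if __name__ == '__main__':
    # Minimal smoke test (illustrative sketch, not part of the exercise):
    # learn a noisy linear decision boundary with 3 inputs and 5 hidden nodes.
    np.random.seed(0)
    X = np.random.randn(3, 200)
    Y = (X[0:1, :] + X[1:2, :] > 0).astype(int)
    nn = NeuralNetwork(3, 5)
    _, cost = nn.train(X, Y, iterations=200, alpha=0.5, verbose=False,
                       graph=False)
    print("Final cost: {}".format(cost))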
| [
"[email protected]"
] | |
fc39807e7990bb84b9dd55a1058e1b467a921a81 | 8b22963b0ac1581249552ed9f61e6730b0d1898f | /src/encoded/commands/dev_servers.py | e2a122a2d970812b135f6b3e7e7db1bfc68835d9 | [
"MIT"
] | permissive | brianleesc/encoded | a070e07f59c59a19220908fc7f8f71db015a1c73 | 0d2961f04cf542f78e6f29c9a08da1b2913782c5 | refs/heads/master | 2021-01-17T12:11:10.578814 | 2015-02-06T21:38:13 | 2015-02-06T21:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,334 | py | """\
Examples
For the development.ini you must supply the paster app name:
%(prog)s development.ini --app-name app --init --clear
"""
from pyramid.paster import get_app
import atexit
import logging
import os.path
import select
import shutil
import sys
EPILOG = __doc__
logger = logging.getLogger(__name__)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Run development servers", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--clear', action="store_true", help="Clear existing data")
parser.add_argument('--init', action="store_true", help="Init database")
parser.add_argument('--load', action="store_true", help="Load test set")
parser.add_argument('--datadir', default='/tmp/encoded', help="path to datadir")
args = parser.parse_args()
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
from encoded.tests import elasticsearch_fixture, postgresql_fixture
from encoded.commands import create_mapping
datadir = os.path.abspath(args.datadir)
pgdata = os.path.join(datadir, 'pgdata')
esdata = os.path.join(datadir, 'esdata')
if args.clear:
for dirname in [pgdata, esdata]:
if os.path.exists(dirname):
shutil.rmtree(dirname)
if args.init:
postgresql_fixture.initdb(pgdata, echo=True)
postgres = postgresql_fixture.server_process(pgdata, echo=True)
elasticsearch = elasticsearch_fixture.server_process(esdata, echo=True)
processes = [postgres, elasticsearch]
@atexit.register
def cleanup_process():
for process in processes:
if process.poll() is None:
process.terminate()
for process in processes:
try:
for line in process.stdout:
sys.stdout.write(line)
except IOError:
pass
process.wait()
if args.init:
app = get_app(args.config_uri, args.app_name)
create_mapping.run(app)
if args.load:
from webtest import TestApp
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = TestApp(app, environ)
from encoded.loadxl import load_all
from pkg_resources import resource_filename
inserts = resource_filename('encoded', 'tests/data/inserts/')
docsdir = [resource_filename('encoded', 'tests/data/documents/')]
load_all(testapp, inserts, docsdir)
print('Started. ^C to exit.')
stdouts = [p.stdout for p in processes]
# Ugly should probably use threads instead
while True:
readable, writable, err = select.select(stdouts, [], stdouts, 5)
for stdout in readable:
for line in iter(stdout.readline, ''):
sys.stdout.write(line)
if err:
for stdout in err:
for line in iter(stdout.readline, ''):
sys.stdout.write(line)
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
eab02dfd11097c80f771656034ab7171b20fe987 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/196/usersdata/264/76878/submittedfiles/atividade.py | 3740bbe7d78d83dbfaf5e714b13d553895fe31d1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # -*- coding: utf-8 -*-
import math
n = int(input('Digite o valor de n: '))  # "enter the value of n"
n = abs(n)  # the series below needs a positive n
s = 0.0
# Sum of the series 1/n + 2/(n-1) + 3/(n-2) + ... + n/1,
# using float division so it also works under Python 2.
for i in range(1, n + 1):
    s += float(i) / (n - i + 1)
print ('%.5f' %s) | [
"[email protected]"
] | |
aaaa1b695c26d13e55cfc7fffabe7e8d8b5ce84f | 50f04c633f36e9d64c40c4f1b434ed0c24e447c7 | /tkinter-examples/left_tab.py | 76d9d1583711f1ae8196f13e7fddc2e109f2d34b | [] | no_license | sarahchou/python-practice | 883ba7dedd60b2cc18d5d73ef7d3cbb74f09dede | 2a3d10144b74460d8ec513e3c7d49bdb48107596 | refs/heads/master | 2022-11-11T10:06:12.944579 | 2018-06-11T22:14:06 | 2018-06-11T22:14:06 | 136,985,077 | 0 | 1 | null | 2022-10-20T08:48:36 | 2018-06-11T21:54:46 | Python | UTF-8 | Python | false | false | 1,620 | py | import tkinter as tk
from tkinter import ttk
class TFEnvSelectionScreen(tk.Frame):
"""
This is where we give the user the chance to select their Terraform environment.
Uses a drop down menu for environment selection.
"""
def __init__(self, parent, controller):
root = tk.Tk()
style = ttk.Style(root)
style.configure('lefttab.TNotebook', tabposition='wn')
notebook = ttk.Notebook(root, style='lefttab.TNotebook')
f1 = tk.Frame(notebook, bg="red", width=200, height=200)
        tk.Frame.__init__(self, parent)
env_options = ['Dev', 'Stage', 'Prod']
select_type_label = tk.Label(f1, text='Select Terraform Environment:')
select_type_label.grid(row=0,sticky='w')
env_var = tk.StringVar(self)
env_menu = ttk.Combobox(f1,textvariable=env_var, values=env_options)
env_menu.grid(row=0, column=1)
env_menu.current(1)
def get_env():
print("Environment selected is: " + env_var.get())
continue_button = tk.Button(f1, text='Continue', command=get_env)
continue_button.grid(row=3, column=0, padx=10,sticky='w')
continue_button.config(width=10,fg='DodgerBlue3')
        cancel_button = tk.Button(f1, text='Cancel', command=root.destroy)
cancel_button.grid(row=3, column=1, padx=10, sticky='e')
cancel_button.config(width=10)
env_var.set('')
f2 = tk.Frame(notebook, bg="blue", width=200, height=200)
notebook.add(f1, text="Environment")
notebook.add(f2, text="Components")
notebook.pack()
root.mainloop() | [
"[email protected]"
] | |
3fbd64a3aa44722bb31c298a48bfc16e304bc51d | f55d682c106fad3e81c32e0fa5a611802ce8954a | /src/radical/pilot/agent/scheduler/continuous.py | ffe101128f097656a739860ccd206c7ed621fbcf | [
"MIT"
] | permissive | jhprinz/radical.pilot | c65f1791177d62bc4e1404d9c39afe5537862991 | 9f3dd4c96e72d18680e66df7c6f17ea732ad8769 | refs/heads/devel | 2021-04-29T05:07:24.646329 | 2016-12-21T12:12:44 | 2016-12-21T12:12:44 | 78,015,300 | 0 | 0 | null | 2017-01-04T12:53:08 | 2017-01-04T12:53:08 | null | UTF-8 | Python | false | false | 10,929 | py |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import time
import radical.utils as ru
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .base import AgentSchedulingComponent
# ==============================================================================
#
class Continuous(AgentSchedulingComponent):
# --------------------------------------------------------------------------
#
def __init__(self, cfg):
self.slots = None
AgentSchedulingComponent.__init__(self, cfg)
# --------------------------------------------------------------------------
#
def _configure(self):
if not self._lrms_node_list:
raise RuntimeError("LRMS %s didn't _configure node_list." % \
self._lrms_info['name'])
if not self._lrms_cores_per_node:
raise RuntimeError("LRMS %s didn't _configure cores_per_node." % \
self._lrms_info['name'])
# Slots represents the internal process management structure.
# The structure is as follows:
# [
# {'node': 'node1', 'cores': [p_1, p_2, p_3, ... , p_cores_per_node]},
# {'node': 'node2', 'cores': [p_1, p_2, p_3. ... , p_cores_per_node]
# ]
#
# We put it in a list because we care about (and make use of) the order.
#
self.slots = []
for node in self._lrms_node_list:
self.slots.append({
'node': node,
# TODO: Maybe use the real core numbers in the case of
# non-exclusive host reservations?
'cores': [rpc.FREE for _ in range(0, self._lrms_cores_per_node)]
})
# --------------------------------------------------------------------------
#
def slot_status(self):
"""Returns a multi-line string corresponding to slot status.
"""
slot_matrix = ""
for slot in self.slots:
slot_matrix += "|"
for core in slot['cores']:
if core == rpc.FREE:
slot_matrix += "-"
else:
slot_matrix += "+"
slot_matrix += "|"
return {'timestamp' : time.time(),
'slotstate' : slot_matrix}
# --------------------------------------------------------------------------
#
def _allocate_slot(self, cores_requested):
# TODO: single_node should be enforced for e.g. non-message passing
# tasks, but we don't have that info here.
if cores_requested <= self._lrms_cores_per_node:
single_node = True
else:
single_node = False
# Given that we are the continuous scheduler, this is fixed.
# TODO: Argument can be removed altogether?
continuous = True
# Switch between searching for continuous or scattered slots
# Switch between searching for single or multi-node
if single_node:
if continuous:
task_slots = self._find_slots_single_cont(cores_requested)
else:
raise NotImplementedError('No scattered single node scheduler implemented yet.')
else:
if continuous:
task_slots = self._find_slots_multi_cont(cores_requested)
else:
raise NotImplementedError('No scattered multi node scheduler implemented yet.')
if not task_slots:
# allocation failed
return {}
self._change_slot_states(task_slots, rpc.BUSY)
task_offsets = self.slots2offset(task_slots)
return {'task_slots' : task_slots,
'task_offsets' : task_offsets,
'lm_info' : self._lrms_lm_info}
# --------------------------------------------------------------------------
#
# Convert a set of slots into an index into the global slots list
#
def slots2offset(self, task_slots):
# TODO: This assumes all hosts have the same number of cores
first_slot = task_slots[0]
# Get the host and the core part
[first_slot_host, first_slot_core] = first_slot.split(':')
        # Find the entry in the all_slots list based on the host
slot_entry = (slot for slot in self.slots if slot["node"] == first_slot_host).next()
        # Transform it into an index into the all_slots list
all_slots_slot_index = self.slots.index(slot_entry)
return all_slots_slot_index * self._lrms_cores_per_node + int(first_slot_core)
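    # Example (illustrative numbers): with cores_per_node == 4 and slots for
    # ['node1', 'node2'], task_slots starting at 'node2:1' give the offset
    # 1 * 4 + 1 == 5 into the flattened core list.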
# --------------------------------------------------------------------------
#
def _release_slot(self, opaque_slots):
if not 'task_slots' in opaque_slots:
raise RuntimeError('insufficient information to release slots via %s: %s' \
% (self.name, opaque_slots))
self._change_slot_states(opaque_slots['task_slots'], rpc.FREE)
# --------------------------------------------------------------------------
#
# Find a needle (continuous sub-list) in a haystack (list)
#
def _find_sublist(self, haystack, needle):
n = len(needle)
# Find all matches (returns list of False and True for every position)
hits = [(needle == haystack[i:i+n]) for i in xrange(len(haystack)-n+1)]
try:
# Grab the first occurrence
index = hits.index(True)
except ValueError:
index = None
return index
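    # Example (illustrative): _find_sublist([0, 1, 1, 0], [1, 1]) == 1, the
    # index of the first window that matches the needle exactly.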
# --------------------------------------------------------------------------
#
# Transform the number of cores into a continuous list of "status"es,
# and use that to find a sub-list.
#
def _find_cores_cont(self, slot_cores, cores_requested, status):
return self._find_sublist(slot_cores, [status for _ in range(cores_requested)])
# --------------------------------------------------------------------------
#
# Find an available continuous slot within node boundaries.
#
def _find_slots_single_cont(self, cores_requested):
for slot in self.slots:
slot_node = slot['node']
slot_cores = slot['cores']
slot_cores_offset = self._find_cores_cont(slot_cores,
cores_requested, rpc.FREE)
if slot_cores_offset is not None:
# self._log.info('Node %s satisfies %d cores at offset %d',
# slot_node, cores_requested, slot_cores_offset)
return ['%s:%d' % (slot_node, core) for core in
range(slot_cores_offset, slot_cores_offset + cores_requested)]
return None
# --------------------------------------------------------------------------
#
# Find an available continuous slot across node boundaries.
#
def _find_slots_multi_cont(self, cores_requested):
# Convenience aliases
cores_per_node = self._lrms_cores_per_node
all_slots = self.slots
# Glue all slot core lists together
all_slot_cores = [core for node in [node['cores'] for node in all_slots] for core in node]
# self._log.debug("all_slot_cores: %s", all_slot_cores)
# Find the start of the first available region
all_slots_first_core_offset = self._find_cores_cont(all_slot_cores,
cores_requested, rpc.FREE)
self._log.debug("all_slots_first_core_offset: %s", all_slots_first_core_offset)
if all_slots_first_core_offset is None:
return None
# Determine the first slot in the slot list
first_slot_index = all_slots_first_core_offset / cores_per_node
self._log.debug("first_slot_index: %s", first_slot_index)
# And the core offset within that node
first_slot_core_offset = all_slots_first_core_offset % cores_per_node
self._log.debug("first_slot_core_offset: %s", first_slot_core_offset)
# Note: We subtract one here, because counting starts at zero;
# Imagine a zero offset and a count of 1, the only core used
# would be core 0.
# TODO: Verify this claim :-)
all_slots_last_core_offset = (first_slot_index * cores_per_node) +\
first_slot_core_offset + cores_requested - 1
self._log.debug("all_slots_last_core_offset: %s", all_slots_last_core_offset)
last_slot_index = (all_slots_last_core_offset) / cores_per_node
self._log.debug("last_slot_index: %s", last_slot_index)
last_slot_core_offset = all_slots_last_core_offset % cores_per_node
self._log.debug("last_slot_core_offset: %s", last_slot_core_offset)
# Convenience aliases
last_slot = self.slots[last_slot_index]
self._log.debug("last_slot: %s", last_slot)
last_node = last_slot['node']
self._log.debug("last_node: %s", last_node)
first_slot = self.slots[first_slot_index]
self._log.debug("first_slot: %s", first_slot)
first_node = first_slot['node']
self._log.debug("first_node: %s", first_node)
# Collect all node:core slots here
task_slots = []
# Add cores from first slot for this unit
# As this is a multi-node search, we can safely assume that we go
# from the offset all the way to the last core.
task_slots.extend(['%s:%d' % (first_node, core) for core in
range(first_slot_core_offset, cores_per_node)])
# Add all cores from "middle" slots
for slot_index in range(first_slot_index+1, last_slot_index):
slot_node = all_slots[slot_index]['node']
task_slots.extend(['%s:%d' % (slot_node, core) for core in range(0, cores_per_node)])
# Add the cores of the last slot
task_slots.extend(['%s:%d' % (last_node, core) for core in range(0, last_slot_core_offset+1)])
return task_slots
# --------------------------------------------------------------------------
#
# Change the reserved state of slots (rpc.FREE or rpc.BUSY)
#
def _change_slot_states(self, task_slots, new_state):
# Convenience alias
all_slots = self.slots
# logger.debug("change_slot_states: unit slots: %s", task_slots)
for slot in task_slots:
# logger.debug("change_slot_states: slot content: %s", slot)
# Get the node and the core part
[slot_node, slot_core] = slot.split(':')
# Find the entry in the the all_slots list
slot_entry = (slot for slot in all_slots if slot["node"] == slot_node).next()
# Change the state of the slot
slot_entry['cores'][int(slot_core)] = new_state
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
1591790717be37542d2bbec2c3978a7a11fec374 | 732c0303ecfe8e915548846144e2a257d0ba0bd0 | /prob139.py | dea51619317d90ea968cedd82d869736ac4078c6 | [] | no_license | mercurium/proj_euler | e2c041d833b80369f0e7b7aa493a9ff5c1e22d91 | a8326af80cac040fa515350cf9972dca6f116f82 | refs/heads/master | 2020-04-06T05:24:31.185785 | 2017-05-06T23:50:56 | 2017-05-06T23:50:56 | 9,540,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | import time
START = time.time()
SIZE = 10**8
pythagTriplet = set()
count = 0
for m in xrange(1,SIZE):
if 2*m*(m+1) > SIZE:
break
diff = 2 if m%2 == 0 else 1
nLim = SIZE/(2*m) - m
for n in xrange(1,min(m,nLim+1),diff):
d,f = m*m,n*n
a,b,c = d-f,2*m*n,d+f
if a+b+c >= SIZE:
break
if a > b:
a,b = b,a
if c %(b-a) == 0:
for k in xrange(1,SIZE/(2*(d+f))+1):
pythagTriplet.add((a*k,b*k,c*k))
print len(pythagTriplet)
print "Time Taken:", time.time()-START
"""
~/Desktop/python_projects/proj_euler $python prob139.py
10057761
Time Taken: 253.662650108 (slow, naive method)
Time Taken: 26.9965119362 (reordered the loops)
Method of attack: a = m^2-n^2,b = 2mn, c = m^2 +n^2
So we know that since we can tile the square, we have (b-a)|c.
After this, we only need to check the k's when we have a valid equation... :x
"""
| [
"[email protected]"
] | |
de019a55e97ed0bbb01a449f33d1985240fc4ea7 | c64f865810c4b81cd4f95812d3ad9ea7798bf49d | /bw2io/extractors/simapro_csv.py | 0ff29832fcda7a850a708b185224172e61eb11bc | [] | permissive | pjamesjoyce/brightway2-io | e4fc4ec218143fa90b83f1b09fc0f1f27861a83f | 142fc26e2ffc47d8ec474386ee93ab2737a089ce | refs/heads/master | 2022-04-27T14:59:37.436175 | 2020-03-12T23:44:08 | 2020-03-12T23:44:08 | 249,489,813 | 0 | 0 | BSD-3-Clause | 2022-04-13T11:10:59 | 2020-03-23T16:51:26 | null | UTF-8 | Python | false | false | 21,006 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from ..utils import activity_hash, UnicodeCSVReader, default_delimiter
from ..compatibility import SIMAPRO_BIOSPHERE, SIMAPRO_BIO_SUBCATEGORIES
from ..strategies.simapro import normalize_simapro_formulae
from bw2data import Database, databases, config
from bw2data.logs import get_io_logger, close_log
from bw2parameters import ParameterSet
from numbers import Number
from stats_arrays import *
import os
import math
import re
import uuid
INTRODUCTION = """Starting SimaPro import:
\tFilepath: %s
\tDelimiter: %s
\tName: %s
"""
SIMAPRO_TECHNOSPHERE = {
"Avoided products",
"Electricity/heat",
"Materials/fuels",
"Waste to treatment",
}
SIMAPRO_PRODUCTS = {
"Products",
"Waste treatment"
}
SIMAPRO_END_OF_DATASETS = {
"Database Calculated parameters",
"Database Input parameters",
"Literature reference",
"Project Input parameters",
"Project Calculated parameters",
"Quantities",
"Units",
}
class EndOfDatasets(Exception):
pass
def to_number(obj):
try:
return float(obj.replace(",", ".").strip())
except (ValueError, SyntaxError):
# Sometimes allocation or ref product specific as percentage
if "%" in obj:
return float(obj.replace("%", "").strip()) / 100.
try:
# Eval for simple expressions like "1/2"
return float(eval(obj.replace(",", ".").strip()))
except NameError:
# Formula with a variable which isn't in scope - raises NameError
return obj
except SyntaxError:
# Unit string like "ha a" raises a syntax error when evaled
return obj
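# Illustrative behaviour of to_number, derived from the branches above:
#   to_number("1,5")  -> 1.5      (comma decimal separator)
#   to_number("50%")  -> 0.5      (percentage)
#   to_number("2*3")  -> 6.0      (simple expression, eval'd)
#   to_number("ha a") -> "ha a"   (unit string, returned unchanged)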
# \x7f is ascii delete - where does it come from?
strip_whitespace_and_delete = lambda obj: obj.replace('\x7f', '').strip() if isinstance(obj, str) else obj
lowercase_expression = (
"(?:" # Don't capture this group
"^" # Match the beginning of the string
"|" # Or
"[^a-zA-Z_])" # Anything other than a letter or underscore. SimaPro is limited to ASCII characters
"(?P<variable>{})" # The variable name string will be substituted here
"(?:[^a-zA-Z_]|$)" # Match anything other than a letter or underscore, or the end of the line
)
def replace_with_lowercase(string, names):
"""Replace all occurrences of elements of ``names`` in ``string`` with their lowercase equivalents.
``names`` is a list of variable name strings that should already all be lowercase.
Returns a modified ``string``."""
for name in names:
expression = lowercase_expression.format(name)
for result in re.findall(expression, string, re.IGNORECASE):
if result != name:
string = string.replace(result, result.lower())
return string
class SimaProCSVExtractor(object):
@classmethod
def extract(cls, filepath, delimiter=default_delimiter(), name=None, encoding='cp1252'):
assert os.path.exists(filepath), "Can't find file %s" % filepath
log, logfile = get_io_logger("SimaPro-extractor")
log.info(INTRODUCTION % (
filepath,
repr(delimiter),
name,
))
with UnicodeCSVReader(
filepath,
encoding=encoding,
delimiter=delimiter
) as csv_file:
lines = [[strip_whitespace_and_delete(obj) for obj in line]
for line in csv_file]
# Check if valid SimaPro file
assert ('SimaPro' in lines[0][0] or
'CSV separator' in lines[0][0]), "File is not valid SimaPro export"
project_name = name or cls.get_project_name(lines)
datasets = []
project_metadata = cls.get_project_metadata(lines)
global_parameters = cls.get_global_parameters(lines, project_metadata)
index = cls.get_next_process_index(lines, 0)
while True:
try:
ds, index = cls.read_data_set(
lines,
index,
project_name,
filepath,
global_parameters,
project_metadata,
)
datasets.append(ds)
index = cls.get_next_process_index(lines, index)
except EndOfDatasets:
break
close_log(log)
return datasets, global_parameters, project_metadata
@classmethod
def get_next_process_index(cls, data, index):
while True:
try:
if data[index] and data[index][0] in SIMAPRO_END_OF_DATASETS:
raise EndOfDatasets
elif data[index] and data[index][0] == "Process":
return index + 1
except IndexError:
# File ends without extra metadata
raise EndOfDatasets
index += 1
@classmethod
def get_project_metadata(cls, data):
meta = {}
for line in data:
if not line:
return meta
elif ":" not in line[0]:
continue
if not len(line) == 1:
raise ValueError("Can't understand metadata line {}".format(line))
assert line[0][0] == "{" and line[0][-1] == "}"
line = line[0][1:-1].split(":")
key, value = line[0], ":".join(line[1:])
meta[key.strip()] = value.strip()
@classmethod
def get_global_parameters(cls, data, pm):
current, parameters = None, []
for line in data:
if not line: # Blank line, end of section
current = None
elif line[0] in {"Database Input parameters",
"Project Input parameters"}:
current = "input"
elif line[0] in {"Database Calculated parameters",
"Project Calculated parameters"}:
current = "calculated"
elif current is None:
continue
elif current == 'input':
parameters.append(cls.parse_input_parameter(line))
elif current == 'calculated':
parameters.append(cls.parse_calculated_parameter(line, pm))
else:
raise ValueError("This should never happen")
# Extract name and lowercase
parameters = {obj.pop('name').lower(): obj for obj in parameters}
# Change all formula values to lowercase if referencing global parameters
for obj in parameters.values():
if 'formula' in obj:
obj['formula'] = replace_with_lowercase(obj['formula'], parameters)
ParameterSet(parameters).evaluate_and_set_amount_field()
return parameters
@classmethod
def get_project_name(cls, data):
for line in data[:25]:
if not line:
continue
elif "{Project:" in line[0]:
return line[0][9:-1].strip()
# What the holy noodly appendage
# All other metadata in English, only this term
# translated into French‽
elif "{Projet:" in line[0]:
return line[0][9:-1].strip()
@classmethod
def invalid_uncertainty_data(cls, amount, kind, field1, field2, field3):
if (kind == "Lognormal" and (not amount or field1 == "0")):
return True
@classmethod
def create_distribution(cls, amount, kind, field1, field2, field3):
amount = to_number(amount)
if kind == "Undefined":
return {
'uncertainty type': UndefinedUncertainty.id,
'loc': amount,
'amount': amount
}
elif cls.invalid_uncertainty_data(amount, kind, field1, field2, field3):
# TODO: Log invalid data?
return {
'uncertainty type': UndefinedUncertainty.id,
'loc': amount,
'amount': amount
}
elif kind == "Lognormal":
return {
'uncertainty type': LognormalUncertainty.id,
'scale': math.log(math.sqrt(to_number(field1))),
'loc': math.log(abs(amount)),
'negative': amount < 0,
'amount': amount
}
elif kind == "Normal":
return {
'uncertainty type': NormalUncertainty.id,
'scale': math.sqrt(to_number(field1)),
'loc': amount,
'negative': amount < 0,
'amount': amount
}
elif kind == "Triangle":
return {
'uncertainty type': TriangularUncertainty.id,
'minimum': to_number(field2),
'maximum': to_number(field3),
'loc': amount,
'negative': amount < 0,
'amount': amount
}
elif kind == "Uniform":
return {
'uncertainty type': UniformUncertainty.id,
'minimum': to_number(field2),
'maximum': to_number(field3),
'loc': amount,
'negative': amount < 0,
'amount': amount
}
else:
raise ValueError("Unknown uncertainty type: {}".format(kind))
@classmethod
def parse_calculated_parameter(cls, line, pm):
"""Parse line in `Calculated parameters` section.
0. name
1. formula
2. comment
Can include multiline comment in TSV.
"""
return {
'name': line[0],
'formula': normalize_simapro_formulae(line[1], pm),
'comment': "; ".join([x for x in line[2:] if x])
}
@classmethod
def parse_input_parameter(cls, line):
"""Parse line in `Input parameters` section.
0. name
1. value (not formula)
2. uncertainty type
3. uncert. param.
4. uncert. param.
5. uncert. param.
6. hidden ("Yes" or "No" - we ignore)
7. comment
"""
ds = cls.create_distribution(*line[1:6])
ds.update({
'name': line[0],
'comment': "; ".join([x for x in line[7:] if x])
})
return ds
@classmethod
def parse_biosphere_flow(cls, line, category, pm):
"""Parse biosphere flow line.
0. name
1. subcategory
2. unit
3. value or formula
4. uncertainty type
5. uncert. param.
6. uncert. param.
7. uncert. param.
8. comment
However, sometimes the value is in index 2, and the unit in index 3. Because why not! We assume default ordering unless we find a number in index 2.
"""
unit, amount = line[2], line[3]
if isinstance(to_number(line[2]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {
'formula': normalize_simapro_formulae(amount, pm)
}
else:
ds = cls.create_distribution(amount, *line[4:8])
ds.update({
'name': line[0],
'categories': (category, line[1]),
'unit': unit,
'comment': "; ".join([x for x in line[8:] if x]),
'type': 'biosphere',
})
return ds
@classmethod
def parse_input_line(cls, line, category, pm):
"""Parse technosphere input line.
0. name
1. unit
2. value or formula
3. uncertainty type
4. uncert. param.
5. uncert. param.
6. uncert. param.
7. comment
However, sometimes the value is in index 1, and the unit in index 2. Because why not! We assume default ordering unless we find a number in index 1.
"""
unit, amount = line[1], line[2]
if isinstance(to_number(line[1]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {
'formula': normalize_simapro_formulae(amount, pm)
}
else:
ds = cls.create_distribution(amount, *line[3:7])
ds.update({
'categories': (category,),
'name': line[0],
'unit': unit,
'comment': "; ".join([x for x in line[7:] if x]),
'type': ("substitution" if category == "Avoided products"
else 'technosphere'),
})
return ds
@classmethod
def parse_final_waste_flow(cls, line, pm):
"""Parse final wate flow line.
0: name
1: subcategory?
2: unit
3. value or formula
4. uncertainty type
5. uncert. param.
6. uncert. param.
7. uncert. param.
However, sometimes the value is in index 2, and the unit in index 3. Because why not! We assume default ordering unless we find a number in index 2.
"""
unit, amount = line[2], line[3]
if isinstance(to_number(line[2]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {
'formula': normalize_simapro_formulae(amount, pm)
}
else:
ds = cls.create_distribution(amount, *line[4:8])
ds.update({
'name': line[0],
'categories': ("Final waste flows", line[1]) if line[1] \
else ("Final waste flows",),
'unit': unit,
'comment': "; ".join([x for x in line[8:] if x]),
'type': 'technosphere',
})
return ds
@classmethod
def parse_reference_product(cls, line, pm):
"""Parse reference product line.
0. name
1. unit
2. value or formula
3. allocation
4. waste type
5. category (separated by \\)
6. comment
However, sometimes the value is in index 1, and the unit in index 2. Because why not! We assume default ordering unless we find a number in index 1.
"""
unit, amount = line[1], line[2]
if isinstance(to_number(line[1]), Number):
unit, amount = amount, unit
is_formula = not isinstance(to_number(amount), Number)
if is_formula:
ds = {
'formula': normalize_simapro_formulae(amount, pm)
}
else:
ds = {
'amount': to_number(amount)
}
ds.update({
'name': line[0],
'unit': unit,
'allocation': to_number(line[3]),
'categories': tuple(line[5].split('\\')),
'comment': "; ".join([x for x in line[6:] if x]),
'type': 'production',
})
return ds
@classmethod
def parse_waste_treatment(cls, line, pm):
"""Parse reference product line.
0. name
1. unit
2. value or formula
3. waste type
4. category (separated by \\)
5. comment
"""
is_formula = not isinstance(to_number(line[2]), Number)
if is_formula:
ds = {
'formula': normalize_simapro_formulae(line[2], pm)
}
else:
ds = {
'amount': to_number(line[2])
}
ds.update({
'name': line[0],
'unit': line[1],
'categories': tuple(line[4].split('\\')),
'comment': "; ".join([x for x in line[5:] if x]),
'type': 'production',
})
return ds
@classmethod
def read_dataset_metadata(cls, data, index):
metadata = {}
while True:
if not data[index]:
pass
elif data[index] and data[index][0] in SIMAPRO_PRODUCTS:
return metadata, index
elif data[index] and data[index + 1] and data[index][0]:
metadata[data[index][0]] = data[index + 1][0]
index += 1
index += 1
@classmethod
def read_data_set(cls, data, index, db_name, filepath, gp, pm):
metadata, index = cls.read_dataset_metadata(data, index)
# `index` is now the `Products` or `Waste Treatment` line
ds = {
'simapro metadata': metadata,
'code': metadata.get('Process identifier') or uuid.uuid4().hex,
'exchanges': [],
'parameters': [],
'database': db_name,
'filename': filepath,
"type": "process",
}
while not data[index] or data[index][0] != 'End':
if not data[index] or not data[index][0]:
index += 1
elif data[index][0] in SIMAPRO_TECHNOSPHERE:
category = data[index][0]
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['exchanges'].append(
cls.parse_input_line(data[index], category, pm)
)
index += 1
elif data[index][0] in SIMAPRO_BIOSPHERE:
category = data[index][0]
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['exchanges'].append(
cls.parse_biosphere_flow(data[index], category, pm)
)
index += 1
elif data[index][0] == "Calculated parameters":
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['parameters'].append(
cls.parse_calculated_parameter(data[index], pm)
)
index += 1
elif data[index][0] == "Input parameters":
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['parameters'].append(
cls.parse_input_parameter(data[index])
)
index += 1
elif data[index][0] == "Products":
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['exchanges'].append(
cls.parse_reference_product(data[index], pm)
)
index += 1
elif data[index][0] == "Waste treatment":
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['exchanges'].append(
cls.parse_waste_treatment(data[index], pm)
)
index += 1
elif data[index][0] == "Final waste flows":
index += 1 # Advance to data lines
while index < len(data) and data[index] and data[index][0]: # Stop on blank line
ds['exchanges'].append(
cls.parse_final_waste_flow(data[index], pm)
)
index += 1
elif data[index][0] in SIMAPRO_END_OF_DATASETS:
# Don't care about processing steps below, as no dataset
# was extracted
raise EndOfDatasets
else:
index += 1
if index == len(data):
break
# Extract name and lowercase
ds['parameters'] = {obj.pop('name').lower(): obj for obj in ds['parameters']}
# Change all parameter formula values to lowercase if referencing
# global or local parameters
for obj in ds['parameters'].values():
if 'formula' in obj:
obj['formula'] = replace_with_lowercase(obj['formula'], ds['parameters'])
obj['formula'] = replace_with_lowercase(obj['formula'], gp)
# Change all exchange values to lowercase if referencing
# global or local parameters
for obj in ds['exchanges']:
if 'formula' in obj:
obj['formula'] = replace_with_lowercase(obj['formula'], ds['parameters'])
obj['formula'] = replace_with_lowercase(obj['formula'], gp)
ps = ParameterSet(
ds['parameters'],
{key: value['amount'] for key, value in gp.items()}
)
# Changes in-place
ps(ds['exchanges'])
if not ds['parameters']:
del ds['parameters']
return ds, index
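if __name__ == '__main__':
    # Illustrative command-line sketch (not part of the library): extract the
    # SimaPro CSV export named on the command line and summarize the result.
    import sys
    datasets, global_params, metadata = SimaProCSVExtractor.extract(
        sys.argv[1], name="example")
    print("{} datasets, {} global parameters".format(
        len(datasets), len(global_params)))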
| [
"[email protected]"
] | |
ca64a4a361b84710defb666ee771b4f6d6ebac6a | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/accesscontextmanager/v1alpha/resources.py | a27052377017c6cf020e34d96072355a779e411c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://accesscontextmanager.googleapis.com/v1alpha/'
DOCS_URL = 'https://cloud.google.com/access-context-manager/docs/reference/rest/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
ACCESSPOLICIES = (
'accessPolicies',
'{+name}',
{
'':
'accessPolicies/{accessPoliciesId}',
},
[u'name']
)
ACCESSPOLICIES_ACCESSLEVELS = (
'accessPolicies.accessLevels',
'{+name}',
{
'':
'accessPolicies/{accessPoliciesId}/accessLevels/'
'{accessLevelsId}',
},
[u'name']
)
ACCESSPOLICIES_ACCESSZONES = (
'accessPolicies.accessZones',
'{+name}',
{
'':
'accessPolicies/{accessPoliciesId}/accessZones/{accessZonesId}',
},
[u'name']
)
OPERATIONS = (
'operations',
'{+name}',
{
'':
'operations/{operationsId}',
},
[u'name']
)
def __init__(self, collection_name, path, flat_paths, params):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
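# Usage sketch (illustrative): each member carries its REST path template and
# required parameters, e.g.
#   Collections.ACCESSPOLICIES.path   == '{+name}'
#   Collections.ACCESSPOLICIES.params == [u'name']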
| [
"[email protected]"
] | |
6fffa9923854cd0c644f08969cf10ecff8501431 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyfile/pyfm-008/pyglfm.py | d00ac3cee42a39f6b9bc2413f059353954c0f66f | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,486 | py | #!/usr/bin/env python
# 3D File Manager in Python OpenGL
#
# Copyright by Peter Glen; Jan, 2015
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation. No representations are made about the suitability of this
# software for any purpose. It is provided "as is" without express or
# implied warranty.
#
import math, sys, time, os
import pygtk; pygtk.require('2.0')
import gtk, gtk.gtkgl
import gobject, pango
#import pyrand
import rand                      # assumed: local module providing XRand() and frand*() used below
from OpenGL.GL import *
from OpenGL.GLU import *
from pyfmutil import *
from pyfmlight import *
from py3dfont.pyfmfont import *
display_mode = \
gtk.gdkgl.MODE_RGB | gtk.gdkgl.MODE_DEPTH | gtk.gdkgl.MODE_DOUBLE
# Simple window holding the file manager's 3D space
class GLfileMan (object):
def __init__ (self):
self.display_mode = display_mode
self.initvars()
self.twinkle = 0.0
self.showstars = True
self.showgrid = True
self.showtri = not False
self.showdonut = False
self.showcubes = False
self.showrand = False
self.showtext = False
self.enable = False
self.anicount = 0
self.starcol = 0.3
self.cnt = 0
self.full = False
self.zoffset = -50
self.arr = []
self.selarr = []
#ttt = time.time()
self.xrand = rand.XRand()
#print "Random gen time", time.time() - ttt
self.stars = []
for jj in range(300):
self.stars.append( (rand.frand2(10.0), rand.frand2(10.0), \
rand.frand4(-3, -12), rand.frand(1)))
self.bigstars = []
for jj in range(200):
self.bigstars.append((rand.frand2(8.0), rand.frand2(8.0), \
rand.frand4(-3, -12), rand.frand(1) ))
self.initfonts()
self.BLACK = gtk.gdk.Color(0x0, 0x0, 0x0)
self.RED = gtk.gdk.Color(0xffff, 0x0, 0x0)
self.GREEN = gtk.gdk.Color(0x0, 0xffff, 0x0)
self.BLUE = gtk.gdk.Color(0x0, 0x0, 0xffff)
# Try to create a double buffered framebuffer, if not successful then
# attempt to create a single buffered one.
try:
self.glconfig = gtk.gdkgl.Config(mode=self.display_mode)
except gtk.gdkgl.NoMatches:
print "Switching to non double mode"
self.display_mode &= ~gtk.gdkgl.MODE_DOUBLE
self.glconfig = gtk.gdkgl.Config(mode=self.display_mode)
# Create the window for the app.
self.win = gtk.Window()
self.win.set_title('Python 3D File Manager')
if sys.platform != 'win32':
self.win.set_resize_mode(gtk.RESIZE_IMMEDIATE)
self.win.set_reallocate_redraws(True)
#self.win.set_border_width(10)
self.win.connect('destroy', lambda quit: gtk.main_quit())
# DrawingArea for OpenGL rendering.
self.glarea = gtk.gtkgl.DrawingArea(self.glconfig)
#self.glarea.set_size_request(600, 600)
www = gtk.gdk.screen_width(); hhh = gtk.gdk.screen_height();
#print "www/hhh", www, hhh
self.glarea.set_size_request(www/4, hhh/4)
self.win.set_position(gtk.WIN_POS_CENTER)
self.win.fullscreen(); self.full = True
#print gtk.gdk.screen_width(), gtk.gdk.screen_height();
self.win.set_events( gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.POINTER_MOTION_HINT_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK |
gtk.gdk.FOCUS_CHANGE_MASK )
self.win.connect("motion-notify-event", self.area_motion)
self.win.connect("key-press-event", self.area_key)
self.win.connect("key-release-event", self.area_key)
self.win.connect("button-press-event", self.area_button)
self.win.connect("button-release-event", self.area_button)
# connect to the relevant signals.
self.glarea.connect_after('realize', self.__realize)
self.glarea.connect('configure_event', self.__configure_event)
self.glarea.connect('expose_event', self.__expose_event)
self.glarea.connect('map_event', self.__map_event)
self.glarea.connect('unmap_event', self.__unmap_event)
self.glarea.connect('visibility_notify_event', self.__visibility_notify_event)
self.glarea.add_events(gtk.gdk.VISIBILITY_NOTIFY_MASK)
self.glarea.show()
self.win.add(self.glarea)
self.angle = 0.0
self.angle2 = 0.0
self.angle3 = 0.0
self.angle5 = 0.0
self.angle7 = 0.0
self.pos_y = 0.0
self.pos_y2 = 0.0
self.pos_x = 0.0
self.pos_x2 = 0.0
#self.__enable_timeout = False
self.__enable_timeout = True
self.__timeout_interval = 30
self.__timeout_id = 0
# Init system fonts
def initfonts(self):
self.font = GlSysFont(1.)
self.font.setdepth(-.2)
self.font.setfontcolor([ 0.729412, 0.723529, 0.727451, 1.0 ])
self.fontb = GlSysFont(1.)
self.fontb.setfontcolor([ 0.929412, 0.023529, 0.927451, 1.0 ])
self.font2 = GlSysFont(.5)
self.font2s = GlSysFont(.5)
self.font2s.setfontcolor([ 0.929412, 0.023529, 0.927451, 1.0 ])
self.font2s.fatfont(.6)
self.font4 = GlSysFont(.25)
self.font4.setfontcolor([ 0.929412, 0.023529, 0.027451, 1.0 ])
self.font4.setsidecolor([ 0.929412, 0.923529, 0.027451, 1.0 ])
self.font4.setdepth(.03)
#self.font4.resetfont()
self.font4i = GlSysFont(.25)
self.font4i.setfontcolor([ 0.029412, 0.923529, 0.927451, 1.0 ])
self.font4i.skewfont(.4)
self.font4b = GlSysFont(.25)
self.font4b.fatfont(1.4)
self.font4b.setfontcolor([ 0.329412, 0.023529, 0.027451, 1.0 ])
self.font4ib = GlSysFont(.25)
self.font4ib.fatfont(1.7)
self.font4ib.skewfont(.3)
self.font8 = GlSysFont(1./8)
self.font8.setfontcolor([ 0.029412, 0.923529, 0.027451, 1.0 ])
self.font12 = GlSysFont(1./12)
def initvars(self):
self.__sdepth = -15
self.__sphi = 0.0
self.__stheta = 0.0
self.__sgamma = 0.0
self.__supdown = 0
self.__sside = 0
self.angle = 0.0
self.angle2 = 0.0
self.angle3 = 0.0
self.angle5 = 0.0
self.angle7 = 0.0
def area_button(self, area, event):
if event.type == gtk.gdk.BUTTON_PRESS:
if event.button == 1:
#print "area_button", event.x, event.y
#midx = event.x - self.width / 2
#midy = event.y - self.height / 2
# Get the viewport
viewport = glGetIntegerv(GL_VIEWPORT)
width = viewport[2]; height = viewport[3]
print viewport[2], viewport[3], event.x, event.y
buff = glSelectBuffer(100)
#bs = glGetIntegerv(GL_SELECTION_BUFFER_SIZE); print "bs", bs
glMatrixMode (GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPickMatrix(event.x, viewport[3] - event.y, 2, 2, viewport);
# Apply perspective matrix
#print "viewport", viewport[2], viewport[3]
#aspect = viewport[2] / viewport[3]
aspect = float(gtk.gdk.screen_width()) / gtk.gdk.screen_height()
gluPerspective(20.0, aspect, 5.0, 160.0);
glRenderMode(GL_SELECT)
glMatrixMode (GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
self.renderscene()
glPopMatrix();
lookfor = []
self.selarr = []
res = glRenderMode(GL_RENDER)
for aa in res:
print aa.near, aa.far, aa.names
lookfor = list(aa.names)
glMatrixMode (GL_PROJECTION)
glPopMatrix();
self.selarr.append(lookfor)
#self.pick(lookfor)
#print "buff", buff
#self.arr.append((midx / 170, -midy / 170, 0, rand.frand(1) ))
#self.glarea.window.invalidate_rect(
# self.glarea.allocation, False)
#glMatrixMode (GL_PROJECTION)
#glPopMatrix();
self.invalidate()
#elif event.type == gtk.gdk._2BUTTON_PRESS:
# if event.button == 1:
#print "double", event.x, event.y
def pick(self, lookfor):
viewport = glGetIntegerv(GL_VIEWPORT)
fbbuff = glFeedbackBuffer(62000, GL_3D_COLOR)
glRenderMode(GL_FEEDBACK)
glMatrixMode (GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
# // Apply perspective matrix
aspect = float(gtk.gdk.screen_width()) / gtk.gdk.screen_height()
#aspect = viewport[2] / viewport[3]
gluPerspective(20.0, aspect, 5.0, 160.0);
glMatrixMode (GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
self.renderscene()
glPopMatrix();
glMatrixMode (GL_PROJECTION)
glPopMatrix();
res2 = None
try:
res2 = glRenderMode(GL_RENDER)
except:
a,b,c = sys.exc_info()
print sys.excepthook(a,b,c)
exit()
return
self.arr = []
found = False
for aa in res2:
#print aa
# Select polygon
if aa[0] == GL_PASS_THROUGH_TOKEN:
if aa[1] in lookfor:
print "Found:", aa
found = True
else:
found = False
continue
# Emit vertices
if found:
if aa[0] == GL_POINT_TOKEN:
print pvertex(aa[1])
if aa[0] == GL_LINE_TOKEN:
print pvertex(aa[1]),
print pvertex(aa[2])
if aa[0] == GL_POLYGON_TOKEN:
for cc in aa[1:]:
#print pvertex(cc),
self.arr.append(cc)
#print
def area_motion(self, area, event):
#print "motion event", event.state, event.x, event.y
if event.state & gtk.gdk.BUTTON1_MASK:
self.__sgamma += (self.__beginX - event.x)/10.0
self.__stheta += (self.__beginY - event.y)/4.0
self.invalidate()
elif event.state & gtk.gdk.BUTTON3_MASK:
self.__sdepth += (self.__beginY - event.y)/10.0
self.__sphi += (event.x - self.__beginX)/4.0
self.invalidate()
# Mark old positions
self.__beginX = event.x
self.__beginY = event.y
# Call key handler
def area_key(self, area, event):
#print "key event", event
if event.type == gtk.gdk.KEY_PRESS:
if event.keyval == gtk.keysyms.plus or \
event.keyval == gtk.keysyms.KP_Add :
# Zoom in.
self.__sdepth += .5
if event.keyval == gtk.keysyms.minus or \
event.keyval == gtk.keysyms.KP_Subtract :
# Zoom out.
self.__sdepth -= .5
if event.keyval == gtk.keysyms.KP_Right :
self.__sgamma -= 2
if event.keyval == gtk.keysyms.KP_Left :
self.__sgamma += 2
if event.keyval == gtk.keysyms.Left :
self.__sside += .5
if event.keyval == gtk.keysyms.Right :
self.__sside -= .5
if event.keyval == gtk.keysyms.Up :
self.__supdown -= .5
if event.keyval == gtk.keysyms.Down :
self.__supdown += .5
if event.keyval == gtk.keysyms.KP_Up :
self.__stheta += 2
if event.keyval == gtk.keysyms.KP_Down :
self.__stheta -= 2
if event.keyval == gtk.keysyms.KP_Page_Up :
self.__sphi -= 2
if event.keyval == gtk.keysyms.KP_Page_Down :
self.__sphi += 2
if event.keyval == gtk.keysyms.x or \
event.keyval == gtk.keysyms.X:
if event.state & gtk.gdk.MOD1_MASK:
self.__timeout_remove()
area.destroy()
if event.keyval == gtk.keysyms.f or \
event.keyval == gtk.keysyms.F:
if event.state & gtk.gdk.MOD1_MASK:
self.win.fullscreen(); self.full = True
if event.keyval == gtk.keysyms.t or \
event.keyval == gtk.keysyms.T:
self.toggle_animation()
if event.keyval == gtk.keysyms.u or \
event.keyval == gtk.keysyms.U:
if event.state & gtk.gdk.MOD1_MASK:
self.win.unfullscreen(); self.full = False
if event.keyval == gtk.keysyms.r or \
event.keyval == gtk.keysyms.R or \
event.keyval == gtk.keysyms.Home or \
event.keyval == gtk.keysyms.KP_Home :
# Reset
self.initvars()
if event.keyval == gtk.keysyms.F11:
if self.full == True:
self.win.unfullscreen(); self.full = False
else:
self.win.fullscreen(); self.full = True
if event.keyval == gtk.keysyms.a:
self.showrand = not self.showrand
if event.keyval == gtk.keysyms.c:
self.showcubes = not self.showcubes
if event.keyval == gtk.keysyms.d:
self.showdonut = not self.showdonut
if event.keyval == gtk.keysyms.i:
self.showtri = not self.showtri
if event.keyval == gtk.keysyms.s :
self.showstars = not self.showstars
if event.keyval == gtk.keysyms.g :
self.showgrid = not self.showgrid
self.invalidate()
# --------------------------------------------------------------------
def __realize(self, widget):
#print "realize"
gldrawable = widget.get_gl_drawable()
glcontext = widget.get_gl_context()
# OpenGL begin.
if not gldrawable.gl_begin(glcontext):
return
light(self)
gldrawable.gl_end()
# OpenGL end
# --------------------------------------------------------------------
def __configure_event(self, widget, event):
self.width = widget.allocation.width
self.height = widget.allocation.height
gldrawable = widget.get_gl_drawable()
glcontext = widget.get_gl_context()
# OpenGL begin.
if not gldrawable.gl_begin(glcontext):
return
glViewport (0, 0, self.width, self.height)
glMatrixMode (GL_PROJECTION)
glLoadIdentity ()
#viewport = glGetIntegerv(GL_VIEWPORT)
#print "_configure", viewport[2], viewport[3]
#aspect = viewport[2] / viewport[3]
# We need screen aspect, not window
aspect = float(gtk.gdk.screen_width()) / gtk.gdk.screen_height()
gluPerspective(20.0, aspect, 5.0, 160.0);
gldrawable.gl_end()
# --------------------------------------------------------------------
def __expose_event(self, widget, event):
gldrawable = widget.get_gl_drawable()
glcontext = widget.get_gl_context()
if not gldrawable.gl_begin(glcontext):
print "no drawable state"
return
glViewport (0, 0, self.width, self.height)
#print "_expose", self.width, self.height
# OpenGL begin.
glMatrixMode (GL_MODELVIEW)
glPushMatrix()
glLoadIdentity ()
glRenderMode(GL_RENDER)
self.renderscene(True)
glPopMatrix()
if gldrawable.is_double_buffered():
gldrawable.swap_buffers()
else:
glFlush()
gldrawable.gl_end()
# OpenGL end
# --------------------------------------------------------------------
def renderscene(self, showall = False ):
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH)
glInitNames()
glPushName(0)
ss = float(self.__sside) / 100
ud = float(self.__supdown) / 100
gluLookAt(ss, ud, 0, 0,0,-1, 0,1,0)
glTranslatef (0.0, 0.0, self.__sdepth)
glRotatef(-self.__stheta, 1.0, 0.0, 0.0)
glRotatef(self.__sphi, 0.0, 0.0, 1.0)
glRotatef(self.__sgamma, 0.0, 1.0, 0.0)
if 0: #self.anicount < 40:
ver = "PYTHON"
xstr = "FILE MANAGER"
#glRotatef (self.anicount / 2, 1.0, 0.0, 0.0)
#glRotatef (self.anicount/6, 0.0, 1.0, 0.0)
glPushMatrix ()
hoffset = -self.font.extent3Dstr(ver)[0]/2
offset = 0; self.zoffset += 1
glPushMatrix ()
offset -= self.font.FontHeight + self.font.linegap
glTranslatef (hoffset, offset, self.zoffset)
self.font.print3Dstr(ver)
glPopMatrix ()
glPushMatrix ()
offset -= self.font.FontHeight + self.font.linegap
offset -= self.font.FontHeight + self.font.linegap
hoffset = -self.font.extent3Dstr(xstr)[0]/2
glTranslatef (hoffset, offset, self.zoffset)
self.font.print3Dstr(xstr)
glPopMatrix ()
glPopMatrix ()
else:
try:
if self.showdonut:
donut(self)
if self.showcubes:
cubes(self)
if self.showrand:
randtri(self)
if self.showtri:
stuff(self)
if self.showtext:
text(self)
if self.showstars:
stars(self)
if self.showgrid:
grid(self)
except:
                a, b, c = sys.exc_info(); print sys.excepthook(a, b, c)
                return  # the bare 'exit' here was a no-op; just abort this render pass
#raise SystemExit
if 1: #showall:
style = self.glarea.get_style()
self.gc = style.fg_gc[gtk.STATE_NORMAL]
gcx = gtk.gdk.GC(self.glarea.window); gcx.copy(self.gc)
gcr = gtk.gdk.GC(self.glarea.window); gcr.copy(self.gc)
colormap = gtk.widget_get_default_colormap()
gcr.set_foreground(colormap.alloc_color("#ff0000"))
for vv in self.arr:
aa,bb,cc = vv.vertex
aa=int(aa); bb=int(bb); cc=int(cc)
bb = self.height-bb
dd = 5
self.glarea.window.draw_line(gcr, aa-dd, bb-dd, aa+dd, bb+dd)
self.glarea.window.draw_line(gcr, aa+dd, bb-dd, aa-dd, bb+dd)
#self.win.window.draw_line(gcr, bb, cc, bb+dd, cc+dd)
if 0:
global pylonarr
for vv in self.selarr:
if pylonarr.id in vv:
for vvv in pylonarr.arr:
for vv in vvv.verts:
glPushMatrix()
glRotatef (self.angle7, 0.0, 1.0, 0.0)
glTranslatef (vv._x, vv._y, vv._z)
gtk.gdkgl.draw_cube (True, 0.03)
glPopMatrix()
# --------------------------------------------------------------------
def __timeout_callback(self, widget):
self.anicount += 1
# Pre start animation
if self.anicount < 42:
self.glarea.window.invalidate_rect(self.glarea.allocation, False)
#print self.glarea.allocation
self.cnt += 1
if self.cnt >= 1000:
self.cnt = 0
if not self.enable:
return True
self.angle += 1.0
if (self.angle >= 360.0):
self.angle2 -= 360.0
self.angle2 += 2.0
if (self.angle2 >= 360.0):
self.angle2 -= 360.0
self.angle3 += 3.0
if (self.angle >= 360.0):
self.angle -= 360.0
self.angle5 += 5.0
if (self.angle5 >= 360.0):
self.angle5 -= 360.0
self.angle7 += 7.0
if (self.angle7 >= 360.0):
self.angle7 -= 360.0
if self.cnt % 50 == 0:
self.twinkle += 1
if self.twinkle % 2 == 0:
self.starcol += 0.02
else:
self.starcol -= 0.02
t = self.angle * math.pi / 180.0
if t > math.pi:
t = 2.0 * math.pi - t
t2 = self.angle * math.pi / 180.0
self.pos_y = 2.0 * (math.sin (t) + 0.4 * math.sin (3.0*t)) - 1.0
self.pos_y2 = 2.0 * (math.sin (t))
self.pos_x = 2.0 * (math.sin (t/2)) - 1
self.pos_x2 = 2.0 * (math.sin (t2/2)) - 1
# Invalidate whole window.
self.glarea.window.invalidate_rect(self.glarea.allocation, False)
# Update window synchronously (fast).
#self.glarea.window.process_updates(False)
return True
def __timeout_add(self):
if self.__timeout_id == 0:
self.__timeout_id = gobject.timeout_add(self.__timeout_interval,
self.__timeout_callback,
self.glarea)
def __timeout_remove(self):
if self.__timeout_id != 0:
gobject.source_remove(self.__timeout_id)
self.__timeout_id = 0
def __map_event(self, widget, event):
if self.__enable_timeout:
self.__timeout_add()
return True
def __unmap_event(self, widget, event):
self.__timeout_remove()
return True
def __visibility_notify_event(self, widget, event):
if self.__enable_timeout:
if event.state == gtk.gdk.VISIBILITY_FULLY_OBSCURED:
self.__timeout_remove()
else:
self.__timeout_add()
return True
def toggle_animation(self):
#self.__enable_timeout = not self.__enable_timeout;
#if self.__enable_timeout:
# self.__timeout_add()
#else:
# self.__timeout_remove()
# self.glarea.window.invalidate_rect(self.glarea.allocation,
# False)
self.enable = not self.enable
def invalidate(self):
if self.glarea.window:
self.glarea.window.invalidate_rect(
self.glarea.allocation, False)
    def run(self):
self.win.show()
gtk.main()
if __name__ == '__main__':
glapp = GLfileMan()
glapp.run()
| [
"[email protected]"
] | |
5204f8362a9e26be1b475cbda137fdacdc7ac103 | 030b1c1293ae3d31867839883484c6c990bedf6c | /data_utils/prepare_training_data.py | a616eec25fa6b0a9ed456cfcf8af398a3e86e854 | [] | no_license | nmonath/Coref-tf | 7427cabf9835deb8aab2c9519dbd4ed3134f9634 | 5ee8e240b55e14f95bb3691349092ded3ffedcc0 | refs/heads/master | 2022-11-17T23:19:31.768971 | 2020-07-19T08:44:50 | 2020-07-19T08:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,112 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import json
import collections
REPO_PATH = "/".join(os.path.realpath(__file__).split("/")[:-2])
if REPO_PATH not in sys.path:
sys.path.insert(0, REPO_PATH)
from utils import util
from data_utils import conll
from bert import tokenization
class DocumentState(object):
def __init__(self, key):
self.doc_key = key
self.sentence_end = []
self.token_end = []
self.tokens = []
self.subtokens = []
self.info = []
self.segments = []
self.subtoken_map = []
self.segment_subtoken_map = []
self.sentence_map = []
self.clusters = collections.defaultdict(list)
self.coref_stacks = collections.defaultdict(list)
self.speakers = []
self.segment_info = []
def finalize(self):
# finalized: segments, segment_subtoken_map
# populate speakers from info
for segment in self.segment_info:
speakers = []
for i, tok_info in enumerate(segment):
# if tok_info is None and (i == 0 or i == len(segment) - 1):
# speakers.append('[SPL]')
if tok_info is None:
speakers.append(speakers[-1])
else:
speakers.append(tok_info[9])
self.speakers += [speakers]
# populate sentence map
# populate clusters
first_subtoken_index = -1
for seg_idx, segment in enumerate(self.segment_info):
speakers = []
for i, tok_info in enumerate(segment):
first_subtoken_index += 1
coref = tok_info[-2] if tok_info is not None else '-'
if coref != "-":
last_subtoken_index = first_subtoken_index + tok_info[-1] - 1
for part in coref.split("|"):
if part[0] == "(":
if part[-1] == ")":
cluster_id = int(part[1:-1])
self.clusters[cluster_id].append((first_subtoken_index, last_subtoken_index))
else:
cluster_id = int(part[1:])
self.coref_stacks[cluster_id].append(first_subtoken_index)
else:
cluster_id = int(part[:-1])
start = self.coref_stacks[cluster_id].pop()
self.clusters[cluster_id].append((start, last_subtoken_index))
# merge clusters
merged_clusters = []
for c1 in self.clusters.values():
existing = None
for m in c1:
for c2 in merged_clusters:
if m in c2:
existing = c2
break
if existing is not None:
break
if existing is not None:
print("Merging clusters (shouldn't happen very often.)")
existing.update(c1)
else:
merged_clusters.append(set(c1))
merged_clusters = [list(c) for c in merged_clusters]
all_mentions = util.flatten(merged_clusters)
sentence_map = get_sentence_map(self.segments, self.sentence_end)
subtoken_map = util.flatten(self.segment_subtoken_map)
assert len(all_mentions) == len(set(all_mentions))
num_words = len(util.flatten(self.segments))
assert num_words == len(util.flatten(self.speakers))
assert num_words == len(subtoken_map), (num_words, len(subtoken_map))
assert num_words == len(sentence_map), (num_words, len(sentence_map))
return {
"doc_key": self.doc_key,
"sentences": self.segments,
"speakers": self.speakers,
"constituents": [],
"ner": [],
"clusters": merged_clusters,
'sentence_map':sentence_map,
"subtoken_map": subtoken_map
}
def normalize_word(word, language):
if language == "arabic":
word = word[:word.find("#")]
if word == "/." or word == "/?":
return word[1:]
else:
return word
def split_into_segments(document_state, max_segment_len, constraints1, constraints2):
current = 0
previous_token = 0
while current < len(document_state.subtokens):
end = min(current + max_segment_len - 1 - 2 - 1, len(document_state.subtokens) - 1)
while end >= current and not constraints1[end]:
end -= 1
if end < current:
end = min(current + max_segment_len - 1 - 2 - 1 , len(document_state.subtokens) - 1)
while end >= current and not constraints2[end]:
end -= 1
if end < current:
                raise Exception("Can't find a valid segment")
document_state.segments.append( document_state.subtokens[current:end + 1])
subtoken_map = document_state.subtoken_map[current : end + 1]
document_state.segment_subtoken_map.append(subtoken_map)
info = document_state.info[current : end + 1]
document_state.segment_info.append(info)
current = end + 1
previous_token = subtoken_map[-1]
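# Splitting rule in brief: each window holds at most max_segment_len subtokens
# (minus room reserved for special tokens) and is cut at the latest sentence
# end (constraints1) that fits; if none fits, it falls back to the latest
# word-piece boundary (constraints2) so a token is never split across segments.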
def get_sentence_map(segments, sentence_end):
current = 0
sent_map = []
sent_end_idx = 0
assert len(sentence_end) == sum([len(s) for s in segments])
for segment in segments:
for i in range(len(segment) ):
sent_map.append(current)
current += int(sentence_end[sent_end_idx])
sent_end_idx += 1
return sent_map
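# Worked example (toy values, not from the corpus):
#   segments     = [['I', 'saw', '.'], ['Go', '.']]
#   sentence_end = [False, False, True, False, True]
#   get_sentence_map(segments, sentence_end) -> [0, 0, 0, 1, 1]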
def get_document(document_lines, tokenizer, language, segment_len):
document_state = DocumentState(document_lines[0])
word_idx = -1
for line in document_lines[1]:
row = line.split()
sentence_end = len(row) == 0
if not sentence_end:
assert len(row) >= 12
word_idx += 1
word = normalize_word(row[3], language)
subtokens = tokenizer.tokenize(word)
document_state.tokens.append(word)
document_state.token_end += ([False] * (len(subtokens) - 1)) + [True]
for sidx, subtoken in enumerate(subtokens):
document_state.subtokens.append(subtoken)
info = None if sidx != 0 else (row + [len(subtokens)])
document_state.info.append(info)
document_state.sentence_end.append(False)
document_state.subtoken_map.append(word_idx)
else:
document_state.sentence_end[-1] = True
constraints1 = document_state.sentence_end if language != 'arabic' else document_state.token_end
split_into_segments(document_state, segment_len, constraints1, document_state.token_end)
stats["max_sent_len_{}".format(language)] = max(max([len(s) for s in document_state.segments]), stats["max_sent_len_{}".format(language)])
document = document_state.finalize()
return document
def skip(doc_key):
# if doc_key in ['nw/xinhua/00/chtb_0078_0', 'wb/eng/00/eng_0004_1']: #, 'nw/xinhua/01/chtb_0194_0', 'nw/xinhua/01/chtb_0157_0']:
# return True
return False
def minimize_partition(name, language, extension, labels, stats, tokenizer, seg_len, input_dir, output_dir):
input_path = "{}/{}.{}.{}".format(input_dir, name, language, extension)
output_path = "{}/{}.{}.{}.jsonlines".format(output_dir, name, language, seg_len)
count = 0
print("Minimizing {}".format(input_path))
documents = []
with open(input_path, "r") as input_file:
for line in input_file.readlines():
begin_document_match = re.match(conll.BEGIN_DOCUMENT_REGEX, line)
if begin_document_match:
doc_key = conll.get_doc_key(begin_document_match.group(1), begin_document_match.group(2))
documents.append((doc_key, []))
elif line.startswith("#end document"):
continue
else:
documents[-1][1].append(line)
with open(output_path, "w") as output_file:
for document_lines in documents:
if skip(document_lines[0]):
continue
document = get_document(document_lines, tokenizer, language, seg_len)
output_file.write(json.dumps(document))
output_file.write("\n")
count += 1
print("Wrote {} documents to {}".format(count, output_path))
def minimize_language(language, labels, stats, vocab_file, seg_len, input_dir, output_dir, do_lower_case):
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
minimize_partition("dev", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
minimize_partition("train", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
minimize_partition("test", language, "v4_gold_conll", labels, stats, tokenizer, seg_len, input_dir, output_dir)
if __name__ == "__main__":
# python3 prepare_training_data.py
vocab_file = "/xiaoya/pretrain_ckpt/cased_L-12_H-768_A-12/vocab.txt" # sys.argv[1]
input_dir = "/xiaoya/data" # sys.argv[2]
output_dir = "/xiaoya/corefqa_data" # sys.argv[3]
do_lower_case = False # sys.argv[4].lower() == 'true'
print(do_lower_case)
labels = collections.defaultdict(set)
stats = collections.defaultdict(int)
for seg_len in [64, 128, 384, 512]:
minimize_language("english", labels, stats, vocab_file, seg_len, input_dir, output_dir, do_lower_case)
# minimize_language("chinese", labels, stats, vocab_file, seg_len)
# minimize_language("es", labels, stats, vocab_file, seg_len)
# minimize_language("arabic", labels, stats, vocab_file, seg_len)
for k, v in labels.items():
print("{} = [{}]".format(k, ", ".join("\"{}\"".format(label) for label in v)))
for k, v in stats.items():
print("{} = {}".format(k, v))
| [
"[email protected]"
] | |
781b936bd6b7a6d6bca1a2cd172a259c68c9c05d | 368b89707805e8ac6d5baf26a11b280369995ca5 | /src/server/db/__init__.py | a67a6f3951ae05d2b8532be1e5c10e3a9612a4c5 | [] | no_license | cheng93/fof | 2940d739c1399c88db06b5c99e8075e918fbf060 | 0115a6d3f6775c5c1c8a30cfd7f6f7f9006e75fe | refs/heads/develop | 2020-03-19T06:55:20.274365 | 2018-07-25T22:14:11 | 2018-07-25T22:14:11 | 136,067,138 | 0 | 0 | null | 2018-07-25T22:14:12 | 2018-06-04T18:33:35 | Python | UTF-8 | Python | false | false | 489 | py | from db.conference import conference
from db.division import division
from db.draft import draft
from db.game import game
from db.player import player
from db.player_history import player_history
from db.position import position
from db.team import team
from db.staff import staff
from db.staff_group import staff_group
from db.staff_history import staff_history
from db.staff_role import staff_role
from db.stage import stage
from db.stage_type import stage_type
from db.year import year
| [
"[email protected]"
] | |
c749d8f7dbf564892ee366d7c3c6f7047ba94386 | 03dad0fb0d76b61524ab172e342e3e4ec22614d7 | /blender/arm/assets.py | cad343ada758334d3d277a06a79765bf10c75dff | [
"GPL-2.0-only",
"Zlib"
] | permissive | ceostevenjrogers/armory | 1739f1ddec20d7c720baaa7fd4952d14872c375a | c50a086d244dc2acac102ba91cb33e4b47bf40be | refs/heads/master | 2020-04-09T11:17:06.347466 | 2018-12-02T15:45:43 | 2018-12-02T15:45:43 | 160,304,146 | 1 | 0 | Zlib | 2018-12-04T05:33:45 | 2018-12-04T05:33:44 | null | UTF-8 | Python | false | false | 5,194 | py | import shutil
import os
import stat
import bpy
import arm.utils
assets = []
reserved_names = ['return.']
khafile_defs = []
khafile_defs_last = []
embedded_data = []
shaders = []
shaders_last = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_passes_assets = {}
shader_cons = {}
def reset():
global assets
global khafile_defs
global khafile_defs_last
global embedded_data
global shaders
global shaders_last
global shaders_external
global shader_datas
global shader_passes
global shader_cons
assets = []
khafile_defs_last = khafile_defs
khafile_defs = []
embedded_data = []
shaders_last = shaders
shaders = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_cons = {}
shader_cons['mesh_vert'] = []
shader_cons['depth_vert'] = []
shader_cons['depth_frag'] = []
shader_cons['voxel_vert'] = []
shader_cons['voxel_frag'] = []
shader_cons['voxel_geom'] = []
def add(file):
global assets
if file in assets:
return
base = os.path.basename(file)
for f in assets:
if f.endswith(base):
print('Armory Warning: Asset name "{0}" already exists, skipping'.format(base))
return
assets.append(file)
# Reserved file name
for f in reserved_names:
if f in file:
print('Armory Warning: File "{0}" contains reserved keyword, this will break C++ builds!'.format(file))
def add_khafile_def(d):
global khafile_defs
if d not in khafile_defs:
khafile_defs.append(d)
def add_embedded_data(file):
global embedded_data
if file not in embedded_data:
embedded_data.append(file)
def add_shader(file):
global shaders
global shaders_last
if file not in shaders:
shaders.append(file)
def add_shader_data(file):
global shader_datas
if file not in shader_datas:
shader_datas.append(file)
def add_shader_pass(data_name):
global shader_passes
# Shader data for passes are written into single shader_datas.arm file
add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')
if data_name not in shader_passes:
shader_passes.append(data_name)
def add_shader_external(file):
global shaders_external
shaders_external.append(file)
name = file.split('/')[-1].split('\\')[-1]
add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name)
invalidate_enabled = True # Disable invalidating during build process
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def invalidate_shader_cache(self, context):
# compiled.inc changed, recompile all shaders next time
global invalidate_enabled
if invalidate_enabled == False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Shaders'):
shutil.rmtree(fp + '/compiled/Shaders', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/html5-resources'):
shutil.rmtree(fp + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/krom-resources'):
shutil.rmtree(fp + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/krom-resources'):
shutil.rmtree(fp + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/windows-resources'):
shutil.rmtree(fp + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/linux-resources'):
shutil.rmtree(fp + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/osx-resources'):
shutil.rmtree(fp + '/osx-resources', onerror=remove_readonly)
def invalidate_compiled_data(self, context):
global invalidate_enabled
if invalidate_enabled == False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled'):
shutil.rmtree(fp + '/compiled', onerror=remove_readonly)
def invalidate_mesh_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/meshes'):
shutil.rmtree(fp + '/compiled/Assets/meshes', onerror=remove_readonly)
def invalidate_envmap_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/envmaps'):
shutil.rmtree(fp + '/compiled/Assets/envmaps', onerror=remove_readonly)
def invalidate_unpacked_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/unpacked'):
shutil.rmtree(fp + '/compiled/Assets/unpacked', onerror=remove_readonly)
def shader_equal(sh, ar, shtype):
# Merge equal shaders
for e in ar:
if sh.is_equal(e):
sh.context.data[shtype] = e.context.data[shtype]
sh.is_linked = True
return
ar.append(sh)
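# Usage sketch: callers pass a context holding a freshly built shader plus one
# of the cache lists, e.g. vs_equal(con, shader_cons['mesh_vert']), so that a
# context compiling an identical vertex shader is relinked to the first copy
# instead of writing a duplicate into the exported shader data.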
def vs_equal(c, ar):
shader_equal(c.vert, ar, 'vertex_shader')
def fs_equal(c, ar):
shader_equal(c.frag, ar, 'fragment_shader')
def gs_equal(c, ar):
shader_equal(c.geom, ar, 'geometry_shader')
def tcs_equal(c, ar):
shader_equal(c.tesc, ar, 'tesscontrol_shader')
def tes_equal(c, ar):
shader_equal(c.tese, ar, 'tesseval_shader')
| [
"[email protected]"
] | |
b7461bb69fa37381ee883fe28d2995c0f9a596d9 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_check_spline4.py | 594c19bac098d7a9a6471e37e50d007ad70eca84 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from xcp2k.inputsection import InputSection
from _each309 import _each309
class _check_spline4(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each309()
self._name = "CHECK_SPLINE"
self._keywords = {'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Log_print_key': 'LOG_PRINT_KEY', 'Add_last': 'ADD_LAST', 'Filename': 'FILENAME'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
| [
"[email protected]"
] | |
149c01d58071fd26c34db132b17aa214201a0652 | 3bcb4cba2caa77acf7e418455c29074d06553f7e | /scripts/files-touched-check.py | dc8077d357f74af375dae10523da758a621226c1 | [] | no_license | eleccoin/gitian.sigs | 7c02be84ee2dbf05334b1863c05e0f860ee5f0d8 | 056e34324642b28659d7b47832115bd3358b17fa | refs/heads/master | 2021-07-15T23:36:46.830658 | 2021-02-11T11:18:58 | 2021-02-11T11:18:58 | 237,335,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import sys
import subprocess
travis_commit_range = os.getenv('TRAVIS_COMMIT_RANGE')
if not travis_commit_range:
print("Travis commit range is empty, exiting...")
sys.exit(1)
try:
result = subprocess.check_output(['git', 'diff', '--no-commit-id', '--name-status', '-r', travis_commit_range])
except Exception as e:
print(e.output)
raise e
files_added = result.decode('utf-8').splitlines()
print(files_added)
subdir_name = ""
for file_added in files_added:
file_added = file_added.split(maxsplit=1)
# Exclude certain files from some checks
excluded_files = ['README.md', '.travis.yml', '.gitattributes', 'scripts/extract-sig.py', 'scripts/files-touched-check.py']
if file_added[1] in excluded_files:
print("Warning: modified non-gitian file", file_added[1])
continue
# Fail if file isn't a gitian file
if not file_added[1].endswith(".assert") and not file_added[1].endswith(".assert.sig"):
print("Error: file type is not valid:", file_added[1])
sys.exit(1)
# Check that files are only added, not modified or deleted
if file_added[0] != 'A':
print("Error: modified or removed existing file:", file_added[1])
sys.exit(1)
# Check that files added are only added to a single subdirectory name
if file_added[1].count('/') >= 1:
directories = file_added[1].split('/')
current_subdir = directories[1]
if not subdir_name:
subdir_name = current_subdir
if subdir_name != current_subdir:
print("Error: files added to multiple subdirectories. Already seen", subdir_name, "got", file_added[1])
sys.exit(1)
# Check if directory depth is accurate
if len(directories) != 3:
print("Error: Directory depth is not 3")
sys.exit(1)
# Check if directory structures match excepcted
if not directories[0].endswith(('-linux', '-osx-signed', '-osx-unsigned', '-win-signed', '-win-unsigned')):
print("Error: top directory name is not valid:", directories[0])
sys.exit(1)
else:
print("Error: unhandled file in pull request:", file_added[1])
sys.exit(1)
sys.exit(0)
| [
"[email protected]"
] | |
5ea95e780de03641bc3fa55a4a4d96545eb0e332 | a1f6290c078b3d9bd004c777972ce4d5bc8af749 | /IVote/.history/app_20211026134219.py | 716bafce93fc77cff60084c9051fa852e2da6c98 | [] | no_license | CS699-IITB-Autumn-2021/project-alpha_team | 2803b99b49dcfe6f1acdcdf768791d58e0441d05 | d3a7105d6d0d702d4b31a80a331b3772a03f2428 | refs/heads/master | 2023-08-19T17:32:01.401161 | 2021-10-27T19:14:08 | 2021-10-27T19:14:08 | 413,135,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,419 | py | from posixpath import lexists
import sqlite3
import os.path
from flask import Flask, render_template, request,redirect,session
from flask.helpers import url_for
from datetime import date
from datetime import datetime
from pathlib import Path
from werkzeug.utils import redirect
from generateResult import generateResults
app = Flask(__name__)
app.secret_key="ivote"
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS Voters(name TEXT,email TEXT,cardno TEXT,password TEXT,voted TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS admin(email TEXT,password TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS election(electionid INTEGER,topic TEXT,startdate TEXT,enddate TEXT,numcand INTEGER,candidate TEXT,ended Text)")
c.execute("CREATE TABLE IF NOT EXISTS candidate(name TEXT,electionid INTEGER,candidateid TEXT,age INTEGER,mobno INTEGER,email TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS result(election_id Text,cand_id Text, noofvotes Number)")
c.execute("SELECT electionid FROM election")
r = c.fetchall()
for i in r:
fle = Path("static/blockchain/"+str(i[0])+".txt")
c.execute("CREATE TABLE IF NOT EXISTS election"+str(i[0])+"(secret_code TEXT ,name_of_blockchain TEXT,voter_id TEXT,vote_given TEXT)")
fle.touch(exist_ok=True)
f = open(fle)
conn.commit()
conn.close()
@app.route('/',methods=['GET','POST'])
def login():
r = ""
if request.method=="POST":
email = request.form["email"]
password = request.form["password"]
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * FROM Voters WHERE email='"+email+"' and password='"+password+"'")
r = c.fetchall()
for i in r:
if email==i[1] and password == i[3]:
#session[]
return redirect(url_for("voter"))
return render_template('home.html')
@app.route('/signup.html',methods=['GET','POST'])
def signup():
if request.method=="POST":
name = request.form["name"]
email = request.form["email"]
cardno = request.form["id"]
password = request.form["password"]
confirm = request.form["confirm"]
if password==confirm:
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("INSERT INTO Voters VALUES('"+name+"','"+email+"','"+cardno+"','"+password+"')")
conn.commit()
conn.close()
return render_template('login.html')
return render_template('signup.html')
@app.route('/Login.html',methods=['GET','POST'])
def adminlogin():
r = ""
if request.method=="POST":
email = request.form["email"]
password = request.form["password"]
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * FROM admin WHERE email='"+email+"' and password='"+password+"'")
r = c.fetchall()
for i in r:
if email==i[0] and password == i[1]:
return redirect(url_for("admin"))
return render_template('Login.html')
@app.route('/forgotPassword.html',methods=['GET','POST'])
def forgot():
return render_template('forgotPassword.html')
@app.route('/admin.html',methods = ['GET','POST'])
def admin():
msg = None
if request.method=="POST":
id = request.form['id']
topic = request.form['topic']
start = request.form['startdate']
end = request.form['enddate']
numcand = request.form['numcand']
select = request.form['select']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r)>=1:
msg = "Election with this id already exist"
        else:
c.execute("INSERT INTO election VALUES('"+id+"','"+topic+"','"+start+"','"+end+"','"+numcand+"','"+select+",'T'')")
conn.commit()
conn.close()
msg = "Election created"
return render_template('admin.html',msg = msg)
@app.route("/addcandidate.html",methods = ['GET','POST'])
def add():
if request.method=="POST":
name = request.form['name1']
id = request.form['id']
candid = request.form['candid']
age = request.form['age']
mobile = request.form['mobile']
email = request.form['email']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("INSERT INTO candidate VALUES('"+name+"','"+id+"','"+candid+"','"+age+"','"+mobile+"','"+email+"')")
conn.commit()
conn.close()
return render_template('addcandidate.html')
@app.route("/results.html",methods=['GET','POST'])
def result():
msg = None
print("Working")
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
print("Working")
return redirect(url_for("viewresults",id = id))
else:
msg = "Please enter correct ID"
return render_template('results.html',msg = msg)
@app.route("/election",methods=['GET','POST'])
def election():
id = request.form.get("id",None)
return render_template('election.html')
@app.route("/voter.html",methods=['GET','POST'])
def voter():
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
return redirect(url_for("election",id = id))
return render_template('voter.html')
@app.route("/voterresult.html")
def results():
msg = None
print("Working")
if request.method=="POST":
id = request.form['id']
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT * from election WHERE electionid = '"+id+"'")
r = c.fetchall()
if len(r) >= 1:
print("Working")
return redirect(url_for("viewresults",id = id))
else:
msg = "Please enter correct ID"
return render_template("voterresult.html",msg=msg)
@app.route("/view",methods=["GET","POST"])
def viewresults():
    id = request.args.get('id') or request.form.get('id')  # id arrives as a query parameter from the redirect
print(id)
return render_template("view.html")
@app.route("/logout")
def logout():
return redirect(url_for("login "))
@app.route("/genResult.html",methods=["GET","POST"])
def genresult():
msg=""
if request.method=="POST":
GR = generateResults()
id = request.form.get('id',None)
msg=GR.genResult(id)
print(msg)
return render_template("genResult.html",msg=msg)
@app.route("/viewblockchain.html")
def viewblockchain():
conn = sqlite3.connect("ivote.db")
c = conn.cursor()
c.execute("SELECT electionid FROM election")
r = c.fetchall()
allbc=[]
for i in r:
fle = Path("static/blockchain/"+str(i[0])+".txt")
allbc.append("static/blockchain/"+str(i[0])+".txt")
fle.touch(exist_ok=True)
f = open(fle)
conn.commit()
conn.close()
return render_template('viewblockchain.html',allbc=allbc,r=r)
if __name__=="__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
626020df48e5f7b97d39391e1c4073a4c6431329 | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2S/2S-2L_wat_20Abox/set_1ns_equi.py | 312c2ffe7eec84ca46369451358d2b944b6fa62c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2S/wat_20Abox/ti_one-step/2S_2L/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
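# These lambda windows appear to be the standard 12-point Gaussian-quadrature
# schedule used for AMBER one-step TI.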
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../2S-2L_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
a1c9ce48603c67ab62bd83e7a1b5276abec33b83 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03026/s122350460.py | 02e08d83c023e291595149871e806ce417d61dfc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #D問題
import heapq
N = int(input())
AB = [[] for i in range(N)]
for i in range(N-1):
a,b = map(int,input().split())
a-=1
b-=1
AB[a].append(b)
AB[b].append(a)
C = list(map(int,input().split()))
C.sort(reverse=True)
var = [0 for i in range(N)]
var[0] = C[0]
Q = []
heapq.heappush(Q,0)
ind = 1
for i in range(N-1):
q = heapq.heappop(Q)
for j in AB[q]:
if var[j] == 0:
var[j] = C[ind]
ind+=1
heapq.heappush(Q,j)
print(sum(C)-C[0])
for v in var:
print(v,end=" ")
| [
"[email protected]"
] | |
910c11f006b429ae64433b34f69b78eb30c952c9 | cb0bde8ab641d5e411e91477728ade090836b729 | /sdk/python/pulumi_azure_nextgen/datashare/v20200901/list_share_subscription_source_share_synchronization_settings.py | fc3e394afb17f91b49ec35a8c117a7a0f398233d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | rchamorro/pulumi-azure-nextgen | 7debd444063f0f9810ac0ee5fe11e7e8913b4886 | 09987cba1c466657730a23f5083aa62ec3dc8247 | refs/heads/master | 2023-03-03T09:32:59.634185 | 2021-02-10T16:13:24 | 2021-02-10T16:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,670 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListShareSubscriptionSourceShareSynchronizationSettingsResult',
'AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult',
'list_share_subscription_source_share_synchronization_settings',
]
@pulumi.output_type
class ListShareSubscriptionSourceShareSynchronizationSettingsResult:
"""
List response for get source share Synchronization settings
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
The Url of next result page.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Sequence['outputs.ScheduledSourceSynchronizationSettingResponseResult']:
"""
Collection of items of type DataTransferObjects.
"""
return pulumi.get(self, "value")
class AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(ListShareSubscriptionSourceShareSynchronizationSettingsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListShareSubscriptionSourceShareSynchronizationSettingsResult(
next_link=self.next_link,
value=self.value)
def list_share_subscription_source_share_synchronization_settings(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_subscription_name: Optional[str] = None,
skip_token: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The name of the share account.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
:param str skip_token: Continuation token
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareSubscriptionName'] = share_subscription_name
__args__['skipToken'] = skip_token
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:datashare/v20200901:listShareSubscriptionSourceShareSynchronizationSettings', __args__, opts=opts, typ=ListShareSubscriptionSourceShareSynchronizationSettingsResult).value
return AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(
next_link=__ret__.next_link,
value=__ret__.value)
| [
"[email protected]"
] | |
f5e052b7c50e268c48c740cf42af38e32e675ace | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02399/s909319325.py | 32d9c6ecb2bc6766c355125c62d79130e5abb413 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | (a,b) = (int(i) for i in input().split())
d = a // b
r = a % b
f = a / b
print('{0} {1} {2:.8f}'.format(d,r,f)) | [
"[email protected]"
] | |
c4bf1aaa0079951e7f08ba9f8ba36f87a8665688 | 7b1a5db0a067766a9805fe04105f6c7f9ff131f3 | /pysal/lib/cg/ops/_accessors.py | fee7fd6a8618795c68306552da8fcaf9a7104661 | [] | permissive | ocefpaf/pysal | 2d25b9f3a8bd87a7be3f96b825995a185624e1d0 | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | refs/heads/master | 2020-06-26T17:13:06.016203 | 2019-07-31T19:54:35 | 2019-07-31T19:54:35 | 199,696,188 | 0 | 0 | BSD-3-Clause | 2019-07-30T17:17:19 | 2019-07-30T17:17:18 | null | UTF-8 | Python | false | false | 1,368 | py | import functools as _f
__all__ = [ 'area', 'bbox', 'bounding_box', 'centroid', 'holes', 'len',
'parts', 'perimeter', 'segments', 'vertices']
def get_attr(df, geom_col='geometry', inplace=False, attr=None):
outval = df[geom_col].apply(lambda x: x.__getattribute__(attr))
if inplace:
        outcol = 'shape_{}'.format(attr)  # 'func' is undefined in this scope; 'attr' is the intended name
df[outcol] = outval
return None
return outval
_doc_template =\
"""
Tabular accessor to grab a geometric object's {n} attribute
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
geom_col: string
the name of the column in df containing the geometry
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived column will be under 'shape_{n}'
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a series.
See Also
---------
For further documentation about the attributes of the object in question, refer
to shape classes in pysal.cg.shapes
"""
_accessors = dict()
for k in __all__:
_accessors[k] = _f.partial(get_attr, attr=k)
_accessors[k].__doc__ = _doc_template.format(n=k)
globals().update(_accessors)
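# Hypothetical usage (dataframe name assumed):
#   area(df)                # returns a Series of areas from df['geometry']
#   area(df, inplace=True)  # instead writes a 'shape_area' column into df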
| [
"[email protected]"
] | |
1f2497cd5546b8b5809730d42af84ca132224a9b | daa5a7e8deaa5b2b1db76a907077a8c1bb3313b2 | /problem14/p14.py | f9e25df205ff783be0670d4bc40b485f8cc2bca3 | [] | no_license | janFrancoo/Project-Euler | 175933ca643ccca42cf1b7a27cc49694fe22da5c | 24f7d913939883786aaf68f485b31eda99f657b3 | refs/heads/master | 2020-06-19T19:11:35.681184 | 2019-11-23T06:54:21 | 2019-11-23T06:54:21 | 196,838,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | def chain_length(number, limit, chain):
length = 1
copyNum = number
while number != 1:
if number < limit:
if chain[number] > 0:
                length += chain[number] - 1  # chain[number] already counts this value, which length includes too
break
if number % 2 == 0:
number = number // 2
else:
number = (number * 3) + 1
length += 1
chain[copyNum] = length
return length
def find_longest_chain(limit):
chain = [0] * limit
max = 0
for num in range(1, limit):
length = chain_length(num, limit, chain)
if length > max:
max = length
perfectNum = num
return perfectNum
print(find_longest_chain(1000000))
| [
"[email protected]"
] | |
77c7244b91eb34417f48b52335e55d62f077c237 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03210/s465817633.py | de6f12b053fb956866a6a4cb9c9b6a6c7f24de52 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | import sys
from bisect import *
from heapq import *
from collections import *
from itertools import *
from functools import *
from math import *
from fractions import *
sys.setrecursionlimit(100000000)
input = lambda: sys.stdin.readline().rstrip()
def main():
print('YES' if int(input()) in [3, 5, 7] else 'NO')
main()
| [
"[email protected]"
] | |
5ef6fb29abe779f4c919e528eceadcae6e5b6d6c | f0fefeafdc4836fa76c5ccd493a40470839a61de | /core/virtualNetworkFunction.py | 193ceb2a03572f715a42de65de2f5257881fcb2a | [] | no_license | wuyangzhang/vnf | bb94b43bc29e78f8e218b4c0da7b32f12682c1e9 | 26e524f4efa5f161dac071169448cb7bef810cdd | refs/heads/master | 2022-11-10T02:25:53.743598 | 2020-06-19T19:41:44 | 2020-06-19T19:41:44 | 271,430,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | import random
import uuid
class VirtualNetworkFunction:
def __init__(self, name, cpu, mem, thr):
self.id = uuid.uuid4()
self.name = name
self.CPU = cpu # count
self.memory = mem # GB
self.throughput = thr # Mbps
self.attached_server = None
def __str__(self):
        return 'name: {}, requested CPU num: {}, requested memory: {} GB, throughput: {} Mbps'.format(self.name, self.CPU, self.memory, self.throughput)
def attach_server(self, server):
self.attached_server = server
@staticmethod
def get_random_vnf():
return random.choice([VirtualNetworkFunction.vnf1(),
VirtualNetworkFunction.vnf2(),
VirtualNetworkFunction.vnf3(),
VirtualNetworkFunction.vnf4(),
VirtualNetworkFunction.vnf5(),
VirtualNetworkFunction.vnf6(),
VirtualNetworkFunction.vnf7(),
VirtualNetworkFunction.vnf8()])
@staticmethod
def vnf1():
return VirtualNetworkFunction('firewall_small', 4, 2, 100)
@staticmethod
def vnf2():
return VirtualNetworkFunction('firewall_normal', 4, 8, 200)
@staticmethod
def vnf3():
return VirtualNetworkFunction('firewall_large', 4, 8, 400)
@staticmethod
def vnf4():
return VirtualNetworkFunction('IDS', 4, 6, 80)
@staticmethod
def vnf5():
return VirtualNetworkFunction('IPSec_normal', 4, 4, 268)
@staticmethod
def vnf6():
return VirtualNetworkFunction('IPSec_large', 4, 8, 580)
@staticmethod
def vnf7():
return VirtualNetworkFunction('wan_opt_normal', 2, 2, 10)
@staticmethod
def vnf8():
return VirtualNetworkFunction('wan_opt_large', 2, 4, 50)
if __name__ == '__main__':
vnf = VirtualNetworkFunction.get_random_vnf()
print(vnf) | [
"[email protected]"
] | |
1f4c242409eb31a5e3cf9e347891200845218a79 | c33496682b760deac61fedecba3e82ce4e41dfde | /scripts/e284.py | 12aa868641e9ef20219b57ffb0df9a540a6225c2 | [
"MIT"
] | permissive | ferasalsaab/neuralnilm_prototype | c5e9cde02d475ac499b15fea62143e76adff07d0 | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | refs/heads/master | 2020-04-16T14:38:03.615279 | 2018-01-29T15:30:43 | 2018-01-29T15:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,751 | py | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2013-07-01"),
seq_length=512,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
# subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
1000: 5e-05,
2000: 1e-05,
3000: 5e-06,
4000: 1e-06,
10000: 5e-07,
50000: 1e-07
},
plotter=MDNPlotter
)
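# learning_rate_changes_by_iteration above appears to schedule stepwise decay:
# from iteration 100 updates use 5e-4, from 500 they use 1e-4, and so on.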
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': RecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': RecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5517778c89fa3a66841d7073b230af2f660c2348 | c652797f5303bb7102967fc6603e5704025afb36 | /gamelayer/boilerplates/scene/extension.py | 3fec054ba472343048494727bd1b360db95fb06e | [
"MIT"
] | permissive | Windspar/Gamelayer | fc1ce499cccb6530a4dcd446f9d86fd44026e564 | 65e1cf11548bc02bc49348eb265c209172c14844 | refs/heads/master | 2022-06-13T08:06:37.828771 | 2020-05-07T17:17:59 | 2020-05-07T17:17:59 | 258,047,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py |
class Extension:
def __init__(self):
self._logic = []
self._extension = []
def add(self, callback):
self._extension.append(callback)
def add_logic(self, callback):
self._logic.append(callback)
def process(self, manager):
for extension in self._extension:
extension(manager)
def process_logic(self, manager):
for extension in self._logic:
extension(manager)
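# Usage sketch (names assumed): an owning scene would do
#   ext = Extension(); ext.add(on_draw); ext.add_logic(on_update)
# and then call ext.process(manager) every frame and
# ext.process_logic(manager) on each logic tick.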
| [
"[email protected]"
] | |
12f9e2c63739664ce40db1a2bc6707196ddec657 | 2119953dd04916fa2adf3f42a487f3f9754d1f66 | /modules/google-earth-engine/docker/src/sepal/image_operation.py | 67fc4e0360c44bc318732e200a493abe58beb22a | [
"MIT"
] | permissive | sarahwertz/sepal | 91d12e3317cd07ad4c99469d5b6211d74013b330 | efbbc33ac99db332fc13f9dfd4c777a8d2c1b41e | refs/heads/master | 2020-06-11T07:42:08.835556 | 2019-05-27T14:21:28 | 2019-05-27T14:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | import ee
import math
class ImageOperation(object):
def __init__(self, image):
super(ImageOperation, self).__init__()
self.image = image
self.input_band_names = image.bandNames()
def select(self, name):
return self.image.select(name)
def set(self, name, toAdd, args={}):
toAdd = self.toImage(toAdd, args)
self.image = self.image.addBands(toAdd.rename([name]), None, True)
def setIf(self, name, condition, trueValue, args={}):
self.setIfElse(name, condition, trueValue, name, args)
def setIfElse(self, name, condition, trueValue, falseValue, args={}):
self.set(name,
self.toImage(falseValue, args)
.where(self.toImage(condition, args), self.toImage(trueValue, args)))
def setAll(self, image):
# Replace bands in source image, to ensure all image properties are preserved
self.image = self.image.addBands(image, None, True)
def invertMask(self, mask):
return mask.multiply(-1).add(1)
def toImage(self, band, args={}):
if isinstance(band, basestring):
if band.find('.') > -1 or band.find(' ') > -1 or band.find('{') > -1:
band = self.image.expression(self.format(band, args), {'i': self.image})
else:
band = self.image.select(band)
return ee.Image(band)
def format(self, s, args={}):
if not args:
args = {}
allArgs = self.merge({'pi': math.pi}, args)
result = str(s).format(**allArgs)
if result.find('{') > -1:
            return self.format(result, args)  # recurse until no '{...}' placeholders remain
return result
def isMasked(self, band):
return self.toImage(band).mask().reduce('min').eq(0)
def updateMask(self, condition):
self.image = self.image.updateMask(self.toImage(condition))
def merge(self, o1, o2):
return dict(list(o1.iteritems()) + list(o2.iteritems()))
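# Illustrative usage sketch (hypothetical band names; assumes `image` is an
# ee.Image with 'nir' and 'red' bands):
#   op = ImageOperation(image)
#   op.set('ndvi', '(i.nir - i.red) / (i.nir + i.red)')
#   op.setIfElse('vegetation', 'i.ndvi > 0.3', 1, 0)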
| [
"[email protected]"
] | |
5af9e7c4d040127cae9671591ab150e165fef3b5 | 354d28102374bf51bc6e74dd8d952dc036925356 | /user_guide/src/examples/pivot/lazy.py | 8ce4ef07d645efa05a0f472e162dd43319fe4fae | [] | no_license | simonw/polars-book | 8c30c3707716ea1134a5a92e938055bcffd84b36 | 841f86dcc0f7c338de5eb1b34efbc405922c74ef | refs/heads/master | 2023-08-30T10:05:35.360224 | 2021-11-14T07:06:36 | 2021-11-14T07:06:36 | 428,919,663 | 0 | 0 | null | 2021-11-17T05:27:07 | 2021-11-17T05:27:06 | null | UTF-8 | Python | false | false | 144 | py | from .dataset import df
q = df.lazy().map(lambda df: df.groupby("foo").pivot(pivot_column="bar", values_column="N").first())
out = q.collect()
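# Note: LazyFrame.map runs the eager function on the fully materialised
# DataFrame at that point in the plan, so the optimiser typically cannot push
# predicates or projections through the groupby/pivot/first step.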
| [
"[email protected]"
] | |
84465ec3badfad506dd593f543fb4098424ac9eb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_organists.py | be4d1d8e2ad43f99e39a4fc06c52700fa9cc7b7c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._organist import _ORGANIST
#class header
class _ORGANISTS(_ORGANIST, ):
def __init__(self,):
_ORGANIST.__init__(self)
self.name = "ORGANISTS"
self.specie = 'nouns'
self.basic = "organist"
self.jsondata = {}
| [
"[email protected]"
] | |
61948e3b28ade065e37d16bf8131afe690b1ce63 | 5e381364c2ab31ff3618369085afffba6caa8edb | /recipes/fakeit/all/conanfile.py | 1cd82fc64805d459038f563de4e2470341513252 | [
"MIT"
] | permissive | CAMOBAP/conan-center-index | 16aea68a6d22da22831ba985773125e8eda08f00 | 67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1 | refs/heads/master | 2023-07-30T08:58:57.285571 | 2021-10-02T14:57:54 | 2021-10-02T14:57:54 | 323,262,699 | 1 | 0 | MIT | 2021-05-29T13:37:04 | 2020-12-21T07:30:02 | Python | UTF-8 | Python | false | false | 2,011 | py | from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
class FakeItConan(ConanFile):
name = "fakeit"
license = "MIT"
homepage = "https://github.com/eranpeer/FakeIt"
url = "https://github.com/conan-io/conan-center-index"
description = "C++ mocking made easy. A simple yet very expressive, headers only library for c++ mocking."
topics = ("mock", "fake", "spy")
settings = "compiler"
options = {
"integration": ["boost", "catch", "cute", "gtest", "mettle", "nunit", "mstest", "qtest", "standalone", "tpunit"]
}
default_options = {"integration": "standalone"}
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def requirements(self):
if self.options.integration == "boost":
self.requires("boost/1.75.0")
elif self.options.integration == "catch":
self.requires("catch2/2.13.4")
elif self.options.integration == "gtest":
self.requires("gtest/cci.20210126")
elif self.options.integration == "qtest":
self.requires("qt/6.0.2")
elif self.options.integration == "standalone":
pass
else:
raise ConanInvalidConfiguration("%s is not (yet) available on cci" % self.options.integration)
def configure(self):
minimal_cpp_standard = "11"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "FakeIt-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def package(self):
self.copy(pattern="fakeit.hpp", dst="include", src=os.path.join(self._source_subfolder, "single_header", str(self.options.integration)))
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
def package_id(self):
del self.settings.compiler
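# Local usage sketch (hypothetical version reference):
#   conan create . fakeit/2.0.9@ -o fakeit:integration=gtest
# which pulls gtest in via requirements() and packages the matching
# single-header variant of fakeit.hpp.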
| [
"[email protected]"
] | |
5ab4128daad6fc7ec81e3f308d5ded319af70f7c | 18219d0fc95936ded56fe44f9a65ecb27f015232 | /35 Laboratory Converter units.py | ed1ee2497ae870063545fbc0a031cc8248d0e0f6 | [] | no_license | JDavid121/Script-Curso-Cisco-Python | 20a61b91b09376dcaef54f8ae5f86fe252de5c33 | 6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b | refs/heads/master | 2021-05-18T04:54:59.948970 | 2020-03-29T20:19:53 | 2020-03-29T20:19:53 | 251,120,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | """
Program that converts miles to kilometers and vice versa
"""
"""
1 mile = 1.61 km
"""
#Program that converts miles to kilometers
#******************************************************************
print("Program that converts miles to kilometers")
mile = input("Enter the distance in miles\n\t")
mile=float(mile)
print("The distance entered in miles is...",mile)
mile_to_kilometers = mile*1.61 # Conversion from miles to km
print(mile,"miles are equivalent to",round(mile_to_kilometers,3),"kilometers")
print("End of program")
print()
print("Program that converts kilometers to miles")
kilometer = input("Enter the distance in kilometers\n\t")
kilometer=float(kilometer)
print("The distance entered in kilometers is...",kilometer)
kilometers_to_mile = kilometer/1.61 #Conversion from km to miles
print(kilometer,"kilometers are equivalent to",round(kilometers_to_mile,3),"miles")
print("End of program")
#Program that converts degrees Fahrenheit to Celsius
#******************************************************************
# expression: °C = (°F - 32)*(5/9)
print("Program that converts degrees Fahrenheit to Celsius")
farhenheit=input("Enter the degrees Fahrenheit\n\t")
farhenheit=float(farhenheit)
farhenheit=round(farhenheit,3) #Round the input to 3 decimal places
print("\tThe degrees Fahrenheit entered are",farhenheit,"°F")
celsius_to_farh=(farhenheit-32)*(5/9) #Conversion from degrees Fahrenheit to Celsius
celsius_to_farh=round(celsius_to_farh,3)
print("\t",farhenheit,"°F","are equivalent to",celsius_to_farh,"°C")
print("End of program")
print()
print("Program that converts degrees Celsius to Fahrenheit")
celsius=input("Enter the degrees Celsius\n\t")
celsius=float(celsius)
celsius=round(celsius,3) #Rounding the input to 3 decimal places.
farh_to_celsius=32+(9/5)*celsius
farh_to_celsius=round(farh_to_celsius,3)
print("\t",celsius,"°C are equivalent to",farh_to_celsius,"°F")
print("End of program")
| [
"[email protected]"
] | |
ba12f92e88b5290922b85f50d0b6bf924df5dbe7 | cd9f819b968def4f9b57448bdd926dc5ffa06671 | /B_輕鬆學python3_孫宏明_碁峰_2017/29-1/Graph/main.py | e65b7e9e5b5aa31d70a8bcc2c37da8a8c20e3540 | [] | no_license | AaronCHH/jb_pyoop | 06c67f3c17e722cf18147be4ae0fac81726e4cbc | 356baf0963cf216db5db7e11fb67234ff9b31b68 | refs/heads/main | 2023-04-02T05:55:27.477763 | 2021-04-07T01:48:04 | 2021-04-07T01:48:13 | 344,676,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import shape as sh
# Create objects of the Circle, Rectangle and Triangle classes
c = sh.Circle(10)
r = sh.Rectangle(5, 2)
t = sh.Triangle(8, 3)
# Put the objects into a tuple
shapes = c, r, t
# Use a for loop to display each object's info and area
for s in shapes:
    s.show_shape_info()
    print('Area: ' + str(s.get_area()))
| [
"[email protected]"
] | |
e3fbb8c9d7fd01422301e9908dc1df4fb26ab066 | f714db4463dd37fc33382364dc4b1963a9053e49 | /tests/sentry_plugins/pivotal/test_pivotal_plugin.py | cf8294f3310c500617cfeb7ce3dbf7f8b51b6967 | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | macher91/sentry | 92171c2ad23564bf52627fcd711855685b138cbd | dd94d574403c95eaea6d4ccf93526577f3d9261b | refs/heads/master | 2021-07-07T08:23:53.339912 | 2020-07-21T08:03:55 | 2020-07-21T08:03:55 | 140,079,930 | 0 | 0 | BSD-3-Clause | 2020-05-13T11:28:35 | 2018-07-07T11:50:48 | Python | UTF-8 | Python | false | false | 2,171 | py | from __future__ import absolute_import
from exam import fixture
from django.core.urlresolvers import reverse
from sentry.testutils import PluginTestCase
from sentry.utils import json
from sentry_plugins.pivotal.plugin import PivotalPlugin
class PivotalPluginTest(PluginTestCase):
@fixture
def plugin(self):
return PivotalPlugin()
def test_conf_key(self):
assert self.plugin.conf_key == "pivotal"
def test_entry_point(self):
self.assertPluginInstalled("pivotal", self.plugin)
def test_get_issue_label(self):
group = self.create_group(message="Hello world", culprit="foo.bar")
assert self.plugin.get_issue_label(group, 1) == "#1"
def test_get_issue_url(self):
group = self.create_group(message="Hello world", culprit="foo.bar")
assert self.plugin.get_issue_url(group, 1) == "https://www.pivotaltracker.com/story/show/1"
def test_is_configured(self):
assert self.plugin.is_configured(None, self.project) is False
self.plugin.set_option("token", "1", self.project)
self.plugin.set_option("project", "1", self.project)
assert self.plugin.is_configured(None, self.project) is True
def test_no_secrets(self):
self.user = self.create_user("[email protected]")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.plugin.set_option("token", "abcdef", self.project)
url = reverse(
"sentry-api-0-project-plugin-details",
args=[self.org.slug, self.project.slug, "pivotal"],
)
res = self.client.get(url)
config = json.loads(res.content)["config"]
token_config = [item for item in config if item["name"] == "token"][0]
assert token_config.get("type") == "secret"
assert token_config.get("value") is None
assert token_config.get("hasSavedValue") is True
assert token_config.get("prefix") == "abcd"
| [
"[email protected]"
] | |
c818fa2fbe0a931e015d9c72ca30b11428d45ae9 | 0983a837b8ca96c215a3bad0dfda0aba9b79b89f | /single_header/preprocess.py | 5170d044c37f03d160ccb053d2efc9947a1cb873 | [] | no_license | omardrwch/rlcpp | 53a950d3f3b4e75010c1acf7d047b3fe48c99720 | 5b12133c3f85cd2a7158915914beace31fdcd13e | refs/heads/master | 2020-08-13T15:38:28.366012 | 2020-03-07T20:40:09 | 2020-03-07T20:40:09 | 214,994,055 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import os
import shutil
dir_path = os.path.dirname(os.path.realpath(__file__))
project_path = os.path.dirname(dir_path)
dir_to_copy = os.path.join(project_path, 'rlcpp')
dir_destination = os.path.join(dir_path, 'all_files')
if not os.path.exists(dir_destination):
os.makedirs(dir_destination)
# Copy all project files (.cpp and .h) from dir_to_copy to dir_destination
for root, dirs, files in os.walk(dir_to_copy):
for file in files:
path_file = os.path.join(root,file)
shutil.copy2(path_file,dir_destination)
"""
Create header file to be used by acme.py
"""
header_contents = "#ifndef __RLCPP_H__ \n#define __RLCPP_H__ \n"
# List all source files
source_dir = dir_destination
source_files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(source_dir):
for filename in f:
if '.h' in filename and filename != "rlcpp.hpp":
print(filename)
header_contents += "#include " + "\"" + filename + "\"" + "\n"
for r, d, f in os.walk(source_dir):
for filename in f:
if '.cpp' in filename:
print(filename)
header_contents += "#include " + "\"" + filename + "\"" + "\n"
header_contents += "#endif"
header_file = open(os.path.join(dir_destination, "rlcpp.hpp"),"w+")
header_file.write(header_contents)
header_file.close() | [
"[email protected]"
] | |
b11f3da5b0ec58c42441646efad1247fda2d1a9e | 147715fa98fe40b919784ef703dcddb8e8ab37c4 | /indy_common/config_helper.py | 9250ac18b12a1047999603e56dc61347844a6a7a | [
"Apache-2.0"
] | permissive | RaghuVamz/indy-node | ae069bfe2766248937fb2662ec65736c305a5cd9 | 977249b9100de62290ed45e74f1df1c2a1c7afd9 | refs/heads/master | 2021-08-23T20:08:57.192581 | 2017-12-05T23:41:54 | 2017-12-05T23:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | import os
from plenum.common.config_helper import PConfigHelper
class ConfigHelper(PConfigHelper):
@property
def log_dir(self):
return self.chroot_if_needed(
os.path.join(self.config.LOG_DIR, self.config.NETWORK_NAME))
@property
def genesis_dir(self):
return self.chroot_if_needed(
os.path.join(self.config.GENESIS_DIR, self.config.NETWORK_NAME))
@property
def keys_dir(self):
return self.chroot_if_needed(
os.path.join(self.config.KEYS_DIR, self.config.NETWORK_NAME, 'keys'))
@property
def ledger_base_dir(self):
return self.chroot_if_needed(self.config.LEDGER_DIR)
@property
def ledger_data_dir(self):
return self.chroot_if_needed(
os.path.join(self.config.LEDGER_DIR, self.config.NETWORK_NAME, 'data'))
@property
def log_base_dir(self):
return self.chroot_if_needed(self.config.LOG_DIR)
class NodeConfigHelper(ConfigHelper):
def __init__(self, name: str, config, *, chroot='/'):
assert name is not None
super().__init__(config, chroot=chroot)
self.name = name
@property
def ledger_dir(self):
return self.chroot_if_needed(
os.path.join(self.config.LEDGER_DIR, self.config.NETWORK_NAME, 'data', self.name))
| [
"[email protected]"
] | |
20eb845f27d8f3d0b45c4495eab5e80b15e6fcc1 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/tests/unit/security/appsecure/test_apbr_commands.py | 23646f813ef71033f238a57643ad7cc20473dad9 | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,672 | py | from mock import patch
import unittest2 as unittest
from mock import MagicMock
from jnpr.toby.security.appsecure import apbr_commands
from jnpr.toby.hldcl.juniper.security.srx import Srx
class Response:
def __init__(self, x=""):
self.resp = x
def response(self):
return self.resp
class UnitTest(unittest.TestCase):
mocked_obj = MagicMock(spec=Srx)
mocked_obj.log = MagicMock()
def test_get_apbr_profile(self):
try:
apbr_commands.get_apbr_profile()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'pic': 0
}
}
}
self.mocked_obj.execute_as_rpc_command = MagicMock(return_value=dict_to_return)
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), {})
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'profile-name': "abc",
'zone-name': "trust"
}
}
}
self.mocked_obj.execute_as_rpc_command.return_value = dict_to_return
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), {"abc":"trust"})
dict_to_return = {'apbr-profiles':
{'apbr-profiles': {'profile-name': ["abc", "def"],
'zone-name': ["trust", "untrust"]
}
}
}
x = {"abc":"trust", "def":"untrust"}
self.mocked_obj.execute_as_rpc_command.return_value = dict_to_return
self.assertEqual(apbr_commands.get_apbr_profile(device=self.mocked_obj), x)
def test_verify_apbr_profile(self):
try:
apbr_commands.verify_apbr_profile()
except Exception as err:
self.assertEqual(err.args[0], "'device' is a mandatory argument")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "'profile_name' is a mandatory argument")
x = {"abc": "trust", "def": "untrust"}
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="def", zone_name="untrust"), True)
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict={}, no_profile=True),True)
p = patch("jnpr.toby.security.appsecure.apbr_commands.get_apbr_profile", new=MagicMock(return_value=x))
p.start()
self.assertEqual(apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_name="abc"),True)
p.stop()
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict={}, profile_name="abc", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "No profiles configured")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, no_profile=True)
except Exception as err:
self.assertEqual(err.args[0], "Expected-NO profile, but some profile was found")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="abc", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "Zone name NOT matching")
try:
apbr_commands.verify_apbr_profile(device=self.mocked_obj, profile_dict=x, profile_name="abcd", zone_name="untrust")
except Exception as err:
self.assertEqual(err.args[0], "Profile name not found")
def test_get_apbr_stats(self):
try:
apbr_commands.get_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
dict_to_return = {'apbr-statistics':
{'apbr-statistics': {'pic': 0
}
}
}
self.mocked_obj.execute_as_rpc_command = MagicMock(return_value=dict_to_return)
self.assertEqual(apbr_commands.get_apbr_stats(device=self.mocked_obj), {'pic':0})
def test_verify_apbr_stats(self):
try:
apbr_commands.verify_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0], "Device handle is a mandatory argument")
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "counter_values is None, it is mandatory argument")
x = {"a" : "1", "b":"2", "c":"3"}
p = patch("jnpr.toby.security.appsecure.apbr_commands.get_apbr_stats", new=MagicMock(return_value=x))
p.start()
self.assertEqual(apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"b":2, "c":3}), True)
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"b": 1, "c": 3})
except Exception as err:
self.assertEqual(err.args[0], "APBR statistics validation failed")
try:
apbr_commands.verify_apbr_stats(device=self.mocked_obj, counter_values={"d": 1, "c": 3})
except Exception as err:
self.assertEqual(err.args[0], "APBR statistics validation failed")
def test_clear_apbr_stats(self):
try:
apbr_commands.clear_apbr_stats()
except Exception as err:
self.assertEqual(err.args[0],"Device handle is a mandatory argument" )
self.mocked_obj.cli = MagicMock(return_value=Response(""))
try:
apbr_commands.clear_apbr_stats(device=self.mocked_obj)
except Exception as err:
self.assertEqual(err.args[0], "APBR stats couldn't be cleared")
self.mocked_obj.cli.return_value = Response("Advance-policy-based-routing statistics clear done")
self.assertEqual(apbr_commands.clear_apbr_stats(device=self.mocked_obj), True)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
cec71b1e9805f047d5d4bdc58734967d935567e5 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pihNcNQXiYHSRW8Cv_12.py | 26d083018c9ee0efaed8a96b8a45bd2fefe26834 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py |
def sort_by_length(lst):
lst.sort(key = len)
return lst
| [
"[email protected]"
] | |
e1d5705fadca5206369d9fc28471dee0f1be801f | ba60d3ccf11157abaf6c7bcf3a81aace27c6af88 | /spoj/wtk.py | c87681f874913fd405f05378c83c0744d2e5991b | [] | no_license | eightnoteight/compro | 9a09628593cdd3201f4d3bcf271f1ca6a4e5efca | 1e5d32ee83e9d8f27623dee7262decad3d107bd5 | refs/heads/master | 2021-01-21T04:19:02.746824 | 2016-08-01T20:51:16 | 2016-08-01T20:51:16 | 44,669,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from sys import stdin
try:
range = xrange
input = raw_input
except:
pass
def wtk(n):
ans = 1
for x in range(2, n + 1):
ans = ((ans + (n + 1 - x) - 1) % x) + 1
return ans
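# The loop unwinds a Josephus-style elimination: growing the circle from
# x-1 back to x people shifts the surviving 1-based position by (n + 1 - x).
# Hand-checked small cases: wtk(1) == 1, wtk(2) == 2, wtk(3) == 2.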
inp = stdin.readlines()
for _ in range(int(inp[0])):
print(wtk(int(inp[_ + 1])))
| [
"[email protected]"
] | |
5600718a422aecd517e3e3db0aa2ade322992a29 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1657.py | b6882f38110c00e2f3f7f2637ba12cf028460824 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | import math
def ispalindrome(n): # ok, didn't really need this, but since i wrote it i might as well use it
digits = [int(x) for x in str(n)]
length = len(digits)
# if even number of digits, we want to compare the first half to the second half
# if odd, we don't want to compare the middle to itself, so the truncating should be perfect
for i in range(length/2):
if digits[i] != digits[length-i-1]:
return 0
return 1
def getnextpal(n):
digits = [int(x) for x in str(n)]
length = len(digits)
digits = [0] + digits # extra digit in case of overflow
for i in range(length/2):
x = i + 1
y = length-i
#print "{} {}... {} {}".format(x, y, digits[x], digits[y])
if digits[x] > digits[y]:
digits[y] = digits[x]
elif digits[x] < digits[y]:
# find next incremental digit to the left of y, and increment it
# the leftmost possible z is x. x cannot be 9 (not incrementable), because digits[x] < digits[y]
z = y - 1
while digits[z] == 9:
z -= 1
digits[z] += 1
#now y is free to be any digit
digits[y] = digits[x]
#but we have to zero out the digits in between
for k in range(z+1,y):
digits[k] = 0
# else equal, in which case keep going
return int("".join(str(x) for x in digits))
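# getnextpal(n) yields the smallest palindrome >= n by mirroring the left half
# onto the right, bumping the next free digit leftward when the right half is
# too small. Worked example: getnextpal(1234) -> 1331.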
def fairsquare(A,B):
fscount = 0
minroot = int(math.ceil(math.sqrt(A))) # minimum val that you can square to get A
nextpal = getnextpal(minroot)
nextsquare = nextpal * nextpal
while(nextsquare) <= B:
if ispalindrome(nextsquare):
fscount += 1
nextpal = getnextpal(nextpal+1)
nextsquare = nextpal * nextpal
return fscount
f = open('C-large-1.in', 'r')
T = int(f.readline())
for i in range(T):
nums = f.readline().split(' ')
A = int(nums[0])
B = int(nums[1])
print "Case #{}: {}".format(i+1, fairsquare(A,B)) | [
"[email protected]"
] | |
06cdb1171a3fca287acf3cd73cf81f7a7ac189a4 | 63bc95150f6af526199454602e5689bfadc882ba | /12/ex12-4.py | b134b84938d34ba5eb34a0dc964e2c228b3343a3 | [] | no_license | veenary/python-src | fd61d22a58d452ccb251402fecb0b7babd5372a7 | d61374bc32b8ebe3b2be366a6de259680821a4e1 | refs/heads/master | 2023-03-17T10:07:27.704611 | 2021-03-10T01:31:56 | 2021-03-10T01:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family='Malgun Gothic')
xdata = ['안지영', '홍지수', '황예린']
ydata1 = [90, 85, 88]
ydata2 = [83, 88, 91]
plt.plot(xdata, ydata1, label='Korean')
plt.plot(xdata, ydata2, label='English')
plt.legend(loc='upper center')
plt.title('Korean and English scores of three students')
plt.show() | [
"[email protected]"
] | |
d6c67ffa4c48863a4c30baeec6a7167f27d0edd3 | 84868118de838472bca33c9f0455674e7e563d1b | /WLCG_GSoC_Task_Server/views.py | 5f7a847621fab880c815c0a05099b392a92a3702 | [
"Apache-2.0"
] | permissive | maany/MOOC-CA-Server | afa7b065fd5a059a94abec687236122ec6afd376 | 917109c7e5f37f3e7ee63ec0c5d1be3409b27f93 | refs/heads/master | 2022-12-12T12:03:52.430010 | 2019-03-12T13:46:06 | 2019-03-12T13:46:06 | 156,133,195 | 0 | 0 | Apache-2.0 | 2022-12-08T01:01:48 | 2018-11-04T22:56:11 | HTML | UTF-8 | Python | false | false | 392 | py | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from evaluator.models import Task, Applicant
@login_required
def profile(request):
tasks = Task.objects.filter(applicant=request.user.applicant)
context = {
'tasks': tasks,
}
return render(request,'registration/profile.html', context)
| [
"[email protected]"
] | |
6279b7c4ae80c6d7996d9e4072efb25f43a2d80b | 536538af28cfe40e10ff1ce469cd0f81e8b3a8fe | /majority_element_II.py | 02813649ad35d4fef5a7d02c179be39c5526676a | [] | no_license | ShunKaiZhang/LeetCode | 7e10bb4927ba8581a3a7dec39171eb821c258c34 | ede2a2e19f27ef4adf6e57d6692216b8990cf62b | refs/heads/master | 2021-09-01T07:41:03.255469 | 2017-12-25T19:22:18 | 2017-12-25T19:22:18 | 104,136,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # python3
# Given an integer array of size n, find all elements that appear more than ⌊ n/3 ⌋ times.
# The algorithm should run in linear time and in O(1) space.
# My solution
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
nums.sort()
n = len(nums) // 3
out = set()
for i in range(n, len(nums)):
if nums[i] == nums[i - n]:
out.add(nums[i])
return list(out)
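    # After sorting, nums[i] == nums[i - n] forces the value to fill n+1
    # consecutive slots, i.e. it appears more than len(nums)//3 times.
    # Hand-checked: majorityElement([1,1,1,3,3,2,2,2]) returns [1, 2]
    # (set order may vary).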
| [
"[email protected]"
] | |
a091cf762f8da9cecbb9794772a0d394ad9338f9 | 16fe74651e6692ea3d8d0302b40ac42f3d58e0ca | /Divide_Two_Integers.py | 331dae5f9352e2e8f00995e3f8aaf0d11552e651 | [
"MIT"
] | permissive | Ahmed--Mohsen/leetcode | 7574f71b10dfb9582f62e856bbc2559d3b21b2a1 | ad8967a5d85ac54f53b3fcce04df1b4bdec5fd9e | refs/heads/master | 2021-01-18T14:34:06.987665 | 2015-12-23T21:17:27 | 2015-12-23T21:17:27 | 33,744,104 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
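Approach: repeatedly double the divisor with left shifts. Each outer pass
finds the largest k with (divisor << (k-1)) <= p, adds 2**(k-1) to the
quotient and subtracts that multiple from p.
Worked example, 43 / 5: 5 << 3 = 40 <= 43 < 5 << 4, so ans += 8 and
p becomes 3; then 3 < 5 ends the loop with ans = 8.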
"""
class Solution:
# @return an integer
def divide(self, dividend, divisor):
p = abs(dividend)
q = abs(divisor)
ans = 0
while p >= q:
counter = 0
while p >= (q << counter): # detect 2^n that p is divisible by
counter += 1
ans += 1 << (counter - 1)
p -= q << (counter - 1)
        if (dividend < 0 and divisor > 0) or (dividend > 0 and divisor < 0):
            ans = -ans
        # the problem statement caps 32-bit overflow (-2**31 / -1) at MAX_INT
        return min(ans, 2147483647) | [
"[email protected]"
] | |
55040119c5bec7c52ab31cb072da66f9e8f23c54 | 1719920a92f7194766624474b98d59ef8d6eddaf | /models/search_result.py | 1e44051445d3173c464a9e9db35eddc2d60c9dd2 | [
"MIT"
] | permissive | MIchaelMainer/msgraph-v10-models-python | cfa5e3a65ba675383975a99779763211ed9fa0a9 | adad66363ebe151be2332f3ef74a664584385748 | refs/heads/master | 2020-03-19T12:51:06.370673 | 2018-06-08T00:16:12 | 2018-06-08T00:16:12 | 136,544,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
class SearchResult(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def on_click_telemetry_url(self):
"""Gets and sets the onClickTelemetryUrl
Returns:
str:
The onClickTelemetryUrl
"""
if "onClickTelemetryUrl" in self._prop_dict:
return self._prop_dict["onClickTelemetryUrl"]
else:
return None
@on_click_telemetry_url.setter
def on_click_telemetry_url(self, val):
self._prop_dict["onClickTelemetryUrl"] = val
| [
"[email protected]"
] | |
1098ae39b7e485b89db97e585ff61d78edb48860 | 1a66df726d2ecc20d6b5ff9d35dac4ea7ba5cb66 | /upseto/recursivegit.py | 2382959e2c66d5083c1f207a1d9b02424a21ec09 | [
"Apache-2.0"
] | permissive | shlomimatichin/upseto | 83c601d7a6d625d00fad3134fe9192dcfec73950 | 0fedc8b7d628b971d07b92b61c7a29431ad55d22 | refs/heads/master | 2021-01-15T11:06:41.285140 | 2015-03-05T13:22:38 | 2015-03-05T13:22:38 | 28,646,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from upseto import gitwrapper
from upseto import traverse
import sys
class RecursiveGit:
def __init__(self, baseDir=".."):
self._traverse = traverse.Traverse(baseDir)
def run(self, mani, commandLine):
git = gitwrapper.GitWrapper(".")
sys.stdout.write('#upseto %s\n' % git.directory())
sys.stdout.write(git.run(commandLine))
for dependency in self._traverse.traverse(mani):
git = gitwrapper.GitWrapper(dependency.projectDir)
sys.stdout.write('#upseto %s\n' % git.directory())
sys.stdout.write(git.run(commandLine))
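# Usage sketch (hypothetical manifest object):
#   RecursiveGit().run(manifest, ['status', '--short'])
# prints an '#upseto <directory>' header followed by each repository's output.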
| [
"[email protected]"
] | |
442148600eefdf14a8d8ae73741d95f019dc6024 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/permutations_20200723154912.py | faaf21e3ad0dd8cb060bd06a3f837bc62ea900d1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def perm(arr):
# sort the array
if len(arr) == 0:
return 0
else:
        arr.sort()
        # a permutation of 1..N must start at 1 and then increase by exactly 1
        if arr[0] != 1:
            return 0
        for i in range(len(arr)-1):
            if arr[i] + 1 != arr[i+1]:
                return 0
        return 1
print(perm([4,1,3,2])) | [
"[email protected]"
] | |
fe0f7faff241dde12ec27f6c4fd6b1011ef5bf46 | 2bebb669112e2955de612e7d0532fe545b609733 | /goatools/semsim/termwise/wang.py | 0228ed10453e6dc1055ce126d7f3dbcb4c539a3a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | rexdwyer/goatools | 008885ad534b73a1137fa59c9eb50e21b147f2ab | ef14f99844a2a42d36aa1ab2e40161b8bc7be78e | refs/heads/main | 2022-12-31T06:08:31.203278 | 2020-10-15T01:14:59 | 2020-10-15T01:14:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | """Wang's termwise semantic similarity for GO terms"""
__copyright__ = "Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
__author__ = "DV Klopfenstein"
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.semsim.termwise.dag_a import DagA
class SsWang:
"""Wang's termwise semantic similarity for GO terms"""
def __init__(self, godag, relationships=None, rel2scf=None):
self.godag = godag
self.rels = relationships
self.rel2scf = rel2scf
self.w_e = self._init_edge_weight_factor(rel2scf)
self.go2subdag = {}
def add_goid(self, goid, prt=None):
"""Add a GO ID which will be compared using semantic similarity"""
self.add_goids([goid], prt)
def add_goids(self, goids, prt=None):
"""Add GO IDs which will be compared using semantic similarity"""
# go2svalue = wang.get_go2svalue('go:0043231')
s_godag = self.godag
s_rels = self.rels
s_go2subdag = self.go2subdag
s_rel2scf = self.w_e
for goid in goids:
if goid in s_godag:
gosubdag = GoSubDag([goid], s_godag, s_rels, prt=prt)
dag = DagA(goid, gosubdag, s_rel2scf)
s_go2subdag[goid] = dag
def get_semsim(self, go_a, go_b):
"""Get Wang's semantic similarity between two GO terms"""
if self._not_loaded(go_a, go_b):
return None
dag_a = self.go2subdag[go_a]
dag_b = self.go2subdag[go_b]
gos_ab = set(dag_a.go2svalue.keys()).intersection(dag_b.go2svalue.keys())
s_a = dag_a.get_svalues(gos_ab)
s_b = dag_b.get_svalues(gos_ab)
s_ab = sum([a + b for a, b in zip(s_a, s_b)])
return s_ab/(dag_a.get_sv() + dag_b.get_sv())
def _not_loaded(self, go_a, go_b):
"""Check that both GO IDs are in the go2subdag dict"""
if go_a not in self.go2subdag:
print('**ERROR: {GO} NOT LOADED INTO SsWang'.format(GO=go_a))
return True
if go_b not in self.go2subdag:
print('**ERROR: {GO} NOT LOADED INTO SsWang'.format(GO=go_b))
return True
return False
@staticmethod
def _init_edge_weight_factor(rel2scf):
"""Initialize semantic contribution factor (scf) for weights for edge types (w_e)"""
if rel2scf is None:
return {
'is_a': 0.8,
'part_of': 0.6,
}
return rel2scf
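# Illustrative usage sketch (example GO IDs; assumes a `godag` loaded from
# go-basic.obo with relationships):
#   wang = SsWang(godag, relationships={'part_of'})
#   wang.add_goids(['GO:0043231', 'GO:0005737'])
#   sim = wang.get_semsim('GO:0043231', 'GO:0005737')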
# Copyright (C) 2020-present DV Klopfenstein. All rights reserved.
| [
"[email protected]"
] | |
88b31bcd1e68bb87db4eed1c8f9d1dc272541ee1 | 12e78946542250f64792bc6c1d8c8ff1ffecdaf7 | /Python/OOP/bikes.py | 3354bab3e9923ca80f29aa3f2dbd96f61c135e2b | [] | no_license | mkrabacher/CodingDojoAssignments | 0fde5adf7223a9eac07a4867499a243e230a300e | 4afef4aaf4f129fb56376e57d8be437d1f124521 | refs/heads/master | 2021-05-14T13:38:03.570533 | 2018-02-23T00:09:24 | 2018-02-23T00:09:24 | 113,722,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | class bike(object):
def __init__(self, price, max_speed):
        # instance attributes: assigning to `bike.price` etc. would create
        # class attributes shared by every bike
        self.price = price
        self.max_speed = max_speed
        self.miles = 0
def display_info(self):
print 'This bike costs {}, has a max speed of {}, and have been ridden {} miles total'.format(self.price, self.max_speed, self.miles)
return self
def ride(self):
print 'ridin now boss.'
self.miles += 10
return self
def reverse(self):
print 'reversin now boss'
self.miles -= 5
return self
print '--------------------------------------------------------------------------'
bike1 = bike(200, "25mph")
bike1.display_info().ride().reverse().ride().ride().reverse().display_info()
print '--------------------------------------------------------------------------'
bike2 = bike(100, "21mph")
bike2.display_info().ride().reverse().reverse().reverse().reverse().display_info()
print '--------------------------------------------------------------------------'
bike3 = bike(400, "254mph")
bike3.display_info().reverse().ride().ride().display_info()
print '--------------------------------------------------------------------------' | [
"[email protected]"
] | |
45f245fa90ff36c4bd8ccba8af71faf3d28d06aa | 1025bc2aa5aaa40970ad1a51d8d0b1202a1ea11e | /StatTools/test/test_morph.py | 098bd9e136b860bf73ff5b65867dbd350b8bd07f | [] | no_license | uwcms/FinalStateAnalysis | f2be318546728621676a4b90ed2678b2560c94e6 | bcb164a8e27d459a9ac438780f6c8730d3e856bf | refs/heads/miniAOD_9_4_0 | 2022-11-09T01:28:52.199025 | 2019-03-15T19:25:10 | 2019-03-15T19:25:10 | 5,201,989 | 5 | 32 | null | 2020-11-19T17:02:32 | 2012-07-27T07:51:18 | Python | UTF-8 | Python | false | false | 646 | py | '''
Stupid tests of the th1fmorph tool
'''
from FinalStateAnalysis.StatTools.morph import morph
from rootpy.io import open, DoesNotExist
file = open('$fsa/VHiggs/test/plotting/wh_shapes.root')
hist1 = file.get('mmt_mumu_final_140_MuTauMass/VH140')
hist2 = file.get('mmt_mumu_final_120_MuTauMass/VH120')
hist130true = file.get('mmt_mumu_final_130_MuTauMass/VH130')
print '140', hist1.Integral(), hist1.GetMean()
print '130 true', hist130true.Integral(), hist130true.GetMean()
print '120', hist2.Integral(), hist2.GetMean()
# Try to morph to 130
m130 = morph('130', '130', 130, hist1, 140, hist2, 120)
print m130.Integral(), m130.GetMean()
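# th1fmorph-style morphing interpolates the 140 and 120 GeV templates
# horizontally to synthesise the 130 GeV shape; the printed integral and mean
# can then be compared against the true VH130 histogram above.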
| [
"[email protected]"
] | |
363cff7ddb7a55c545d517ea771c5a424f188146 | 7e9b45a66b3637cf571eb1e16c07dd888963d8ba | /ITcast/ITcast/spiders/itcast.py | a87e81bec5c50033e10d8550fb5f3e8c1c968117 | [] | no_license | tusonggao/scrapy-scripts | 60d4c7449819c6a2861c208c34f0fb8078ed94d4 | 2dd97c0a55e02c51a43c7a335c91ac64d8bbaf1b | refs/heads/master | 2020-04-08T18:01:21.351922 | 2018-12-02T01:57:03 | 2018-12-02T01:57:03 | 159,590,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # -*- coding: utf-8 -*-
import scrapy
class ItcastSpider(scrapy.Spider):
name = 'itcast'
    allowed_domains = ['itcast.cn']
    start_urls = ['http://www.itcast.cn/']
def parse(self, response):
pass
| [
"[email protected]"
] | |
435e68ce65d72b7e2bb3aa04ee15f26b39536bdf | cf1365736444f8bb4d29fcc220906e3a12672732 | /collate_20161103_kidney_livetox.py | eeee2f440ad1eeab64133a4624ccca66badaa3b1 | [] | no_license | jmuhlich/ramm_screening_data_processing | 3be8c5b8a057335b1d530883d4d840a685a53149 | 5fcb65a7fef637ba77df251dfc45aef8962845bf | refs/heads/master | 2020-04-14T14:15:18.730774 | 2017-05-23T14:33:22 | 2017-05-23T14:33:22 | 68,258,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,399 | py | from __future__ import division
import os
import glob
import re
import itertools
import collections
import zipfile
import datetime
import pandas as pd
import dateutil
from unipath import Path
loc_columns = ['WellName', 'Row', 'Column']
plate_invariant_columns = [
'ScreenName', 'ScreenID', 'PlateName', 'PlateID', 'MeasurementDate',
'MeasurementID', 'Timepoint', 'Plane'
]
def _build_wells():
rows = [(str(i), r) for i, r in enumerate('CDEFGHIJ', 3)]
columns = [str(i) for i in range(3, 22+1)]
rc = list(itertools.product(rows, columns))
data = {
'WellName': [r[1] + c for r, c in rc],
'Row': [r[0] for r, c in rc],
'Column': [c for r, c in rc],
}
return pd.DataFrame(data)
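# 8 rows (C-J) x 20 columns (3-22) = 160 wells per plate, which is what the
# "Expected 160 rows" assertions below rely on.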
def fixplateintegrity(df, _wells=_build_wells()):
"""Fill in any missing rows from the corresponding technical replicate."""
# FIXME: This could be more robust if done after merging with the layout,
# since we could either find the replicate well directly or at least verify
# our assumptions. (This experiment lays the technical replicates out
# in 2x2 squares.)
df = df.merge(_wells, how='right')
missing_idx = df.ScreenName.isnull().nonzero()[0]
for idx in missing_idx:
loc = df.iloc[idx][loc_columns]
column = int(loc.Column)
# Select the technical replicates (2x2 squares).
if column % 2 == 0:
rep_column = column - 1
else:
rep_column = column + 1
rep_indexer = (df.Column == str(rep_column)) & (df.Row == loc.Row)
rep_data = df[rep_indexer].iloc[0].copy()
# Check the final (data) column since we'd never touch that one in the
# "both replicates missing" case.
if pd.isnull(rep_data.iloc[-1]):
# Both replicates are missing - copy invariant columns from any old
# row and leave data columns as nan.
invariant_data = df.iloc[0][plate_invariant_columns]
rep_data[plate_invariant_columns] = invariant_data
print (" !! Missing data for well %s; replicate also"
" missing! Inserting null values." % loc.WellName)
else:
rep_wellname = rep_data.WellName
print (" ** Missing data for well %s; copying from"
" replicate in %s" % (loc.WellName, rep_wellname))
rep_data[loc_columns] = loc
df.iloc[idx] = rep_data
return df
def round_timedelta(delta, granularity):
"""Rounds a timedelta to a given granularity in seconds."""
s = delta.total_seconds()
rounded_seconds = (s + granularity / 2) // granularity * granularity
return datetime.timedelta(0, rounded_seconds)
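# e.g. round_timedelta(datetime.timedelta(hours=3, minutes=50), 4 * 60 * 60)
# returns datetime.timedelta(0, 14400), i.e. snapped to the nearest 4h mark.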
PROJECT_NAME = '20161103_kidney_livetox'
# List of duplicate analysis result files, to be skipped.
badpaths = ()
input_path = Path(__file__).parent.child('input', PROJECT_NAME)
output_path = Path(__file__).parent.child('output', PROJECT_NAME)
assert input_path.exists()
output_path.mkdir()
df = {}
print "Reading experimental design\n==========\n"
layout_filename = 'Plate layout_liver-kidney_HepG2.xlsx'
# Must resolve() to traverse to parent if input path is a symlink, which it
# typically is.
layout_path = input_path.resolve().parent.child(layout_filename)
print "File:", layout_path
plate_df = pd.read_excel(layout_path)
plate_df.Row = plate_df.Row.astype(unicode)
plate_df.Column = plate_df.Column.astype(unicode)
print "\n\nReading data files\n==========\n"
seen_scans = {}
# Previous datasets had multiple replicates. Leaving the general structure in
# place to avoid too much modification to the code.
for rpath in (input_path,):
print "Scanning", rpath
replicate = '1'
df.setdefault(replicate, {})
# Get Sim_.* directory paths.
plate_paths = [n for n in rpath.listdir()
if re.match(r'Sim_\d+\[\d+\]$', n.name)]
for plate_path in plate_paths:
sim = re.findall('(Sim_00000\d)', plate_path)[0]
print "Searching for plate data in", Path(*plate_path.components()[-2:])
print "(Rep: %s Plate: %s)" % (replicate, sim)
df[replicate].setdefault(sim, [])
# Find csv files under plate_path, but skip the ones listed in badpaths.
timepoint_paths = [p for p in plate_path.walk()
if p.endswith('.csv')
and p not in badpaths]
expected_tps = 7
num_tps = len(timepoint_paths)
assert num_tps == expected_tps, ("Expected %s timepoint .csv files, found"
" %d" % (expected_tps, num_tps))
# Here we rely on having an ISO8601 timestamp in the paths so that
# lexically sorting them puts them in time-course order. We'll still
# verify our assumption later by inspecting the timestamps inside the
# files, but this is a nice shortcut.
timepoint_paths = sorted(timepoint_paths)
timestamp0 = timepoint_paths[0].rsplit('/', 2)[-2][:22]
t0 = dateutil.parser.parse(timestamp0)
seen_timepoints = []
for csvpath in timepoint_paths:
# Ensure we don't have duplicate files for the same plate +
# timepoint (apparently some scans were processed more than once).
# badpaths is supposed to contain all of the duplicates for
# filtering above, and this code makes sure we didn't miss any.
scan_name = csvpath.rsplit('/', 2)[-2]
timestamp, scan_id = re.findall(r'^([^[]+)\[(\d+)\]$', scan_name)[0]
full_path = plate_path + csvpath
if scan_id in seen_scans:
other_path = seen_scans[scan_id]
msg = ("duplicate scan ID %s found in filenames:"
"\n %s\n %s" % (scan_id, other_path, full_path))
raise Exception(msg)
seen_scans[scan_id] = full_path
t = dateutil.parser.parse(timestamp)
delta_t = t - t0
hour = 60 * 60
# Experimental timepoints are supposed to be a multiple of 4 hours.
delta_t_4h = round_timedelta(delta_t, 4 * hour)
exp_timepoint = int(delta_t_4h.total_seconds() / hour)
actual_timepoint = delta_t.total_seconds() / hour
print ' %sh (%.1fh) @ %s' % (exp_timepoint, actual_timepoint,
Path(*csvpath.components()[-3:]))
seen_timepoints.append(exp_timepoint)
# Specify 'str' as dtype to prevent any parsing of floats etc. to
# preserve original values exactly.
tempdf = pd.read_csv(csvpath, encoding='utf-8', dtype='str')
tempdf = fixplateintegrity(tempdf)
assert len(tempdf) == 160, ('Expected 160 rows, found %d'
% len(tempdf))
# Insert actual timepoint column.
tempdf.insert(0, 'ActualTimepointHours', actual_timepoint)
# Verify timestamp column matches file path.
unique_mds = tempdf.MeasurementDate.unique()
assert len(unique_mds) == 1, "multiple timestamps, expected one"
data_timestamp = dateutil.parser.parse(tempdf.MeasurementDate[0])
assert data_timestamp == t, "timestamp mismatch"
# For sim1:
# Add a replicate column, but ignore the sim, we will join sim1 and
# 2 and sim will not be needed.
# Add a time column.
# Prepend experimental design and replicate number to Structural
# Panel plates; Functional Panel plates will be merged later.
if sim == 'Sim_000001':
pl = plate_df.copy()
pl['ReplicateNumber'] = replicate
pl['ExperimentalTimepointHours'] = exp_timepoint
assert len(tempdf) == len(pl), "design-experiment mismatch"
tempdf = pl.merge(tempdf, on=loc_columns)
assert len(tempdf) == len(pl), "design merge failure"
df[replicate][sim].append(tempdf)
assert seen_timepoints[:7] == range(0, 24 + 1, 4), "wrong timepoint"
#Ok all read in, now go through each replicate (1 to 2)
#For each sim1 item, join it to corresponding sim2 item, this is replicate X, compounds Y-Z, 7 time points, full set of struct and func features
def non_ascii(text):
return ''.join([i if ord(i) < 128 else ' ' for i in text])
def drop_duplicate_columns(df):
keep = [True] * len(df.columns)
seen = {}
for i, cname in enumerate(df.columns):
if cname not in seen:
seen[cname] = i
else:
if not (df.iloc[:, seen[cname]] == df.iloc[:, i]).all():
raise ValueError(
'Duplicate "%s" columns differ; not dropping.' % cname)
keep[i] = False
return df.iloc[:, keep]
print "\n\nMerging plates\n==========\n"
for replicate, rdf in df.items():
print "Replicate", replicate
data = []
# It seems pointless to be iterating over one thing, but previous
# experiments had more and I didn't want to do too much surgery on the code.
for panel_a, panel_b in (('Sim_000001', 'Sim_000003'),):
for tp, (df1, df2) in enumerate(zip(rdf[panel_a], rdf[panel_b])):
print " Timepoint %d: %s / %s" % (tp, panel_a, panel_b)
df2.columns = [x + '_2' for x in df2.columns.values]
tempdf = df1.merge(df2, left_on='WellName', right_on='WellName_2')
assert len(tempdf) == len(df1) == len(df2), "panel length mismatch"
tempdf = drop_duplicate_columns(tempdf)
data.append(tempdf)
# Trivially succeeds on first iteration, of course.
assert (tempdf.columns == data[0].columns).all(), "column mismatch"
final = data[0].append(data[1:])
final = final.sort_values(['ExperimentalTimepointHours', 'PlateName'])
final = final.reset_index(drop=True)
assert final.shape[0] == 160*7, "wrong number of rows"
assert final.shape[1] == 694, "wrong number of columns"
final_path = output_path.child('Replicate_'+replicate+'.csv')
print " Writing output to", final_path
with open(final_path, 'w') as fp:
cols = final.columns
cols = [non_ascii(col) for col in cols]
final.columns = cols
final.to_csv(fp, index=False)
| [
"[email protected]"
] | |
6a2fa3268aaa3900c0d2c08b01d2ad48f9be95f7 | e5333b2e54f1adf2e5bc88a9a242234c5f15851a | /misoclib/com/liteeth/core/mac/core/crc.py | f08302b164d4733c2a5cc4d933ae70d2372c05e0 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hoangt/misoc | 1aaf850c18bab5b18db1fcc788feb96afbbc464e | 6c13879fb605a1ee2bd5a3b35669e093f9a4267b | refs/heads/master | 2021-01-21T02:55:59.398987 | 2015-07-13T15:00:03 | 2015-07-13T15:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,639 | py | from misoclib.com.liteeth.common import *
class LiteEthMACCRCEngine(Module):
"""Cyclic Redundancy Check Engine
Compute next CRC value from last CRC value and data input using
an optimized asynchronous LFSR.
Parameters
----------
data_width : int
Width of the data bus.
width : int
Width of the CRC.
polynom : int
Polynom of the CRC (ex: 0x04C11DB7 for IEEE 802.3 CRC)
Attributes
----------
data : in
Data input.
last : in
last CRC value.
next :
next CRC value.
"""
def __init__(self, data_width, width, polynom):
self.data = Signal(data_width)
self.last = Signal(width)
self.next = Signal(width)
# # #
def _optimize_eq(l):
"""
Replace even numbers of XORs in the equation
with an equivalent XOR
"""
d = OrderedDict()
for e in l:
if e in d:
d[e] += 1
else:
d[e] = 1
r = []
for key, value in d.items():
if value%2 != 0:
r.append(key)
return r
# compute and optimize CRC's LFSR
curval = [[("state", i)] for i in range(width)]
for i in range(data_width):
feedback = curval.pop() + [("din", i)]
for j in range(width-1):
if (polynom & (1<<(j+1))):
curval[j] += feedback
curval[j] = _optimize_eq(curval[j])
curval.insert(0, feedback)
# implement logic
for i in range(width):
xors = []
for t, n in curval[i]:
if t == "state":
xors += [self.last[n]]
elif t == "din":
xors += [self.data[n]]
self.comb += self.next[i].eq(optree("^", xors))
@DecorateModule(InsertReset)
@DecorateModule(InsertCE)
class LiteEthMACCRC32(Module):
"""IEEE 802.3 CRC
Implement an IEEE 802.3 CRC generator/checker.
Parameters
----------
data_width : int
Width of the data bus.
Attributes
----------
d : in
Data input.
value : out
CRC value (used for generator).
error : out
CRC error (used for checker).
"""
width = 32
polynom = 0x04C11DB7
init = 2**width-1
check = 0xC704DD7B
def __init__(self, data_width):
self.data = Signal(data_width)
self.value = Signal(self.width)
self.error = Signal()
# # #
self.submodules.engine = LiteEthMACCRCEngine(data_width, self.width, self.polynom)
reg = Signal(self.width, reset=self.init)
self.sync += reg.eq(self.engine.next)
self.comb += [
self.engine.data.eq(self.data),
self.engine.last.eq(reg),
self.value.eq(~reg[::-1]),
self.error.eq(self.engine.next != self.check)
]
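# Software reference sketch (not part of the gateware): byte-wise CRC-32 in
# the reflected form 0xEDB88320 of the 0x04C11DB7 polynom, init 0xFFFFFFFF,
# final inversion (IEEE 802.3). Handy for cross-checking in simulation; the
# helper name below is our own.
def _sw_crc32_reference(data):
    crc = 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0xEDB88320 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF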
class LiteEthMACCRCInserter(Module):
"""CRC Inserter
Append a CRC at the end of each packet.
Parameters
----------
description : description
description of the dataflow.
Attributes
----------
sink : in
Packets input without CRC.
source : out
Packets output with CRC.
"""
def __init__(self, crc_class, description):
self.sink = sink = Sink(description)
self.source = source = Source(description)
self.busy = Signal()
# # #
dw = flen(sink.data)
crc = crc_class(dw)
fsm = FSM(reset_state="IDLE")
self.submodules += crc, fsm
fsm.act("IDLE",
crc.reset.eq(1),
sink.ack.eq(1),
If(sink.stb & sink.sop,
sink.ack.eq(0),
NextState("COPY"),
)
)
fsm.act("COPY",
crc.ce.eq(sink.stb & source.ack),
crc.data.eq(sink.data),
Record.connect(sink, source),
source.eop.eq(0),
If(sink.stb & sink.eop & source.ack,
NextState("INSERT"),
)
)
ratio = crc.width//dw
if ratio > 1:
cnt = Signal(max=ratio, reset=ratio-1)
cnt_done = Signal()
fsm.act("INSERT",
source.stb.eq(1),
chooser(crc.value, cnt, source.data, reverse=True),
If(cnt_done,
source.eop.eq(1),
If(source.ack, NextState("IDLE"))
)
)
self.comb += cnt_done.eq(cnt == 0)
self.sync += \
If(fsm.ongoing("IDLE"),
cnt.eq(cnt.reset)
).Elif(fsm.ongoing("INSERT") & ~cnt_done,
cnt.eq(cnt - source.ack)
)
else:
fsm.act("INSERT",
source.stb.eq(1),
source.eop.eq(1),
source.data.eq(crc.value),
If(source.ack, NextState("IDLE"))
)
self.comb += self.busy.eq(~fsm.ongoing("IDLE"))
class LiteEthMACCRC32Inserter(LiteEthMACCRCInserter):
def __init__(self, description):
LiteEthMACCRCInserter.__init__(self, LiteEthMACCRC32, description)
class LiteEthMACCRCChecker(Module):
"""CRC Checker
Check CRC at the end of each packet.
Parameters
----------
description : description
description of the dataflow.
Attributes
----------
sink : in
Packets input with CRC.
source : out
Packets output without CRC and "error" set to 0
on eop when CRC OK / set to 1 when CRC KO.
"""
def __init__(self, crc_class, description):
self.sink = sink = Sink(description)
self.source = source = Source(description)
self.busy = Signal()
# # #
dw = flen(sink.data)
crc = crc_class(dw)
self.submodules += crc
ratio = crc.width//dw
error = Signal()
fifo = InsertReset(SyncFIFO(description, ratio + 1))
self.submodules += fifo
fsm = FSM(reset_state="RESET")
self.submodules += fsm
fifo_in = Signal()
fifo_out = Signal()
fifo_full = Signal()
self.comb += [
fifo_full.eq(fifo.fifo.level == ratio),
fifo_in.eq(sink.stb & (~fifo_full | fifo_out)),
fifo_out.eq(source.stb & source.ack),
Record.connect(sink, fifo.sink),
fifo.sink.stb.eq(fifo_in),
self.sink.ack.eq(fifo_in),
source.stb.eq(sink.stb & fifo_full),
source.sop.eq(fifo.source.sop),
source.eop.eq(sink.eop),
fifo.source.ack.eq(fifo_out),
source.payload.eq(fifo.source.payload),
source.error.eq(sink.error | crc.error),
]
fsm.act("RESET",
crc.reset.eq(1),
fifo.reset.eq(1),
NextState("IDLE"),
)
self.comb += crc.data.eq(sink.data)
fsm.act("IDLE",
If(sink.stb & sink.sop & sink.ack,
crc.ce.eq(1),
NextState("COPY")
)
)
fsm.act("COPY",
If(sink.stb & sink.ack,
crc.ce.eq(1),
If(sink.eop,
NextState("RESET")
)
)
)
self.comb += self.busy.eq(~fsm.ongoing("IDLE"))
class LiteEthMACCRC32Checker(LiteEthMACCRCChecker):
def __init__(self, description):
LiteEthMACCRCChecker.__init__(self, LiteEthMACCRC32, description)
| [
"[email protected]"
] | |
7529c1864d46cd2e1e57b64e66c2b36754ac89dc | 1713334f9b68255f9adab70175c21f399d0460f3 | /python/090_Subsets_II.py | 520727cc72e1a7221eaa68275d4716961b9b0695 | [
"MIT"
] | permissive | coy0725/leetcode | 0a798b7adafe80f726e51c06c34835c4aa51b563 | 743a0bfa22402ec39858dc9c4c7dc531f825b953 | refs/heads/master | 2020-05-21T18:25:09.683714 | 2019-05-11T13:00:40 | 2019-05-11T13:00:40 | 186,132,894 | 2 | 0 | MIT | 2019-05-11T12:55:22 | 2019-05-11T12:55:21 | null | UTF-8 | Python | false | false | 1,369 | py | class Solution(object):
# def subsetsWithDup(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# nums.sort()
# res = []
# for i in range(1 << len(nums)):
# res.append(self.get_subsets(nums, i))
# # remove duplicate
# final_res = {}
# for subset in res:
# hash_key = ''.join([str(t) for t in subset])
# try:
# final_res[hash_key]
# except:
# final_res[hash_key] = subset
# return final_res.values()
#
# def get_subsets(self, nums, magic):
# res = []
# for i in range(len(nums)):
# if (1 << i) & magic != 0:
# res.append(nums[i])
# return res
def subsetsWithDup(self, nums):
nums.sort()
res = [[]]
begin = 0
for index in range(len(nums)):
if index == 0 or nums[index] != nums[index - 1]:
# generate all
begin = 0
size = len(res)
# use existing subsets to generate new subsets
for j in range(begin, size):
curr = list(res[j])
curr.append(nums[index])
res.append(curr)
# avoid duplicate subsets
begin = size
return res
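    # Trace for nums = [1, 2, 2]:
    #   index 0 (new 1):            res = [[], [1]]
    #   index 1 (new 2):            res = [[], [1], [2], [1, 2]]
    #   index 2 (repeat, begin=2):  res += [[2, 2], [1, 2, 2]]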
| [
"[email protected]"
] | |
8e6d62758f3ba2b4d14aabfcf98d67e54cdbf873 | 0809673304fe85a163898983c2cb4a0238b2456e | /tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/_collections_abc.py | e4c1159a71158be1f13b05efc9d1e5db29e779e4 | [
"Apache-2.0"
] | permissive | jasonwee/asus-rt-n14uhp-mrtg | 244092292c94ff3382f88f6a385dae2aa6e4b1e1 | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | refs/heads/master | 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 | Apache-2.0 | 2022-11-27T04:03:06 | 2014-10-22T15:42:28 | Python | UTF-8 | Python | false | false | 38 | py | /usr/lib/python3.4/_collections_abc.py | [
"[email protected]"
] | |
ea825aa8dfb7fdbdf0f38041fed13b0c901d9a7f | 281c9bea63bf7d1188b40ae2cf3f2aa53f97a297 | /sections/views.py | 26113411fba416b064af202d600ebcb71728b3a6 | [] | no_license | pydatageek/fazla | 3ec13c5e8f4a621eb82e1d82e003e0e0e68f3657 | 0890de73f23e3f72b41095130d703a793745765e | refs/heads/master | 2023-02-14T01:11:17.477576 | 2021-01-05T10:15:58 | 2021-01-05T10:15:58 | 294,544,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render
from django.urls import resolve, reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.defaults import server_error
from django.views.generic import (
DetailView, ListView, TemplateView)
from django.views.generic.edit import FormView
from core.choices import titles
from core.forms import ContactForm
def handler400(request, exception):
return render(request, 'lte/400.html', status=400)
def handler403(request, exception):
return render(request, 'lte/403.html', status=403)
def handler404(request, exception):
return render(request, 'lte/404.html', status=404)
def handler500(request):
return server_error(request, 'lte/500.html')
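# These only take effect when referenced from the ROOT_URLCONF, e.g.:
#   handler404 = 'sections.views.handler404'
#   handler500 = 'sections.views.handler500'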
class HomeView(TemplateView):
""""""
# Fazla.net facts and stats
class SourceView(TemplateView):
""""""
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = _('Sources')
return context
class AboutView(TemplateView):
""""""
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = _('About')
return context
class ContactView(SuccessMessageMixin, FormView):
""""""
form_class = ContactForm
success_url = reverse_lazy('contact')
success_message = _('Your form submission is successful, thank you.')
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['title'] = _('Contact Us')
return context
def form_valid(self, form):
form.save()
return super().form_valid(form)
| [
"[email protected]"
] | |
241e3a94e942b64cd327a21199bf89a2fa868b9f | c522b0332ee42d01f1ee5bdd3cdd3d72eb9af24b | /venv/lib/python3.8/site-packages/lusid/models/transaction_set_configuration_data.py | 451ff60c8d6a19320df52968e69d9e43274314de | [] | no_license | Jeffkent01coder/trackphone | e5aad6f99efb0f0c11f260d1f2a0b232d5453dfe | 3570375938c7e947eb272d2cec1589202351141c | refs/heads/master | 2023-05-10T22:27:40.255686 | 2021-06-02T10:23:17 | 2021-06-02T10:23:17 | 373,125,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,941 | py | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2820
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TransactionSetConfigurationData(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'transaction_configs': 'list[TransactionConfigurationData]',
'side_definitions': 'list[SideConfigurationData]',
'links': 'list[Link]'
}
attribute_map = {
'transaction_configs': 'transactionConfigs',
'side_definitions': 'sideDefinitions',
'links': 'links'
}
required_map = {
'transaction_configs': 'required',
'side_definitions': 'optional',
'links': 'optional'
}
def __init__(self, transaction_configs=None, side_definitions=None, links=None): # noqa: E501
"""
TransactionSetConfigurationData - a model defined in OpenAPI
:param transaction_configs: Collection of transaction type models (required)
:type transaction_configs: list[lusid.TransactionConfigurationData]
:param side_definitions: Collection of side definitions
:type side_definitions: list[lusid.SideConfigurationData]
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._transaction_configs = None
self._side_definitions = None
self._links = None
self.discriminator = None
self.transaction_configs = transaction_configs
self.side_definitions = side_definitions
self.links = links
@property
def transaction_configs(self):
"""Gets the transaction_configs of this TransactionSetConfigurationData. # noqa: E501
Collection of transaction type models # noqa: E501
:return: The transaction_configs of this TransactionSetConfigurationData. # noqa: E501
:rtype: list[TransactionConfigurationData]
"""
return self._transaction_configs
@transaction_configs.setter
def transaction_configs(self, transaction_configs):
"""Sets the transaction_configs of this TransactionSetConfigurationData.
Collection of transaction type models # noqa: E501
:param transaction_configs: The transaction_configs of this TransactionSetConfigurationData. # noqa: E501
:type: list[TransactionConfigurationData]
"""
if transaction_configs is None:
raise ValueError("Invalid value for `transaction_configs`, must not be `None`") # noqa: E501
self._transaction_configs = transaction_configs
@property
def side_definitions(self):
"""Gets the side_definitions of this TransactionSetConfigurationData. # noqa: E501
Collection of side definitions # noqa: E501
:return: The side_definitions of this TransactionSetConfigurationData. # noqa: E501
:rtype: list[SideConfigurationData]
"""
return self._side_definitions
@side_definitions.setter
def side_definitions(self, side_definitions):
"""Sets the side_definitions of this TransactionSetConfigurationData.
Collection of side definitions # noqa: E501
:param side_definitions: The side_definitions of this TransactionSetConfigurationData. # noqa: E501
:type: list[SideConfigurationData]
"""
self._side_definitions = side_definitions
@property
def links(self):
"""Gets the links of this TransactionSetConfigurationData. # noqa: E501
:return: The links of this TransactionSetConfigurationData. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this TransactionSetConfigurationData.
:param links: The links of this TransactionSetConfigurationData. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TransactionSetConfigurationData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
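
# Usage sketch (hypothetical values; TransactionConfigurationData is generated
# in the same lusid.models package):
#   data = TransactionSetConfigurationData(transaction_configs=[some_config])
#   print(data.to_dict())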
| [
"[email protected]"
] | |
665c35990e477086523bce03d9fee6d46495ae84 | e6f16fbba8fba750099252c3490f00079cb19101 | /算法/350_两个数组的交集Ⅱ.py | ad88f718a1d9fe87106cf709d3b5ade3707c38aa | [] | no_license | hookeyplayer/exercise.io | 0a36fbec9df6c24b60ff6f97de27d3d5ae7769d4 | 605c81cb44443efd974db9fa0a088ddcd5a96f0f | refs/heads/master | 2023-06-20T17:03:20.310816 | 2021-07-31T12:50:21 | 2021-07-31T12:50:21 | 277,175,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # 结果尊重实际的个数,含重复
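# Translation of the comment above: the result respects each element's actual
# count, so duplicates are included.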
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
nums2.sort()
nums1.sort()
ans = []
pos1, pos2 = 0, 0
while pos1 < len(nums1) and pos2 < len(nums2):
if nums1[pos1] == nums2[pos2]:
ans.append(nums1[pos1])
pos1 += 1
pos2 += 1
elif nums1[pos1] < nums2[pos2]:
pos1 += 1
else:
pos2 += 1
return ans
test = Solution()
nums1 = [1,2,2,1]
nums2 = [2,2]
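
# Alternative sketch (not in the original solution): counting with a hash map
# instead of sort + two pointers runs in O(m + n) time with O(m) extra space
# (O(min(m, n)) if you count the smaller array), and it preserves duplicate counts.
from collections import Counter

def intersect_with_counter(nums1, nums2):
    counts = Counter(nums1)
    ans = []
    for n in nums2:
        if counts[n] > 0:  # an unmatched copy of n remains in nums1
            ans.append(n)
            counts[n] -= 1
    return ans

print(intersect_with_counter(nums1, nums2))  # [2, 2]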
print(test.intersect(nums1, nums2)) # [2, 2] | [
"[email protected]"
] | |
fa3d62dc6ea23306d2e45604b8b2469de665a70d | 4ede96380f20c65e014f7e5748789c81a4700115 | /enums/enum_example_pb2.py | 89ed4506ac879d427eb539c122205ebaab314ea2 | [] | no_license | DavidWalshe93/Python_Protobuf | 1724689fc4d24c51d2bf40cb5ac2655355ed9aae | 1af5ecf7ac9fd479b7e283d9cb5ef1c5dd54a94a | refs/heads/master | 2022-04-20T08:04:38.660161 | 2020-04-18T00:26:51 | 2020-04-18T00:26:51 | 256,630,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,370 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: enum_example.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='enum_example.proto',
package='example.enumerations',
syntax='proto3',
serialized_options=b'Z\006enumpb',
serialized_pb=b'\n\x12\x65num_example.proto\x12\x14\x65xample.enumerations\"V\n\x0b\x45numMessage\x12\n\n\x02id\x18\x01 \x01(\x05\x12;\n\x0f\x64\x61y_of_the_week\x18\x02 \x01(\x0e\x32\".example.enumerations.DayOfTheWeek*w\n\x0c\x44\x61yOfTheWeek\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06MONDAY\x10\x01\x12\x0b\n\x07TUESDAY\x10\x02\x12\r\n\tWEDNESDAY\x10\x03\x12\x0c\n\x08THURSDAY\x10\x04\x12\n\n\x06\x46RIDAY\x10\x05\x12\x0c\n\x08SATURDAY\x10\x06\x12\n\n\x06SUNDAY\x10\x07\x42\x08Z\x06\x65numpbb\x06proto3'
)
_DAYOFTHEWEEK = _descriptor.EnumDescriptor(
name='DayOfTheWeek',
full_name='example.enumerations.DayOfTheWeek',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MONDAY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUESDAY', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEDNESDAY', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='THURSDAY', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FRIDAY', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SATURDAY', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUNDAY', index=7, number=7,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=132,
serialized_end=251,
)
_sym_db.RegisterEnumDescriptor(_DAYOFTHEWEEK)
DayOfTheWeek = enum_type_wrapper.EnumTypeWrapper(_DAYOFTHEWEEK)
UNKNOWN = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
_ENUMMESSAGE = _descriptor.Descriptor(
name='EnumMessage',
full_name='example.enumerations.EnumMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='example.enumerations.EnumMessage.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='day_of_the_week', full_name='example.enumerations.EnumMessage.day_of_the_week', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=130,
)
_ENUMMESSAGE.fields_by_name['day_of_the_week'].enum_type = _DAYOFTHEWEEK
DESCRIPTOR.message_types_by_name['EnumMessage'] = _ENUMMESSAGE
DESCRIPTOR.enum_types_by_name['DayOfTheWeek'] = _DAYOFTHEWEEK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EnumMessage = _reflection.GeneratedProtocolMessageType('EnumMessage', (_message.Message,), {
'DESCRIPTOR' : _ENUMMESSAGE,
'__module__' : 'enum_example_pb2'
# @@protoc_insertion_point(class_scope:example.enumerations.EnumMessage)
})
_sym_db.RegisterMessage(EnumMessage)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
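
# Usage sketch (assumes this generated module is importable as enum_example_pb2):
#   msg = EnumMessage(id=1, day_of_the_week=FRIDAY)
#   payload = msg.SerializeToString()
#   restored = EnumMessage.FromString(payload)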
| [
"[email protected]"
] | |
be75614c6f9ae2e85a6876128bdb7b9c4a8628dd | cea49ef38528989f4d2383b980265d2bd7aa6114 | /fabfile.py | cad1562f11cd007c73634bc5d8d7105dbd8a8f1d | [
"Apache-2.0"
] | permissive | pombredanne/lexibank | 672560c8fd4480edc2761314ef1057db67a2f458 | 2b6133729c8288fe1bf80c3c3020689e3a35778b | refs/heads/master | 2021-01-17T07:55:23.313670 | 2015-09-07T19:11:37 | 2015-09-07T19:11:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 52 | py | from clldfabric import tasks
tasks.init('lexibank')
| [
"[email protected]"
] | |
b13c1da60cd7c88594500929261c5c9b25d14f78 | ffdd64174bee59665833535c9ae618abd6ddcc7a | /graphs_to_tensors.py | 1be5bca997a64a463d7dbcb57d8ec8dceb298fed | [] | no_license | pengyuan2020/ProPPR-python | dee9e8e2713169af8538ac2cbc0a1f29c93f0d9a | 2977e9e0801d94048d114df8349d13b52091a7a8 | refs/heads/master | 2022-04-10T06:29:45.588641 | 2017-12-21T00:50:53 | 2017-12-21T00:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,856 | py | import numpy as np
import networkx as nx
import sparse
import nltk
import cPickle as pickle
import os
from setting import *
from joblib import Parallel, delayed
def get_proof_graph(ppr_grounded_line, feature_vector_size):
graph = nx.DiGraph()
array = ppr_grounded_line.strip().split(' ')
query_example = array[0]
query_node = int(array[1])
pos_nodes = map(int, array[2].split(','))
neg_nodes = map(int, array[3].split(','))
nodes_count = int(array[4])
edges_count = int(array[5])
label_dependencies_count = int(array[6])
edges = array[7:]
nodes = []
for e in ppr_grounded_line.strip().split(' '):
if '->' in e:
nodes.append(int(e.split('->')[0]))
nodes.append(int(e.split('->')[1].split(':')[0]))
nodes = list(set(nodes))
for node in nodes:
if node in pos_nodes:
graph.add_node(node, Label=1)
elif node in neg_nodes:
graph.add_node(node, Label=-1)
elif node == query_node:
graph.add_node(node, Label=2)
else:
graph.add_node(node, Label=0)
for edge in edges:
source, target, feature_weights = edge.replace('->', ':').split(':')
source = int(source)
target = int(target)
feature_weights = [
feature.split('@') for feature in feature_weights.split(',')
]
feature_weights = [(int(feature_weight[0]), float(feature_weight[1]))
for feature_weight in feature_weights]
vector = [0.0] * feature_vector_size
for feature_weight in feature_weights:
vector[feature_weight[0]] = feature_weight[1]
# graph.add_edge( source, target, {'feature_vector': ",".join( map(str, vector) ) } )
graph.add_edge(source, target, {'feature_vector': vector})
# nx.write_graphml( graph, "graph.graphml" )
return graph
def get_proof_graph_tensor(proof_graph, feature_vector_size):
node_list = proof_graph.nodes()
adjacency_matrix = nx.adjacency_matrix(proof_graph, weight=None)
adjacency_matrix = adjacency_matrix.astype(float)
size = len(node_list)
featured_adjacency_matrix = np.array([[[0.0] * feature_vector_size
for x in range(size)]
for y in range(size)])
for edge in proof_graph.edges_iter():
source = edge[0]
target = edge[1]
source_index = node_list.index(edge[0])
target_index = node_list.index(edge[1])
feature_vector = proof_graph[source][target]['feature_vector']
featured_adjacency_matrix[source_index][target_index] = feature_vector
featured_adjacency_matrix = np.reshape(featured_adjacency_matrix,
[size, size, -1])
correct_answer_vector = np.zeros([size, 1], dtype=np.float32)
incorrect_answer_vector = np.zeros([size, 1], dtype=np.float32)
one_hot_query_vector = np.zeros([size, 1], dtype=np.float32)
for node_index, node in enumerate(proof_graph.nodes()):
if int(proof_graph.node[node]["Label"]) == 1:
correct_answer_vector[node_index] = 1.0
else:
correct_answer_vector[node_index] = 0.0
for node_index, node in enumerate(proof_graph.nodes()):
if int(proof_graph.node[node]["Label"]) == -1:
incorrect_answer_vector[node_index] = 1.0
else:
incorrect_answer_vector[node_index] = 0.0
for node_index, node in enumerate(proof_graph.nodes()):
if int(proof_graph.node[node]["Label"]) == 2:
one_hot_query_vector[node_index] = 1.0
else:
one_hot_query_vector[node_index] = 0.0
return [
one_hot_query_vector, featured_adjacency_matrix, correct_answer_vector,
incorrect_answer_vector
]
##### (END) Conversion of question objects/graphs to feature representations #####
def dump_graph_tensor(idx, tensors_dir, ppr_grounded_line):
feature_vector_size = int(ppr_grounded_line.strip().split('\t')[6]) + 1
proof_graph = get_proof_graph(ppr_grounded_line, feature_vector_size)
data = get_proof_graph_tensor(proof_graph, feature_vector_size)
sparse_data = [sparse.COO(item) for item in data]
sparse_tensor_path = os.path.join(tensors_dir,
'{}-sparse.pickle'.format(idx))
with open(sparse_tensor_path, 'wb') as g:
pickle.dump(sparse_data, g, protocol=pickle.HIGHEST_PROTOCOL)
return feature_vector_size
# ppr_grounded_line = 'predict(train00004,X1). 1 6 5 6 13 42 6->6:[email protected] 6->1:[email protected] 5->5:[email protected] 5->1:[email protected] 4->6:[email protected],[email protected],[email protected],[email protected],[email protected] 4->1:[email protected] 3->5:[email protected],[email protected],[email protected],[email protected],[email protected] 3->1:[email protected] 2->3:[email protected] 2->4:[email protected] 2->1:[email protected] 1->2:[email protected] 1->1:[email protected]'
processed_data_dir = os.path.join('ProcessedData', program_name)
set_names = ['train', 'test']
process_count = 4
for set_name in set_names:
print 'In set {}'.format(set_name)
sld_grounded_path = os.path.join(
processed_data_dir, program_name + '-{}.grounded'.format(set_name))
tensors_dir = os.path.join(processed_data_dir, 'Tensors', set_name)
if not os.path.exists(tensors_dir):
os.makedirs(tensors_dir)
with open(sld_grounded_path) as f:
ppr_grounded_lines = f.readlines()
feature_vector_sizes = Parallel(n_jobs=process_count)(
delayed(dump_graph_tensor)(idx, tensors_dir, ppr_grounded_line)
for idx, ppr_grounded_line in enumerate(ppr_grounded_lines))
feature_vector_size = feature_vector_sizes[0]
print 'set {} processed'.format(set_name)
feature_vector_size_path = os.path.join(processed_data_dir, 'feat_size.txt')
with open(feature_vector_size_path, 'w') as f:
f.write(str(feature_vector_size))
| [
"[email protected]"
] | |
f0231d6bbd4a58f6b16cf5bba65790be6216608a | 0d0b8236ff06027037d2a8a724d13a1866a9999c | /0x11-python-network_1/5-hbtn_header.py | 34b55fdc04a4eacfad8cc0eaf47eda1070829434 | [] | no_license | Danucas/holbertonschool-higher_level_programming | 3f8e81a610bf80890280b764362b56ad8803e2df | b963d41af8bccf764dff67f80ea16f1184c0a96d | refs/heads/master | 2022-07-31T05:53:57.046789 | 2020-05-21T21:29:54 | 2020-05-21T21:29:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | #!/usr/bin/python3
"""
Python script to fetch an https request
"""
import requests
import sys
def main():
response = requests.get(sys.argv[1])
try:
print(response.headers['X-Request-Id'])
except:
pass
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9e7d0dfdbc7930489b9ea969fab38d8a0d270139 | 46942a623716ef361d892274f3f47d499fe91a97 | /service/wikipedia_scrape_service/service.py | e4bb2d38d1c8ec127bcfd478b23537916a54b77b | [] | no_license | ramosjanoah/gegeelisa | 86ebac3bfd1f03f45b087f4b78e9a1a9da857924 | 4e782b247bdc024153bf9ff271368957ab95727a | refs/heads/master | 2020-11-27T10:50:35.631254 | 2019-12-21T12:39:12 | 2019-12-21T12:39:12 | 229,410,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from urllib.request import urlopen
from functools import lru_cache
import repository
from repository import redis
from repository import beautiful_soup
from model import WikipediaPage, WikipediaPageComponent, PageStateEnum
from .scrape_unfounded_page import ScrapeUnfoundedPage
from .scrape_founded_page import ScrapeFoundedPage
from helper import *
class WikipediaScrapeService():
def __init__(self):
pass
def scrape_unfounded_page(self, page_id, **kwargs):
return ScrapeUnfoundedPage(page_id, kwargs).perform()
def scrape_founded_page(self, page_id, **kwargs):
return ScrapeFoundedPage(page_id, kwargs).perform()
if __name__ == '__main__':
pass
else:
WikipediaScrapeService = WikipediaScrapeService()
| [
"[email protected]"
] | |
6c0d12ea106d70215d25e9ad8be29acdaaef13c0 | 0d1576eea1733c04e227fda48b2fc1d13f5f37cb | /PyFBA/gapfill/limit_reactions.py | d460fbcfb12cc965ec07c19c71590b8997bd6773 | [
"MIT"
] | permissive | linsalrob/PyFBA | d207b976a7cc3a6dc682647b4a72396bde83a892 | 3723e95e6f96481c4fc470a004dc88da3617f3c3 | refs/heads/master | 2023-03-09T10:16:11.812589 | 2023-02-25T21:54:19 | 2023-02-25T21:54:19 | 46,241,465 | 27 | 15 | MIT | 2021-05-22T22:56:22 | 2015-11-15T23:51:34 | Python | UTF-8 | Python | false | false | 1,267 | py |
def limit_reactions_by_compound(reactions, reactions2run, suggestions, max_rcts=50):
"""
Limit the reactions in suggestions based on the compounds present in
the reactions in reactions2run and the number of reactions that each
compound is associated with.
We need to have < max_rcts reactions per compound for it to be
considered. This is to avoid things like H2O that have a lot of
    connections.
:param reactions: The reactions dict
:type reactions: dict
:param reactions2run: our base set of reactions that we will run
:type reactions2run: set
:param suggestions: the reactions we are considering adding
:type suggestions: set
:param max_rcts: the maximum number of reactions per compound
:type max_rcts: int
:return: a set of reactions which is those members of suggestions that meet our criteria
:rtype: set
"""
cpd = {}
for r in reactions2run:
for c in reactions[r].all_compounds():
cpd[str(c)] = cpd.get(str(c), 0) + 1
keep = set()
for r in suggestions:
for c in reactions[r].all_compounds():
if str(c) in cpd and (cpd[str(c)] < max_rcts):
keep.add(r)
keep.difference_update(reactions2run)
return keep
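
# Minimal usage sketch (hypothetical data): PyFBA's real reactions dict maps
# reaction ids to objects exposing all_compounds(); a stub stands in here.
if __name__ == '__main__':
    class _StubReaction:
        def __init__(self, compounds):
            self._compounds = compounds

        def all_compounds(self):
            return self._compounds

    demo_reactions = {
        'r1': _StubReaction({'cpdA', 'cpdB'}),
        'r2': _StubReaction({'cpdB', 'cpdC'}),
        'r3': _StubReaction({'cpdX'}),
    }
    # 'r2' is kept: it shares cpdB with the base set and cpdB appears in fewer
    # than max_rcts reactions; 'r3' shares no compound and is dropped.
    print(limit_reactions_by_compound(demo_reactions, {'r1'}, {'r2', 'r3'}))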
| [
"[email protected]"
] | |
ad29e7018315f14b50adfa5ff59da8fa74676902 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/benchmarks/src/benchmarks/simplejson-bench.py | a634f84c31fc67358e8e6a81d8aaa1487d1b1e20 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import sys, time, os
# setup path
simplejson_path = os.path.join(os.path.dirname(__file__), 'simplejson')
sys.path.append(simplejson_path)
from simplejson.encoder import JSONEncoder
encoder = JSONEncoder()
def produceData():
lst = [i for i in range(3000)]
return lst
DATA = produceData()
def encodeList(n):
for i in range(n):
json = encoder.refactored_encode(DATA)
return json
def encodeObject():
class Foo:
def for_json(self):
return {'a':1, 'b':2, 'c': [i for i in range(3000)]}
return encoder.refactored_encode(Foo())
def measure():
print("Start timing...")
start = time.time()
json = encodeList(num)
duration = "%.3f\n" % (time.time() - start)
# print(json)
print("simplejson-encode: " + duration)
num = int(sys.argv[1])  # iteration count for the timed run, e.g. 10000
# warm up
for i in range(100):
json = encodeList(100)
measure()
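# Side note (sketch): encodeObject() above exercises simplejson's for_json hook;
# whether it works here depends on the vendored encoder, since upstream
# simplejson only honors for_json when built with JSONEncoder(for_json=True).
# If supported, it can be timed the same way as encodeList.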
| [
"[email protected]"
] | |
37353ff9bc0e9011bda19ecf7bd8839ecc8cfae9 | 6a990a3d549ca3d1c607b60b13b10f14c0e15787 | /game/tuitongzi/command/client/reconnection_cmd.py | 8b4f70d45f713635ef02826a32818bd913f407f4 | [] | no_license | qianyc1020/server | 217a766f7df5a0acfb983d3fc48317a932fe092e | 3c897e5d6ee453d0a2f3b371b5eda5af954b8d1a | refs/heads/master | 2020-06-10T08:55:31.996449 | 2019-06-24T06:26:43 | 2019-06-24T06:26:43 | 193,626,075 | 1 | 0 | null | 2019-06-25T03:12:39 | 2019-06-25T03:12:37 | null | UTF-8 | Python | false | false | 3,693 | py | # coding=utf-8
import traceback
import core.globalvar as gl
from game.tuitongzi.mode.game_status import GameStatus
from protocol.base.base_pb2 import REENTER_GAME, SELF_INFO, SELF_PLAYED
from protocol.base.game_base_pb2 import RecReEnterGame, RecUpdateGameUsers
from protocol.game.bairen_pb2 import BaiRenScore
def execute(userId, message, messageHandle):
redis = gl.get_v("redis")
if redis.exists(str(userId) + "_room"):
roomNo = redis.get(str(userId) + "_room")
gameid = redis.get(str(roomNo) + "_gameId")
if 7 != gameid:
return
redis.lock("lockroom_" + str(roomNo))
try:
room = redis.getobj("room_" + str(roomNo))
seat = room.getWatchSeatByUserId(userId)
if seat is not None:
room.sendBetScore(messageHandle)
room.save(redis)
recReEnterGame = RecReEnterGame()
recReEnterGame.gameState = room.gameStatus
recReEnterGame.state = True
recReEnterGame.curPlayCount = room.gameCount
messageHandle.send_to_gateway(REENTER_GAME, recReEnterGame)
room.recUpdateGameInfo(messageHandle)
if room.getSeatByUserId(userId) is not None:
room.recUpdateScore(messageHandle, 0)
s = room.getSeatByUserId(userId)
userInfo = RecUpdateGameUsers.UserInfo()
userInfo.account = s.account
userInfo.playerId = s.userId
userInfo.headUrl = s.head
userInfo.createTime = s.createDate
userInfo.ip = s.ip
userInfo.online = s.online
userInfo.nick = s.nickname
userInfo.ready = s.ready
userInfo.score = s.score - s.playScore
userInfo.sex = s.sex
userInfo.totalCount = s.total_count
userInfo.loc = s.seatNo
userInfo.consumeVip = s.level
messageHandle.send_to_gateway(SELF_INFO, userInfo)
room.updateBankerList(messageHandle, userId)
else:
room.recUpdateScore(messageHandle, userId)
room.updateBankerList(messageHandle, userId)
room.updateTrend(messageHandle, userId)
room.updateWatchSize(messageHandle, userId)
if room.gameStatus != GameStatus.WAITING:
room.recReEnterGameInfo(messageHandle, userId)
if seat.playScore > 0:
score = BaiRenScore()
for position in room.positions:
score.score.append(0 if userId not in position.playScores else position.playScores[userId])
messageHandle.send_to_gateway(SELF_PLAYED, score)
room.executeAsk(messageHandle, userId, 2)
else:
if room.started:
room.recReEnterGameInfo(messageHandle, userId)
if seat.playScore > 0:
score = BaiRenScore()
for position in room.positions:
score.score.append(
0 if userId not in position.playScores else position.playScores[userId])
messageHandle.send_to_gateway(SELF_PLAYED, score)
room.executeAsk(messageHandle, userId, 1)
except:
            traceback.print_exc()
redis.unlock("lockroom_" + str(roomNo))
| [
"[email protected]"
] | |
39430d9671b2910b0caaeea21a9fb271311ea5f1 | 753de2f926ba91986742a12904736443190766b6 | /personas/asgi.py | bc8be5ecff5241e6528591f0dccae80119bba61c | [] | no_license | nachovidondo/Documentacion_Personas | 236170274071c87bf9915614fac35798d916f1d0 | ab43e94e72cb5c31286dd7b32092498ff1496e0f | refs/heads/master | 2023-03-21T05:25:46.264032 | 2021-03-20T22:57:00 | 2021-03-20T22:57:00 | 349,852,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for personas project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'personas.settings')
application = get_asgi_application()
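# Deployment sketch (uvicorn is one ASGI server option; this project does not
# pin a particular server):
#   uvicorn personas.asgi:application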
| [
"[email protected]"
] | |
5803be5af3a275fe6d0c28041530f9decf49b8fe | 377d86194fd6d23c8ef3df3e6f7d90092dd8f9b4 | /workout_tracker/tests/unit/muscles/test_urls.py | 45a1cf524438ed183d455dd3dba59ca62ea62734 | [
"MIT"
] | permissive | e-dang/Workout-Tracker | f20f44b012e895244bad413a46103415ffae5732 | 00a27597ea628cff62b320d616f56b2df4f344a0 | refs/heads/master | 2022-12-28T07:49:34.179307 | 2020-10-12T20:48:28 | 2020-10-12T20:48:28 | 293,937,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from rest_framework.reverse import reverse
from tests.utils import add_api_prefix
def test_muscles_list_url():
assert reverse('muscle-list') == add_api_prefix('muscles/')
def test_muscles_detail_url():
pk = 1
assert reverse('muscle-detail', kwargs={'pk': pk}) == add_api_prefix(f'muscles/{pk}/')
| [
"[email protected]"
] | |
e5491beaf72768296fc9bc7d0172f95f085a9028 | 65e0c11d690b32c832b943fb43a4206739ddf733 | /bsdradius/branches/v_0_7/bsdradius/BsdRadiusServer.py | 18264ef4dafdb8a58a269f5a67f46f12236b0411 | [
"BSD-3-Clause"
] | permissive | Cloudxtreme/bsdradius | b5100062ed75c3201d179e190fd89770d8934aee | 69dba67e27215dce49875e94a7eedbbdf77bc784 | refs/heads/master | 2021-05-28T16:50:14.711056 | 2015-04-30T11:54:17 | 2015-04-30T11:54:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,940 | py | ## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
BSD Radius server class definition
Derived from Wichert Akkerman's <[email protected]> pyrad.
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/branches/v_0_7/bsdradius/BsdRadiusServer.py $
# Author: $Author: valts $
# File version: $Revision: 333 $
# Last changes: $Date: 2007-08-17 02:38:15 +0300 (Pk, 17 Aug 2007) $
# import modules
import select, socket
from bsdradius import Syncdeque
from threading import Thread
import time
from bsdradius.pyrad import packet
from bsdradius.logger import *
from bsdradius.Config import main_config
from bsdradius import modules
from bsdradius import logger
from bsdradius import misc
from bsdradius.serverModules import dumpPacket
# socket types
SOCKTYPE_AUTH = 1
SOCKTYPE_ACCT = 2
# maximum radius packet size
MAXPACKETSZ = 8192
# dropped packet exception
class DroppedPacket(Exception):
pass
# authentication failure exception
class AuthFailure(Exception):
pass
# accounting failure exception
class AcctFailure(Exception):
pass
class BsdRadiusServer:
"""BSD Radius Server class defnition
@ivar hosts: hosts who are allowed to talk to us
@type hosts: dictionary of Host class instances
@ivar pollobj: poll object for network sockets
@type pollobj: select.poll class instance
@ivar fdmap: map of filedescriptors to network sockets
@type fdmap: dictionary
"""
def __init__(self, addresses = [], authport = 1812, acctport = 1813, hosts = {}, dict = None):
"""Constructor.
@param addresses: IP addresses to listen on
@type addresses: sequence of strings
@param authport: port to listen on for authentication packets
@type authport: integer
@param acctport: port to listen on for accounting packets
@type acctport: integer
@param hosts: hosts who we can talk to
@type hosts: dictionary mapping IP to RemoteHost class instances
@param dict: RADIUS dictionary to use
@type dict: Dictionary class instance
"""
self.dict = dict
self.authport = authport
self.acctport = acctport
self.hosts = hosts
self.authfds = []
self.acctfds = []
for addr in addresses:
self.BindToAddress(addr)
def BindToAddress(self, addr):
"""Add an address to listen to.
An empty string indicates you want to listen on all addresses.
@param addr: IP address to listen on
@type addr: string
"""
authfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
authfd.bind((addr, self.authport))
acctfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
acctfd.bind((addr, self.acctport))
self.authfds.append(authfd)
self.acctfds.append(acctfd)
def CreateThreads(self):
"""Starts all threads"""
# start thread which listens to sockets
#thread.start_new_thread(self.Listen, ())
tListen = ListenThread(self)
tListen.start()
# start worker threads
numthreads = 1
if not main_config['SERVER']['no_threads']:
numthreads = main_config['SERVER']['number_of_threads']
for x in range(numthreads):
#thread.start_new_thread(self.WorkThread, (x,))
tWorking = WorkingThread(self)
tWorking.start()
def RegisterSockets(self):
"""Prepare all sockets to receive packets."""
events = select.POLLIN | select.POLLPRI | select.POLLERR
# register auth sockets
for sock in self.authfds:
self.fdmap[sock.fileno()] = (sock, SOCKTYPE_AUTH)
self.pollobj.register(sock, events)
# register accounting sockets
for sock in self.acctfds:
self.fdmap[sock.fileno()] = (sock, SOCKTYPE_ACCT)
self.pollobj.register(sock, events)
def Run(self):
"""Main loop.
Wait for packets to arrive via network and place them on
synchronization queues for other threads to process.
"""
# we map socket descriptors (integers) to their socket objects
# because when polling for events we only receive the
# descriptor int and must find which object it refers to
self.fdmap = {}
# register sockets for event polling
self.pollobj = select.poll()
self.RegisterSockets()
# get queue limits
auth_qlen = main_config["AUTHORIZATION"]["auth_queue_maxlength"]
acct_qlen = main_config["ACCOUNTING"]["acct_queue_maxlength"]
# create synchronization queue
self.packets = Syncdeque.RadiusDeque(auth_qlen, acct_qlen)
self.CreateThreads()
def addClientHosts(self, hostsInfo):
"""Simplify adding client hosts.
Input: (dict) clients configuration data.
Format: {'address': {'name' : name, 'secret': secret}}
Output: none
"""
for address, tokens in hostsInfo.items():
# print what we are doing
if str(address) not in self.hosts:
debug ('Adding client %s: %s' % (address, tokens['name']))
else:
oldItem = self.hosts[str(address)]
if oldItem.name != tokens['name']:
debug ('Changing client\'s "%s" name from "%s" to "%s"' % (address, oldItem.name, tokens['name']))
if oldItem.secret != tokens['secret']:
debug ('Changing client\'s "%s" secret' % address)
            # If unrestricted logging is configured for this client, flag it on
            # the host entry so its packets are logged in full.
enableLogging = False
if address == main_config['SERVER']['log_client']:
enableLogging = True
if address not in self.hosts:
debug ('Enabling unrestricted logging for client "%s"' % tokens['name'])
elif not self.hosts[address].enableLogging:
debug ('Enabling unrestricted logging for client "%s"' % tokens['name'])
# replace old or create new client record
self.hosts[str(address)] = RemoteHost(address, tokens['secret'], tokens['name'], enableLogging)
class RemoteHost:
"""Remote RADIUS capable host we can talk to."""
def __init__(self, address, secret, name, enableLogging = False, authport = 1812, acctport = 1813):
"""Constructor.
@param address: IP address
@type address: string
@param secret: RADIUS secret
@type secret: string
@param name: short name (used for logging only)
@type name: string
@param authport: port used for authentication packets
@type authport: integer
@param acctport: port used for accounting packets
@type acctport: integer
"""
self.address = str(address)
self.secret = str(secret)
# self.authport = int(authport)
# self.acctport = int(acctport)
self.name = str(name)
self.enableLogging = bool(enableLogging)
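# Illustrative bring-up sketch (not in the original source; the address, client
# IP and secret below are placeholders, and radius_dictionary is a pyrad
# Dictionary instance):
def _example_server_setup(radius_dictionary):
    server = BsdRadiusServer(addresses = ['127.0.0.1'], dict = radius_dictionary)
    server.addClientHosts({'10.0.0.5': {'name': 'nas1', 'secret': 'testing123'}})
    server.Run()  # registers sockets, then starts the listener and worker threads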
class BaseThread(Thread):
"""Base thread class for BSDRadius threads
"""
def __init__(self, server):
"""Constructor
Input: (BsdRadiusServer) class instance
Output: none
"""
# call base class' constructor
Thread.__init__(self)
self.server = server
self.threadMayRun = True
def exit(self):
"""Stop the thread
Input: none
Output: none
"""
debug ('Thread "%s" exiting' % self.getName())
self.threadMayRun = False
def sendAuthResponse(self, pkt, authResult):
# create and send a reply packet
address = pkt.source[0]
client = self.server.hosts[address]
if authResult[0]:
# access accept
code = packet.AccessAccept
debug ("Sending Authorization ACCEPT to %s (%s)" % (client.name, address))
else:
# access reject
code = packet.AccessReject
debug ("Sending Authorization REJECT to %s (%s)" % (client.name, address))
reply = pkt.CreateReply(**authResult[1])
reply.source = pkt.source
reply.code = code
debug (reply)
pkt.fd.sendto(reply.ReplyPacket(), reply.source)
def sendAcctResponse(self, pkt):
# create and send a reply packet
client = self.server.hosts[pkt.source[0]]
address = pkt.source[0]
debug ("Sending Accounting ACCEPT to %s (%s)" % (client.name, address))
reply = pkt.CreateReply()
reply.source = pkt.source
pkt.fd.sendto(reply.ReplyPacket(), reply.source)
class WorkingThread(BaseThread):
"""Defines thread which looks for arrived messages in queue and processes
them.
"""
_threadTopId = 0
def __init__(self, server):
"""Constructor
Input: (BsdRadiusServer) class instance
Output: none
"""
BaseThread.__init__(self, server)
# assign id to this thread
self.setName('Working thread ' + str(WorkingThread._threadTopId))
WorkingThread._threadTopId += 1
def run(self):
"""Thread that does the actual job of processing RADIUS packets"""
# since this method is assigned to thread we have to catch all exceptions
# by ourselves
try:
threadnum = self.getName()
hosts = self.server.hosts
packets = self.server.packets
auth_timeout = main_config["AUTHORIZATION"]["packet_timeout"]
info("--- started %s ---" % threadnum)
while self.threadMayRun:
# grab a RADIUS packet and process it
pkt = packets.remove_packet(blocking = False)
if not pkt:
continue
# check if this thread should be allowed for logging
if pkt.source[0] in hosts and hosts[pkt.source[0]].enableLogging:
logger.addUnrestrictedThread()
info('thread "%s" grabbed a packet for processing' % threadnum)
if isinstance(pkt, packet.AuthPacket):
# check if packet is too old
if (time.time() - pkt.timestamp > auth_timeout):
# Dump timed out auth packet
dumpPacket.dumpUnhandledAuthPacket(pkt)
continue
try:
authResult = self.ProcessAuthPacket(pkt)
except AuthFailure, err:
error ("auth failure: ", err)
continue
except:
misc.printException()
continue
# create and send a reply packet
self.sendAuthResponse(pkt, authResult)
elif isinstance(pkt, packet.AcctPacket):
try:
acctResult = self.ProcessAcctPacket(pkt)
except AcctFailure, err:
error ("acct failure: ", err)
continue
except:
misc.printException()
continue
                    # send the acct response here only if processing succeeded and
                    # fast_accounting has not already replied on receive
if acctResult is True and not main_config['SERVER']['fast_accounting']:
self.sendAcctResponse(pkt)
else:
error('Wrong packet received: ', pkt)
info ('%s\n\n' % ('=' * 62))
# remove this thread from non-restricted thread list
logger.rmUnrestrictedThread()
except:
logger.addUnrestrictedThread()
misc.printException()
error ('Error in working thread')
logger.rmUnrestrictedThread()
def ProcessAuthPacket(self, pkt):
# decrypt crypted attributes
pkt.decryptAttributes()
#debug (pkt)
received = dict(pkt) # don't use packet instance any more
check = {'Auth-Type': [None]}
reply = {}
debug (misc.authPacketToStr(received))
# wait for authorization modules to process the request
authzModulesResult = modules.execAuthorizationModules(received, check, reply)
if authzModulesResult == modules.MODULE_OK:
# execute authentication modules
authcModulesResult = modules.execAuthenticationModules(received, check, reply)
if authcModulesResult == modules.MODULE_OK:
info ('===\n')
info ('Authorization and authentication successful')
return (True, reply)
else:
info ('===\n')
info ('Authentication phase failed')
if authcModulesResult == modules.MODULE_FAILED:
dumpPacket.dumpFailedAuthPacket(received)
return (False, reply)
else:
info ('===\n')
info ('Authorization phase failed')
if authzModulesResult == modules.MODULE_FAILED:
dumpPacket.dumpFailedAuthPacket(received)
return (False, reply)
def ProcessAcctPacket(self, pkt):
#debug (pkt)
received = dict(pkt)
debug (misc.acctPacketToStr(received))
# wait for accounting modules to process the request
acctModulesResult = modules.execAccountingModules(received)
if acctModulesResult == modules.MODULE_OK:
info ('===\n')
info ('Accounting successful')
return True
else:
info ('===\n')
info ('Accounting failed')
dumpPacket.dumpFailedAcctPacket(received)
return False
class ListenThread(BaseThread):
"""Defines thread which listens for messages from RADIUS server
clients.
"""
def __init__(self, server):
# call base class' constructor
BaseThread.__init__(self, server)
self.setName('Listen thread')
def run(self):
"""Listen to sockets and put received packets in raw
data queue for later operations.
Input: none
Output: none
"""
# since this method is assigned to thread we have to catch all exceptions
# by ourselves
try:
info ('--- Started Listen thread ---')
# poll packets and put them onto rawpacket sync queue
while self.threadMayRun:
for (socknum, event) in self.server.pollobj.poll(1000):
if event != select.POLLIN:
logger.addUnrestrictedThread()
error ("unexpected event!")
logger.rmUnrestrictedThread()
continue
# receive packet
(sock, socktype) = self.server.fdmap[socknum]
(data, addr) = sock.recvfrom(MAXPACKETSZ)
# process the raw packet
if addr[0] in self.server.hosts and self.server.hosts[addr[0]].enableLogging:
logger.addUnrestrictedThread()
# this one might fail :)
try:
self.ProcessPacket(data, addr, sock, socktype)
except:
logger.addUnrestrictedThread()
misc.printException()
error ('Error while processing received packet')
logger.rmUnrestrictedThread()
except:
logger.addUnrestrictedThread()
misc.printException()
error ('Error in listen thread')
logger.rmUnrestrictedThread()
def ProcessPacket(self, data, addr, sock, socktype):
"""
The purpose of this function is to create a RADIUS packet
structure, quickly dispatch an accounting-ok packet back
        to the sender (in case we received an acct packet) and
possibly start logging (instead of dispatching for further
processing) new accounting packets under heavy server load
when the packet queue becomes overpopulated with accounting
packets.
"""
if socktype == SOCKTYPE_AUTH:
# create auth packet
pkt = packet.AuthPacket(dict = self.server.dict, packet = data)
pkt.timestamp = time.time()
pkt.source = addr
pkt.fd = sock
pkt.addClientIpAddress()
pkt.addRequestAuthenticator()
if not pkt.source[0] in self.server.hosts:
warning ("dropped packet: received packet from unknown host")
return
pkt.secret = self.server.hosts[pkt.source[0]].secret
if pkt.code != packet.AccessRequest:
error ("dropped packet: received non-authentication packet on authentication port")
return
self.server.packets.add_auth_packet(pkt)
return
if socktype == SOCKTYPE_ACCT:
# create acct packet
pkt = packet.AcctPacket(dict = self.server.dict, packet = data)
pkt.timestamp = time.time()
pkt.source = addr
pkt.fd = sock
pkt.addClientIpAddress()
if not pkt.source[0] in self.server.hosts:
error ("dropped packet: received packet from unknown host")
return
pkt.secret = self.server.hosts[pkt.source[0]].secret
if not pkt.code in [packet.AccountingRequest, packet.AccountingResponse]:
error ("dropped packet: received non-accounting packet on accounting port")
return
# send acct response to client even before processing the packet
if main_config['SERVER']['fast_accounting']:
self.sendAcctResponse(pkt)
# put the whole packet into packet queue for later processing
if self.server.packets.add_acct_packet(pkt) == False:
info ("WARNING: Accounting packet queue full, must start logging")
dumpPacket.dumpUnhandledAcctPacket(pkt)
return
| [
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] | valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef |
1b7f3fb41bc1ce7090ff2124e2cefb6b6cc8561c | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/workflows/v1/workflows-v1-py/docs/conf.py | 810536a2c4205cdd3300bfcddd270c6fde2ad127 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,491 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-workflows documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGLEOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-workflows"
copyright = u"2020, Google, LLC"
author = u"Google APIs" # TODO: autogenerate this bit
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-workflows-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-workflows.tex",
u"google-cloud-workflows Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-workflows",
u"Google Cloud Workflows Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-workflows",
u"google-cloud-workflows Documentation",
author,
"google-cloud-workflows",
"GAPIC library for Google Cloud Workflows API",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("http://requests.kennethreitz.org/en/stable/", None),
"proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
d1d93378ea4d333133881b4caa5a37b7c65de232 | 79ad169779336d346b58e9bd9652ce64e9e4856a | /dynamic_rest/fields/choice.py | 605be5cfe3669fa1f93ddad262ffad9a97a4dc00 | [
"MIT"
] | permissive | asaak-co/dynamic-rest | 0546444034926ff3b8b8e96834fbb6a1576aeaf6 | 09f510063a35898a871ca86b5a130595a046c7aa | refs/heads/master | 2023-08-09T02:42:28.577865 | 2023-07-28T14:00:38 | 2023-07-28T14:00:38 | 132,636,828 | 0 | 1 | MIT | 2018-08-09T18:05:29 | 2018-05-08T16:34:39 | Python | UTF-8 | Python | false | false | 434 | py | from .base import DynamicField
from rest_framework.serializers import ChoiceField
from dynamic_rest.meta import Meta
class DynamicChoiceField(
DynamicField,
ChoiceField,
):
    def admin_render_value(self, value):
        # Map the stored choice value to its human-readable label for admin display.
        model = self.parent_model
        source = self.source or self.field_name
        choices = Meta(model).get_field(source).choices
        return dict(choices).get(value, None)
| [
"[email protected]"
] | |
248901b372b2f892552f9ecbc8f14fe246636431 | da0a7446122a44887fa2c4f391e9630ae033daa2 | /python/ray/serve/tests/test_deployment_graph_autoscaling.py | b390d9c5f360612ba766020637ad10aec2e5ea01 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | whiledoing/ray | d8d9ba09b7545e8fd00cca5cfad451278e61fffd | 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | refs/heads/master | 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 | Apache-2.0 | 2023-03-04T08:57:20 | 2020-04-02T10:07:23 | Python | UTF-8 | Python | false | false | 6,753 | py | import sys
import pytest
from ray.serve import constants as serve_constants
import ray
from ray import serve
from ray.serve.drivers import DAGDriver
from ray.dag.input_node import InputNode
from ray.serve.deployment_state import ReplicaState
from ray._private.test_utils import SignalActor, wait_for_condition
# Lower the handle metric push interval so scale-from-zero happens faster in tests.
serve_constants.HANDLE_METRIC_PUSH_INTERVAL_S = 1
def get_num_running_replicas(controller, deployment_name):
replicas = ray.get(
controller._dump_replica_states_for_testing.remote(deployment_name)
)
running_replicas = replicas.get([ReplicaState.RUNNING])
return len(running_replicas)
def test_autoscaling_0_replica(serve_instance):
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": 0,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 0,
"upscale_delay_s": 0,
}
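    # min_replicas=0 allows scale-to-zero when idle; the short look-back
    # window and zero delays make the autoscaler react quickly in tests.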
@serve.deployment(
autoscaling_config=autoscaling_config,
)
class Model:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
with InputNode() as user_input:
model = Model.bind(1)
output = model.forward.bind(user_input)
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
).bind(output)
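    # DAGDriver is the HTTP ingress deployment for the graph; serve.run
    # returns a handle that can also be called directly, as below.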
dag_handle = serve.run(serve_dag)
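    # With min_replicas=0 the graph may have no running replicas at first,
    # so this request exercises scale-up from zero before it can return.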
assert 2 == ray.get(dag_handle.predict.remote(1))
@pytest.mark.parametrize("min_replicas", [0, 1])
def test_autoscaling_with_chain_nodes(min_replicas, serve_instance):
signal = SignalActor.remote()
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": min_replicas,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 30,
"upscale_delay_s": 0,
}
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model1:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
ray.get(signal.wait.remote())
return input + self.weight
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model2:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
with InputNode() as user_input:
model1 = Model1.bind(0)
model2 = Model2.bind(1)
output = model1.forward.bind(user_input)
output2 = model2.forward.bind(output)
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
).bind(output2)
dag_handle = serve.run(serve_dag)
controller = serve_instance._controller
# upscaling
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 1
)
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 2
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) >= 2, timeout=40
)
signal.send.remote()
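    # Unblock the waiting Model1.forward calls so requests reach Model2 and
    # traffic can eventually drain for downscaling.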
wait_for_condition(
lambda: get_num_running_replicas(controller, Model2.name) >= 1, timeout=40
)
# downscaling
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) == min_replicas,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model1.name) == min_replicas,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, Model2.name) == min_replicas,
timeout=60,
)
def test_autoscaling_with_ensemble_nodes(serve_instance):
signal = SignalActor.remote()
autoscaling_config = {
"metrics_interval_s": 0.1,
"min_replicas": 0,
"max_replicas": 2,
"look_back_period_s": 0.4,
"downscale_delay_s": 30,
"upscale_delay_s": 0,
}
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
class Model:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
@serve.deployment(
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
)
def combine(value_refs):
ray.get(signal.wait.remote())
return sum(ray.get(value_refs))
with InputNode() as user_input:
model1 = Model.bind(0)
model2 = Model.bind(1)
output1 = model1.forward.bind(user_input)
output2 = model2.forward.bind(user_input)
output = combine.bind([output1, output2])
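        # Ensemble pattern: `combine` receives both model outputs as a list
        # of object refs and resolves them with ray.get inside the deployment.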
serve_dag = DAGDriver.options(
route_prefix="/my-dag",
autoscaling_config=autoscaling_config,
graceful_shutdown_timeout_s=1,
).bind(output)
dag_handle = serve.run(serve_dag)
controller = serve_instance._controller
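    # Serve auto-suffixes duplicate deployment names, hence "Model" and "Model_1".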
assert get_num_running_replicas(controller, "Model") == 0
assert get_num_running_replicas(controller, "Model_1") == 0
assert get_num_running_replicas(controller, "combine") == 0
# upscaling
[dag_handle.predict.remote(0) for _ in range(10)]
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) >= 1
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model") >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model_1") >= 1, timeout=40
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "combine") >= 2, timeout=40
)
signal.send.remote()
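    # Release the blocked combine calls so in-flight requests finish and
    # traffic drains, letting every deployment scale back to zero.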
# downscaling
wait_for_condition(
lambda: get_num_running_replicas(controller, DAGDriver.name) == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model") == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "Model_1") == 0,
timeout=60,
)
wait_for_condition(
lambda: get_num_running_replicas(controller, "combine") == 0, timeout=60
)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| ["[email protected]"] |