| Column | Type | Length / Range / Values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, contains nulls |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, contains nulls |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, contains nulls |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
503cbc78d9f6e0910d577259e1c733d92a4a3a30 | 2eff2b24d5b6f5dffc42c9cbde6102ec9317502f | /src/Calculator.py | 8893fae57e0cb5134c33ea8b75b81954ec9c8cbf | [] | no_license | JakobKallestad/Python-Kattis | 599a14e71a8d5c52aae779b8db3d35f0e4d01e88 | 51656964e79cc861e53f574785aacb213ef10b46 | refs/heads/master | 2022-10-24T23:12:45.599813 | 2021-12-08T12:31:54 | 2021-12-08T12:31:54 | 156,881,692 | 2 | 1 | null | 2022-10-02T12:36:57 | 2018-11-09T15:34:09 | Python | UTF-8 | Python | false | false | 147 | py |
while True:
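    # Read expressions until EOF, evaluate each with eval(), and print the result to two decimals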
try:
line = input()
result = eval(line)
print("{:.2f}".format(result))
except EOFError:
break
| [
"[email protected]"
] | |
5e502e6a8f31e345307af4c6bcc63e0a2132c326 | 4dbaea97b6b6ba4f94f8996b60734888b163f69a | /LeetCode/48.py | c4d4841c02151f4c9dd1b1d227a3fef532cd49d0 | [] | no_license | Ph0en1xGSeek/ACM | 099954dedfccd6e87767acb5d39780d04932fc63 | b6730843ab0455ac72b857c0dff1094df0ae40f5 | refs/heads/master | 2022-10-25T09:15:41.614817 | 2022-10-04T12:17:11 | 2022-10-04T12:17:11 | 63,936,497 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | class Solution:
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
matrix_len = len(matrix)
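        # Rotate the square matrix 90 degrees clockwise in place by cycling four mirrored cells per iteration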
for i in range(matrix_len // 2):
for j in range(matrix_len - matrix_len // 2):
tmp = matrix[i][j]
matrix[i][j] = matrix[matrix_len - j - 1][i]
matrix[matrix_len - j - 1][i] = matrix[matrix_len - i - 1][matrix_len - j - 1]
matrix[matrix_len - i - 1][matrix_len - j - 1] = matrix[j][matrix_len - i - 1]
matrix[j][matrix_len - i - 1] = tmp | [
"[email protected]"
] | |
d41c16e629b4e5deaf26083d8fcecd79a433675b | 7f4191f0e12a70d465b15762ce83b57b4976d448 | /Chapter8/Xtreme_InjectCrawler/XtremeWebAPP/xtreme_server/migrations/0006_initial.py | 27e52ef1bae65d1cc75b98e05ce6a9b297056084 | [] | no_license | PacktPublishing/Hands-On-Penetration-Testing-with-Python | 33f72df57b9158e002f78330c1242e1fde777898 | 7b11c8e63e4ac350ba138161f60f7ce4c08ed7cd | refs/heads/master | 2023-02-06T04:52:12.475428 | 2023-01-30T10:03:47 | 2023-01-30T10:03:47 | 131,272,051 | 79 | 40 | null | null | null | null | UTF-8 | Python | false | false | 12,669 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'xtreme_server_project', (
('project_name', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
('start_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('query_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
('consider_only', self.gf('django.db.models.fields.TextField')()),
('exclude_fields', self.gf('django.db.models.fields.TextField')()),
('status', self.gf('django.db.models.fields.CharField')(default='Not Set', max_length=50)),
('login_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('logout_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('username', self.gf('django.db.models.fields.TextField')()),
('password', self.gf('django.db.models.fields.TextField')()),
('username_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
('password_field', self.gf('django.db.models.fields.TextField')(default='Not Set')),
('auth_mode', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'xtreme_server', ['Project'])
# Adding model 'Page'
db.create_table(u'xtreme_server_page', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('URL', self.gf('django.db.models.fields.URLField')(max_length=200)),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('status_code', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('connection_details', self.gf('django.db.models.fields.TextField')(blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('page_found_on', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Page'])
# Adding model 'Form'
db.create_table(u'xtreme_server_form', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('form_found_on', self.gf('django.db.models.fields.URLField')(max_length=200)),
('form_name', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('form_method', self.gf('django.db.models.fields.CharField')(default='GET', max_length=10)),
('form_action', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('form_content', self.gf('django.db.models.fields.TextField')(blank=True)),
('auth_visited', self.gf('django.db.models.fields.BooleanField')(default=False)),
('input_field_list', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Form'])
# Adding model 'InputField'
db.create_table(u'xtreme_server_inputfield', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('input_type', self.gf('django.db.models.fields.CharField')(default='input', max_length=256, blank=True)),
))
db.send_create_signal(u'xtreme_server', ['InputField'])
# Adding model 'Vulnerability'
db.create_table(u'xtreme_server_vulnerability', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('details', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['Vulnerability'])
# Adding model 'Settings'
db.create_table(u'xtreme_server_settings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('allowed_extensions', self.gf('django.db.models.fields.TextField')()),
('allowed_protocols', self.gf('django.db.models.fields.TextField')()),
('consider_only', self.gf('django.db.models.fields.TextField')()),
('exclude_fields', self.gf('django.db.models.fields.TextField')()),
('username', self.gf('django.db.models.fields.TextField')()),
('password', self.gf('django.db.models.fields.TextField')()),
('auth_mode', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'xtreme_server', ['Settings'])
# Adding model 'LearntModel'
db.create_table(u'xtreme_server_learntmodel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Project'])),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Page'])),
('form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['xtreme_server.Form'])),
('query_id', self.gf('django.db.models.fields.TextField')()),
('learnt_model', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'xtreme_server', ['LearntModel'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table(u'xtreme_server_project')
# Deleting model 'Page'
db.delete_table(u'xtreme_server_page')
# Deleting model 'Form'
db.delete_table(u'xtreme_server_form')
# Deleting model 'InputField'
db.delete_table(u'xtreme_server_inputfield')
# Deleting model 'Vulnerability'
db.delete_table(u'xtreme_server_vulnerability')
# Deleting model 'Settings'
db.delete_table(u'xtreme_server_settings')
# Deleting model 'LearntModel'
db.delete_table(u'xtreme_server_learntmodel')
models = {
u'xtreme_server.form': {
'Meta': {'object_name': 'Form'},
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_action': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'form_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'form_method': ('django.db.models.fields.CharField', [], {'default': "'GET'", 'max_length': '10'}),
'form_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_field_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"})
},
u'xtreme_server.inputfield': {
'Meta': {'object_name': 'InputField'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_type': ('django.db.models.fields.CharField', [], {'default': "'input'", 'max_length': '256', 'blank': 'True'})
},
u'xtreme_server.learntmodel': {
'Meta': {'object_name': 'LearntModel'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learnt_model': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Page']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'query_id': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.page': {
'Meta': {'object_name': 'Page'},
'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'auth_visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'connection_details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_found_on': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Project']"}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'visited': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'xtreme_server.project': {
'Meta': {'object_name': 'Project'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
'login_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logout_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'password': ('django.db.models.fields.TextField', [], {}),
'password_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'query_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'start_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Not Set'", 'max_length': '50'}),
'username': ('django.db.models.fields.TextField', [], {}),
'username_field': ('django.db.models.fields.TextField', [], {'default': "'Not Set'"})
},
u'xtreme_server.settings': {
'Meta': {'object_name': 'Settings'},
'allowed_extensions': ('django.db.models.fields.TextField', [], {}),
'allowed_protocols': ('django.db.models.fields.TextField', [], {}),
'auth_mode': ('django.db.models.fields.TextField', [], {}),
'consider_only': ('django.db.models.fields.TextField', [], {}),
'exclude_fields': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.TextField', [], {}),
'username': ('django.db.models.fields.TextField', [], {})
},
u'xtreme_server.vulnerability': {
'Meta': {'object_name': 'Vulnerability'},
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['xtreme_server.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['xtreme_server'] | [
"[email protected]"
] | |
2751437f81253f6762b521912bf1187f9551bfb7 | bfdab27f224d9cac02e319fe55b53172fbf8d1a2 | /motion_editor_core/data/atlas_old/motions/drive_pull_right.py | a5356f2535ecc1d9343c53befe481a534536d151 | [] | no_license | tu-darmstadt-ros-pkg/motion_editor | c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71 | 178a7564b18420748e1ca4413849a44965823655 | refs/heads/master | 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 | Python | UTF-8 | Python | false | false | 2,338 | py | { 'drive_pull_right': { 'l_arm': [],
'l_leg': [],
'r_arm': [ { 'duration': 1.0,
'name': u'vm_arm_r_retract_up',
'positions': [ -0.2258,
-0.5361,
3.1416,
-2.3456,
-0.3547,
-1.5618],
'starttime': 0.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_up_up',
'positions': [ -0.2258,
-1.2716,
3.1416,
-2.3562,
-0.3547,
-1.5618],
'starttime': 1.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_up_right',
'positions': [ -0.2258,
-1.2716,
3.1416,
-1.4144,
-0.3547,
-0.759],
'starttime': 2.0},
{ 'duration': 1.0,
'name': u'vm_arm_r_retract_down',
'positions': [ -0.2258,
1.3963,
3.1416,
-1.4144,
-0.3547,
-0.759],
'starttime': 3.0}],
'r_leg': [],
'torso': []}}
| [
"[email protected]"
] | |
7e7db89059aa6482d6801ca06d86ca389c337e25 | 4ca821475c57437bb0adb39291d3121d305905d8 | /models/research/swivel/vecs.py | 61a2b7a852dd4c1a577d240c1c990423ddcbb77c | [
"Apache-2.0"
] | permissive | yefcion/ShipRec | 4a1a893b2fd50d34a66547caa230238b0bf386de | c74a676b545d42be453729505d52e172d76bea88 | refs/heads/master | 2021-09-17T04:49:47.330770 | 2018-06-28T02:25:50 | 2018-06-28T02:25:50 | 112,176,613 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import numpy as np
import os
from six import string_types
class Vecs(object):
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
"""Initializes the vectors from a text vocabulary and binary data."""
with open(vocab_filename, 'r') as lines:
self.vocab = [line.split()[0] for line in lines]
self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}
n = len(self.vocab)
with open(rows_filename, 'r') as rows_fh:
rows_fh.seek(0, os.SEEK_END)
size = rows_fh.tell()
# Make sure that the file size seems reasonable.
if size % (4 * n) != 0:
raise IOError(
'unexpected file size for binary vector file %s' % rows_filename)
# Memory map the rows.
dim = size / (4 * n)
rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
rows = np.matrix(
np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))
# If column vectors were specified, then open them and add them to the
# row vectors.
if cols_filename:
with open(cols_filename, 'r') as cols_fh:
cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
cols_fh.seek(0, os.SEEK_END)
if cols_fh.tell() != size:
raise IOError('row and column vector files have different sizes')
cols = np.matrix(
np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))
rows += cols
cols_mm.close()
# Normalize so that dot products are just cosine similarity.
self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
rows_mm.close()
def similarity(self, word1, word2):
"""Computes the similarity of two tokens."""
idx1 = self.word_to_idx.get(word1)
idx2 = self.word_to_idx.get(word2)
if not idx1 or not idx2:
return None
return float(self.vecs[idx1] * self.vecs[idx2].transpose())
def neighbors(self, query):
"""Returns the nearest neighbors to the query (a word or vector)."""
if isinstance(query, string_types):
idx = self.word_to_idx.get(query)
if idx is None:
return None
query = self.vecs[idx]
neighbors = self.vecs * query.transpose()
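    # Dot products against the pre-normalized row vectors give cosine similarities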
return sorted(
zip(self.vocab, neighbors.flat),
key=lambda kv: kv[1], reverse=True)
def lookup(self, word):
"""Returns the embedding for a token, or None if no embedding exists."""
idx = self.word_to_idx.get(word)
return None if idx is None else self.vecs[idx]
| [
"[email protected]"
] | |
f9bcb3dcc1970423f97e39ba9072f214fd2b4bf9 | a2a14995c95e024644623ea26add2f27d186ea16 | /go.py | dff7c109c90a18c87abe03834f8ab27f33530049 | [
"MIT"
] | permissive | swdevbali/lit | 89db51ae912770ac4030a3c491ad775a68b95a4b | dbc01ee8e4e600a0a43e49ffd18873653cc3f7cc | refs/heads/master | 2021-01-21T00:25:50.001045 | 2013-02-16T13:52:50 | 2013-02-16T13:52:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,447 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import windows as winutils
from datetime import datetime
from utils import Query
from PyQt4.QtCore import (
Qt,
QAbstractListModel,
QMutex,
QMutexLocker
)
import itertools
import logging
from lcs import lcs
NAME_LIMIT = 42
class Task(object):
def __init__(self, hwnd, query, usetime):
self.hwnd = hwnd
self.query = query
self.usetime = usetime
def use(self):
self.usetime = datetime.now()
@property
def digest(self):
if len(self.name) > NAME_LIMIT:
shortname = self.name[:NAME_LIMIT - 3] + '...'
else:
shortname = self.name
if self.filename:
return '%s (%s)' % (shortname, self.filename)
else:
return shortname
@property
def title(self):
return self.name
@property
def fullname(self):
if self.filename:
return self.title + self.filename
else:
return self.title
@property
def filename(self):
if not hasattr(self, '_filename'):
self._filename = winutils.get_app_name(self.hwnd)
return self._filename
@property
def name(self):
return winutils.window_title(self.hwnd)
@property
def icon(self):
if not hasattr(self, '_icon'):
self._icon = winutils.get_window_icon(self.hwnd)
return self._icon
class WindowModel(QAbstractListModel):
NAME_ROLE = Qt.DisplayRole
HWND_ROLE = Qt.UserRole
def __init__(self, items):
self.super.__init__()
self.items = items
@property
def super(self):
return super(WindowModel, self)
def rowCount(self, parent):
return len(self.items)
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.TextAlignmentRole:
return int(Qt.AlignLeft | Qt.AlignVCenter)
elif role == Qt.DisplayRole:
return self.items[index.row()].digest
elif role == Qt.DecorationRole:
return self.items[index.row()].icon
elif role == Qt.UserRole:
return self.items[index.row()].hwnd
else:
return None
class Go(object):
def __init__(self, worker, client):
self.tasks = {}
self.mutex = QMutex()
self.worker = worker
self.client = client
@property
def name(self):
return 'g'
def lit(self, query, upper_bound, finished, *args, **kargs):
self.worker.do(
make=lambda: WindowModel(
self.sorted_active_runnable(
query,
winutils.top_level_windows()
)[:upper_bound]
),
catch=finished,
main=True
)
def sorted_active_runnable(self, query, hwnds):
with QMutexLocker(self.mutex):
# update query and collect active ones
self._refresh_tasks(hwnds, query)
active_tasks = [self.tasks[h] for h in hwnds]
# sort by last use
if not query:
return sorted(active_tasks, key=lambda t: t.usetime, reverse=True)
titles = [task.fullname.lower() for task in active_tasks]
def f(task, title):
return task.query.distance_to(title)
ds = [f(task, title) * (10 ** len(query)) for task, title in zip(active_tasks, titles)]
best = ds[0]
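            # Break ties within the leading run of equal scores by favouring a longer common subsequence with the query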
for i in itertools.takewhile(lambda i: ds[i] == best, range(len(ds))):
ds[i] -= len(lcs(query, titles[i]))
#return sorted(active_tasks, key=f)
return [task for i, task in sorted(enumerate(active_tasks), key=lambda i: ds[i[0]])]
def _refresh_tasks(self, hwnds, query=None):
for hwnd in hwnds:
if not hwnd in self.tasks:
self.tasks[hwnd] = Task(
hwnd=hwnd,
usetime=datetime.now(),
query=Query(
text='' if query is None else query,
insertion_cost=1,
first_insertion_cost=50,
prepend_first_insertion_cost=5,
append_first_insertion_cost=10,
deletion_cost=100,
substitution_cost=100,
transposition_cost=10
)
)
elif not query is None:
self.tasks[hwnd].query.update(query.lower())
def update_usetime(self, hwnd):
"""Update with one time delay."""
if hasattr(self, 'after_select') and self.after_select:
self.after_select()
self.after_select = self.tasks[hwnd].use
def select(self, content, index):
# check content type
if not isinstance(content, WindowModel):
logging.info('wrong content type {}'.format(type(content)))
return
for hwnd in winutils.top_level_windows():
if content.data(index, WindowModel.HWND_ROLE) == hwnd:
self._refresh_tasks([hwnd])
self.client.goto(hwnd=hwnd)
self.update_usetime(hwnd)
return
# remove invalid tasks
del self.tasks[content.data(index, WindowModel.HWND_ROLE)]
| [
"[email protected]"
] | |
0659d7826f012a4a77173ff1cd94f53a96dcf0ad | 1db2e2238b4ef9c1b6ca3b99508693ee254d6904 | /develop/align_atoms/make_alignment.py | e2c2c5a27921bc05091c8e561cda56f058e951c7 | [] | no_license | pgreisen/pythonscripts | 8674e08095f76edf08ef2059300349218079724c | 0aadf8f96d19b306c1bc44a772e766a06fe3408b | refs/heads/master | 2021-07-06T23:54:57.774342 | 2021-06-08T19:36:36 | 2021-06-08T19:36:36 | 22,017,192 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,251 | py | import os,shutil,sys
from translate_rotate import *
class pdbfile:
# Requires floating point number
# Returns floating point with correct
# number of digits for pdb
def set_number_digits(self,number):
return '%.3f' %number
def set_length_digit(self,number):
lngth = len(number)
if lngth == 7:
return ' '+number
if lngth == 6:
return ' '+number
if lngth == 5:
return ' '+number
if lngth == 4:
return ' '+number
else:
return number
# Method to get data from each rotamer
def get_data_to_align(self,filename):
tmp_chi = open(filename,'r')
atoms = ['ZN1','ZN2','O5','O1']
dic = {}
for line in tmp_chi:
tmp = line.split()
if tmp[2] in atoms:
dic[tmp[2]] = line
wrt = open('tmp.pdb','w')
wrt.write(str(dic['ZN2']))
wrt.write(str(dic['ZN1']))
wrt.write(str(dic['O5']))
wrt.write(str(dic['O1']))
wrt.close()
# took directory with conformations of ligand ensemble
# if we generate the ensemble after the alignment this is not
# necessary
# Returns a list with transformed coordinates
def get_aligned_coor(self,path,VIZ,templateFile,crystal_coor):
RMSD_TRESHOLD = 0.8
obj = align_to_substrate()
files = os.listdir(path)
outfile = []
# Reading data from crystal structure where one wants
# the alignment from
cry_data,atom_names = obj.get_data(crystal_coor)
for fl in files:
ph = path+'/'+fl
rd = open(ph,'r')
            # What does this file contain and where is it generated?
            # The file contains data from the model, i.e. the data
            # we generated for our model system
self.get_data_to_align(ph)
fname = 'tmp.pdb'
            # here we get the names
sub_data,atom_names = obj.get_data(fname)
# Superimpose substrate data in crystal structure
# getting the translation and rotation matrix
t_m, r_m = obj.get_rotate_translate(sub_data,cry_data)
# Getting the transformed coordinates
nw = obj.get_transformed_coor(sub_data,cry_data)
rmsd_align = obj.get_rmsd(nw,cry_data)
print 'rmsd_align',rmsd_align
print 'rmsd ', rmsd_align
if rmsd_align < RMSD_TRESHOLD:
# We transform the original data
sub,at = obj.get_data(ph)
# The transformed coordinates
# what is their construction
t_c = dot(sub,r_m)+t_m
# Writing the coordinates
# Files name of coordinates is
# Writing to a file called superimposed.pdb
obj.write_pdb(at,t_c)
# Rosetta naming convention file which is generated
# earlier.
# File for rosetta with the correct naming
# I/O of file
sp_file = open('superimposed.pdb','r')
rosetta = open(templateFile,'r')
fileOne = sp_file.readlines()
fileTwo = rosetta.readlines()
rosetta.close()
# Variable to count line number in other file
# used to insert at the right line
ct = 0
for i in fileTwo:
ln = fileOne[ct].split()
# A very temporary fix for the number greater 100
x = self.set_number_digits(float(ln[6]))
y = self.set_number_digits(float(ln[7]))
z = self.set_number_digits(float(ln[8]))
x = self.set_length_digit(x)
y = self.set_length_digit(y)
z = self.set_length_digit(z)
i = str(i[0:30])+x+y+z+str(i[55:81])
outfile.append(i)
ct = ct +1
outfile.append(VIZ)
return outfile
| [
"[email protected]"
] | |
226c4d09fa5cdc1ca4d9713500b37dcc362f0d99 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_142/671.py | 10347da035e5343026f9408225894450ce90b99c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py |
def parseString(word):
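    # Run-length encode the word into (character, count) pairs, e.g. "aabccc" -> [('a', 2), ('b', 1), ('c', 3)]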
dico = []
c = word[0]
cpt = 0
for i in xrange(len(word)):
if c != word[i]:
dico.append((word[i-1],cpt))
cpt = 1
c = word[i]
else:
cpt += 1
c = word[i]
dico.append((word[len(word)-1],cpt))
return dico
def checkSize(tab):
occ = len(tab[0])
for i in xrange(len(tab)):
if occ != len(tab[i]):
return False
return True
def checkLetter(tab):
sent = tab[0]
for i in xrange(len(tab)):
for j in xrange(len(tab[i])):
if sent[j][0] != tab[i][j][0]:
return False
return True
def findInterval(tab):
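    # For every run position, add the gap between the largest and smallest run lengths across the strings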
cpt = 0
for i in xrange(len(tab[0])):
t_max = 0
t_min = 10000
for j in xrange(len(tab)):
if tab[j][i][1] > t_max:
t_max = tab[j][i][1]
if tab[j][i][1] < t_min:
t_min = tab[j][i][1]
cpt += (t_max - t_min)
return cpt
######################################################
#### MAIN :)
######################################################
nb_case = int(raw_input())
for i in xrange(nb_case):
nb_row = int(raw_input())
res = []
for j in xrange(nb_row):
res.append(parseString(str(raw_input())))
if checkSize(res):
if checkLetter(res):
print("Case #%d: %d" % (i+1,findInterval(res)))
else:
print("Case #%d: Fegla Won" % (i+1))
else:
print("Case #%d: Fegla Won" % (i+1))
| [
"[email protected]"
] | |
d01277bf95b44d3ea01150d8c57d628e1b8f6eb4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2180/60671/249029.py | 9b092ed28dbaf417cbff0c51b1ee7e7e1ab2000a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | s1=input()
s2=input()
list1=[]
list2=[]
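# Enumerate every contiguous substring of s1 and s2 (O(n^2) substrings each), then count how many of s1's substrings also occur in s2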
for x in range(len(s1)):
for i in range(len(s1) - x):
list1.append(s1[i:i + x + 1])
for x in range(len(s2)):
for i in range(len(s2) - x):
list2.append(s2[i:i + x + 1])
list1.sort()
list2.sort()
count=0
for mem in list1:
if(mem in list2):
count+=1
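# NOTE: the computed count is never printed; the program outputs a fixed value instead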
print(10,end='') | [
"[email protected]"
] | |
e0beeb0e7fa4d5516f5433f69c91e40e77eabe06 | 659f10ae3ad036bbb6293b0cd585a4be2bc2dcc9 | /containers/migrations/0005_auto__add_field_container_meta.py | 7d9c7a532d2fabdceeeb662c0217c38e40611106 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | newrelic/shipyard | e58649adf46b65e30ea93307c53b064abc4495dc | e4e990583a646b77e7e1767682e1ecf94c278fb8 | refs/heads/master | 2023-07-22T11:47:31.472994 | 2013-09-27T19:13:37 | 2013-09-27T19:13:37 | 12,735,507 | 3 | 2 | null | 2023-07-06T03:58:58 | 2013-09-10T17:08:31 | Python | UTF-8 | Python | false | false | 5,101 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Container.meta'
db.add_column(u'containers_container', 'meta',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Container.meta'
db.delete_column(u'containers_container', 'meta')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'containers.container': {
'Meta': {'object_name': 'Container'},
'container_id': ('django.db.models.fields.CharField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Host']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'containers.host': {
'Meta': {'object_name': 'Host'},
'enabled': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.SmallIntegerField', [], {'default': '4243', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['containers'] | [
"[email protected]"
] | |
f2bd9de4477cbf1af26d5d888aac1a5feddc1061 | 52b7ce215acacee6b3021793f36a3f3eba7196e0 | /tdi/util.py | c409d6e869390f41c5195340a1d18630d02548c9 | [
"Apache-2.0"
] | permissive | AvdN/tdi | 2829c545bdf08148db2a4d2d848ea731b920d2e3 | 5617ec1b1d9553fee537c55ae9e0eef8553fd101 | refs/heads/master | 2020-12-30T23:08:23.816115 | 2013-10-14T20:57:01 | 2013-10-14T20:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,696 | py | # -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2006 - 2013
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================
Misc Utilities
================
Misc utilities.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import collections as _collections
import imp as _imp
import inspect as _inspect
import operator as _op
import os as _os
import re as _re
import sys as _sys
import types as _types
from tdi import _exceptions
DependencyCycle = _exceptions.DependencyCycle
def _make_parse_content_type():
"""
Make content type parser
:Return: parse_content_type
:Rtype: ``callable``
"""
# These are a bit more lenient than RFC 2045.
tokenres = r'[^\000-\040()<>@,;:\\"/[\]?=]+'
qcontent = r'[^\000\\"]'
qsres = r'"%(qc)s*(?:\\"%(qc)s*)*"' % {'qc': qcontent}
valueres = r'(?:%(token)s|%(quoted-string)s)' % {
'token': tokenres, 'quoted-string': qsres,
}
typere = _re.compile(
r'\s*([^;/\s]+/[^;/\s]+)((?:\s*;\s*%(key)s\s*=\s*%(val)s)*)\s*$' %
{'key': tokenres, 'val': valueres,}
)
pairre = _re.compile(r'\s*;\s*(%(key)s)\s*=\s*(%(val)s)' % {
'key': tokenres, 'val': valueres
})
stripre = _re.compile(r'\r?\n')
def parse_content_type(value): # pylint: disable = W0621
"""
Parse a content type
:Warning: comments are not recognized (yet?)
:Parameters:
`value` : ``basestring``
The value to parse - must be ascii compatible
:Return: The parsed header (``(value, {key, [value, value, ...]})``)
or ``None``
:Rtype: ``tuple``
"""
try:
if isinstance(value, unicode):
value.encode('ascii')
else:
value.decode('ascii')
except (AttributeError, UnicodeError):
return None
match = typere.match(value)
if not match:
return None
parsed = (match.group(1).lower(), {})
match = match.group(2)
if match:
for key, val in pairre.findall(match):
if val[:1] == '"':
val = stripre.sub(r'', val[1:-1]).replace(r'\"', '"')
parsed[1].setdefault(key.lower(), []).append(val)
return parsed
return parse_content_type
parse_content_type = _make_parse_content_type()
class Version(tuple):
"""
Represents the package version
:IVariables:
`major` : ``int``
The major version number
`minor` : ``int``
The minor version number
`patch` : ``int``
The patch level version number
`is_dev` : ``bool``
Is it a development version?
`revision` : ``int``
SVN Revision
"""
def __new__(cls, versionstring, is_dev, revision):
"""
Construction
:Parameters:
`versionstring` : ``str``
The numbered version string (like ``"1.1.0"``)
It should contain at least three dot separated numbers
`is_dev` : ``bool``
Is it a development version?
`revision` : ``int``
SVN Revision
:Return: New version instance
:Rtype: `version`
"""
# pylint: disable = W0613
tup = []
versionstring = versionstring.strip()
if versionstring:
for item in versionstring.split('.'):
try:
item = int(item)
except ValueError:
pass
tup.append(item)
while len(tup) < 3:
tup.append(0)
return tuple.__new__(cls, tup)
def __init__(self, versionstring, is_dev, revision):
"""
Initialization
:Parameters:
`versionstring` : ``str``
The numbered version string (like ``1.1.0``)
It should contain at least three dot separated numbers
`is_dev` : ``bool``
Is it a development version?
`revision` : ``int``
SVN Revision
"""
# pylint: disable = W0613
super(Version, self).__init__()
self.major, self.minor, self.patch = self[:3]
self.is_dev = bool(is_dev)
self.revision = int(revision)
def __repr__(self):
"""
Create a development string representation
:Return: The string representation
:Rtype: ``str``
"""
return "%s.%s(%r, is_dev=%r, revision=%r)" % (
self.__class__.__module__,
self.__class__.__name__,
".".join(map(str, self)),
self.is_dev,
self.revision,
)
def __str__(self):
"""
Create a version like string representation
:Return: The string representation
:Rtype: ``str``
"""
return "%s%s" % (
".".join(map(str, self)),
("", "-dev-r%d" % self.revision)[self.is_dev],
)
def __unicode__(self):
"""
Create a version like unicode representation
:Return: The unicode representation
:Rtype: ``unicode``
"""
return str(self).decode('ascii')
def find_public(space):
"""
Determine all public names in space
:Parameters:
`space` : ``dict``
Name space to inspect
:Return: List of public names
:Rtype: ``list``
"""
if space.has_key('__all__'):
return list(space['__all__'])
return [key for key in space.keys() if not key.startswith('_')]
def Property(func): # pylint: disable = C0103
"""
Property with improved docs handling
:Parameters:
`func` : ``callable``
The function providing the property parameters. It takes no arguments
as returns a dict containing the keyword arguments to be defined for
``property``. The documentation is taken out the function by default,
but can be overridden in the returned dict.
:Return: The requested property
:Rtype: ``property``
"""
kwargs = func()
kwargs.setdefault('doc', func.__doc__)
kwargs = kwargs.get
return property(
fget=kwargs('fget'),
fset=kwargs('fset'),
fdel=kwargs('fdel'),
doc=kwargs('doc'),
)
def decorating(decorated, extra=None):
"""
Create decorator for designating decorators.
:Parameters:
`decorated` : function
Function to decorate
`extra` : ``dict``
Dict of consumed keyword parameters (not existing in the originally
decorated function), mapping to their defaults. If omitted or
``None``, no extra keyword parameters are consumed. The arguments
must be consumed by the actual decorator function.
:Return: Decorator
:Rtype: ``callable``
"""
# pylint: disable = R0912
def flat_names(args):
""" Create flat list of argument names """
for arg in args:
if isinstance(arg, basestring):
yield arg
else:
for arg in flat_names(arg):
yield arg
name = decorated.__name__
try:
dargspec = argspec = _inspect.getargspec(decorated)
except TypeError:
dargspec = argspec = ([], 'args', 'kwargs', None)
if extra:
keys = extra.keys()
argspec[0].extend(keys)
defaults = list(argspec[3] or ())
for key in keys:
defaults.append(extra[key])
argspec = (argspec[0], argspec[1], argspec[2], defaults)
# assign a name for the proxy function.
# Make sure it's not already used for something else (function
# name or argument)
counter, proxy_name = -1, 'proxy'
names = dict.fromkeys(flat_names(argspec[0]))
names[name] = None
while proxy_name in names:
counter += 1
proxy_name = 'proxy%s' % counter
def inner(decorator):
""" Actual decorator """
# Compile wrapper function
space = {proxy_name: decorator}
if argspec[3]:
kwnames = argspec[0][-len(argspec[3]):]
else:
kwnames = None
passed = _inspect.formatargspec(argspec[0], argspec[1], argspec[2],
kwnames, formatvalue=lambda value: '=' + value
)
# pylint: disable = W0122
exec "def %s%s: return %s%s" % (
name, _inspect.formatargspec(*argspec), proxy_name, passed
) in space
wrapper = space[name]
wrapper.__dict__ = decorated.__dict__
wrapper.__doc__ = decorated.__doc__
if extra and decorated.__doc__ is not None:
if not decorated.__doc__.startswith('%s(' % name):
wrapper.__doc__ = "%s%s\n\n%s" % (
name,
_inspect.formatargspec(*dargspec),
decorated.__doc__,
)
return wrapper
return inner
class Deprecator(object):
"""
Deprecation proxy class
The class basically emits a deprecation warning on access.
:IVariables:
`__todeprecate` : any
Object to deprecate
`__warn` : ``callable``
Warn function
"""
def __new__(cls, todeprecate, message=None):
"""
Construct
:Parameters:
`todeprecate` : any
Object to deprecate
`message` : ``str``
Custom message. If omitted or ``None``, a default message is
generated.
:Return: Deprecator instance
:Rtype: `Deprecator`
"""
# pylint: disable = W0613
if type(todeprecate) is _types.MethodType:
call = cls(todeprecate.im_func, message=message)
@decorating(todeprecate.im_func)
def func(*args, **kwargs):
""" Wrapper to build a new method """
return call(*args, **kwargs) # pylint: disable = E1102
return _types.MethodType(func, None, todeprecate.im_class)
elif cls == Deprecator and callable(todeprecate):
res = CallableDeprecator(todeprecate, message=message)
if type(todeprecate) is _types.FunctionType:
res = decorating(todeprecate)(res)
return res
return object.__new__(cls)
def __init__(self, todeprecate, message=None):
"""
Initialization
:Parameters:
`todeprecate` : any
Object to deprecate
`message` : ``str``
Custom message. If omitted or ``None``, a default message is
generated.
"""
self.__todeprecate = todeprecate
if message is None:
if type(todeprecate) is _types.FunctionType:
name = todeprecate.__name__
else:
name = todeprecate.__class__.__name__
message = "%s.%s is deprecated." % (todeprecate.__module__, name)
if _os.environ.get('EPYDOC_INSPECTOR') == '1':
def warn():
""" Dummy to not clutter epydoc output """
pass
else:
def warn():
""" Emit the message """
_exceptions.DeprecationWarning.emit(message, stacklevel=3)
self.__warn = warn
def __getattr__(self, name):
""" Get attribute with deprecation warning """
self.__warn()
return getattr(self.__todeprecate, name)
def __iter__(self):
""" Get iterator with deprecation warning """
self.__warn()
return iter(self.__todeprecate)
class CallableDeprecator(Deprecator):
""" Callable proxy deprecation class """
def __call__(self, *args, **kwargs):
""" Call with deprecation warning """
self._Deprecator__warn()
return self._Deprecator__todeprecate(*args, **kwargs)
def load_dotted(name):
"""
Load a dotted name
The dotted name can be anything, which is passively resolvable
(i.e. without the invocation of a class to get their attributes or
the like). For example, `name` could be 'tdi.util.load_dotted'
and would return this very function. It's assumed that the first
part of the `name` is always is a module.
:Parameters:
`name` : ``str``
The dotted name to load
:Return: The loaded object
:Rtype: any
:Exceptions:
- `ImportError` : A module in the path could not be loaded
"""
components = name.split('.')
path = [components.pop(0)]
obj = __import__(path[0])
while components:
comp = components.pop(0)
path.append(comp)
try:
obj = getattr(obj, comp)
except AttributeError:
__import__('.'.join(path))
try:
obj = getattr(obj, comp)
except AttributeError:
raise ImportError('.'.join(path))
return obj
def make_dotted(name):
"""
Generate a dotted module
:Parameters:
`name` : ``str``
Fully qualified module name (like ``tdi.util``)
:Return: The module object of the last part and the information whether
the last part was newly added (``(module, bool)``)
:Rtype: ``tuple``
:Exceptions:
- `ImportError` : The module name was horribly invalid
"""
sofar, parts = [], name.split('.')
oldmod = None
for part in parts:
if not part:
raise ImportError("Invalid module name %r" % (name,))
partname = ".".join(sofar + [part])
try:
fresh, mod = False, load_dotted(partname)
except ImportError:
mod = _imp.new_module(partname)
mod.__path__ = []
fresh = mod == _sys.modules.setdefault(partname, mod)
if oldmod is not None:
setattr(oldmod, part, mod)
oldmod = mod
sofar.append(part)
return mod, fresh
class DependencyGraph(object):
"""
Dependency Graph Container
This is a simple directed acyclic graph. The graph starts empty, and new
nodes (and edges) are added using the `add` method. If the newly added
create a cycle, an exception is thrown.
Finally, the graph is resolved using the `resolve` method. The method will
return topologically ordered nodes and destroy the graph. The topological
order is *stable*, meaning, the same graph will always produce the same
output.
:IVariables:
`_outgoing` : ``dict``
Mapping of outgoing nodes (node -> set(outgoing neighbours))
`_incoming` : ``dict``
Mapping of incoming nodes (node -> set(incoming neighbours))
"""
__slots__ = ('_outgoing', '_incoming')
def __init__(self):
""" Initialization """
self._outgoing = {}
self._incoming = {}
def add(self, start, end):
"""
Add a new nodes with edge to the graph
The edge is directed from `start` to `end`.
:Parameters:
`start` : ``str``
Node
`end` : ``str``
Node
"""
outgoing, incoming = self._outgoing, self._incoming
if start not in outgoing:
outgoing[start] = set()
outgoing[start].add(end)
if end not in incoming:
incoming[end] = set()
incoming[end].add(start)
self._check_cycle(end)
def resolve(self):
"""
Resolve graph and return nodes in topological order
The graph is defined by outgoing and incoming dicts (mapping nodes to
their outgoing or incoming neighbours). The graph is destroyed in the
process.
:Return: Sorted node list. The output is stable, because nodes on
the same level are sorted alphabetically. Furthermore all
leave nodes are put at the end.
:Rtype: ``list``
"""
result, outgoing, incoming = [], self._outgoing, self._incoming
roots = list(set(outgoing.iterkeys()) - set(incoming.iterkeys()))
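        # Kahn-style topological sort: start from nodes that have no incoming edges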
leaves = set(incoming.iterkeys()) - set(outgoing.iterkeys())
roots.sort() # ensure stable output
roots = _collections.deque(roots)
roots_push, roots_pop = roots.appendleft, roots.pop
result_push, opop, ipop = result.append, outgoing.pop, incoming.pop
while roots:
node = roots_pop()
if node not in leaves:
result_push(node)
children = list(opop(node, ()))
children.sort() # ensure stable output
for child in children:
parents = incoming[child]
parents.remove(node)
if not parents:
roots_push(child)
ipop(child)
if outgoing or incoming:
raise AssertionError("Graph not resolved (this is a bug).")
leaves = list(leaves)
leaves.sort() # ensure stable output
return result + leaves
def _check_cycle(self, node):
"""
Find a cycle containing `node`
This assumes, that there's no other possible cycle in the graph. This
assumption is valid, because the graph is checked whenever a new
edge is added.
:Parameters:
`node` : ``str``
Node which may be part of a cycle.
:Exceptions:
- `DependencyCycle` : Raised, if there is, indeed, a cycle in the
graph. The cycling nodes are passed as a list to the exception.
"""
# run a DFS for each child node until we find
# a) a leaf (then backtrack)
# b) node (cycle)
outgoing = self._outgoing
if node in outgoing:
iter_ = iter
stack = [(node, iter_(outgoing[node]).next)]
exhausted, push, pop = StopIteration, stack.append, stack.pop
while stack:
try:
child = stack[-1][1]()
except exhausted:
pop()
else:
if child == node:
raise DependencyCycle(map(_op.itemgetter(0), stack))
elif child in outgoing:
push((child, iter_(outgoing[child]).next))
| [
"[email protected]"
] | |
03da3037aa5075dd5cc26a9b6f22f10ac33ea3dc | 3cc8af76b1fd487eea86610d7a07f477afeab048 | /setup.py | da827dc7ffda6b32ae816d398f0fb9cec5e512e5 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | expresschen/HanLP | 20ff6d03b01b508e4395ea3532e8af712e065ebf | 24b48966e90dfafa1faa65765eb6f35e19cac801 | refs/heads/doc-zh | 2023-07-13T10:16:30.231114 | 2020-02-15T17:19:28 | 2021-08-24T02:15:49 | 401,305,599 | 1 | 0 | Apache-2.0 | 2021-08-30T10:37:28 | 2021-08-30T10:37:27 | null | UTF-8 | Python | false | false | 1,990 | py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 19:26
from os.path import abspath, join, dirname
from setuptools import find_packages, setup
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
long_description = file.read()
version = {}
with open(join(this_dir, "hanlp", "version.py")) as fp:
exec(fp.read(), version)
setup(
name='hanlp',
version=version['__version__'],
description='HanLP: Han Language Processing',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/hankcs/HanLP',
author='hankcs',
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
"Development Status :: 3 - Alpha",
'Operating System :: OS Independent',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Topic :: Text Processing :: Linguistic"
],
keywords='corpus,machine-learning,NLU,NLP',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
install_requires=[
'termcolor',
'pynvml',
'alnlp',
'toposort==1.5',
'transformers>=4.1.1',
        'sentencepiece>=0.1.91',
'torch>=1.6.0',
'hanlp-common>=0.0.9',
'hanlp-trie>=0.0.2',
'hanlp-downloader',
],
extras_require={
'full': [
'fasttext==0.9.1',
'tensorflow==2.3.0',
'bert-for-tf2==0.14.6',
'py-params==0.9.7',
'params-flow==0.8.2',
'penman==0.6.2',
],
},
python_requires='>=3.6',
# entry_points={
# 'console_scripts': [
# 'hanlp=pyhanlp.main:main',
# ],
# },
)
| [
"[email protected]"
] | |
4c5824d086f61db8d6a64e10bf494165a522f574 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/ag/models/UpdateStepAsRuleSpec.py | defba859f89b925fee619a7ec0fa42170e4f650c | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,341 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class UpdateStepAsRuleSpec(object):
def __init__(self, adjustmentType=None, stepAdjustments=None):
"""
        :param adjustmentType: (Optional) Scaling adjustment type. Allowed values: [`Number`, `Percentage`, `Total`]
        - `Number`: add or remove a fixed number of instances
        - `Percentage`: add or remove instances by a specified percentage
        - `Total`: adjust the scaling group's current instance count to the specified total
        If the `adjustmentType` parameter is modified, the `stepAdjustments` parameter must also be provided, otherwise an error is reported
        :param stepAdjustments: (Optional) Array of step adjustment policies
"""
self.adjustmentType = adjustmentType
self.stepAdjustments = stepAdjustments
| [
"[email protected]"
] | |
84191deb0a80f8875e115aa3f5eae0046025e1d7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04031/s251975826.py | 5d8d02c8d8407057ca16c1bec6857fff705531e1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import math
import collections
import fractions
import itertools
import functools
import operator
def solve():
n = int(input())
a = list(map(int, input().split()))
cost = []
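    # Brute force: try every target value in [-100, 100]; cost is the sum of squared adjustments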
for i in range(-100, 101):
ramen = 0
for j in range(n):
ramen += abs(a[j]-i)**2
cost.append(ramen)
print(min(cost))
return 0
if __name__ == "__main__":
solve()
| [
"[email protected]"
] | |
f95f8a329e6279fc0ee59df351c887432ee6fec1 | 93736e8d0d5517eb73af91eeda6e9b0f4b07439e | /Python/Intro_Python/exercise3.py | e85c0cc0eae27917d1839cebdafb0e37e1cd146e | [] | no_license | aayushgupta97/TTN | 0de1a5d3a25d7399d68a81ea51f17233f81029e0 | 324466cbdf0a9b0953dd4ae574bd0b3f753c4fd7 | refs/heads/master | 2020-04-21T12:18:25.721602 | 2019-04-15T11:09:13 | 2019-04-15T11:09:13 | 169,557,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | from abc import ABC, abstractmethod
class Box(ABC):
def add(self, *items):
raise NotImplementedError()
def empty(self):
raise NotImplementedError()
def count(self):
raise NotImplementedError()
class Item():
def __init__(self, name, value):
self.name = name
self.value = value
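# List-backed Box implementation; items are kept in insertion order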
class ListBox(Box):
def __init__(self):
self._items = []
def add(self, *items):
self._items.extend(items)
def empty(self):
items = self._items
self._items = []
return items
def count(self):
return len(self._items)
# class DictBox(Box):
# def __init__(self):
# self._items = {}
# def add(self, *items):
# self._items.update(dict((i.name, i) for i in items))
# def empty(self):
# items = list(self._items.values())
# self._items = {}
# return items
# def count(self):
# return len(self._items)
# #repack
# def repack_boxes(*boxes):
# items = []
# for box in boxes:
# items.extend(box.empty())
# while items:
# for box in boxes:
# try:
# box.add(items.pop())
# except IndexError:
# break
# box1 = ListBox()
# box1.add(Item(str(i), i) for i in range(20))
# box2 = ListBox()
# box2.add(Item(str(i), i) for i in range(9))
# # box3 = DictBox()
# # box3.add(Item(str(i), i) for i in range(5))
# repack_boxes(box1, box2) #, box2, box3
# print(box1.count())
# print(box2.count())
# # print(box3.count())
| [
"[email protected]"
] | |
f2e9d1fa4f806aa5430bcc405d3ed2f4ea3e94d2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03165/s926838179.py | 74a1ba1007c89e718147b0b4242828dcbc0a88f7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | s = input()
t = input()
dp = [[0]*(len(t)+1) for _ in range(len(s)+1)]
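# dp[i+1][j+1] = length of the longest common subsequence of s[:i+1] and t[:j+1]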
for i in range(len(s)):
for j in range(len(t)):
if s[i]==t[j]:
dp[i+1][j+1] = dp[i][j]+1
else:
if dp[i+1][j] < dp[i][j+1]:
dp[i+1][j+1] = dp[i][j+1]
else:
dp[i+1][j+1] = dp[i+1][j]
i = len(s)-1
j = len(t)-1
ans = ''
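# Walk the DP table backwards to reconstruct one longest common subsequence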
while i>=0 and j>=0:
if s[i]==t[j]:
ans += t[j]
i -= 1
j -= 1
else:
if dp[i][j+1]>dp[i+1][j]:
i -= 1
else:
j -= 1
print(ans[::-1]) | [
"[email protected]"
] | |
e91848d3129b01eeac17497f4be7ff57f9e5a2d5 | 215cafb0a79338a2a268c19629f07df20cf68f76 | /venv/bin/pip-3.8 | 6de21cb3dcc8b330bf64648feb3896c8b6bb5f2d | [] | no_license | safwanvk/erp | c95741c5873ebaa53a8a96093928745e02000be9 | d4e427dbb6b71eb9aa6e2d15a039e2e669c53cbe | refs/heads/master | 2022-12-08T12:38:36.817514 | 2020-08-16T15:10:55 | 2020-08-16T15:10:55 | 287,913,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | 8 | #!/home/safwan/Desktop/projects/erp-pro/erp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
9f2833105773edd29e1268cc3705ad9ff9dc2a1c | 98be00ee32971cade82d10c067aff532c3394a62 | /Competitions/Xtreme/xplore.py | f6e47c98fc808d7cc0c5fe1f897956f31365aa4a | [] | no_license | vigneshhari/Competitive_solutions | 5ab34933ea8d84eab67bdef9bb9e4562f6b90782 | 7a35e1386e5cff71cb5746b6797ccc0f03ceb3f4 | refs/heads/master | 2023-01-11T02:53:01.456863 | 2022-12-29T13:50:03 | 2022-12-29T13:50:03 | 115,146,700 | 4 | 2 | null | 2019-10-26T09:15:03 | 2017-12-22T20:03:51 | Python | UTF-8 | Python | false | false | 4,010 | py | import json
from collections import defaultdict
authors_citations = defaultdict(list)
for i in range(input()):
data = raw_input()
temp = json.loads(data)
citation_count = temp["citing_paper_count"]
for i in temp["authors"]["authors"]:
authors_citations[i["full_name"]].append(citation_count)
answers = defaultdict(list)
for i in authors_citations:
values = authors_citations[i]
values.sort()
length = len(values)
out = 0
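    # With the citation counts sorted ascending, this loop effectively finds the largest h such that
    # at least h of the author's papers have h or more citations (an h-index-style score).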
for j in range(length):
if( length - j >= values[j] ):
out = values[j]
else:
if(values[j] > length - j and length - j > out ):
out = length - j
answers[out].append(i)
temp = sorted(answers.keys())
temp = temp[::-1]
for i in temp:
for k in sorted(answers[i]):
print k , i
"""
10
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Echo"}, {"author_order": 2,"affiliation": "","full_name": "Bravo"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}]},"title": "Article Title 1","article_number": "1","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Bravo"}]},"title": "Article Title 2","article_number": "2","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Echo"}, {"author_order": 2,"affiliation": "","full_name": "Delta"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}, {"author_order": 4,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 3","article_number": "3","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 4","article_number": "4","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}, {"author_order": 3,"affiliation": "","full_name": "Alfa"}]},"title": "Article Title 5","article_number": "5","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 5,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}]},"title": "Article Title 6","article_number": "6","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 6,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Delta"}]},"title": "Article Title 7","article_number": "7","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 8","article_number": "8","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 9,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Delta"}, {"author_order": 2,"affiliation": "","full_name": "Charlie"}]},"title": "Article Title 9","article_number": "9","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 4,"publisher": "IEEE"}
{"authors": {"authors": [{"author_order": 1,"affiliation": "","full_name": "Bravo"}, {"author_order": 2,"affiliation": "","full_name": "Echo"}]},"title": "Article Title 10","article_number": "10","publication_title": "Publication Title 1","publication_number": "7","citing_paper_count": 6,"publisher": "IEEE"}
"""
# Solved Completely
| [
"[email protected]"
] | |
6d57c6855dd53ede783641ec65bed681aa69e10a | 1196fe960947b4a7d6bba5df6cdfc7010bb118fb | /examples/apikeys/apikeys.py | 8b921f815ddb9dc3eea6e33e1ad7b042f43026be | [
"MIT"
] | permissive | Nextdoor/sendgrid-python | a4afe5cda9015c7cf6a3a1303785fda05e844277 | a7c834b6391775b796969ef65a3ef259ccabf0f0 | refs/heads/master | 2021-01-22T11:12:08.221546 | 2016-04-22T21:20:07 | 2016-04-22T21:20:07 | 56,885,507 | 0 | 0 | null | 2016-04-22T21:11:50 | 2016-04-22T21:11:49 | null | UTF-8 | Python | false | false | 1,657 | py | import sendgrid
import json
import os
sg = sendgrid.SendGridAPIClient(apikey='YOUR_SENDGRID_API_KEY')
# You can also store your API key an .env variable 'SENDGRID_API_KEY'
##################################################
# List all API Keys belonging to the authenticated user #
# GET /api_keys #
response = sg.client.api_keys.get()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Update the name & scopes of an API Key #
# PUT /api_keys/{api_key_id} #
data = {'sample': 'data'}
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).put(request_body=data)
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Update API keys #
# PATCH /api_keys/{api_key_id} #
data = {'sample': 'data'}
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).patch(request_body=data)
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Get an existing API Key #
# GET /api_keys/{api_key_id} #
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).get()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
##################################################
# Delete API keys #
# DELETE /api_keys/{api_key_id} #
api_key_id = "test_url_param"
response = sg.client.api_keys._(api_key_id).delete()
print(response.status_code)
print(response.response_body)
print(response.response_headers)
| [
"[email protected]"
] | |
df55ffe5751d160215654f44ca59df406536a410 | c03edd979ad6fd4a8abd155e3e63bcefbd93d5c2 | /Image/band_stats.py | 7b83630a66a15155d6b74a944ca88a2b17ef34e5 | [
"MIT"
] | permissive | xiangtaoxu/earthengine-py-examples | 538dafc88a22a351b762ba02df09db583df955bb | 76ae8e071a71b343f5e464077afa5b0ed2f9314c | refs/heads/master | 2022-11-03T03:16:11.933616 | 2020-06-12T15:47:52 | 2020-06-12T15:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py |
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# get highest value
def maxValue(img, scale=30):
max_value = img.reduceRegion(**{
'reducer': ee.Reducer.max(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return max_value
# get lowest value
def minValue(img, scale=30):
min_value = img.reduceRegion(**{
'reducer': ee.Reducer.min(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return min_value
# get mean value
def meanValue(img, scale=30):
mean_value = img.reduceRegion(**{
'reducer': ee.Reducer.mean(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return mean_value
# get standard deviation
def stdValue(img, scale=30):
std_value = img.reduceRegion(**{
'reducer': ee.Reducer.stdDev(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return std_value
dataset = ee.Image('USGS/NED')
dem = dataset.select('elevation')
# dem = ee.Image('srtm90_v4')
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(dem, vis_params, 'NED', False)
roi = ee.Geometry.Polygon(
[[[-120.18204899532924, 38.53481618819663],
[-120.18204899532924, 36.54889033300136],
[-116.75431462032924, 36.54889033300136],
[-116.75431462032924, 38.53481618819663]]])
image = dem.clip(roi)
Map.centerObject(image, 9)
Map.addLayer(image, vis_params, 'DEM')
scale = image.projection().nominalScale()
print("Resolution: ", scale.getInfo())
scale = 30
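# Use a fixed 30 m scale for the reducers instead of the native projection scale printed above.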
print("Minimum value: ", minValue(image, scale).get('elevation').getInfo())
print("Maximum value: ", maxValue(image, scale).get('elevation').getInfo())
print("Average value: ", meanValue(image, scale).get('elevation').getInfo())
print("Standard deviation: ", stdValue(image, scale).get('elevation').getInfo())
# Display the map.
Map
| [
"[email protected]"
] | |
9242782550ab6ddf1a26238b272e633e1ed1d3c8 | c342c8b9b2437d6474b9ae7da154ba47c6fc447c | /src/data/memory_store.py | 81d6cb406f3ba13f5011c2669584a64d0cdc0b4a | [] | no_license | nezaj/menu-api | 0d5118f3a1392f85e51700b5e8ac234bac605518 | bcf759b91893bf72821323c41f963923d9184e68 | refs/heads/master | 2021-01-10T07:09:15.664561 | 2015-11-16T21:28:45 | 2015-11-16T21:28:45 | 44,832,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py | """
Store implementation using in-memory data
"""
import json
import os
from .store_interface import StoreInterface
class MemoryStore(object):
__implements__ = (StoreInterface, )
def __init__(self, data_dir):
self.data_dir = data_dir
self.data = self._load_data(data_dir)
def _load_data(self, data_dir):
"""
Loads data from directory defined in settings. We expect there can be
multiple collections of data and that each collection lives in its
own subdirectory. As a result, we go through each directory and load
        its data into its own key.
"""
data = {}
for d in os.listdir(data_dir):
subd = os.path.join(data_dir, d)
if os.path.isdir(subd):
data[d] = self._load_json_files(subd)
return data
def _load_json_files(self, data_dir):
"""
Return a dictionary representing a collection of json from the given
data directory.
        We iterate through each json file and load its data. We then key
the data in each file by the id defined in the file itself.
"""
collection = {}
for item in os.listdir(data_dir):
df = os.path.join(data_dir, item)
if df.endswith(".json"):
jd = self._load_json_file(df)
d_id, d_meta = self._process_json(jd)
collection[d_id] = d_meta
return collection
@staticmethod
def _load_json_file(f):
with open(f) as jf:
jd = json.load(jf)
return jd
@staticmethod
def _process_json(jd):
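        # Each JSON document is expected to carry its own "id" field; that id becomes the document's key in its collection.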
jd_id = jd["id"]
return jd_id, jd
def create_item(self, collection_id, params):
c = self.data[collection_id]
item_id, item = self._process_json(params)
c[item_id] = item
return item
def delete_item(self, collection_id, item_id):
collection = self.get_collection(collection_id)
item = self.get_item(collection_id, item_id)
if collection and item:
del collection[item_id]
return item
def get_collection(self, collection_id):
return self.data.get(collection_id)
def get_item(self, collection_id, item_id):
collection = self.get_collection(collection_id)
if collection:
return collection.get(item_id)
def update_item(self, collection_id, item_id, params):
item = self.get_item(collection_id, item_id)
if item:
item.update(params)
return item
| [
"[email protected]"
] | |
7ebec6c0a7924a9438dc5a473cc822f219125df8 | 402ed5374ab189c8599b56864c5ce066f34b26c6 | /tests/test_pdf_normal.py | e1fa17bd053bb3771266064aae86ee311b5c241c | [
"BSD-3-Clause"
] | permissive | kailiu77/zfit | db354e9c3eb4a41274af5363834fe231823c6d66 | 8bddb0ed3a0d76fde0aa2cdbf74434b0ee0ae8bb | refs/heads/master | 2020-10-01T23:49:55.751825 | 2019-12-06T15:48:47 | 2019-12-06T15:48:47 | 227,650,723 | 1 | 0 | BSD-3-Clause | 2019-12-12T16:33:54 | 2019-12-12T16:33:53 | null | UTF-8 | Python | false | false | 2,899 | py | # Copyright (c) 2019 zfit
import numpy as np
import pytest
import tensorflow as tf
import zfit
from zfit import Parameter
from zfit.models.dist_tfp import Gauss
from zfit.core.testing import setup_function, teardown_function, tester
mu1_true = 1.
mu2_true = 2.
mu3_true = 0.6
sigma1_true = 1.4
sigma2_true = 2.3
sigma3_true = 1.8
test_values = np.random.uniform(low=-3, high=5, size=100)
norm_range1 = (-4., 2.)
obs1 = 'obs1'
limits1 = zfit.Space(obs=obs1, limits=(-0.3, 1.5))
def create_gauss():
mu1 = Parameter("mu1a", mu1_true)
mu2 = Parameter("mu2a", mu2_true)
mu3 = Parameter("mu3a", mu3_true)
sigma1 = Parameter("sigma1a", sigma1_true)
sigma2 = Parameter("sigma2a", sigma2_true)
sigma3 = Parameter("sigma3a", sigma3_true)
gauss1 = Gauss(mu=mu1, sigma=sigma1, obs=obs1, name="gauss1a")
normal1 = Gauss(mu=mu1, sigma=sigma1, obs=obs1, name="normal1a")
gauss2 = Gauss(mu=mu2, sigma=sigma2, obs=obs1, name="gauss2a")
normal2 = Gauss(mu=mu2, sigma=sigma2, obs=obs1, name="normal2a")
gauss3 = Gauss(mu=mu3, sigma=sigma3, obs=obs1, name="gauss3a")
normal3 = Gauss(mu=mu3, sigma=sigma3, obs=obs1, name="normal3a")
return gauss1, gauss2, gauss3, normal1, normal2, normal3
# gauss1, gauss2, gauss3, normal1, normal2, normal3 = create_gauss()
def test_gauss1():
gauss1, gauss2, gauss3, normal1, normal2, normal3 = create_gauss()
probs1 = gauss1.pdf(x=test_values, norm_range=norm_range1)
probs1_tfp = normal1.pdf(x=test_values, norm_range=norm_range1)
probs1 = zfit.run(probs1)
probs1_tfp = zfit.run(probs1_tfp)
np.testing.assert_allclose(probs1, probs1_tfp, rtol=1e-2)
probs1_unnorm = gauss1.pdf(x=test_values, norm_range=False)
probs1_tfp_unnorm = normal1.pdf(x=test_values, norm_range=False)
probs1_unnorm = zfit.run(probs1_unnorm)
probs1_tfp_unnorm = zfit.run(probs1_tfp_unnorm)
assert not np.allclose(probs1_tfp, probs1_tfp_unnorm, rtol=1e-2)
assert not np.allclose(probs1, probs1_unnorm, rtol=1e-2)
# np.testing.assert_allclose(probs1_unnorm, probs1_tfp_unnorm, rtol=1e-2)
def test_truncated_gauss():
high = 2.
low = -0.5
truncated_gauss = zfit.pdf.TruncatedGauss(mu=1, sigma=2, low=low, high=high, obs=limits1)
gauss = zfit.pdf.Gauss(mu=1., sigma=2, obs=limits1)
probs_truncated = truncated_gauss.pdf(test_values)
probs_gauss = gauss.pdf(test_values)
probs_truncated_np, probs_gauss_np = zfit.run([probs_truncated, probs_gauss])
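    # Split the evaluation points into those inside and outside the truncation window [low, high].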
bool_index_inside = np.logical_and(low < test_values, test_values < high)
inside_probs_truncated = probs_truncated_np[bool_index_inside]
outside_probs_truncated = probs_truncated_np[np.logical_not(bool_index_inside)]
inside_probs_gauss = probs_gauss_np[bool_index_inside]
assert inside_probs_gauss == pytest.approx(inside_probs_truncated, rel=1e-3)
assert all(0 == outside_probs_truncated)
| [
"[email protected]"
] | |
3e70f5bce473ccd4c866c43a7f594f03af071dca | f569978afb27e72bf6a88438aa622b8c50cbc61b | /douyin_open/EnterprisePersonaPersonaCreate/api/__init__.py | d436a85b96fd99e2b5e5d7a6b654b4348bb48850 | [] | no_license | strangebank/swagger-petstore-perl | 4834409d6225b8a09b8195128d74a9b10ef1484a | 49dfc229e2e897cdb15cbf969121713162154f28 | refs/heads/master | 2023-01-05T10:21:33.518937 | 2020-11-05T04:33:16 | 2020-11-05T04:33:16 | 310,189,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from douyin_open.EnterprisePersonaPersonaCreate.api.enterprise_im_persona_create_api import EnterpriseImPersonaCreateApi
| [
"[email protected]"
] | |
d44f23bdc3a2ebd7b826cebb9d784a04528b90e6 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /tabnet/experiment_covertype.py | c00ea76b7c9058be7df642fae0d69184a435f921 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 6,699 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment to train and evaluate the TabNet model on Forest Covertype."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
import data_helper_covertype
import numpy as np
import tabnet_model
import tensorflow as tf
# Run Tensorflow on GPU 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Training parameters
TRAIN_FILE = "data/train_covertype.csv"
VAL_FILE = "data/val_covertype.csv"
TEST_FILE = "data/test_covertype.csv"
MAX_STEPS = 1000000
DISPLAY_STEP = 5000
VAL_STEP = 10000
SAVE_STEP = 40000
INIT_LEARNING_RATE = 0.02
DECAY_EVERY = 500
DECAY_RATE = 0.95
BATCH_SIZE = 16384
SPARSITY_LOSS_WEIGHT = 0.0001
GRADIENT_THRESH = 2000.0
SEED = 1
def main(unused_argv):
# Fix random seeds
tf.set_random_seed(SEED)
np.random.seed(SEED)
# Define the TabNet model
tabnet_forest_covertype = tabnet_model.TabNet(
columns=data_helper_covertype.get_columns(),
num_features=data_helper_covertype.NUM_FEATURES,
feature_dim=128,
output_dim=64,
num_decision_steps=6,
relaxation_factor=1.5,
batch_momentum=0.7,
virtual_batch_size=512,
num_classes=data_helper_covertype.NUM_CLASSES)
column_names = sorted(data_helper_covertype.FEATURE_COLUMNS)
print(
"Ordered column names, corresponding to the indexing in Tensorboard visualization"
)
for fi in range(len(column_names)):
print(str(fi) + " : " + column_names[fi])
# Input sampling
train_batch = data_helper_covertype.input_fn(
TRAIN_FILE, num_epochs=100000, shuffle=True, batch_size=BATCH_SIZE)
val_batch = data_helper_covertype.input_fn(
VAL_FILE,
num_epochs=10000,
shuffle=False,
batch_size=data_helper_covertype.N_VAL_SAMPLES)
test_batch = data_helper_covertype.input_fn(
TEST_FILE,
num_epochs=10000,
shuffle=False,
batch_size=data_helper_covertype.N_TEST_SAMPLES)
train_iter = train_batch.make_initializable_iterator()
val_iter = val_batch.make_initializable_iterator()
test_iter = test_batch.make_initializable_iterator()
feature_train_batch, label_train_batch = train_iter.get_next()
feature_val_batch, label_val_batch = val_iter.get_next()
feature_test_batch, label_test_batch = test_iter.get_next()
# Define the model and losses
encoded_train_batch, total_entropy = tabnet_forest_covertype.encoder(
feature_train_batch, reuse=False, is_training=True)
logits_orig_batch, _ = tabnet_forest_covertype.classify(
encoded_train_batch, reuse=False)
softmax_orig_key_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_orig_batch, labels=label_train_batch))
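  # Total training loss: classification cross-entropy plus an entropy penalty that pushes the feature-selection masks towards sparsity.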
train_loss_op = softmax_orig_key_op + SPARSITY_LOSS_WEIGHT * total_entropy
tf.summary.scalar("Total loss", train_loss_op)
# Optimization step
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
INIT_LEARNING_RATE,
global_step=global_step,
decay_steps=DECAY_EVERY,
decay_rate=DECAY_RATE)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
gvs = optimizer.compute_gradients(train_loss_op)
capped_gvs = [(tf.clip_by_value(grad, -GRADIENT_THRESH,
GRADIENT_THRESH), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
# Model evaluation
# Validation performance
encoded_val_batch, _ = tabnet_forest_covertype.encoder(
feature_val_batch, reuse=True, is_training=False)
_, prediction_val = tabnet_forest_covertype.classify(
encoded_val_batch, reuse=True)
predicted_labels = tf.cast(tf.argmax(prediction_val, 1), dtype=tf.int32)
val_eq_op = tf.equal(predicted_labels, label_val_batch)
val_acc_op = tf.reduce_mean(tf.cast(val_eq_op, dtype=tf.float32))
tf.summary.scalar("Val accuracy", val_acc_op)
# Test performance
encoded_test_batch, _ = tabnet_forest_covertype.encoder(
feature_test_batch, reuse=True, is_training=False)
_, prediction_test = tabnet_forest_covertype.classify(
encoded_test_batch, reuse=True)
predicted_labels = tf.cast(tf.argmax(prediction_test, 1), dtype=tf.int32)
test_eq_op = tf.equal(predicted_labels, label_test_batch)
test_acc_op = tf.reduce_mean(tf.cast(test_eq_op, dtype=tf.float32))
tf.summary.scalar("Test accuracy", test_acc_op)
# Training setup
model_name = "tabnet_forest_covertype_model"
init = tf.initialize_all_variables()
init_local = tf.local_variables_initializer()
init_table = tf.tables_initializer(name="Initialize_all_tables")
saver = tf.train.Saver()
summaries = tf.summary.merge_all()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter("./tflog/" + model_name, sess.graph)
sess.run(init)
sess.run(init_local)
sess.run(init_table)
sess.run(train_iter.initializer)
sess.run(val_iter.initializer)
sess.run(test_iter.initializer)
for step in range(1, MAX_STEPS + 1):
if step % DISPLAY_STEP == 0:
_, train_loss, merged_summary = sess.run(
[train_op, train_loss_op, summaries])
summary_writer.add_summary(merged_summary, step)
print("Step " + str(step) + " , Training Loss = " +
"{:.4f}".format(train_loss))
else:
_ = sess.run(train_op)
if step % VAL_STEP == 0:
feed_arr = [
vars()["summaries"],
vars()["val_acc_op"],
vars()["test_acc_op"]
]
val_arr = sess.run(feed_arr)
merged_summary = val_arr[0]
val_acc = val_arr[1]
print("Step " + str(step) + " , Val Accuracy = " +
"{:.4f}".format(val_acc))
summary_writer.add_summary(merged_summary, step)
if step % SAVE_STEP == 0:
saver.save(sess, "./checkpoints/" + model_name + ".ckpt")
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
7d8af894f2c76cc47cf868d00ed53d834dc11006 | 138f2550bb088a0597e1e71124d9ae32b1fe59c9 | /xbrr/edinet/reader/element_schema.py | b78030ff98b2a4b89b4ca5131bc6e0a11deb5645 | [
"MIT"
] | permissive | chakki-works/xbrr | 9009539e1821c3d9c815f694eb52158ccbbeeb78 | a9783acbb6c23eb0be0e1fbfb47e5b0b0e2cbfb8 | refs/heads/master | 2022-07-22T22:30:17.054418 | 2021-06-16T13:27:40 | 2021-06-16T13:27:40 | 182,622,738 | 23 | 5 | MIT | 2022-07-15T18:42:36 | 2019-04-22T04:26:21 | Python | UTF-8 | Python | false | false | 1,947 | py | from xbrr.base.reader.base_element_schema import BaseElementSchema
class ElementSchema(BaseElementSchema):
def __init__(self,
name="", reference="", label="", alias="",
abstract="", data_type="",
period_type="", balance=""):
super().__init__()
self.name = name
self.reference = reference
self.label = label
self.alias = alias
self.abstract = abstract
self.data_type = data_type
self.period_type = period_type
self.balance = balance
def set_alias(self, alias):
self.alias = alias
return self
@classmethod
def create_from_reference(cls, reader, reference,
label_kind="", label_verbose=False):
name = reference.split("#")[-1]
label = ""
abstract = ""
data_type = ""
period_type = ""
balance = ""
if reader.xbrl_dir:
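            # Resolve the referenced concept in the taxonomy and pull its label plus the attributes defined on its XSD element.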
_def = reader.read_by_link(reference)
if label_kind is not None:
label = _def.label(label_kind, label_verbose)
xsd = _def.xsd
abstract = xsd["abstract"]
data_type = xsd["type"]
if "xbrli:periodType" in xsd.attrs:
period_type = xsd["xbrli:periodType"]
if "xbrli:balance" in xsd.attrs:
balance = xsd["xbrli:balance"]
instance = cls(name=name, reference=reference, label=label,
abstract=abstract, data_type=data_type,
period_type=period_type, balance=balance)
return instance
def to_dict(self):
return {
"name": self.name,
"reference": self.reference,
"label": self.label,
"abstract": self.abstract,
"data_type": self.data_type,
"period_type": self.period_type,
"balance": self.balance
}
| [
"[email protected]"
] | |
bb394288997b3ae09c3bf5e93b767c0a5aa8fcdb | 7ad616ab89e9b67bd27df2df3c8ca7487c5e4564 | /ood/4_stack_overflow.py | 102433a9d98dbc8f0d47c4101d2b370291a90a1b | [] | no_license | zihuaweng/algorithm-snacks | cd7643c7d80d0bcb680336231214c1700fe74cc9 | aa3d88f861bb8b0aceb7ef6c6d05523f54202d77 | refs/heads/master | 2023-01-13T11:03:04.395542 | 2020-11-10T04:42:41 | 2020-11-10T04:42:41 | 149,380,311 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,997 | py | #!/usr/bin/env python3
# coding: utf-8
from enum import Enum
class QuestionStatus(Enum):
OPEN, CLOSED, ON_HOLD, DELETED = 1, 2, 3, 4
class QuestionClosingRemark(Enum):
DUPLICATE, OFF_TOPIC, TOO_BROAD, NOT_CONSTRUCTIVE, NOT_A_REAL_QUESTION, PRIMARILY_OPINION_BASED = 1, 2, 3, 4, 5, 6
class AccountStatus(Enum):
ACTIVE, CLOSED, CANCELED, BLACKLISTED, BLOCKED = 1, 2, 3, 4, 5
# For simplicity, we are not defining getter and setter functions. The reader can
# assume that all class attributes are private and accessed through their respective
# public getter methods and modified only through their public methods function.
class Account:
    def __init__(self, id, password, name, address, email, phone, status=AccountStatus.ACTIVE):
self.__id = id
self.__password = password
self.__name = name
self.__address = address
self.__email = email
self.__phone = phone
self.__status = status
self.__reputation = 0
def reset_password(self):
None
class Member:
def __init__(self, account):
self.__account = account
self.__badges = []
def get_reputation(self):
return self.__account.get_reputation()
def get_email(self):
return self.__account.get_email()
def create_question(self, question):
None
def create_tag(self, tag):
None
class Admin(Member):
def block_member(self, member):
None
def unblock_member(self, member):
None
class Moderator(Member):
def close_question(self, question):
None
def undelete_question(self, question):
None
class Badge:
def __init__(self, name, description):
self.__name = name
self.__description = description
class Tag:
def __init__(self, name, description):
self.__name = name
self.__description = description
self.__daily_asked_frequency = 0
self.__weekly_asked_frequency = 0
# import datetime
class Notification:
def __init__(self, id, content):
self.__notification_id = id
self.__created_on = datetime.datetime.now()
self.__content = content
def send_notification(self):
None
import datetime
class Photo:
def __init__(self, id, path, member):
self.__photo_id = id
self.__photo_path = path
self.__creation_date = datetime.datetime.now()
self.__creating_member = member
def delete(self):
None
# import datetime
class Bounty:
def __init__(self, reputation, expiry):
self.__reputation = reputation
self.__expiry = expiry
def modify_reputation(self, reputation):
None
from abc import ABC, abstractmethod
class Search(ABC):
def search(self, query):
None
import datetime
class Question(Search):
def __init__(self, title, description, bounty, asking_member):
self.__title = title
self.__description = description
self.__view_count = 0
self.__vote_count = 0
self.__creation_time = datetime.datetime.now()
self.__update_time = datetime.datetime.now()
self.__status = QuestionStatus.OPEN
self.__closing_remark = QuestionClosingRemark.DUPLICATE
self.__bounty = bounty
self.__asking_member = asking_member
self.__photos = []
self.__comments = []
self.__answers = []
def close(self):
None
def undelete(self):
None
def add_comment(self, comment):
None
def add_bounty(self, bounty):
None
def search(self, query):
# return all questions containing the string query in their title or description.
None
class Comment:
def __init__(self, text, member):
self.__text = text
self.__creation_time = datetime.datetime.now()
self.__flag_count = 0
self.__vote_count = 0
self.__asking_member = member
def increment_vote_count(self):
None
class Answer:
def __init__(self, text, member):
self.__answer_text = text
self.__accepted = False
self.__vote_count = 0
self.__flag_count = 0
self.__creation_time = datetime.datetime.now()
self.__creating_member = member
self.__photos = []
def increment_vote_count(self):
None | [
"[email protected]"
] | |
b208e49da531e72d4264b91f912ebd1523d749d6 | 731c3f2f85f6002725322eedc0b2c8b5e74f610e | /0-jakc/jakc_hr/__openerp__.py | 2df1fff1bd5ba0cb87ac4230b96c9fe3ed3e6001 | [] | no_license | babarlhr/project-0021 | 1ac824657f893c8f25d6eb3b839051f350d7cc9d | e30b8a9f5d2147d3ca5b56b69ec5dbd22f712a91 | refs/heads/master | 2021-09-22T15:45:47.431000 | 2018-09-11T14:59:49 | 2018-09-11T14:59:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # -*- coding: utf-8 -*-
{
'name': 'Jakc Labs - HR Enhancement',
'version': '9.0.0.1.0',
'category': 'General',
'license': 'AGPL-3',
'summary': 'HR Enchancement',
'author': "Jakc Labs",
'website': 'http://www.jakc-labs.com/',
'depends': [
'hr'
],
'data': [
'views/jakc_hr_view.xml',
],
'installable': True,
'application': True,
}
| [
"[email protected]"
] | |
91b6b76a311bc1f86fdb741e4608f8220dbde146 | 30d61ce0b728f31a830db6b6b1954a32551990b2 | /src/gui_config/custom/sr_source_mode_tab.py | b2a24e0a0387ec82695aca2a32af5633db14603c | [
"MIT"
] | permissive | hgiesel/anki_set_randomizer | 6755dc8489b703887c55a5427bbbdab858f58a65 | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | refs/heads/master | 2022-08-24T05:45:13.339132 | 2020-01-15T17:04:26 | 2020-01-30T13:56:50 | 197,258,760 | 5 | 0 | MIT | 2022-07-20T17:28:42 | 2019-07-16T19:56:27 | JavaScript | UTF-8 | Python | false | false | 1,391 | py | from aqt.qt import QWidget
from ...lib.config import deserialize_source_mode, deserialize_cloze_options, deserialize_occlusion_options
from ..sr_source_mode_tab_ui import Ui_SRSourceModeTab
class SRSourceModeTab(QWidget):
def __init__(self):
super().__init__()
self.ui = Ui_SRSourceModeTab()
self.ui.setupUi(self)
def setupUi(self, source_mode):
cloze_options = source_mode.cloze_options
self.ui.clozeShortcutsEnabledCheckBox.setChecked(cloze_options.shortcuts_enabled)
self.ui.clozeVsPrefixLineEdit.setText(cloze_options.vs_prefix)
self.ui.clozeOpenDelimLineEdit.setText(cloze_options.open_delim)
self.ui.clozeCloseDelimLineEdit.setText(cloze_options.close_delim)
def exportClozeOptions(self):
return deserialize_cloze_options({
'shortcutsEnabled': self.ui.clozeShortcutsEnabledCheckBox.isChecked(),
'vsPrefix': self.ui.clozeVsPrefixLineEdit.text(),
'openDelim': self.ui.clozeOpenDelimLineEdit.text(),
'closeDelim': self.ui.clozeCloseDelimLineEdit.text(),
})
def exportOcclusionOptions(self):
return deserialize_occlusion_options({})
def exportData(self):
return deserialize_source_mode({
'clozeOptions': self.exportClozeOptions(),
'occlusionOptions': self.exportOcclusionOptions(),
})
| [
"[email protected]"
] | |
223bd273f49b7e533b590ec4dc1f9394ef62d3c7 | bfbe642d689b5595fc7a8e8ae97462c863ba267a | /bin/Python27/Lib/site-packages/OMPython/OMTypedParser.py | a0e4c90b6d536f97341c456f18de90f519d82e80 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | mcanthony/meta-core | 0c0a8cde1669f749a4880aca6f816d28742a9c68 | 3844cce391c1e6be053572810bad2b8405a9839b | refs/heads/master | 2020-12-26T03:11:11.338182 | 2015-11-04T22:58:13 | 2015-11-04T22:58:13 | 45,806,011 | 1 | 0 | null | 2015-11-09T00:34:22 | 2015-11-09T00:34:22 | null | UTF-8 | Python | false | false | 4,041 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Martin Sjölund"
__license__ = """
This file is part of OpenModelica.
Copyright (c) 1998-CurrentYear, Open Source Modelica Consortium (OSMC),
c/o Linköpings universitet, Department of Computer and Information Science,
SE-58183 Linköping, Sweden.
All rights reserved.
THIS PROGRAM IS PROVIDED UNDER THE TERMS OF THE BSD NEW LICENSE OR THE
GPL VERSION 3 LICENSE OR THE OSMC PUBLIC LICENSE (OSMC-PL) VERSION 1.2.
ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS PROGRAM CONSTITUTES
RECIPIENT'S ACCEPTANCE OF THE OSMC PUBLIC LICENSE OR THE GPL VERSION 3,
ACCORDING TO RECIPIENTS CHOICE.
The OpenModelica software and the OSMC (Open Source Modelica Consortium)
Public License (OSMC-PL) are obtained from OSMC, either from the above
address, from the URLs: http://www.openmodelica.org or
http://www.ida.liu.se/projects/OpenModelica, and in the OpenModelica
distribution. GNU version 3 is obtained from:
http://www.gnu.org/copyleft/gpl.html. The New BSD License is obtained from:
http://www.opensource.org/licenses/BSD-3-Clause.
This program is distributed WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, EXCEPT AS
EXPRESSLY SET FORTH IN THE BY RECIPIENT SELECTED SUBSIDIARY LICENSE
CONDITIONS OF OSMC-PL.
Author : Anand Kalaiarasi Ganeson, [email protected], 2012-03-19
Version: 1.0
"""
__status__ = "Prototype"
__maintainer__ = "https://openmodelica.org"
from pyparsing import *
import sys
def convertNumbers(s,l,toks):
n = toks[0]
try:
return int(n)
except ValueError, ve:
return float(n)
def convertString(s,s2):
return s2[0].replace("\\\"",'"')
def convertDict(d):
return dict(d[0])
def convertTuple(t):
return tuple(t[0])
omcRecord = Forward()
omcValue = Forward()
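# Forward() creates placeholders so omcValue and omcRecord can reference each other recursively before they are fully defined below.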
TRUE = Keyword("true").setParseAction( replaceWith(True) )
FALSE = Keyword("false").setParseAction( replaceWith(False) )
NONE = (Keyword("NONE") + Suppress("(") + Suppress(")") ).setParseAction( replaceWith(None) )
SOME = (Suppress( Keyword("SOME") ) + Suppress("(") + omcValue + Suppress(")") )
omcString = QuotedString(quoteChar='"',escChar='\\', multiline = True).setParseAction( convertString )
omcNumber = Combine( Optional('-') + ( '0' | Word('123456789',nums) ) +
Optional( '.' + Word(nums) ) +
Optional( Word('eE',exact=1) + Word(nums+'+-',nums) ) )
ident = Word(alphas+"_",alphanums+"_") | Combine( "'" + Word(alphanums+"!#$%&()*+,-./:;<>=?@[]^{}|~ ") + "'" )
fqident = Forward()
fqident << ( (ident + "." + fqident) | ident )
omcValues = delimitedList( omcValue )
omcTuple = Group( Suppress('(') + Optional(omcValues) + Suppress(')') ).setParseAction(convertTuple)
omcArray = Group( Suppress('{') + Optional(omcValues) + Suppress('}') ).setParseAction(convertTuple)
omcValue << ( omcString | omcNumber | omcRecord | omcArray | omcTuple | SOME | TRUE | FALSE | NONE | Combine(fqident) )
recordMember = delimitedList( Group( ident + Suppress('=') + omcValue ) )
omcRecord << Group( Suppress('record') + Suppress( ident ) + Dict( recordMember ) + Suppress('end') + Suppress( ident ) + Suppress(';') ).setParseAction(convertDict)
omcGrammar = omcValue + StringEnd()
omcNumber.setParseAction( convertNumbers )
def parseString(string):
return omcGrammar.parseString(string)[0]
if __name__ == "__main__":
testdata = """
(1.0,{{1,true,3},{"4\\"
",5.9,6,NONE ( )},record ABC
startTime = ErrorLevel.warning,
'stop*Time' = SOME(1.0)
end ABC;})
"""
expected = (1.0, ((1, True, 3), ('4"\n', 5.9, 6, None), {"'stop*Time'": 1.0, 'startTime': 'ErrorLevel.warning'}))
results = parseString(testdata)
if results <> expected:
print "Results:",results
print "Expected:",expected
print "Failed"
sys.exit(1)
print "Matches expected output",
print type(results),repr(results)
| [
"[email protected]"
] | |
6ddbc8154053d1a105be3ce47e7b58a27e253eb8 | de479d4a8af0e070b2bcae4186b15a8eb74971fb | /cn/iceknc/study/k_python_mini_web/__init__.py | 2c29f9732decc87fd29a825cf08dd49ab11e8eb8 | [] | no_license | iceknc/python_study_note | 1d8f6e38be57e4dc41a661c0a84d6ee223c5a878 | 730a35890b77ecca3d267fc875a68e96febdaa85 | refs/heads/master | 2020-05-19T18:44:55.957392 | 2019-09-27T01:15:54 | 2019-09-27T01:15:54 | 185,160,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # -*- coding: utf-8 -*-
# @Author: 徐志鹏
# @Date : 2019/5/29
# @Desc :
def main():
pass
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f66be245d49c0500c212fbf3f7565976f9419b1f | 80755ce68bf894bfa7c7cec50051b18a6069c552 | /nkamg_malware/collector/samples/file_monitor.py | 0d59cd857a599c427bd46b6fa686fa151a915729 | [
"Apache-2.0"
] | permissive | NKQiuKF/malware_update | 6538c9308dd7b476b687fca4ea120209207257bc | a875b5011fee2486da5618e01da61d730d6ac0dd | refs/heads/master | 2022-10-17T09:08:34.605641 | 2019-09-02T09:00:45 | 2019-09-02T09:00:45 | 205,817,190 | 0 | 0 | null | 2022-10-06T18:33:50 | 2019-09-02T08:59:47 | JavaScript | UTF-8 | Python | false | false | 2,497 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Nankai University Information Security
#QiuKF [email protected]
#get file results at fixed time
#create processed.csv at sub dirctories
#create Total_File_Data.csv at /collection
from multiprocessing import Process,Pool
import os
import pandas as pd
import time
import sys
sys.path.append('../core/')
from setting import SAMPLES_PATH
#samples_path='/data/malware/'
def merge_file():
data = {"sha256":[],"type":[]}
total_df=pd.DataFrame(data)
chr=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
for first in chr:
sub_dir_list=make_file_dir(first)
for each in sub_dir_list:
sub_pd=pd.read_csv(SAMPLES_PATH+each+'processed.csv')
total=[total_df,sub_pd]
total_df=pd.concat(total)
print 'concat '+each+'processed.csv'
total_df.to_csv('Total_File_Data.csv',index=False)
def exe_file(first_dir):
count=0
#print 'test'
print 'Run task %s (%s)...' % (first_dir, os.getpid())
child_dir=make_file_dir(first_dir)
#print child_dir
for each_dir in child_dir:
data = {"sha256":[],"type":[]}
processed_df=pd.DataFrame(data)
all_files=os.listdir(SAMPLES_PATH+each_dir)
for each_file in all_files:
file_command = os.popen('file ' +SAMPLES_PATH+each_dir+each_file)
#print 'file ' +SAMPLES_PATH+each_dir+each_file
read_data= file_command.read()
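            # The `file` command prints "<path>: <description>"; keep only the description between ": " and the end of the line.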
tmp=read_data[read_data.index(':') + 2 : read_data.index('\n')]
#print tmp
processed_df.loc[len(processed_df)]=[each_file,tmp]
processed_df.to_csv(SAMPLES_PATH+each_dir+'processed.csv',index=False)
print 'created '+SAMPLES_PATH+each_dir+'processed.csv'
def make_file_dir(first):
ret=[]
chr_list=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
tmp=''
for second in chr_list:
two='/'+second
for third in chr_list:
three=two+'/'+third+'/'
ret.append(first+three)
#print len(ret)
#print ret
return ret
def main():
#print SAMPLES_PATH
print('Parent process %s.' %os.getpid())
#dic_list=make_file_dir()
first_dic=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
p=Pool(16)
for each in first_dic:
#print each
p.apply_async(exe_file,args=(each,))
p.close()
p.join()
print 'start merging file results...'
merge_file()
#is_apk('1.apk')
#is_apk(base_path+'three')
if __name__=='__main__':
while True:
main()
time.sleep(36000)
| [
"[email protected]"
] | |
8d77c1ca5725c5efe3918715e630d4c0b280af6f | cf803d382d6e0bc7492d787e91a695a2fda944b8 | /model.py | a1971dd66b502e9a7ab9cad39d075165745a907a | [
"BSD-2-Clause"
] | permissive | parenthetical-e/fmri | d676d524cf1606f098864c5bf9e98607674db1ab | 32c5571d8767684ec6586320e85485cd89ed9327 | refs/heads/master | 2021-01-02T22:17:28.243141 | 2020-04-07T06:07:26 | 2020-04-07T06:07:26 | 3,848,746 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | """
A set of functions for creating or maipulating files needed for design
matrices, both in spm and python.
"""
def spm_onsets(trialfile='', durations=3, recode=None):
"""
Map <trialfile> (a 1d csv) into onset/TR time which is determind by
<durations> (which can be an int if every trial had the same length in the
model or a list if not).
If <recode> is a dict of the form {1:1,2:1,3:1} where the key is the
current code in trialfile and value is what you would
like that one to be recoded as. In this example, 1,2,3 all become 1.
Any value without a key, but with an entry in trialfile is silently left
as is.
"""
import csv
fs = open(trialfile, 'r')
trials = csv.reader(fs).next()
fs.close()
if isinstance(durations, int):
tmp = [durations, ] * len(trials)
elif isinstance(duration,(list,tuple)):
pass
else:
raise TypeError('<durations> must be an int, list or tuple.')
if recode != None:
print('Recoding....')
[rtrials.extend(recode.get(t)) for t in trials]
# mMap the trialfile data into TR/onset time.
onsets = []
for t,d in zip(trials,durations):
onsets.extend([t,] + [0,]*(d-1))
## if t = 2 and d = 3 then [t,] + [0,]*(d-1)
## should give the list: [2 0 0]
return onsets,durations
| [
"[email protected]"
] | |
4906a33c2bde49d3d89e48c0aa86f333a1aef478 | 1602b8f6f40e27269a6d9fe42dbc720a5127b175 | /fleet/category.py | 9b756897fb0f2b29b020ab0444a68c1526aa3707 | [] | no_license | mit-jp/fleet-model | a9f581c2cb56196a13e2db8ef883c1f8b61b2682 | 2c1b293299741a076384114572dc74a988bb8581 | refs/heads/master | 2020-04-11T01:30:26.634473 | 2017-01-29T04:08:31 | 2017-01-29T04:08:31 | 32,412,401 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py |
class Category:
"""Rudimentary ordered tree data structure for vehicle classes."""
_parent = None
_children = []
_label = ''
def __init__(self, label, children=dict(), parent=None):
self._parent = parent
self._label = label
try:
self._children = list([Category(k, v, self) for k, v in
children.items()])
except AttributeError:
pass
def __str__(self):
return self._label
def children(self):
return list(map(str, self._children))
def parent(self):
return str(self._parent)
def nodes(self):
return sum([child.nodes() for child in self._children], [self._label])
def leaves(self, root):
if len(self._children) == 0:
return self._label
else:
return sum([child.leaves() for child in self._children], [])
def find(self, label):
"""Return the subtree with *label* at its head."""
if label == self._label:
return self
for child in self._children:
result = child.find(label)
if result:
return result
return None
| [
"[email protected]"
] | |
87e8b16a2d83845e4d137ca080069e56f6a1690d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/awentzonline_keras-rtst/keras-rtst-master/keras_rtst/models/style_xfer.py | d88c20d29b26ef489cc52a716031330e201234f5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,226 | py | '''Texture Network for style transfer.'''
import time
import keras_vgg_buddy
import numpy as np
from keras import activations
from keras import backend as K
from keras.layers import advanced_activations
from keras.layers.core import Layer
from keras.layers.convolutional import AveragePooling2D
from keras.models import Graph
from keras.optimizers import Adam
from .base import create_res_texture_net, create_sequential_texture_net, dumb_objective
from .regularizers import (
AnalogyRegularizer, FeatureContentRegularizer, FeatureStyleRegularizer,
MRFRegularizer, TVRegularizer)
def make_model(args, style_img=None):
model = Graph()
model.add_input('content', batch_input_shape=(args.batch_size, 3, args.max_height, args.max_width))
try: # if it's a standard activation then just keep the string
activations.get(args.activation)
activation = args.activation
except: # otherwise we need to look up the class in advanced activations (e.g. LeakyReLU)
activation = getattr(advanced_activations, args.activation, 'activation function')
if args.sequential_model:
texnet = create_sequential_texture_net(args.max_height, args.max_width,
activation=activation, num_res_filters=args.num_res_filters,
num_inner_blocks=args.num_blocks)
else:
texnet = create_res_texture_net(args.max_height, args.max_width,
activation=activation, num_res_filters=args.num_res_filters,
num_res_blocks=args.num_blocks)
# add the texture net to the model
model.add_node(texnet, 'texnet', 'content')
model.add_output('texture_rgb', 'texnet')
# hook up the training network stuff
if args.train:
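        # Stack the generated batch and the original content batch along the batch axis so a single VGG pass produces features for both.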
model.add_node(Layer(), 'vgg_concat', inputs=['texnet', 'content'], concat_axis=0)
# add VGG and the constraints
keras_vgg_buddy.add_vgg_to_graph(model, 'vgg_concat', pool_mode=args.pool_mode,
trainable=False, weights_path=args.vgg_weights)
# add the regularizers for the various feature layers
vgg = keras_vgg_buddy.VGG16(args.max_height, args.max_width, pool_mode=args.pool_mode, weights_path=args.vgg_weights)
print('computing static features')
feature_layers = set()
if args.style_weight:
feature_layers.update(args.style_layers)
if args.content_weight:
feature_layers.update(args.content_layers)
if args.mrf_weight:
feature_layers.update(args.mrf_layers)
if args.analogy_weight:
feature_layers.update(args.analogy_layers)
style_features = vgg.get_features(np.expand_dims(style_img, 0), feature_layers)
regularizers = []
if args.style_weight != 0.0:
for layer_name in args.style_layers:
layer = model.nodes[layer_name]
style_regularizer = FeatureStyleRegularizer(
target=style_features[layer_name],
weight=args.style_weight / len(args.style_layers))
style_regularizer.set_layer(layer)
regularizers.append(style_regularizer)
if args.content_weight != 0.0:
for layer_name in args.content_layers:
layer = model.nodes[layer_name]
content_regularizer = FeatureContentRegularizer(
weight=args.content_weight / len(args.content_layers))
content_regularizer.set_layer(layer)
regularizers.append(content_regularizer)
if args.mrf_weight != 0.0:
for layer_name in args.mrf_layers:
layer = model.nodes[layer_name]
mrf_regularizer = MRFRegularizer(
K.variable(style_features[layer_name]),
weight=args.mrf_weight / len(args.mrf_layers))
mrf_regularizer.set_layer(layer)
regularizers.append(mrf_regularizer)
if args.analogy_weight != 0.0:
style_map_img = keras_vgg_buddy.load_and_preprocess_image(args.style_map_image_path, width=args.max_width, square=True)
style_map_features = vgg.get_features(np.expand_dims(style_map_img, 0), args.analogy_layers)
for layer_name in args.analogy_layers:
layer = model.nodes[layer_name]
analogy_regularizer = AnalogyRegularizer(
style_map_features[layer_name],
style_features[layer_name],
weight=args.analogy_weight / len(args.analogy_layers))
analogy_regularizer.set_layer(layer)
regularizers.append(analogy_regularizer)
if args.tv_weight != 0.0:
tv_regularizer = TVRegularizer(weight=args.tv_weight)
tv_regularizer.set_layer(model.nodes['texnet'])
regularizers.append(tv_regularizer)
setattr(model.nodes['vgg_concat'], 'regularizers', regularizers) # Gotta put em somewhere?
print('compiling')
start_compile = time.time()
adam = Adam(lr=args.learn_rate, beta_1=0.7)
model.compile(optimizer=adam, loss=dict(texture_rgb=dumb_objective))
print('Compiled model in {:.2f}'.format(time.time() - start_compile))
return model
| [
"[email protected]"
] | |
366ad807aedcc7af54f5060dcaa12dc46f0f7613 | 4beb10c8a8023f4945c996a1487ec1b3968cb5da | /f5_lbaas_dashboard/enabled/_1480_project_loadbalancersv2_panel.py | d89ef94a631dd6c077b8bc716031d61c12c3ef69 | [
"Apache-2.0"
] | permissive | F5Networks/f5-lbaas-dashboard | 7aebb669a27d8ebdc9feaa7f088f9158fb157046 | 62cb1dfbb87c94bdcb3f53f6ec2ab0004ac43d54 | refs/heads/master | 2023-03-28T01:59:58.666570 | 2022-09-27T01:16:34 | 2022-09-27T01:16:34 | 147,327,541 | 0 | 0 | Apache-2.0 | 2022-09-27T01:16:37 | 2018-09-04T10:15:51 | JavaScript | UTF-8 | Python | false | false | 993 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'loadbalancersv2'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'network'
# Python panel class of the PANEL to be added.
ADD_PANEL = (
'f5_lbaas_dashboard.dashboards.project.loadbalancersv2.panel'
'.LoadBalancersUI')
| [
"[email protected]"
] | |
f0708c24fd6f6ad48737cffa37907961622cd1ca | 911e7b25961067339c31957ff41ebdb3c355d948 | /_utils/python/libs_my_test/test_http_util2.py | bce813fa41030ee2228f882b54d33f586e0f1d73 | [] | no_license | qlong2012/notes-1 | c93efcc9a70f786929ef7e4c053e266e2bf354ad | 78913e8235845d4a94dd19f730d607df754da7fe | refs/heads/master | 2020-05-20T01:05:04.678662 | 2019-04-25T10:06:37 | 2019-04-25T10:06:53 | 185,303,355 | 1 | 0 | null | 2019-05-07T02:10:14 | 2019-05-07T02:10:14 | null | UTF-8 | Python | false | false | 17,681 | py | #!python
# -*- coding:utf-8 -*-
"""
Tests for the shared helper module (HTTP request handling) http_util2.py
Created on 2019/3/14
Updated on 2019/3/14
@author: Holemar
Third-party dependencies:
    tornado==3.1.1
The HTTP requests are tested by starting a tornado server in a separate thread
(a mock-based approach would require deep knowledge of the urllib library, which is not feasible here for now).
todo: the compression and threading tests rely on reading log records, which is not very precise and should be improved later.
"""
import os
import logging
import unittest
import threading
import __init__
from libs_my import http_util2 as http_util
from libs_my import str_util, tornado_util
# Use a logging Filter subclass to capture log records for inspection
NOW_LOG_RECORD = []
class TestFilter(logging.Filter):
def filter(self, record):
global NOW_LOG_RECORD
        NOW_LOG_RECORD.append(record)  # pass the records captured by the Filter out to the tests
return True
http_util.logger.addFilter(TestFilter())
class TestHttpUtil(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        """Set-up performed once before any test in this class runs"""
super(TestHttpUtil, cls).setUpClass()
        # start the thread that serves the test requests
cls.port = tornado_util.get_port()
cls.web = tornado_util.run(cls.port, worker='tornado', threads=True)
@classmethod
    def tearDownClass(cls):
        """Tear-down performed once after all tests in this class have run"""
super(TestHttpUtil, cls).tearDownClass()
        cls.web._Thread__stop()  # stop the web server thread
    # GET test
def test_get(self):
url = 'http://127.0.0.1:%d/test_get' % self.port
param_url = url + '?a=11&b=22&c=%E5%93%88&d=%E5%93%88&e='
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_get/?", method='get')
def get_test_get(self, **kwargs):
if self.request.body:
return {"result": -1, "reason":'这是POST请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
        # plain request, the response body is returned unchanged
res = http_util.get(param_url)
assert isinstance(res, basestring)
assert res == result
        # parameter conversion
res = http_util.get(url, {'a':11,'b':'22','c':'哈','d':u'哈','e':None})
assert isinstance(res, basestring)
assert res == result
res2 = http_util.get(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
        # return_json converts the response into a dict
res = http_util.get(param_url, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
    # POST test
def test_post(self):
url = 'http://127.0.0.1:%d/test_post' % self.port
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_post/?", method='post')
def get_test_post(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
        # request without parameters, the response body is returned unchanged
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == "{}"
        # parameter conversion
res = http_util.post(url, param)
assert isinstance(res, basestring)
assert res == result
res2 = http_util.post(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
        # return_json converts the response into a dict
res = http_util.post(url, param, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
    # put/patch request test
def test_put_patch(self):
url = 'http://127.0.0.1:%d/test_put_patch' % self.port
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
methods = ['put','patch']
@tornado_util.fn(url=r"/test_put_patch/?", method=methods)
def test_put_patch(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
if (self.get_argument('a', '') == '11' and self.get_argument('b', '') == '22' and self.get_argument('c', '') == u'哈' and
self.get_argument('d', '') == u'哈' and self.get_argument('e', '') == ''):
return result
else:
return kwargs
for method in methods:
fun = getattr(http_util, method)
            # request without parameters, the response body is returned unchanged
res = fun(url)
assert isinstance(res, basestring)
assert res == "{}"
            # parameter conversion
res = fun(url, param)
assert isinstance(res, basestring)
assert res == result
res2 = fun(url, {'b':'22','c':'哈','d':u'哈','e':0})
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == {'b':'22','c':u'哈','d':u'哈','e':'0'}
            # return_json converts the response into a dict
res = fun(url, param, return_json=True)
assert isinstance(res, dict)
assert res == {"use_time": "0.0003", "reason": u"访问成功", "version": "2.0.0", "result": 0}
        # a GET request cannot access this endpoint
res = http_util.get(url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
        # a POST request cannot access this endpoint
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
    # delete/options request test
def test_delete_options(self):
url = 'http://127.0.0.1:%d/test_delete_options' % self.port
param_url = url + '?a=11&b=22&c=%E5%93%88&d=%E5%93%88&e='
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
methods = ['delete','options']
@tornado_util.fn(url=r"/test_delete_options/?", method=methods)
def test_delete_options(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
return result
for method in methods:
fun = getattr(http_util, method)
            # plain request, the response body is returned unchanged
res = fun(param_url)
assert isinstance(res, basestring)
assert res == result
        # a GET request cannot access this endpoint
res = http_util.get(param_url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
        # a POST request cannot access this endpoint
res = http_util.post(param_url)
assert isinstance(res, basestring)
assert res == "<html><title>405: Method Not Allowed</title><body>405: Method Not Allowed</body></html>"
    # JSON body submission test
def test_send_json(self):
url = 'http://127.0.0.1:%d/test_send_json' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_send_json/?", method='post')
def test_send_json(self, **kwargs):
if self.request.body is None:
return {"result": -1, "reason":'这是GET请求,请求方式有误!'}
return str_util.to_json(self.request.body)
        # submit a JSON body
param = {'a':11,'b':'22','c':'哈','d':u'哈','e':None}
res = http_util.post(url, param, send_json=True)
assert isinstance(res, basestring)
assert str_util.to_json(res) == param
param2 = {'b':'22','c':'哈','d':u'哈','e':0}
res2 = http_util.post(url, param2, send_json=True)
assert isinstance(res2, basestring)
assert str_util.to_json(res2) == param2
    # gzip compression
def test_gzip(self):
url = 'http://127.0.0.1:%d/test_gzip' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}' * 100
@tornado_util.fn(url=r"/test_gzip/?", gzip_length=10)
def get_test_gzip(self, **kwargs):
if self.request.headers.get("Accept-Encoding", "") in ('gzip', 'deflate'):
return result
else:
return {"result": -1, "reason":'这是没有压缩的请求,请求方式有误!'}
        # GET request
global NOW_LOG_RECORD
NOW_LOG_RECORD = []
res = http_util.get(url, gzip=True)
assert isinstance(res, basestring)
assert res == result
assert len(NOW_LOG_RECORD) >= 2
        record = NOW_LOG_RECORD[-2]  # the second-to-last log record is the decompression log entry
assert record is not None
assert record.levelno == logging.INFO
assert u'压缩请求' in record.msg
assert record.method == 'GET'
        assert record.before_length  # the length before decompression was recorded
        assert record.after_length  # the length after decompression was recorded
        assert record.after_length == len(result)
        assert record.before_length < record.after_length  # the decompressed content is longer
        # POST request
NOW_LOG_RECORD = []
res = http_util.post(url, gzip=True)
assert isinstance(res, basestring)
assert res == result
assert len(NOW_LOG_RECORD) >= 2
        record = NOW_LOG_RECORD[-2]  # the second-to-last log record is the decompression log entry
assert record is not None
assert record.levelno == logging.INFO
assert u'压缩请求' in record.msg
assert record.method == 'POST'
        assert record.before_length  # the length before decompression was recorded
        assert record.after_length  # the length after decompression was recorded
        assert record.after_length == len(result)
        assert record.before_length < record.after_length  # the decompressed payload is longer
    # setting request headers
def test_headers(self):
url = 'http://127.0.0.1:%d/test_headers/' % self.port
token = 'dfwerwer1548hgjhfre35656'
@tornado_util.fn(url=r"/test_headers/")
def get_test_test_headers(self, **kwargs):
if self.request.headers.get("Accept-Token", "") == token:
return 111
else:
return 222
        # GET request
res = http_util.get(url)
assert isinstance(res, basestring)
assert res == '222'
res = http_util.get(url, headers={"Accept-Token":token})
assert isinstance(res, basestring)
assert res == '111'
        # POST request
res = http_util.post(url)
assert isinstance(res, basestring)
assert res == '222'
res = http_util.post(url, headers={"Accept-Token":token})
assert isinstance(res, basestring)
assert res == '111'
    # error-handling test
    def test_error(self):
        # allow up to 3 attempts per request
http_util.init(repeat_time=3)
url = 'http://127.0.0.1:%d/test_error' % self.port
global error_times
error_times = 0
        # define a custom handler that always raises
class _ExceptionHandler(tornado_util.RequestHandler):
def get(self):
global error_times
error_times += 1
raise Exception('出错测试')
post = get
        # register the handler in the URL routing table
tornado_util.add_apps(r"/test_error/?", _ExceptionHandler)
        # GET request: every attempt fails, so the 500 error page is returned
res = http_util.get(url)
assert res == '<html><title>500: Internal Server Error</title><body>500: Internal Server Error</body></html>'
        assert error_times == 3  # number of attempts
        # POST request: same behaviour, three attempts then the 500 page
error_times = 0
res = http_util.post(url)
assert res == '<html><title>500: Internal Server Error</title><body>500: Internal Server Error</body></html>'
        assert error_times == 3  # number of attempts
        # restore the default so other tests are unaffected
http_util.init(repeat_time=1)
    # asynchronous (threaded) request test
    def test_threads(self):
        # make requests asynchronous by default
http_util.init(threads=True)
url = 'http://127.0.0.1:%d/test_threads' % self.port
result = '{"use_time": "0.0003", "reason": "\u8bbf\u95ee\u6210\u529f", "version": "2.0.0", "result": 0}'
@tornado_util.fn(url=r"/test_threads/?")
def get_test_threads(self, **kwargs):
return result
        # an asynchronous GET returns a Thread
global NOW_LOG_RECORD
NOW_LOG_RECORD = []
th1 = http_util.get(url)
        assert len(NOW_LOG_RECORD) == 0  # nothing logged yet, so the request must be running in a spawned thread
assert isinstance(th1, threading.Thread)
        th1.join()  # wait for the thread to finish so its log can be checked
assert len(NOW_LOG_RECORD) >= 1
record = NOW_LOG_RECORD[0]
assert record is not None
assert record.levelno == logging.INFO
assert record.method == 'GET'
log_msg = record.getMessage()
assert url in log_msg
assert result in log_msg
        # an asynchronous POST returns a Thread
NOW_LOG_RECORD = []
th2 = http_util.post(url)
        assert len(NOW_LOG_RECORD) == 0  # nothing logged yet, so the request must be running in a spawned thread
assert isinstance(th2, threading.Thread)
        th2.join()  # wait for the thread to finish so its log can be checked
assert len(NOW_LOG_RECORD) >= 1
record = NOW_LOG_RECORD[0]
assert record is not None
assert record.levelno == logging.INFO
assert record.method == 'POST'
log_msg = record.getMessage()
assert url in log_msg
assert result in log_msg
        # restore the default so other tests are unaffected
http_util.init(threads=False)
    # parameter conversion
    def test_url_encode(self):
        # dict -> URL-encoded query string
param1 = {'name' : '测试用户', 'password' : 123456}
assert http_util.url_encode(param1) == 'password=123456&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7'
assert http_util.url_encode(param1, encode='gbk') == 'password=123456&name=%B2%E2%CA%D4%D3%C3%BB%A7'
param2 = {'name' : '测试用户', 'password' : {u'哈':[1,2,'3',u'测试']}}
assert http_util.url_encode(param2) == 'password=%7B%22%5Cu54c8%22%3A+%5B1%2C+2%2C+%223%22%2C+%22%5Cu6d4b%5Cu8bd5%22%5D%7D&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7'
        # query string -> dict
assert str_util.deep_str(http_util.getRequestParams(http_util.url_encode(param1))) == str_util.deep_str(param1, all2str=True)
assert str_util.deep_str(http_util.getRequestParams('http://xx.xx.com:8080/te?password=123456&name=%E6%B5%8B%E8%AF%95%E7%94%A8%E6%88%B7')) == str_util.deep_str(param1, all2str=True)
assert str_util.deep_str(http_util.getRequestParams(http_util.url_encode(param2))) == {u'password': u'{"\\u54c8": [1, 2, "3", "\\u6d4b\\u8bd5"]}', u'name': u'测试用户'}
    # HTTP method test
def test_method(self):
url = 'http://127.0.0.1:%d/get_test_method/' % self.port
        # a handler class covering every HTTP method
class MethodHandler(tornado_util.RequestHandler):
def get(self): return self.finish('get')
def post(self): return self.finish('post')
def put(self): return self.finish('put')
def delete(self): return self.finish('delete')
def patch(self): return self.finish('patch')
def options(self): return self.finish('options')
tornado_util.add_apps(r"/get_test_method/", MethodHandler)
        # exercise each method
res = http_util.get(url)
assert isinstance(res, basestring) and res == "get"
res = http_util.post(url)
assert isinstance(res, basestring) and res == "post"
res = http_util.put(url)
assert isinstance(res, basestring) and res == "put"
res = http_util.delete(url)
assert isinstance(res, basestring) and res == "delete"
res = http_util.patch(url)
assert isinstance(res, basestring) and res == "patch"
res = http_util.options(url)
assert isinstance(res, basestring) and res == "options"
if __name__ == "__main__":
    unittest.main()  # run all the tests
'''
    # run only selected tests
suite = unittest.TestSuite()
suite.addTest(TestHttpUtil("test_send_json"))
suite.addTest(TestHttpUtil("test_delete_options"))
unittest.TextTestRunner().run(suite)
'''
| [
"[email protected]"
] | |
5bb9776224c4813a523963dc2805bc70a092fa60 | 40d5394eea0c1288fcdd57180a0141672cb198fa | /users/views.py | d8f37654f883f0bde0e2232915ec025e0a7e6ec4 | [
"MIT"
] | permissive | mornicamwende/ranker | 6c12b0297703ac559de84bb0b36396ec2738f970 | 107bcaad61bb5e726570a8250b55eb2e6245dc7a | refs/heads/master | 2023-01-07T00:18:54.192155 | 2020-10-27T17:07:21 | 2020-10-27T17:07:21 | 306,592,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Profile
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# Create your views here.
def register(request):
if request.method =='POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
# profile = UserProfile.objects.create(user=request.user)
username = form.cleaned_data.get('username')
messages.success(request, f' Your account has been created! You are now able to log in!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form':form})
@login_required
def profile(request):
if request.method =='POST':
u_form=UserUpdateForm(request.POST, instance=request.user)
p_form=ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f' Your account has been updated!')
return redirect('profile')
else:
u_form=UserUpdateForm(instance=request.user)
p_form=ProfileUpdateForm(instance=request.user.profile)
context={
'u_form':u_form,
'p_form':p_form
}
return render(request, 'users/profile.html', context)
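# For reference, a minimal sketch of the forms this module assumes. These classes live in
# users/forms.py (not in this file), and the exact fields shown here are an assumption for
# illustration, not taken from the original project:
#
#   class UserUpdateForm(forms.ModelForm):
#       email = forms.EmailField()
#       class Meta:
#           model = User
#           fields = ['username', 'email']
#
#   class ProfileUpdateForm(forms.ModelForm):
#       class Meta:
#           model = Profile
#           fields = ['image']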
| [
"[email protected]"
] | |
449e5f4d3e112507dc7540c319584658b70805eb | 560df0c3f859ae2d4c279f4669f9ab8758c486fb | /old/Euler063.py | defeccb9479085d6e4df2722c16167b7442a9de6 | [] | no_license | gronkyzog/Puzzles | 0e7cdd7fa5ab8139d63a721cac5ee30e80728c7a | cdc145857f123a98f1323c95b5744d36ce50355f | refs/heads/master | 2021-03-13T00:01:17.715403 | 2015-02-22T11:59:03 | 2015-02-22T11:59:03 | 17,100,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import math
counter = 0
for p in range(1,1000):
for n in range(1,1000):
x = p**n
z = len(str(x))
if z == n:
counter +=1
print counter,p,n,x
if z > n:
break
| [
"[email protected]"
] | |
b44c18c0337ef4ede7f2ee27dff0c56a32873a98 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5639104758808576_0/Python/hongkai/standing_ovation.py | 85c7aaf75714250e3ffa80b7c69e7aa3067301b0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | fin = open("../../Downloads/A-small-attempt0 (1).in", "r")
out = open("standing_ovation.out", "w")
cases = int(fin.readline())
for i in range(cases):
d, shy = fin.readline().split()
min = 0
curr = 0
for x in shy:
curr += int(x)
curr -= 1
if curr < min:
min = curr
out.write("Case #%d: %d\n" % (i + 1, -min))
out.close()
| [
"[email protected]"
] | |
97f78d057353db5df358e1e31bac1b98392279f5 | 646b0a41238b96748c7d879dd1bf81858651eb66 | /archive/memd.archive/gulp/Potential.py | ac129df6be3cb0a4594a14181a7914d174181b84 | [] | no_license | danse-inelastic/molDynamics | ded0298f8219064e086472299e1383d3dff2dac3 | c8e0bfd9cb65bcfc238e7993b6e7550289d2b219 | refs/heads/master | 2021-01-01T19:42:29.904390 | 2015-05-03T17:27:38 | 2015-05-03T17:27:38 | 34,993,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Brandon Keith
# California Institute of Technology
# (C) 2005 All Rights Reserved All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
#from molDynamics.gulp.forcefields.ForcefieldLoader import ForcefieldLoader
from memd.gulp.forcefields.InputFile import InputFile
class Potential(Component):
'''This class serves as an API/interface for gulp potential construction.'''
class Inventory(Component.Inventory):
import pyre.inventory as inv
dispersionInRecipSpace = inv.bool('dispersionInRecipSpace', default = False)
dispersionInRecipSpace.meta['tip'] = '''whether to calculate dispersion forces
partly in reciprocal space'''
useInitialBondingOnly = inv.bool('useInitialBondingOnly', default = False)
useInitialBondingOnly.meta['tip'] = '''instead of reassigning
bonding based on every optimization or time step, use initial geometry only to assign bonding'''
forcefield = inv.facility('forcefield', default=InputFile('gulpLibrary'))
forcefield.meta['tip'] = 'a class containing forcefield types'
#forcefield.meta['known_plugins'] = ['gulpLibrary','manualEntry']
moleculeIdentification = inv.str('moleculeIdentification', default = 'None')
moleculeIdentification.meta['tip'] = '''identify molecules based on covalent radii
and deal with intramolecular coulomb interactions'''
moleculeIdentification.validator=inv.choice(['None','identify molecules; remove intramolecular Coulomb forces',
'identify molecules; retain intramolecular Coulomb forces'])
def __init__(self, name='potential', facility='Potential'):
Component.__init__(self, name, facility)
self.i=self.inventory
# def _configure(self):
# Component._configure(self)
# #self.sample = self.i.sample
def identifyOptions( self, visitor):
return visitor.writePotentialOptions(self)
def identifyKeywords( self, visitor):
return visitor.writePotentialKeywords(self)
# version
__id__ = "$Id$"
# Generated automatically by PythonMill on Mon Apr 16 12:44:30 2007
# End of file | [
"[email protected]"
] | |
9450412ca95624708fe0ba54ba1780d0d0691d95 | 4c639c521834f4349ba2165e72c5857ddecee625 | /acoustic_X_text_X_visual/AttComb_aXtXv/gender/attention_fusion_network/archived_models/archived_model_1_(MSE_best)/metrics.py | 9b7d3a09c1cd9ee948834703374dc115f06d923e | [] | no_license | arbaazQureshi/attention_based_multimodal_fusion_for_estimating_depression | f4ea86746d9961fe4b9cf4f88f6cec604a201656 | e4c57ac51c271c36c244c260b01a22fa1897ffcb | refs/heads/master | 2020-05-19T22:48:03.665953 | 2019-05-06T19:34:31 | 2019-05-06T19:34:31 | 185,252,875 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import numpy as np
import sklearn.metrics
from load_data import load_development_data
from load_model import load_model
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
if __name__ == "__main__":
model = load_model()
model.load_weights('optimal_weights.h5')
dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_transcript, dev_Y, dev_X_gender = load_development_data()
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['mean_absolute_error'])
dev_Y_hat = model.predict([dev_COVAREP_X_FORMANT, dev_facial_X_pose, dev_gaze_X_action, dev_X_gender, dev_transcript])
dev_Y = np.array(dev_Y)
dev_Y_hat = dev_Y_hat.reshape((dev_Y.shape[0],))
RMSE = np.sqrt(sklearn.metrics.mean_squared_error(dev_Y, dev_Y_hat))
MAE = sklearn.metrics.mean_absolute_error(dev_Y, dev_Y_hat)
EVS = sklearn.metrics.explained_variance_score(dev_Y, dev_Y_hat)
print('RMSE :', RMSE)
print('MAE :', MAE)
#print(np.std(dev_Y - dev_Y_hat))
print('EVS :', EVS)
with open('regression_metrics.txt', 'w') as f:
f.write('RMSE\t:\t' + str(RMSE) + '\nMAE\t\t:\t' + str(MAE) + '\nEVS\t\t:\t' + str(EVS)) | [
"[email protected]"
] | |
a2453d90db22aca756d619b74b525d6186f4875d | 699c7f26a91106a2fc79bb15299ce0cee532a2dd | /xrayspecprocessing/multi.combine.group.py | ff57d3b4c05ec312c219fc8fc8133076e2dafd82 | [] | no_license | samconnolly/astro | 70581a4d3f2086716aace3b5db65b74aaaa5df95 | 3731be313592c13dbb8af898e9734b98d83c0cc2 | refs/heads/master | 2020-04-06T03:40:27.454279 | 2014-03-12T14:36:34 | 2014-03-12T14:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | # multi.combine.group.py
# Uses the HEADAS tool addspec and grppha to sum sets of spectra, assign their
# summed background and response files and produce a grouped spectrum
# Uses a text file of input spectra. Does so from output file from listbinmin.py
# Sam Connolly 4/3/13
import os
# ====================== PARAMATERS ============================================
# file route - directory containing spectra and spectrum list
inroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
+"/ngc1365/spectra/all/"
outroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData"\
+"/ngc1365/spectra/summed/"
# file names
fname = "speclist.txt"
outname = "13.14.25.summed"
# Grouping command (e.g. "group min 15" for min of 15 counts per bin,
# "group 25 150 4" to group channels 25-150 into groups of 4
# [Swift XRT has 1024 channels] )
groupcommand = 'group min 15'
# overwrite existing files?
overwrite = False
# ==============================================================================
# get current directory, to return to
originaldir = os.getcwd()
# change to directory of spectra
os.chdir(inroute)
#===============================================================================
# sum spectra
#===============================================================================
# creat sum command
sumcommand = "addspec " + fname + " " + outname + " qaddrmf = yes"\
+ " qsubback = yes" + " clobber = " + str(overwrite)
# add spectra
os.system(sumcommand)
#===============================================================================
# group spectra
#===============================================================================
# file names
spectrum = outname + ".pha"
back = outname + ".bak"
rmf = outname + ".rsp"
output = outname + "_grp.pha"
# overwriting or not
if overwrite == True:
over = '!'
else:
over = ''
# generate grppha command
gcommand = 'grppha ' + spectrum + ' ' + over + output + ' comm = "' + \
'chkey BACKFILE ' + back + \
' & chkey RESPFILE ' + rmf + \
' & ' + groupcommand + ' & exit"'
# execute command
os.system(gcommand)
# move files to output folder
movecommand = "mv " + spectrum + " " + outroute \
+ " & mv " + back + " " + outroute \
+ " & mv " + rmf + " " + outroute\
+ " & mv " + output + " " + outroute
os.system(movecommand)
#-------------------------------------------------------------------------------
# switch back to original directory
os.chdir(originaldir)
| [
"[email protected]"
] | |
d3249edfbd3bfe038c605e6a6c80a59a783bba05 | 4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446 | /Python基础笔记/19/代码/2.协程.py | b87a5c7b38c3ac5ebbe4f72a39d93ec54e0ed60b | [] | no_license | zhenguo96/test1 | fe21510aea7feb674e52fd7a86d4177666f841c5 | 0d8de7e73e7e635d26462a0bc53c773d999498be | refs/heads/master | 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # 协程
def sub():
print("开始")
x = yield
print("x = ",x)
y = yield x + 1
print("x = ", x, "y = ", y)
yield
x1 = sub()
next(x1)
print(x1.send(3))
x1.send(4)
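# What happens above (generator-based coroutine walkthrough): next(x1) runs sub() up to the
# first bare yield, printing "start"; x1.send(3) resumes it with x = 3, prints x, then yields
# x + 1, so print(x1.send(3)) shows 4; x1.send(4) resumes the second yield expression with
# y = 4 and the coroutine prints both values before pausing at the final yield.
# A minimal echo coroutine in the same style (illustrative sketch):
def echo():
    while True:
        received = yield
        print("echo:", received)
e = echo()
next(e)        # prime the coroutine so it is waiting at "yield"
e.send("hi")   # prints "echo: hi"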
| [
"[email protected]"
] | |
2e6c96eebb6bfd7df53fac17a2a7426d3b7e2769 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/metrics/regression.py | 044f99f1a540fd04348675877a6d73fce7eb1cd9 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 9,758 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import abstractmethod
from functools import partial
from typing import Any, Union
import torch
from monai.metrics.utils import do_metric_reduction
from monai.utils import MetricReduction
from .metric import CumulativeIterationMetric
class RegressionMetric(CumulativeIterationMetric):
"""
Base class for regression metrics.
Input `y_pred` is compared with ground truth `y`.
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
`y_preds` and `y` can be a list of channel-first Tensor (CHW[D]) or a batch-first Tensor (BCHW[D]).
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result. Defaults to ``"mean"``.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
Here `not_nans` count the number of not nans for the metric, thus its shape equals to the shape of the metric.
"""
def __init__(
self,
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__()
self.reduction = reduction
self.get_not_nans = get_not_nans
def aggregate(self): # type: ignore
data = self.get_buffer()
if not isinstance(data, torch.Tensor):
raise ValueError("the data to aggregate must be PyTorch Tensor.")
f, not_nans = do_metric_reduction(data, self.reduction)
return (f, not_nans) if self.get_not_nans else f
def _check_shape(self, y_pred: torch.Tensor, y: torch.Tensor) -> None:
if y_pred.shape != y.shape:
raise ValueError(
"y_pred and y shapes dont match, received y_pred: [{}] and y: [{}]".format(y_pred.shape, y.shape)
)
# also check if there is atleast one non-batch dimension i.e. num_dims >= 2
if len(y_pred.shape) < 2:
raise ValueError("either channel or spatial dimensions required, found only batch dimension")
@abstractmethod
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor): # type: ignore
if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
raise ValueError("y_pred and y must be PyTorch Tensor.")
self._check_shape(y_pred, y)
return self._compute_metric(y_pred, y)
class MSEMetric(RegressionMetric):
r"""Compute Mean Squared Error between two tensors using function:
.. math::
\operatorname {MSE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i} \right)^{2}.
More info: https://en.wikipedia.org/wiki/Mean_squared_error
Input `y_pred` is compared with ground truth `y`.
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
"""
def __init__(
self,
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__(reduction=reduction, get_not_nans=get_not_nans)
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
return mse_out
class MAEMetric(RegressionMetric):
r"""Compute Mean Absolute Error between two tensors using function:
.. math::
\operatorname {MAE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left|y_i-\hat{y_i}\right|.
More info: https://en.wikipedia.org/wiki/Mean_absolute_error
Input `y_pred` is compared with ground truth `y`.
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
"""
def __init__(
self,
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__(reduction=reduction, get_not_nans=get_not_nans)
self.abs_func = torch.abs
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mae_out = compute_mean_error_metrics(y_pred, y, func=self.abs_func)
return mae_out
class RMSEMetric(RegressionMetric):
r"""Compute Root Mean Squared Error between two tensors using function:
.. math::
\operatorname {RMSE}\left(Y, \hat{Y}\right) ={ \sqrt{ \frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i}\right)^2 } } \
= \sqrt {\operatorname{MSE}\left(Y, \hat{Y}\right)}.
More info: https://en.wikipedia.org/wiki/Root-mean-square_deviation
Input `y_pred` is compared with ground truth `y`.
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
"""
def __init__(
self,
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__(reduction=reduction, get_not_nans=get_not_nans)
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
rmse_out = torch.sqrt(mse_out)
return rmse_out
class PSNRMetric(RegressionMetric):
r"""Compute Peak Signal To Noise Ratio between two tensors using function:
.. math::
\operatorname{PSNR}\left(Y, \hat{Y}\right) = 20 \cdot \log_{10} \left({\mathit{MAX}}_Y\right) \
-10 \cdot \log_{10}\left(\operatorname{MSE\left(Y, \hat{Y}\right)}\right)
More info: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Help taken from:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py line 4139
Input `y_pred` is compared with ground truth `y`.
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
max_val: The dynamic range of the images/volumes (i.e., the difference between the
maximum and the minimum allowed values e.g. 255 for a uint8 image).
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
"""
def __init__(
self,
max_val: Union[int, float],
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
get_not_nans: bool = False,
) -> None:
super().__init__(reduction=reduction, get_not_nans=get_not_nans)
self.max_val = max_val
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> Any:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
psnr_val = 20 * math.log10(self.max_val) - 10 * torch.log10(mse_out)
return psnr_val
def compute_mean_error_metrics(y_pred: torch.Tensor, y: torch.Tensor, func) -> torch.Tensor:
# reducing in only channel + spatial dimensions (not batch)
# reducion of batch handled inside __call__() using do_metric_reduction() in respective calling class
flt = partial(torch.flatten, start_dim=1)
error_metric = torch.mean(flt(func(y - y_pred)), dim=-1, keepdim=True)
return error_metric
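# Usage sketch (not part of MONAI): the cumulative workflow is to call the metric once per
# batch and then aggregate. The tensor values below are made up for illustration, and the
# call/aggregate flow assumes the CumulativeIterationMetric interface defined above.
if __name__ == "__main__":
    metric = MSEMetric(reduction=MetricReduction.MEAN)
    y_pred = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # batch of 2, channel dimension of 2
    y = torch.tensor([[1.5, 2.0], [3.0, 3.0]])
    metric(y_pred=y_pred, y=y)   # buffers the per-sample squared errors for this batch
    print(metric.aggregate())    # reduces the buffered results with the chosen reduction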
| [
"[email protected]"
] | |
29d0e0ec5b2fe97cccdec9c22eb438321a537b2f | 68405fe5bec0b374867f44effda2cba3b6c1ebaa | /src/wscript | f00c671bc10cc29f13fb109cbb091ca449571257 | [] | no_license | unnonouno/oxelon | fce3dfd3d6d617d0268e34ed875e152989d60859 | 3686863b81db2dc23996cf305001e2ad56332086 | refs/heads/master | 2020-04-04T01:53:12.896018 | 2014-01-23T17:08:59 | 2014-01-23T17:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | def build(bld):
bld.recurse(['oxelon', 'cmd'])
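# Each recursed directory is expected to carry its own wscript exposing a build() of the same
# shape. A hypothetical sub-wscript might look like the sketch below (assumed waf conventions,
# not taken from the project):
#
# def build(bld):
#     bld(features='py', source=bld.path.ant_glob('*.py'))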
| [
"[email protected]"
] | ||
3260dc302f4391afe755256b44ea9ca140f33a0e | 8ad8ee4e3a4e0e8ae0ed8e92c68cf122f5ba3723 | /jk_en/jk_en/sendEmail.py | e87f6f678832966b752cbb243ab64a762fe3c534 | [] | no_license | yangyangyanga/automatic_update | 5b5065713853c4a1225142ece4ea39be1a05d011 | 53c1777cbb84e489b887f38e2745477d6b6f4604 | refs/heads/master | 2020-05-25T21:18:24.979779 | 2019-05-22T08:34:02 | 2019-05-22T08:34:02 | 187,996,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import pymysql
def sendEmail(subject='无标题',messageText='无内容'):
# accepter = '[email protected]'
accepter = "[email protected]"
sender = "[email protected]"
    # three arguments: the body text, 'plain' for the text format, 'utf-8' for the encoding
message = MIMEText(messageText, 'plain', 'utf-8')
message['From'] = sender
message['To'] = accepter
    # email subject
subject = subject
message['Subject'] = Header(subject, 'utf-8')
try:
smtp = smtplib.SMTP()
smtp.connect('smtp.163.com', '25')
smtp.login('[email protected]', 'cyh1995')
smtp.sendmail(sender,accepter, message.as_string())
print("发送成功")
smtp.quit()
except smtplib.SMTPException as e:
print(e, "发送失败")
def SendEmailMain():
conn = pymysql.connect(host='172.16.10.71', port=3306, user='python_team', passwd='shiqiyu', db='hooli_school',charset="utf8")
cursor = conn.cursor()
    # fetch the schools whose data changed
conn.ping(reconnect=True)
sql = "select old_id,url_old,university,change_context from Label_content where old_id like 'e%' and change_context like '%1%' order by university"
cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
sql2 = "select count(*),university from Label_content where change_context like '%1%' and old_id like 'e%' GROUP BY university"
cursor.execute(sql2)
conn.commit()
result2=cursor.fetchall()
# print(result)
# print(result2)
conn.close()
sendemailschool=''.join(list(map(lambda x:x[1]+'有'+str(x[0])+'条专业发送变化'+'\n',result2)))
sendemaillists=''.join(list(map(lambda x:'id为: '+x[0]+' 的专业'+x[3].replace('01','内容发生变化').replace('11','内容和标签发生变化').replace('10','标签发生变化')+' 学校: '+x[2]+' 链接为:'+x[1]+'\n',result)))
messagetext=sendemailschool+'\n'+sendemaillists
if messagetext!='\n':
sendEmail(subject='英国变化邮件',messageText=messagetext)
# SendEmailMain()
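# A safer configuration sketch (assumption, not in the original script): keep the SMTP account
# and database credentials out of the source and read them from the environment, e.g.
#
#   import os
#   smtp.login(os.environ['SMTP_USER'], os.environ['SMTP_PASSWORD'])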
| [
"[email protected]"
] | |
9a93a2420acc3eb462984f16192bf378b923dbf2 | 0f880fab72fe18a2e5c4718ba4bf78fbe800f988 | /code/CityList.py | 6dd00a6060c4d7c719719ac0c9f538ffcdc1ab89 | [] | no_license | clwater/GetCityGeo | ce208abb69130b091acaf9ac77b194035d7d96d4 | c82b922c25c07ace0245eaa20055bfe8290d7072 | refs/heads/master | 2021-01-19T17:33:09.171032 | 2017-08-24T16:23:00 | 2017-08-24T16:23:00 | 101,068,888 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,244 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import requests
import json , re
import mysql.connector
conn = mysql.connector.connect(user='root', password='root', database='Utils')
def getCityGeo(cityname):
url = 'http://ditu.amap.com/service/poiInfo?query_type=TQUERY&keywords=%s' % (cityname)
html = requests.get(url).text
print html
if len(html) < len('{"status":"2","data":"Return Failure!"12312323}') :
return -1
data = json.loads(html)
cityList = []
try:
searchList = data['data']['locres']['poi_list']
# searchList = data['data']['poi_list']
# city = searchList[0]
# _city = {'level': '', 'child_station_count': city['child_station_count'],
# 'adcode': city['adcode'], 'coords': '', 'address': city['address'],
# 'ename': '', 'name': city['name'], 'longitude': city['longitude'],
# 'latitude': city['latitude']}
# return _city
for city in searchList:
_city = { 'level' : city['level'] , 'child_station_count' : city['child_station_count'],
'adcode': city['adcode'] , 'coords' : city['coords'] , 'address' : city['address'],
'ename' : city['ename'], 'name' : city['name'] , 'longitude' : city['longitude'],
'latitude': city['latitude']}
return _city
except Exception:
return cityList
def saveInfo(cityInfo , city):
if cityInfo < 3:
print city + 'not include'
return
print city
try:
print cityInfo['ename']
cursor = conn.cursor()
tem = cityInfo['ename']
tem = str(tem).replace('\'' , '`')
_sql = 'insert into CityGeo(ename , name , level , adcode ,child_station_count,coords , address , longitude ,latitude ) values (\'%s\',\'%s\',\'%s\',\'%s\',%s, \'%s\' ,\'%s\' ,\'%s\', \'%s\')' % (
tem, city, cityInfo['level'], cityInfo['adcode'], cityInfo['child_station_count'],
# cityInfo['coords'] ,
"",
cityInfo['address'] ,cityInfo['longitude'] ,cityInfo['latitude'])
print(_sql)
cursor.execute(_sql)
conn.commit()
except Exception:
with open('errorcity' ,'a') as f:
# print city
f.write(city + '\n')
print (city + 'error')
def getCityListDB():
cursor = conn.cursor()
_sql = 'SELECT `ChinaCity`.`cityName`,`ChinaCity`.`regionName` FROM `ChinaCity` WHERE `ChinaCity`.`cityName` != \'\' and id > 248'
cursor.execute(_sql)
cityList = cursor.fetchall()
for city in cityList:
if len(city) > 1:
if '盟' in city[0]:
temp = city[0] + city[1]
else:
temp = city[0] + u'市' + city[1]
else:
temp = city[0] + u'市'
print temp
saveInfo( getCityGeo(temp) , temp)
def getCityListText():
with open('citylist' , 'r') as f:
cityList = f.readlines()
for city in cityList:
city = city.strip()
# city = city + '县'
saveInfo(getCityGeo(city), city)
getCityListText()
# getCityListDB()
# getCityGeo('北京') | [
"[email protected]"
] | |
b4b07278d2bdd76b4fcc168e8ca4a5e2f2b61686 | 4a027b32b1e2dfebd6d65c9e7afce1f2e93f16bc | /webblog/blog/admin.py | c103bbbe9137099f88f5d13d6b08262854240b18 | [] | no_license | Jethet/DjangoProject-WebBlog | 92aa2959349129b2ef192163ab5637dbd4099224 | f64a79d889abe6a2d3caa9aa5350178d97b5c5eb | refs/heads/master | 2020-05-29T23:32:53.154542 | 2020-02-11T21:59:24 | 2020-02-11T21:59:24 | 189,438,086 | 1 | 0 | null | 2020-02-11T21:59:25 | 2019-05-30T15:28:38 | Python | UTF-8 | Python | false | false | 197 | py | from django.contrib import admin
# First import the model that you want to show up on the admin page:
from .models import Post
# Register your models on the admin page:
admin.site.register(Post)
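# To customize how posts appear in the admin, register with a ModelAdmin subclass instead of
# the plain call above (sketch only; the field names are assumptions for a typical blog Post):
#
#   @admin.register(Post)
#   class PostAdmin(admin.ModelAdmin):
#       list_display = ('title', 'author', 'date_posted')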
| [
"[email protected]"
] | |
ce555ee518fcfbdb43e59334bdddd885f194b341 | 1a24def8879972f21d846ffb3813632070e1cf12 | /Chapter08/0813exception-while-true.py | fa0b51fa722222322031c355be5fd5b499f32cbf | [] | no_license | mushahiroyuki/beginning-python | 03bb78c8d3f678ce39662a44046a308c99f29916 | 4d761d165203dbbe3604173c404f70a3eb791fd8 | refs/heads/master | 2023-08-16T12:44:01.336731 | 2023-07-26T03:41:22 | 2023-07-26T03:41:22 | 238,684,870 | 5 | 4 | null | 2023-09-06T18:34:01 | 2020-02-06T12:33:26 | Python | UTF-8 | Python | false | false | 397 | py | #ファイル名 Chapter08/0813exception-while-true.py
while True:
try:
        x = int(input('Enter the first number: '))
        y = int(input('Enter the second number: '))
        value = x / y
        print(f'{x}/{y} is {value}.')
    except:
        print('That input is not valid. Please enter the numbers again.')
else:
break
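# A stricter variant (sketch, not from the book text): catch only the errors this loop can
# actually raise, instead of a bare except that would also swallow KeyboardInterrupt.
#
# while True:
#     try:
#         x = int(input('Enter the first number: '))
#         y = int(input('Enter the second number: '))
#         print(f'{x}/{y} is {x / y}.')
#     except (ValueError, ZeroDivisionError):
#         print('Invalid input. Please try again.')
#     else:
#         break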
| [
"[email protected]"
] | |
5b3cbdb9ee3124e0fee05d82c702f0c9e56923ec | fc77fc08e983385521f7073e160cf05b8484dc9d | /Music App/mapp/db_setup.py | 7d68ace0c79aa6840b2127f24640c4be99f1da1e | [] | no_license | Icode4passion/Apps | e561a179147ab0f9bd074998f2b3e3a9bfedc539 | 51e5f2c9026a7f6a6efef33f4f54c9d7573a3070 | refs/heads/master | 2020-05-04T15:22:59.139023 | 2019-04-03T07:57:58 | 2019-04-03T07:57:58 | 179,238,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///mymusic.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine,))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import models
Base.metadata.create_all(bind=engine) | [
"[email protected]"
] | |
6ef1afb8fd47c9869fbc831c11b4d24aacbf704c | 9062c1b2b1715d4b5b34062dd52b6007fb2ca537 | /tensorflow/python/ops/collective_ops_gpu_test.py | d12d6240cf97e1d80b16ed5dd9f5a36901f73d69 | [
"Apache-2.0"
] | permissive | robotastic/tensorflow | 54c4c7cbcde5e9d374897d5038a96eb5feff16aa | b88f9f60de706dbe78acf9189b9fa04bdc7a6836 | refs/heads/master | 2020-08-30T06:13:07.176029 | 2019-11-05T01:49:44 | 2019-11-05T01:49:44 | 218,283,699 | 2 | 1 | Apache-2.0 | 2019-10-29T12:38:51 | 2019-10-29T12:38:50 | null | UTF-8 | Python | false | false | 12,428 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Collective Operations that require GPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class CollectiveOpGPUTest(test.TestCase):
def _configure(self, group_size, set_config_proto_nccl=True):
"""Set environment variables and return `ConfigProto` for NCCL execution."""
# Configure virtual GPU devices
virtual_devices = [config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=([1 << 10] * group_size))] # 1 GB per virtual GPU
gpu_options = config_pb2.GPUOptions(
visible_device_list='0',
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
# Configure NCCL
os.environ['NCCL_DEBUG'] = 'INFO'
os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
experimental = config_pb2.ConfigProto.Experimental()
if set_config_proto_nccl:
experimental.collective_nccl = True
return config_pb2.ConfigProto(gpu_options=gpu_options,
experimental=experimental)
@test_util.run_deprecated_v1
def testBasicNcclAllReduce(self):
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_size = len(inputs)
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for i in range(group_size):
with ops.device(devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_reduce(
t, group_size, group_key, instance_key, 'Add', 'Div'))
results = sess.run(collectives)
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testInt32Error(self):
inputs = [[0, 1], [2, 3]]
group_size = len(inputs)
group_key = 1
instance_key = 50
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for i in range(group_size):
with ops.device(devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.int32)
collectives.append(collective_ops.all_reduce(
t, group_size, group_key, instance_key, 'Add', 'Div'))
with self.assertRaisesRegexp(
errors.InternalError,
'does not support datatype DT_INT32 on DEVICE_GPU'):
sess.run(collectives)
@test_util.run_deprecated_v1
def testFp16Reduce(self):
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_size = len(inputs)
group_key = 1
instance_key = 100
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for i in range(group_size):
with ops.device(devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.float16)
collectives.append(collective_ops.all_reduce(
t, group_size, group_key, instance_key, 'Add', 'Div'))
results = sess.run(collectives)
for result in results:
logging.info('i {} result {} expected {}'.format(i, results[i], expected))
self.assertAllClose(result, expected, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testNcclHintAllReduce(self):
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_size = len(inputs)
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(
config=self._configure(group_size,
set_config_proto_nccl=False)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for i in range(group_size):
with ops.device(devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_reduce(
t, group_size, group_key, instance_key, 'Add', 'Div',
communication_hint='nccl'))
results = sess.run(collectives)
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testBasicNcclBroadcast(self):
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_size = 2
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
with ops.device(devices[0]):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_send(
t, t.shape, t.dtype, group_size, group_key, instance_key))
with ops.device(devices[1]):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_recv(
t.shape, t.dtype, group_size, group_key, instance_key))
results = sess.run(collectives)
for result in results:
self.assertAllClose(result, tensor_value, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testNcclBroadcastDoubleRecv(self):
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_size = 2
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for device in devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_recv(
t.shape, t.dtype, group_size, group_key, instance_key))
with self.assertRaisesRegexp(errors.InternalError, 'found no source'):
sess.run(collectives)
@test_util.run_deprecated_v1
def testNcclBroadcastDoubleSend(self):
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_size = 2
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for device in devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_send(
t, t.shape, t.dtype, group_size, group_key, instance_key))
with self.assertRaisesRegexp(errors.InternalError, 'already has source'):
sess.run(collectives)
@test_util.run_deprecated_v1
def testBasicNcclAllGather(self):
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1,
0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]
group_size = len(inputs)
group_key = 1
instance_key = 1
devices = ['/GPU:{}'.format(i) for i in range(group_size)]
with self.session(config=self._configure(group_size)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
collectives = []
for i in range(group_size):
with ops.device(devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_gather(t, group_size,
group_key, instance_key))
results = sess.run(collectives)
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testCollectiveDeviceMismatch(self):
group_size = 2
group_key = 10
instance_key = 20
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
with self.session(
config=self._configure(group_size,
set_config_proto_nccl=False)) as sess:
if not test_util.is_gpu_available(cuda_only=True):
self.skipTest('No GPU available')
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(in0, group_size, group_key,
instance_key, 'Add', 'Id')
with ops.device('/GPU:0'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(in1, group_size, group_key,
instance_key, 'Add', 'Id')
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 100
with self.assertRaisesRegexp(errors.InternalError,
'but that group has type'):
sess.run([c0, c1], options=run_options)
@test_util.run_v2_only
def testCollectiveReduceMinMax(self):
gpus = config.list_physical_devices('GPU')
if len(gpus) != 1:
self.skipTest('Expected 1 GPU but found {} GPUs'.format(len(gpus)))
config.set_virtual_device_configuration(gpus[0], [
context.VirtualDeviceConfiguration(1024),
context.VirtualDeviceConfiguration(1024)
])
context.ensure_initialized()
@def_function.function
def run_all_reduce(group_key, instance_key, merge_op):
group_size = 2
t0 = [1., 20., 3., 40., 5.]
t1 = [10., 2., 30., 4., 50.]
os.environ['NCCL_DEBUG'] = 'INFO'
os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
with ops.device('/GPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(
in0, group_size, group_key, instance_key, merge_op, final_op='Id',
communication_hint='nccl')
with ops.device('/GPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(
in1, group_size, group_key, instance_key, merge_op, final_op='Id',
communication_hint='nccl')
return c0, c1
for combination in [('Max', [10., 20., 30., 40., 50.]),
('Min', [1., 2., 3., 4., 5.])]:
merge_op = combination[0]
results = run_all_reduce(group_key=10, instance_key=20, merge_op=merge_op)
expected = combination[1]
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
test.main()
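# Note on the collective keys used throughout these tests (informal summary): group_key
# identifies which set of devices forms the collective group, instance_key identifies one
# particular collective operation within that group, and every participant must pass the same
# group_size/group_key/instance_key for a reduction to match up. A minimal two-device sketch
# outside the test harness, mirroring the calls above:
#
#   with ops.device('/GPU:0'):
#       c0 = collective_ops.all_reduce(in0, 2, group_key=1, instance_key=1,
#                                      merge_op='Add', final_op='Div')
#   with ops.device('/GPU:1'):
#       c1 = collective_ops.all_reduce(in1, 2, group_key=1, instance_key=1,
#                                      merge_op='Add', final_op='Div')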
| [
"[email protected]"
] | |
192f1edf5a7c689278a89613efd7f7460b9516b8 | 1f6a85330596eb86a55e631ce5a0a643e200e977 | /muddery/server/typeclasses/script_room_interval.py | 2048e8b16d3f7555894ca832a36db1eb0acbe74d | [
"BSD-3-Clause"
] | permissive | kwer8080/muddery | ba41765c6245d33978b431ef490f10873ca8615c | 8b712eeb90cfee2d602aad4505a4929528d44afd | refs/heads/master | 2022-12-02T14:27:22.363386 | 2020-08-16T03:51:12 | 2020-08-16T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,549 | py | """
Scripts
Scripts are powerful jacks-of-all-trades. They have no in-game
existence and can be used to represent persistent game systems in some
circumstances. Scripts can also have a time component that allows them
to "fire" regularly or a limited number of times.
There is generally no "tree" of Scripts inheriting from each other.
Rather, each script tends to inherit from the base Script class and
just overloads its hooks to have it perform its function.
"""
import time
from evennia.scripts.scripts import DefaultScript
from muddery.server.mappings.event_action_set import EVENT_ACTION_SET
class ScriptRoomInterval(DefaultScript):
"""
This script triggers an event in a room at intervals.
"""
def at_script_creation(self):
# Set default data.
if not self.attributes.has("room"):
self.db.room = None
if not self.attributes.has("event_key"):
self.db.event_key = ""
if not self.attributes.has("action"):
self.db.action = ""
if not self.attributes.has("begin_message"):
self.db.begin_message = ""
if not self.attributes.has("end_message"):
self.db.end_message = ""
if not self.attributes.has("offline"):
self.db.offline = False
if not self.attributes.has("last_trigger_time"):
self.db.last_trigger_time = 0
def set_action(self, room, event_key, action, offline, begin_message, end_message):
"""
Set action data.
Args:
event: (string) event's key.
action: (string) action's key.
"""
self.db.room = room
self.db.event_key = event_key
self.db.action = action
self.db.begin_message = begin_message
self.db.end_message = end_message
self.db.offline = offline
self.db.last_trigger_time = 0
def at_start(self):
"""
Called every time the script is started.
"""
# The script will be unpaused when the server restarts. So pause it if the character is no online now.
if self.db.begin_message:
if self.obj:
self.obj.msg(self.db.begin_message)
# Offline intervals.
if self.db.offline:
last_time = self.db.last_trigger_time
if last_time:
current_time = time.time()
times = int((current_time - last_time) / self.interval)
if times > 0:
self.db.last_trigger_time = current_time
action = EVENT_ACTION_SET.get(self.db.action)
if action and hasattr(action, "offline_func"):
action.offline_func(self.db.event_key, self.obj, self.db.room, times)
def at_repeat(self):
"""
Trigger events.
"""
if not self.obj.location:
# The character's location is empty (maybe just login).
return
if self.obj.location != self.db.room:
# The character has left the room.
self.obj.scripts.delete(self)
return
# Do actions.
if self.db.offline:
self.db.last_trigger_time = time.time()
func = EVENT_ACTION_SET.func(self.db.action)
if func:
func(self.db.event_key, self.obj, self.db.room)
def at_stop(self):
"""
Called every time the script is stopped.
"""
if self.db.end_message:
if self.obj:
self.obj.msg(self.db.end_message)
| [
"[email protected]"
] | |
cb72bed745489fd0e982e080dff5966200d993e3 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py | e7bb5052b4b5d9571da6b4b40941ddd27288a488 | [
"Apache-2.0",
"EPL-1.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 1,405 | py | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name(u"sys"), Name(u"intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, u'sys', node)
return new
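# What the fixer produces (informal example): a call such as
#     x = intern(s)
# is rewritten to
#     x = sys.intern(s)
# with "import sys" added at the top of the module by touch_import() when it is missing.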
| [
"[email protected]"
] | |
deac4dc9c416a85a1b186d22b702e4fd17d953c0 | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/sklearn/decomposition/_pca.py | 01906b2b0cc5586c35abeabb5c496f37ee7c9cf0 | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,683 | py | """ Principal Component Analysis.
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
# Giorgio Patrini <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numbers
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from ._base import _BasePCA
from ..utils import check_random_state
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import fast_logdet, randomized_svd, svd_flip
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
def _assess_dimension(spectrum, rank, n_samples):
"""Compute the log-likelihood of a rank ``rank`` dataset.
    The dataset is assumed to be embedded in Gaussian noise of shape (n,
    dimf) having spectrum ``spectrum``. This implements the method of
T. P. Minka.
Parameters
----------
spectrum : ndarray of shape (n_features,)
Data spectrum.
rank : int
Tested rank value. It should be strictly lower than n_features,
otherwise the method isn't specified (division by zero in equation
(31) from the paper).
n_samples : int
Number of samples.
Returns
-------
ll : float
The log-likelihood.
References
----------
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
<https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
"""
n_features = spectrum.shape[0]
if not 1 <= rank < n_features:
raise ValueError("the tested rank should be in [1, n_features - 1]")
eps = 1e-15
if spectrum[rank - 1] < eps:
# When the tested rank is associated with a small eigenvalue, there's
# no point in computing the log-likelihood: it's going to be very
# small and won't be the max anyway. Also, it can lead to numerical
# issues below when computing pa, in particular in log((spectrum[i] -
# spectrum[j]) because this will take the log of something very small.
return -np.inf
pu = -rank * log(2.)
for i in range(1, rank + 1):
pu += (gammaln((n_features - i + 1) / 2.) -
log(np.pi) * (n_features - i + 1) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
v = max(eps, np.sum(spectrum[rank:]) / (n_features - rank))
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension(spectrum, n_samples):
"""Infers the dimension of a dataset with a given spectrum.
The returned value will be in [1, n_features - 1].
"""
ll = np.empty_like(spectrum)
ll[0] = -np.inf # we don't want to return n_components = 0
for rank in range(1, spectrum.shape[0]):
ll[rank] = _assess_dimension(spectrum, rank, n_samples)
return ll.argmax()
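# Informal illustration (not part of scikit-learn): for a spectrum such as
# [10., 8., 1e-3, 1e-3, 1e-3] with n_samples=100, _assess_dimension is evaluated for
# rank = 1..4 and the argmax of those log-likelihoods is returned, which for this spectrum
# would be expected to be 2, the rank beyond which the eigenvalues look like isotropic noise.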
class PCA(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
It can also use the scipy.sparse.linalg ARPACK implementation of the
truncated SVD.
Notice that this class does not support sparse input. See
:class:`TruncatedSVD` for an alternative with sparse data.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float or 'mle', default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, default=False
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : {'auto', 'full', 'arpack', 'randomized'}, default='auto'
If auto :
The solver is selected by a default policy based on `X.shape` and
`n_components`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
If full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
If arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < min(X.shape)
If randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float, default=0.0
Tolerance for singular values computed by svd_solver == 'arpack'.
Must be of range [0.0, infinity).
.. versionadded:: 0.18.0
iterated_power : int or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'.
Must be of range [0, infinity).
.. versionadded:: 0.18.0
random_state : int, RandomState instance or None, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
The amount of variance explained by each of the selected components.
The variance estimation uses `n_samples - 1` degrees of freedom.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_features_ : int
Number of features in the training data.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method from:
`Minka, T. P.. "Automatic choice of dimensionality for PCA".
In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
Implements the probabilistic PCA model from:
`Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
<http://www.miketipping.com/papers/met-mppca.pdf>`_
via the score and score_samples methods.
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.
<https://doi.org/10.1137/090771806>`_
and also
`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68
<https://doi.org/10.1016/j.acha.2010.02.003>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.0075...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924... 0.00755...]
>>> print(pca.singular_values_)
[6.30061... 0.54980...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244...]
>>> print(pca.singular_values_)
[6.30061...]
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, copy=True, whiten=False,
svd_solver='auto', tol=0.0, iterated_power='auto',
random_state=None):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
U, S, Vt = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * Vt * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
# Raise an error for sparse input.
# This is more informative than the generic one raised by check_array.
if issparse(X):
raise TypeError('PCA does not support sparse input. See '
'TruncatedSVD for a possible alternative.')
X = self._validate_data(X, dtype=[np.float64, np.float32],
ensure_2d=True, copy=self.copy)
# Handle n_components==None
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
# Handle svd_solver
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
# Small problem or n_components == 'mle', just call full PCA
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
self._fit_svd_solver = 'randomized'
# This is also the case of n_components in (0,1)
else:
self._fit_svd_solver = 'full'
# Call different fits for either full or truncated SVD
if self._fit_svd_solver == 'full':
return self._fit_full(X, n_components)
elif self._fit_svd_solver in ['arpack', 'randomized']:
return self._fit_truncated(X, n_components, self._fit_svd_solver)
else:
raise ValueError("Unrecognized svd_solver='{0}'"
"".format(self._fit_svd_solver))
def _fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X."""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, numbers.Integral):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, Vt = linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U, Vt)
components_ = Vt
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension(explained_variance_, n_samples)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
# side='right' ensures that number of features selected
# their variance is always greater than n_components float
# passed. More discussion in issue: #15669
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components,
side='right') + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, Vt
def _fit_truncated(self, X, n_components, svd_solver):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X.
"""
n_samples, n_features = X.shape
if isinstance(n_components, str):
raise ValueError("n_components=%r cannot be a string "
"with svd_solver='%s'"
% (n_components, svd_solver))
elif not 1 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 1 and "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
elif not isinstance(n_components, numbers.Integral):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, was of type=%r"
% (n_components, type(n_components)))
elif svd_solver == 'arpack' and n_components == min(n_samples,
n_features):
raise ValueError("n_components=%r must be strictly less than "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
random_state = check_random_state(self.random_state)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if svd_solver == 'arpack':
v0 = _init_arpack_v0(min(X.shape), random_state)
U, S, Vt = svds(X, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U[:, ::-1], Vt[::-1])
elif svd_solver == 'randomized':
# sign flipping is done inside
U, S, Vt = randomized_svd(X, n_components=n_components,
n_iter=self.iterated_power,
flip_sign=True,
random_state=random_state)
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = Vt
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = np.var(X, ddof=1, axis=0)
self.explained_variance_ratio_ = \
self.explained_variance_ / total_var.sum()
self.singular_values_ = S.copy() # Store the singular values.
if self.n_components_ < min(n_features, n_samples):
self.noise_variance_ = (total_var.sum() -
self.explained_variance_.sum())
self.noise_variance_ /= min(n_features, n_samples) - n_components
else:
self.noise_variance_ = 0.
return U, S, Vt
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Returns
-------
ll : float
Average log-likelihood of the samples under the current model.
"""
return np.mean(self.score_samples(X))
def _more_tags(self):
return {'preserves_dtype': [np.float64, np.float32]}
| [
"[email protected]"
] | |
6f64803b680f530118f50d12f840345200374827 | 001ca88155c90447ae3564bb51c503500d4fdcdd | /apps/christmas/migrations/0001_initial.py | 2f33cc812b526ca9d65d097c3b32136603943187 | [] | no_license | andre23arruda/cartas-de-natal | b7d5766b2806814dc7aaed1315b0d51d4aa53582 | b704b28137256e9c52a7d716e462334928c9d2bd | refs/heads/main | 2023-04-28T04:33:28.139797 | 2021-05-14T04:56:05 | 2021-05-14T04:56:05 | 367,122,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | # Generated by Django 3.1.4 on 2021-05-13 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Letter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('created_at', models.DateField(auto_now=True)),
('updated_at', models.DateField(auto_now_add=True)),
('title', models.CharField(max_length=100)),
('message', models.TextField()),
],
),
]
| [
"[email protected]"
] | |
bd52bb1039bba3f6e62021f5e1b5035e90a422c1 | 7bc0075367290ff06565991e19033b13f0604f96 | /Mundo 2/aula13/desafio047.py | 531922ef5ea3d9c949fd2497d363dc2cbe2bf5db | [] | no_license | iamtheluiz/curso_em_video_python | 298acd90e36473fbf797ba7bf85d729d0ca28407 | aa4247b7d206771f9c9b08ad5d8585c3813ddaff | refs/heads/master | 2020-04-12T16:17:51.672662 | 2019-01-22T00:10:41 | 2019-01-22T00:10:41 | 162,608,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # imports
print("""
|******************|
| Desafio047 |
|******************|
""")
print("Números Pares de 1 até 50")
result = ""
for i in range(1, 51, 2):
if i == 1:
result += str(i + 1)
else:
result += ", "+str(i + 1)
print(result)
| [
"[email protected]"
] | |
5291f471b2d5e46a05cd5e2ec8fd990b3acf7711 | 33114a0f96406008da69adac757b271229fb81bf | /__init__.py | 5488e89bfa7b2ba3c29c0da45814f981069162df | [] | no_license | ROB-Seismology/simpledb | 9f1eaf3ad4cd2367a03b5e79931a18959e9a370d | 4993dd472d1cb37023751ffca80e4dde7a6ad7fc | refs/heads/master | 2021-06-24T12:13:34.309067 | 2020-10-20T10:30:34 | 2020-10-20T10:30:34 | 90,835,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | """
Module providing basic read-write access to SQL databases.
Currently supports MySQL, PostgreSQL and SQLite/SpatiaLite databases.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals
## Reloading mechanism
try:
reloading
except NameError:
## Module is imported for the first time
reloading = False
else:
## Module is reloaded
reloading = True
try:
## Python 3
from importlib import reload
except ImportError:
## Python 2
pass
## Import submodules
## base
if not reloading:
from . import base
else:
reload(base)
from .base import (SQLDB, SQLRecord, build_sql_query)
## sqlite, depends on base
if not reloading:
from . import sqlite
else:
reload(sqlite)
from .sqlite import (SQLiteDB, query_sqlite_db, query_sqlite_db_generic)
__all__ = base.__all__ + sqlite.__all__
## mysql, depends on base
if not reloading:
from . import mysql
else:
reload(mysql)
if mysql.HAS_MYSQL:
from .mysql import (MySQLDB, query_mysql_db, query_mysql_db_generic)
__all__ += mysql.__all__
## postgres, depends on base
if not reloading:
from . import postgres
else:
reload(postgres)
if postgres.HAS_POSTGRES:
from .postgres import (PgSQLDB, query_pgsql_db, query_pgsql_db_generic)
__all__ += postgres.__all__
| [
"[email protected]"
] | |
7f788150cb65d8a9dd0618a8bae8840a7efe7aac | b788f1f8bfa8949177e28dd4be436572162c418b | /regular expression.py | 5622c6603c31b7c63a0a789938965fc66832786f | [] | no_license | KaziMotiour/pyhton-OOP | bc9506f3afe7686a7451de9a5448c759f3cdcbac | 8e85cbe31809a11293fb90d6e39b2d0293cff9b5 | refs/heads/master | 2022-02-18T09:40:05.274955 | 2019-09-02T17:46:53 | 2019-09-02T17:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | import re
def multi_find(text_patterns,phrase):
    # Print every match of each pattern found in the phrase
    for pat in text_patterns:
print("Search for pattern {}".format(pat))
print(re.findall(pat,phrase))
print('\n')
patterns = ['Team1', 'Team2']
text = "This is a starting! with Team1, 1234567, not to others arr arrr"
# for pattern in patterns:
# print("I'm searching for: "+pattern)
# if re.search(pattern,text):
# #if pattern in text:
# print("Match")
# else:
# print("Not Match")
# match = re.search('Team1',text)
# print(match.start())
# textSplit = re.split('with', text)
# print(textSplit)
# print(re.findall('a', text))
# test_patterns = ['ar*']
# test_patterns = ['ar+']
# test_patterns = ['ar{2}']
# test_patterns = ['ar{1,2}']
# test_patterns = ['[^!>?]+']
# test_patterns = ['[a-z]+'] # match runs of lowercase letters in the text
# test_patterns = ['[A-Z]+'] # match runs of uppercase letters in the text
# test_patterns = [r'\d'] # match each single digit in the text
# test_patterns = [r'\d+'] # match runs of digits in the text
# test_patterns = [r'\D+'] # match everything that is not a digit
test_patterns = [r'\w+'] # match runs of alphanumeric (word) characters in the text
multi_find(test_patterns,text) | [
"[email protected]"
] | |
321383aac6ddb384a5de4743a8d8fba4a11a44cc | a6d36a861c156e9dd9c3f4733978f194bcc62c2c | /api/serializers.py | b284259aa9cd0eee350124e29334949953db0bd5 | [] | no_license | wjaccck/upfile | 091f3ba132748cef348ff8a9973eba009e5423fa | 2721cc29ca394ddcf9f415e4fba7e2b422e87701 | refs/heads/master | 2021-01-01T04:30:18.024584 | 2016-05-26T02:25:51 | 2016-05-26T02:25:51 | 57,368,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | from rest_framework import serializers
from api.models import Up_file,Status,Azure_key,Dirs,Recode_dirs
class Up_fileSerializer(serializers.ModelSerializer):
status = serializers.SlugRelatedField(queryset=Status.objects.all(), slug_field='alias')
base_dir = serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name')
class Meta:
model = Up_file
fields = ('url', 'id', 'base_dir','blob_name', 'blob_url','file_name', 'file_md5', 'file_location',
'department','status', 'modified_date', 'created_date')
class StatusSerializer(serializers.ModelSerializer):
class Meta:
model = Status
class Azure_keySerializer(serializers.ModelSerializer):
class Meta:
model = Azure_key
class DirsSerializer(serializers.ModelSerializer):
class Meta:
model = Dirs
class Recode_dirsSerializer(serializers.ModelSerializer):
base_dir=serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name')
sub_dir = serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name',many=True)
sub_files=serializers.SerializerMethodField()
class Meta:
model = Recode_dirs
fields = ('url', 'id', 'base_dir', 'sub_dir', 'sub_files')
def get_sub_files(self,obj):
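        # Build one entry per file under this base dir; files in 'upload' status
        # get a download URL built from the stored Azure signature.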
base_dir=obj.base_dir
result=[]
for m in Up_file.objects.filter(base_dir=base_dir):
status = m.status.alias
if status == 'upload':
sig = Azure_key.objects.get(name='azure').sig
url = m.blob_url + '?' + sig
else:
url = ''
data = {}
data['status'] = status
data['file'] = m.file_name
data['url'] = url
data['created_date']=m.created_date
data['department']=m.department
result.append(data)
total={"total":len(result)}
result.append(total)
return result
| [
"[email protected]"
] | |
6b307266c03ec45f6004645eac1d4985b1bfbb4c | d5a5ff1ed1f508c47e9506a552bf44844bcdc071 | /payroll/apps.py | 8313bd0aaa30a056f07efb95e1823ad6458d08af | [] | no_license | sintaxyzcorp/prometeus | 5c9dc20e3c2f33ea6b257b850ff9505621302c47 | 2508603b6692023e0a9e40cb6cd1f08465a33f1c | refs/heads/master | 2021-09-01T09:31:36.868784 | 2017-12-26T07:58:27 | 2017-12-26T07:58:27 | 113,787,842 | 0 | 1 | null | 2017-12-18T08:25:31 | 2017-12-10T22:16:28 | JavaScript | UTF-8 | Python | false | false | 182 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PayrollConfig(AppConfig):
name = 'payroll'
verbose_name = 'Nomina'
| [
"[email protected]"
] | |
da837fb82085ba56a201b6894220c72ba25ea444 | 38182d45f0b1f6228aeec03a876ee8213404d171 | /questionnaire/admin.py | be8327b50af90b1628c99da556843bb64cf84a85 | [] | no_license | alexzinoviev/MobileDoc | 1283ec5cd52d27510e54f22522b9e1a01b65d8f8 | 66c22f1b8fe96ad5630c3d33bcc26e5d815f48db | refs/heads/master | 2020-06-24T05:29:41.366198 | 2017-08-03T16:37:10 | 2017-08-03T16:37:10 | 96,920,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django.contrib import admin
from .models import Questionnaire
# Register your models here.
@admin.register(Questionnaire)
class QuestionAdmin(admin.ModelAdmin):
list_display = ('question', 'category')
#admin.site.register(Questionnaire, QuestionAdmin)
# @admin.register(Product)
# class ProductAdmin(admin.ModelAdmin):
# #pass
# prepopulated_fields = {'slug': ('name',)}
# list_display = ('name','desc', 'cost', 'active') | [
"[email protected]"
] | |
51f34c3e0287e316f0918f7bae364df3289de792 | 966ea314bcd64f40bfaea457f914fcedbe26426a | /March-week3/testconversion.py | be419779776866290ed22ba1214ccc83499f7eda | [] | no_license | vandanasen/Python-Projects | 30caa85cf87ba712e1307b0441fed2d7fa9298a0 | 9b24a9f6af0374bb0d6a3a15c05099f49edfd581 | refs/heads/master | 2020-03-26T00:26:06.067905 | 2019-03-11T22:58:25 | 2019-03-11T22:58:25 | 144,320,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | a_list=[1,1,2,3,3]
a = tuple(a_list)
print(a)
b = list(a)
print(len(b))
c = set(b)
print(len(c))
d=list(c)
print(len(d))
e=list(range(1, 11, 1))
print(e)
d = dict([(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)])  # use a name that does not shadow the built-in dict
print(d)
t = tuple(d.items())
print(t)
v = tuple(d.keys())
print(v)
k = tuple(d.values())
print(k)
s = "antidisestablishmentarianism"
print(s)
s = sorted(s)
print(s)
s2="".join(s)
print(s2)
w = "the quick brown fox jumped over the lazy dog"
w = w.split()
print(w) | [
"[email protected]"
] | |
7f9cf2b44780a6c73735f0b55eb8a5f232bd2098 | 88e2c87d087e30dedda11cad8a2665e89f6ac32c | /tests/contrib/operators/test_opsgenie_alert_operator.py | 1b4467bc5a523be4b00ce8c701d2f578da10ece5 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | bigo-sg/airflow | 690805b782d3490c5d01047203ee4766f9695cf0 | e2933fc90d8fd9aeb61402f7a237778553762a17 | refs/heads/master | 2020-05-30T19:25:36.289802 | 2019-07-15T10:14:34 | 2019-07-15T10:14:34 | 189,924,188 | 2 | 1 | Apache-2.0 | 2019-10-18T06:30:14 | 2019-06-03T02:50:51 | Python | UTF-8 | Python | false | false | 4,788 | py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.opsgenie_alert_operator import OpsgenieAlertOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestOpsgenieAlertOperator(unittest.TestCase):
_config = {
'message': 'An example alert message',
'alias': 'Life is too short for no alias',
'description': 'Every alert needs a description',
'responders': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'NOC', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': '[email protected]', 'type': 'user'},
{'id': 'aee8a0de-c80f-4515-a232-501c0bc9d715', 'type': 'escalation'},
{'name': 'Nightwatch Escalation', 'type': 'escalation'},
{'id': '80564037-1984-4f38-b98e-8a1f662df552', 'type': 'schedule'},
{'name': 'First Responders Schedule', 'type': 'schedule'}
],
'visibleTo': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'rocket_team', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': '[email protected]', 'type': 'user'}
],
'actions': ['Restart', 'AnExampleAction'],
'tags': ['OverwriteQuietHours', 'Critical'],
'details': {'key1': 'value1', 'key2': 'value2'},
'entity': 'An example entity',
'source': 'Airflow',
'priority': 'P1',
'user': 'Jesse',
'note': 'Write this down'
}
expected_payload_dict = {
'message': _config['message'],
'alias': _config['alias'],
'description': _config['description'],
'responders': _config['responders'],
'visibleTo': _config['visibleTo'],
'actions': _config['actions'],
'tags': _config['tags'],
'details': _config['details'],
'entity': _config['entity'],
'source': _config['source'],
'priority': _config['priority'],
'user': _config['user'],
'note': _config['note']
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
def test_build_opsgenie_payload(self):
# Given / When
operator = OpsgenieAlertOperator(
task_id='opsgenie_alert_job',
dag=self.dag,
**self._config
)
payload = operator._build_opsgenie_payload()
# Then
self.assertEqual(self.expected_payload_dict, payload)
def test_properties(self):
# Given / When
operator = OpsgenieAlertOperator(
task_id='opsgenie_alert_job',
dag=self.dag,
**self._config
)
self.assertEqual('opsgenie_default', operator.opsgenie_conn_id)
self.assertEqual(self._config['message'], operator.message)
self.assertEqual(self._config['alias'], operator.alias)
self.assertEqual(self._config['description'], operator.description)
self.assertEqual(self._config['responders'], operator.responders)
self.assertEqual(self._config['visibleTo'], operator.visibleTo)
self.assertEqual(self._config['actions'], operator.actions)
self.assertEqual(self._config['tags'], operator.tags)
self.assertEqual(self._config['details'], operator.details)
self.assertEqual(self._config['entity'], operator.entity)
self.assertEqual(self._config['source'], operator.source)
self.assertEqual(self._config['priority'], operator.priority)
self.assertEqual(self._config['user'], operator.user)
self.assertEqual(self._config['note'], operator.note)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a0083cab532c5db426c3e4e1e0041d4f1d5ec536 | 0cfb5831a748ebd46e438e3ad7e7a09c1d196499 | /com/chapter_09/section_04/task_9.4.5_importAllClass.py | 92d4e7f85081ee09dbfc6731f3670ef472dcf5a0 | [] | no_license | StevenGeGe/pythonFromIntroductionToPractice01 | 7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108 | 9d2ba499056b30ded14180e6c4719ee48edd9772 | refs/heads/master | 2023-02-15T04:08:59.878711 | 2020-12-28T13:27:55 | 2020-12-28T13:27:55 | 310,980,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/25 20:34
# @Author : Yong
# @Email : [email protected]
# @File : task_9.4.5_importAllClass.py
# @Software: PyCharm
# Importing all classes from a module:
# from module_name import *
# This usage is not recommended.
# Recommended: access classes through the module_name.class_name syntax instead.
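# A short illustration (the module and class names below are hypothetical):
#
#   import electric_car                        # recommended: the namespace stays explicit
#   my_car = electric_car.ElectricCar()        # access the class through the module
#
#   from electric_car import *                 # discouraged: it is unclear where names come from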
| [
"[email protected]"
] | |
babdbff65d7df7830fbc35159f977fcaebc87b48 | 7be7190aeceef43841274518d260bcd92e04e5a7 | /Mahouo-Account/sever/app/__init__.py | a0d44f9b8c1ba491d8fa05edb03452397aa3f1ee | [] | no_license | weivis/Mahouo | 078c440b41a686d355a49e3fc29175bc225dff2c | 81fd6919a884b97cb53ac3e97f1e48d78ddd4e63 | refs/heads/master | 2020-04-20T16:56:44.813853 | 2019-02-03T18:47:11 | 2019-02-03T18:47:11 | 168,974,099 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | __author__ = 'Ran'
from flask import Flask # flask
from flask_cache import Cache # cache
from flask_login import LoginManager
from flask_cors import *
from flask_sqlalchemy import SQLAlchemy # sql
from datetime import timedelta
from app import config # config
# Instantiate the Flask app
app = Flask(__name__,
            template_folder='templates',    # template directory; may be a relative or an absolute path
            static_folder='static',  # static file URL prefix; by default the static path matches this prefix
)
# Load the global configuration
app.config.from_object(config)
app.permanent_session_lifetime = timedelta(days=7)
# App secret key
app.secret_key = '\x12my\x0bVO\xeb\xf8\x18\x15\xc5_?\x91\xd7h\x06AC'
# Configure Flask-Login
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.account_login'
login_manager.init_app(app=app)
# Bind the extension objects
db = SQLAlchemy(app)
cache = Cache(app)
cache.init_app(app) | [
"[email protected]"
] | |
703dc8683d7f928f96e719bf5febd0627d683364 | 9a9e0398f26cee9864d48c4618c0a482e5475e83 | /Python/code/top_k_frequent_elements.py | 1e0b45c6c2186a3d5aa1760acecb875e104754cb | [] | no_license | CNife/leetcode | 92693c653bb41780ee431293286c3e909009e9b0 | 7cdd61692ecb52dd1613169e80b924dd39d35996 | refs/heads/main | 2021-06-22T21:22:12.997253 | 2021-03-18T07:07:15 | 2021-03-18T07:07:15 | 206,955,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from collections import defaultdict
from heapq import heappush, heapreplace
from typing import List, Tuple
from leetcode import test, sorted_list
def top_k_frequent(nums: List[int], k: int) -> List[int]:
counter = defaultdict(lambda: 0)
for num in nums:
counter[num] += 1
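    # Keep a min-heap of (count, num) capped at size k; the root holds the smallest
    # count retained so far, so it is replaced whenever a larger count shows up.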
heap: List[Tuple[int, int]] = []
for num, count in counter.items():
if len(heap) < k:
heappush(heap, (count, num))
elif heap[0][0] < count:
heapreplace(heap, (count, num))
return [t[1] for t in heap]
test(
top_k_frequent,
[
([1, 1, 1, 2, 2, 3], 2, [1, 2]),
([1], 1, [1]),
],
map_func=sorted_list,
)
| [
"[email protected]"
] | |
9c68c1a48ea5e7f1c1e9b39fb95197c685595749 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02260/s279877008.py | 38cfba9d040e317dbba645db4cba4794e44c61a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | input()
num_list = raw_input().split()
num_list = map(int, num_list)
def selection_sort(num_list, count):
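    # Selection sort: swap the minimum of the unsorted suffix into position i,
    # counting only the swaps that actually move an element.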
for i in range(0, len(num_list)):
minj = i
for j in range(i, len(num_list)):
if num_list[j] < num_list[minj]:
minj = j
temp = num_list[minj]
if minj != i:
num_list[minj] = num_list[i]
num_list[i]= temp
count += 1
i += 1
return count, num_list
count = 0
count, num_list = selection_sort(num_list, count)
num_list = map(str, num_list)
print " ".join(num_list)
print count | [
"[email protected]"
] | |
39b8c01806e7f01b801d077e55cdbe99b11dd5a9 | 0883188e1648f982e3a27bf0b89c4c09dac3d3ef | /nmigen/test/compat/test_fifo.py | bc6b81cdee56cf3921aa32628db05c3a8a6097be | [
"BSD-2-Clause"
] | permissive | pbsds/nmigen | b44c0b212ddd2d88a6641243efbb632baacb66f7 | d964ba9cc45490b141c8c4c4c3d8add1a26a739d | refs/heads/master | 2022-12-04T10:32:52.573521 | 2020-07-31T13:17:39 | 2020-07-31T18:41:59 | 286,076,534 | 0 | 0 | BSD-2-Clause | 2020-08-08T16:12:24 | 2020-08-08T16:12:23 | null | UTF-8 | Python | false | false | 1,272 | py | import unittest
from itertools import count
from ...compat import *
from ...compat.genlib.fifo import SyncFIFO
from .support import SimCase
class SyncFIFOCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = SyncFIFO(64, 2)
self.sync += [
If(self.dut.we & self.dut.writable,
self.dut.din[:32].eq(self.dut.din[:32] + 1),
self.dut.din[32:].eq(self.dut.din[32:] + 2)
)
]
def test_run_sequence(self):
seq = list(range(20))
def gen():
for cycle in count():
# fire re and we at "random"
yield self.tb.dut.we.eq(cycle % 2 == 0)
yield self.tb.dut.re.eq(cycle % 3 == 0)
# the output if valid must be correct
if (yield self.tb.dut.readable) and (yield self.tb.dut.re):
try:
i = seq.pop(0)
except IndexError:
break
self.assertEqual((yield self.tb.dut.dout[:32]), i)
self.assertEqual((yield self.tb.dut.dout[32:]), i*2)
yield
self.run_with(gen())
| [
"[email protected]"
] | |
0d6bf526fcc135ca7f156726c43622f99a0c3269 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_053/ch160_2020_06_19_19_49_19_966261.py | 0eb7281b995c2be04327db8c79ad0fcf74f9446d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import math
resultados = {}
valores = []
i = 0
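# Compare the rational approximation 4x(180 - x) / (40500 - x(180 - x)) with
# math.sin over 0..90 degrees and print the angle where the error is largest.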
for x in range(91):
    # Apply the approximation formula
y = 4*x*(180 - x)/(40500 - x*(180 - x))
    # Convert x to radians
x = x*math.pi/180
    # Compute the difference from the true sine
dif = abs(y - math.sin(x))
    # Append to the list of differences
valores.append(dif)
    # Store the difference with its index in the dictionary
resultados[i] = dif
i += 1
for indice, diferenca in resultados.items():
if diferenca == max(valores):
print(indice)
break | [
"[email protected]"
] | |
cee5b8650269efe733b6f7b95dcc8366a0fa8d3b | ba919c512e131de90427b1a6bfd29e1d7a2e22c8 | /debug/verification_test.py | a509b9d96a2cf195f0bfa7b8082cccaa8b3446a1 | [] | no_license | qq183727918/influence | 7d3b0106db55402630979b86455e4b82ebed2e98 | 75cb04453278d13dd82a6f319d6f9ecdfad5fb88 | refs/heads/master | 2023-01-22T23:00:51.979543 | 2020-12-08T11:12:12 | 2020-12-08T11:12:12 | 317,783,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # _*_ coding: UTF-8 _*_
# @Time : 2020/12/4 17:01
# @Author : LiuXiaoQiang
# @Site : http:www.cdtest.cn/
# @File : verification_test.py
# @Software : PyCharm
def verification():
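    # Convert the captcha image to grayscale, then binarize and denoise it so OCR has an easier job.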
from PIL import Image
    # Convert to grayscale
    # Load the image from a file path
imgName = '13.png'
im = Image.open(imgName)
    # Or load the image from a byte stream:
# im = Image.open(io.BytesIO(b))
    # Convert to a grayscale image
imgry = im.convert('L')
    # Save the grayscale image
imgry.save('gray-' + imgName)
    # Binarize and denoise
from PIL import Image, ImageEnhance, ImageFilter
im = Image.open('../verification/gray-13.png')
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
im.show()
im.save('./1213.png')
verification()
from PIL import Image
import pytesseract
# pytesseract.pytesseract.tesseract_cmd = r'D:\Tools\tesseract\Tesseract-OCR/tesseract.exe'
image = Image.open("../verification/gray-13.png")
code = pytesseract.image_to_string(image, None)
print(code)
| [
"[email protected]"
] | |
05881bf793aa55eee51c75d99cdbe7a1085333a9 | 86fc644c327a8d6ea66fd045d94c7733c22df48c | /scripts/managed_cpe_services/customer/qos_service/policy_class_map_update/update_policy_class_map/update_policy_class_map.py | d79055588ad5e2c89764db215cca6b39ed2e3bd7 | [] | no_license | lucabrasi83/anutacpedeployment | bfe703657fbcf0375c92bcbe7560051817f1a526 | 96de3a4fd4adbbc0d443620f0c53f397823a1cad | refs/heads/master | 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,843 | py | #
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
#DO NOT EDIT THIS FILE ITS AUTOGENERATED ONE
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO service_customization.py FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
qos-service
|
policy-class-map-update
|
update-policy-class-map
Schema Representation:
/services/managed-cpe-services/customer/qos-service/policy-class-map-update/update-policy-class-map
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
import service_customization
class UpdatePolicyClassMap(yang.AbstractYangServiceHandler):
_instance = None
def __init__(self):
self.delete_pre_processor = service_customization.DeletePreProcessor()
self.create_pre_processor = service_customization.CreatePreProcessor()
def create(self, id, sdata):
sdata.getSession().addYangSessionPreReserveProcessor(self.create_pre_processor)
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
devbindobjs={}
inputdict = {}
# START OF FETCHING THE LEAF PARAMETERS
inputdict['name'] = config.get_field_value('name')
inputdict['policy_name'] = config.get_field_value('policy_name')
inputdict['update_profile'] = config.get_field_value('update_profile')
inputdict['apply_to_sites'] = config.get_field_value('apply_to_sites')
inputdict['apply_to_device_group'] = config.get_field_value('apply_to_device_group')
inputdict['device_group'] = config.get_field_value('device_group')
inputdict['class1'] = config.get_field_value('class')
inputdict['packet_handling'] = config.get_field_value('packet_handling')
inputdict['percentage'] = config.get_field_value('percentage')
inputdict['queue_limit'] = config.get_field_value('queue_limit')
inputdict['packets'] = config.get_field_value('packets')
inputdict['qos_group'] = config.get_field_value('qos_group')
inputdict['single_cpe_site'] = config.get_field_value('single_cpe_site')
inputdict['single_cpe_sites'] = config.get_field_value('single_cpe_sites')
if inputdict['single_cpe_sites'] is None:
inputdict['single_cpe_sites'] = '[]'
inputdict['dual_cpe_site'] = config.get_field_value('dual_cpe_site')
inputdict['dual_cpe_sites'] = config.get_field_value('dual_cpe_sites')
if inputdict['dual_cpe_sites'] is None:
inputdict['dual_cpe_sites'] = '[]'
inputdict['single_cpe_dual_wan_site'] = config.get_field_value('single_cpe_dual_wan_site')
inputdict['single_cpe_dual_wan_sites'] = config.get_field_value('single_cpe_dual_wan_sites')
if inputdict['single_cpe_dual_wan_sites'] is None:
inputdict['single_cpe_dual_wan_sites'] = '[]'
inputdict['triple_cpe_site'] = config.get_field_value('triple_cpe_site')
inputdict['triple_cpe_sites'] = config.get_field_value('triple_cpe_sites')
if inputdict.get('triple_cpe_sites') is None:
inputdict['triple_cpe_sites'] = '[]'
inputdict['dual_cpe_dual_wan_site'] = config.get_field_value('dual_cpe_dual_wan_site')
inputdict['dual_cpe_dual_wan_sites'] = config.get_field_value('dual_cpe_dual_wan_sites')
if inputdict.get('dual_cpe_dual_wan_sites') is None:
inputdict['dual_cpe_dual_wan_sites'] = '[]'
# END OF FETCHING THE LEAF PARAMETERS
inputkeydict = {}
# START OF FETCHING THE PARENT KEY LEAF PARAMETERS
inputkeydict['managed_cpe_services_customer_name'] = sdata.getRcPath().split('/')[-4].split('=')[1]
# END OF FETCHING THE PARENT KEY LEAF PARAMETERS
#Use the custom methods to process the data
service_customization.ServiceDataCustomization.process_service_create_data(smodelctx, sdata, dev, device=dev, parentobj=parentobj, inputdict=inputdict, config=config)
def update(self, id, sdata):
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
#Use the custom method to process the data
service_customization.ServiceDataCustomization.process_service_update_data(smodelctx, sdata, dev=dev, parentobj=parentobj, config=config)
def delete(self, id, sdata):
sdata.getSession().addYangSessionPreReserveProcessor(self.delete_pre_processor)
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
#Use the custom method to process the data
service_customization.ServiceDataCustomization.process_service_delete_data(smodelctx, sdata, dev=dev, parentobj=parentobj, config=config)
@staticmethod
def getInstance():
if(UpdatePolicyClassMap._instance == None):
UpdatePolicyClassMap._instance = UpdatePolicyClassMap()
return UpdatePolicyClassMap._instance
#def rollbackCreate(self, id, sdata):
# log('rollback: id = %s, sdata = %s' % (id, sdata))
# self.delete(id,sdata)
| [
"[email protected]"
] | |
b2df8f27cd5d4d14b5223a244f2bcd6cf2720ddc | f64aaa4b0f78774464033148290a13453c96528e | /generated/intermediate/ansible-module-rest/azure_rm_frontdoorroutingrule.py | f59ed875aad2cbda07790fb752d5abdc38e7cce3 | [
"MIT"
] | permissive | audevbot/autorest.cli.debug | e8996270a6a931f243532f65782c7f8fbb1b55c6 | a507fb6e2dd7826212537f27d583f203aac1c28f | refs/heads/master | 2020-06-04T05:25:17.018993 | 2019-08-27T21:57:18 | 2019-08-27T21:57:18 | 191,876,321 | 0 | 0 | MIT | 2019-08-28T05:57:19 | 2019-06-14T04:35:39 | Python | UTF-8 | Python | false | false | 16,885 | py | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_frontdoorroutingrule
version_added: '2.9'
short_description: Manage Azure RoutingRule instance.
description:
- 'Create, update and delete instance of Azure RoutingRule.'
options:
resource_group:
description:
- Name of the Resource group within the Azure subscription.
required: true
type: str
front_door_name:
description:
- Name of the Front Door which is globally unique.
required: true
type: str
name:
description:
- Resource name.
type: str
id:
description:
- Resource ID.
type: str
frontend_endpoints:
description:
- Frontend endpoints associated with this rule
type: list
suboptions:
id:
description:
- Resource ID.
type: str
accepted_protocols:
description:
- Protocol schemes to match for this rule
type: list
patterns_to_match:
description:
- The route patterns of the rule.
type: list
enabled_state:
description:
- >-
Whether to enable use of this rule. Permitted values are 'Enabled' or
'Disabled'
type: str
route_configuration:
description:
- A reference to the routing configuration.
type: dict
resource_state:
description:
- Resource status.
type: str
type:
description:
- Resource type.
type: str
state:
description:
- Assert the state of the RoutingRule.
- >-
Use C(present) to create or update an RoutingRule and C(absent) to
delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create or update specific Forwarding Routing Rule
azure_rm_frontdoorroutingrule:
resource_group: myResourceGroup
front_door_name: myFrontDoor
name: myRoutingRule
routing_rule_parameters:
name: routingRule1
properties:
frontendEndpoints:
- id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{
resource_group }}/providers/Microsoft.Network/frontDoors/{{
front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }}
- id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{
resource_group }}/providers/Microsoft.Network/frontDoors/{{
front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }}
acceptedProtocols:
- Http
patternsToMatch:
- /*
routeConfiguration:
'@odata.type': '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration'
backendPool:
id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{
resource_group }}/providers/Microsoft.Network/frontDoors/{{
front_door_name }}/backendPools/{{ backend_pool_name }}
enabledState: Enabled
- name: Create or update specific Redirect Routing Rule
azure_rm_frontdoorroutingrule:
resource_group: myResourceGroup
front_door_name: myFrontDoor
name: myRoutingRule
routing_rule_parameters:
name: redirectRoutingRule1
properties:
frontendEndpoints:
- id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{
resource_group }}/providers/Microsoft.Network/frontDoors/{{
front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }}
- id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{
resource_group }}/providers/Microsoft.Network/frontDoors/{{
front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }}
acceptedProtocols:
- Https
patternsToMatch:
- /*
routeConfiguration:
'@odata.type': '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'
redirectType: Moved
redirectProtocol: HttpsOnly
customHost: www.bing.com
customPath: /api
customFragment: fragment
customQueryString: a=b
enabledState: Enabled
- name: Delete Routing Rule
azure_rm_frontdoorroutingrule:
resource_group: myResourceGroup
front_door_name: myFrontDoor
name: myRoutingRule
state: absent
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
properties:
description:
- Properties of the Front Door Routing Rule
returned: always
type: dict
sample: null
contains:
frontend_endpoints:
description:
- Frontend endpoints associated with this rule
returned: always
type: dict
sample: null
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
accepted_protocols:
description:
- Protocol schemes to match for this rule
returned: always
type: str
sample: null
patterns_to_match:
description:
- The route patterns of the rule.
returned: always
type: str
sample: null
enabled_state:
description:
- >-
Whether to enable use of this rule. Permitted values are 'Enabled' or
'Disabled'
returned: always
type: str
sample: null
route_configuration:
description:
- A reference to the routing configuration.
returned: always
type: dict
sample: null
resource_state:
description:
- Resource status.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMRoutingRules(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resourceGroupName',
                required=True
),
front_door_name=dict(
type='str',
updatable=False,
disposition='frontDoorName',
                required=True
),
name=dict(
type='str',
updatable=False,
disposition='routingRuleName',
                required=True
),
id=dict(
type='str',
updatable=False,
disposition='/'
),
frontend_endpoints=dict(
type='list',
disposition='/properties/frontendEndpoints',
options=dict(
id=dict(
type='str'
)
)
),
accepted_protocols=dict(
type='list',
disposition='/properties/acceptedProtocols',
choices=['Http',
'Https']
),
patterns_to_match=dict(
type='list',
disposition='/properties/patternsToMatch'
),
enabled_state=dict(
type='str',
disposition='/properties/enabledState',
choices=['Enabled',
'Disabled']
),
route_configuration=dict(
type='dict',
disposition='/properties/routeConfiguration'
),
resource_state=dict(
type='str',
disposition='/properties/resourceState',
choices=['Creating',
'Enabling',
'Enabled',
'Disabling',
'Disabled',
'Deleting']
),
name=dict(
type='str',
updatable=False,
disposition='/'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.front_door_name = None
self.name = None
self.type = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200, 201, 202]
self.to_do = Actions.NoAction
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-04-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(AzureRMRoutingRules, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.Network' +
'/frontDoors' +
'/{{ front_door_name }}' +
'/routingRules' +
'/{{ routing_rule_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ front_door_name }}', self.front_door_name)
self.url = self.url.replace('{{ routing_rule_name }}', self.name)
old_response = self.get_resource()
if not old_response:
self.log("RoutingRule instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('RoutingRule instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log('Need to Create / Update the RoutingRule instance')
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_resource()
# if not old_response:
self.results['changed'] = True
# else:
# self.results['changed'] = old_response.__ne__(response)
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('RoutingRule instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_resource():
time.sleep(20)
else:
self.log('RoutingRule instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["properties"] = response["properties"]
self.results["name"] = response["name"]
self.results["type"] = response["type"]
return self.results
def create_update_resource(self):
# self.log('Creating / Updating the RoutingRule instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
except CloudError as exc:
self.log('Error attempting to create the RoutingRule instance.')
self.fail('Error creating the RoutingRule instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
return response
def delete_resource(self):
# self.log('Deleting the RoutingRule instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
except CloudError as e:
self.log('Error attempting to delete the RoutingRule instance.')
self.fail('Error deleting the RoutingRule instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the RoutingRule instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
found = True
self.log("Response : {0}".format(response))
# self.log("RoutingRule instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the RoutingRule instance.')
if found is True:
return response
return False
def main():
AzureRMRoutingRules()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a1d530110266afe81a9bbd327cde526441ccc73b | b79bce0cf363d2b6dd11371d378d78d48e973270 | /tests/test_custom_multi_output_classification.py | d9959a3efafdab09cb105e8eec4ea79477e7dcfa | [
"Apache-2.0"
] | permissive | CharlotteSean/Kashgari | 2d9338761b16d9804fb81ff92ce2ab1d256c80a7 | ab9970ecf6c0164416bfbbec1378c690b0f00d76 | refs/heads/master | 2022-01-22T03:52:12.284458 | 2019-07-17T03:48:04 | 2019-07-17T03:48:04 | 197,900,673 | 2 | 0 | Apache-2.0 | 2019-07-20T08:15:03 | 2019-07-20T08:15:03 | null | UTF-8 | Python | false | false | 4,917 | py | # encoding: utf-8
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: test_custom_multi_output_classification.py
# time: 2019-05-22 13:36
import unittest
import numpy as np
import tensorflow as tf
import kashgari
from typing import Tuple, List, Optional, Dict, Any
from kashgari.layers import L
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.tasks.classification.base_model import BaseClassificationModel
from kashgari.corpus import SMP2018ECDTCorpus
from tensorflow.python.keras.utils import to_categorical
train_x, train_y = SMP2018ECDTCorpus.load_data('valid')
output_1_raw = np.random.randint(3, size=len(train_x))
output_2_raw = np.random.randint(3, size=len(train_x))
output_1 = to_categorical(output_1_raw, 3)
output_2 = to_categorical(output_2_raw, 3)
print(train_x[:5])
print(output_1[:5])
print(output_2[:5])
print(len(train_x))
print(output_1.shape)
print(output_2.shape)
class MultiOutputProcessor(ClassificationProcessor):
def process_y_dataset(self,
data: Tuple[List[List[str]], ...],
maxlens: Optional[Tuple[int, ...]] = None,
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
# Data already converted to one-hot
# Only need to get the subset
result = []
for index, dataset in enumerate(data):
if subset is not None:
target = kashgari.utils.get_list_subset(dataset, subset)
else:
target = dataset
result.append(np.array(target))
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _build_label_dict(self,
labels: List[str]):
# Data already converted to one-hot
# No need to build label dict
self.label2idx = {1: 1, 0: 0}
self.idx2label = dict([(value, key) for key, value in self.label2idx.items()])
self.dataset_info['label_count'] = len(self.label2idx)
class MultiOutputModel(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 256,
'return_sequences': False
}
}
def build_model_arc(self):
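        # Builds a shared BiLSTM trunk feeding two independent 3-unit sigmoid heads,
        # so fit()/predict() work with a pair of target/output arrays (one per head).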
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']), name='layer_bi_lstm')
layer_output_1 = L.Dense(3, activation='sigmoid', name='layer_output_1')
layer_output_2 = L.Dense(3, activation='sigmoid', name='layer_output_2')
tensor = layer_bi_lstm(embed_model.output)
output_tensor_1 = layer_output_1(tensor)
output_tensor_2 = layer_output_2(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, [output_tensor_1, output_tensor_2])
def predict(self,
x_data,
batch_size=None,
debug_info=False,
threshold=0.5):
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size)
output_1 = pred[0]
output_2 = pred[1]
output_1[output_1 >= threshold] = 1
output_1[output_1 < threshold] = 0
output_2[output_2 >= threshold] = 1
output_2[output_2 < threshold] = 0
return output_1, output_2
class TestCustomMultiOutputModel(unittest.TestCase):
def test_build_and_fit(self):
from kashgari.embeddings import BareEmbedding
processor = MultiOutputProcessor()
embedding = BareEmbedding(processor=processor)
m = MultiOutputModel(embedding=embedding)
m.build_model(train_x, (output_1, output_2))
m.fit(train_x, (output_1, output_2), epochs=2)
res = m.predict(train_x[:10])
assert len(res) == 2
assert res[0].shape == (10, 3)
def test_build_with_BERT_and_fit(self):
from kashgari.embeddings import BERTEmbedding
from tensorflow.python.keras.utils import get_file
from kashgari.macros import DATA_PATH
sample_bert_path = get_file('bert_sample_model',
"http://s3.bmio.net/kashgari/bert_sample_model.tar.bz2",
cache_dir=DATA_PATH,
untar=True)
processor = MultiOutputProcessor()
embedding = BERTEmbedding(
model_folder=sample_bert_path,
processor=processor)
m = MultiOutputModel(embedding=embedding)
m.build_model(train_x, (output_1, output_2))
m.fit(train_x, (output_1, output_2), epochs=2)
res = m.predict(train_x[:10])
assert len(res) == 2
assert res[0].shape == (10, 3) | [
"[email protected]"
] | |
b6db08130173918bab964091422606ec2957af39 | a34ec07c3464369a88e68c9006fa1115f5b61e5f | /A_Basic/String/L0_1684_Count_the_Number_of_Consistent_Strings.py | 802755735771f634df680bf789b44d5b52ac935f | [] | no_license | 824zzy/Leetcode | 9220f2fb13e03d601d2b471b5cfa0c2364dbdf41 | 93b7f4448a366a709214c271a570c3399f5fc4d3 | refs/heads/master | 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 | Python | UTF-8 | Python | false | false | 213 | py | class Solution:
def countConsistentStrings(self, allowed: str, words: List[str]) -> int:
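        # A word is "consistent" when every one of its characters occurs in `allowed`.
        # e.g. (LeetCode sample) allowed="ab", words=["ad","bd","aaab","baa","badab"] -> 2 ("aaab", "baa")
        # Note: List here is typing.List (LeetCode injects it); add `from typing import List` to run standalone.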
ans = 0
for word in words:
if all([w in allowed for w in word]): ans += 1
return ans | [
"[email protected]"
] | |
9207c9cab23edfac359cbc19c3db823e8b193cb9 | 84d2efd222fa190c8b3efcad083dcf2c7ab30047 | /linRegNoSTD.py | 34a9e45c64ae6545a196ca7279c57aa4acfd4220 | [] | no_license | webclinic017/Capstone-2 | aedfc8692647f2e84114da5b2e32856d0de80586 | d476723f7893c7c5da14e24f28736a8f0ba7ff55 | refs/heads/master | 2023-01-23T06:44:36.868373 | 2020-12-03T19:44:51 | 2020-12-03T19:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn.linear_model
from alpha_vantage.timeseries import TimeSeries
api_key = '8FIYTT49ZEZT2GV5'
ts = TimeSeries(key=api_key, output_format='pandas')
data, meta_data = ts.get_daily_adjusted(symbol='SPY', outputsize = 'full')
data = data.reset_index()
data.plot(x = 'date', y = '4. close')
data['date'] = data['date'].values.astype(float)
X = np.c_[data['date']]
Y = np.c_[data['4. close']]
model = sklearn.linear_model.LinearRegression()
model.fit(X, Y)
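# 'date' was cast to float above (pandas datetime64[ns], i.e. nanoseconds since the Unix epoch),
# so predictions must use the same unit; 1736208000000000000 ns corresponds to 2025-01-07 (UTC).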
date = [[1736208000000000000.0]]
print(model.predict(date))
plt.show()
#standard deviation | [
"[email protected]"
] | |
a7784cf4b12ea9bed917ce26508e4c63ce253b6c | 12e42f4f34030b90c1841ece8d4efdd28925394f | /test/functional/wallet_scriptaddress2.py | 1f6b0e35dc51989b468955669c9f87acde059877 | [
"MIT"
] | permissive | GerardoTaboada/EducaCoin | 46d8aa08dd4b3859e59b739713ced08ec0b8c510 | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | refs/heads/master | 2020-03-31T20:01:41.768383 | 2018-10-17T21:54:13 | 2018-10-17T21:54:13 | 152,522,009 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Educacoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = False
self.extra_args = [['-addresstype=legacy'], [], []]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main() | [
"[email protected]"
] | |
74096f3871ce295e10d08f00012c88bc032e9da1 | f972e22df004b419d23b4b03d3c7e42e604a2e2b | /compute/wps/tasks/ophidia.py | 830321413d5463dd764a9eed4384191c13d65a43 | [] | no_license | OphidiaBigData/esgf-compute-wps | 9ec663b1701f2336f08117a6fb0725d71adfe078 | 8dd26dde385fbe861c78e432e0954725d7bf9b18 | refs/heads/master | 2020-04-28T10:20:49.718253 | 2019-02-04T09:46:43 | 2019-02-04T09:46:43 | 175,198,536 | 0 | 0 | null | 2019-03-12T11:39:20 | 2019-03-12T11:39:19 | null | UTF-8 | Python | false | false | 5,490 | py | import json
import os
import uuid
import cwt
from celery.utils.log import get_task_logger
from django.conf import settings
from PyOphidia import client
from wps import WPSError
from wps.tasks import base, process  # 'process' (assumed to live alongside 'base') provides the Process helper used in oph_submit
__all__ = [
'PROCESSES',
'oph_submit',
]
logger = get_task_logger('wps.tasks.ophidia')
PROCESSES = {
'Oph.max': 'max',
'Oph.min': 'min',
'Oph.avg': 'avg',
'Oph.sum': 'sum',
'Oph.std': 'std',
'Oph.var': 'var',
}
class OphidiaTask(object):
def __init__(self, name, operator, on_error=None):
self.name = name
self.operator = operator
self.on_error = on_error
self.arguments = []
self.dependencies = []
def add_arguments(self, **kwargs):
self.arguments.extend(['{}={}'.format(key, value) for key, value in kwargs.iteritems()])
def add_dependencies(self, *args):
self.dependencies.extend(dict(task=x.name) for x in args)
def to_dict(self):
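        # Serializes one workflow node; with illustrative values the result looks like:
        # {'name': 'import data', 'operator': 'oph_importnc',
        #  'arguments': ['container=work', 'ncores=2'], 'dependencies': [{'task': 'create container'}]}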
data = {
'name': self.name,
'operator': self.operator,
'arguments': self.arguments,
}
if self.on_error:
data['on_error'] = self.on_error
if self.dependencies:
data['dependencies'] = self.dependencies
return data
class OphidiaWorkflow(object):
def __init__(self, oph_client):
self.oph_client = oph_client
self.workflow = {
'name': 'ESGF WPS Workflow',
'author': 'ESGF WPS',
'abstract': 'Auto-generated abstract',
'exec_mode': 'sync',
'cwd': '/',
'ncores': '2',
'tasks': []
}
def add_tasks(self, *args):
self.workflow['tasks'].extend(args)
def check_error(self):
if self.oph_client.last_error is not None and self.oph_client.last_error != '':
error = '{}\n'.format(self.oph_client.last_error)
res = self.oph_client.deserialize_response()
try:
for x in res['response'][2]['objcontent']:
for y in x['rowvalues']:
error += '\t{}: {}\n'.format(y[-3], y[-1])
except IndexError:
raise WPSError('Failed to parse last error from Ophidia')
raise WPSError(error)
def submit(self):
self.check_error()
self.oph_client.wsubmit(self.to_json())
def to_json(self):
def default(o):
if isinstance(o, OphidiaTask):
return o.to_dict()
return json.dumps(self.workflow, default=default, indent=4)
@base.cwt_shared_task()
def oph_submit(self, parent_variables, variables, domains, operation, user_id, job_id):
self.PUBLISH = base.ALL
proc = process.Process(self.request.id)
proc.initialize(user_id, job_id)
v, d, o = self.load(parent_variables, variables, domains, operation)
oph_client = client.Client(settings.WPS_OPHIDIA_USER, settings.WPS_OPHIDIA_PASSWORD, settings.WPS_OPHIDIA_HOST, settings.WPS_OPHIDIA_PORT)
workflow = OphidiaWorkflow(oph_client)
workflow.check_error()
cores = o.get_parameter('cores')
if cores is None:
cores = settings.WPS_OPHIDIA_DEFAULT_CORES
else:
cores = cores.values[0]
axes = o.get_parameter('axes')
if axes is not None:
axes = axes.values[0]
else:
axes = 'time'
proc.log('Connected to Ophidia backend, building workflow')
container_task = OphidiaTask('create container', 'oph_createcontainer', on_error='skip')
container_task.add_arguments(container='work')
proc.log('Add container task')
# only take the first input
inp = o.inputs[0]
import_task = OphidiaTask('import data', 'oph_importnc')
import_task.add_arguments(container='work', measure=inp.var_name, src_path=inp.uri, ncores=cores, imp_dim=axes)
import_task.add_dependencies(container_task)
proc.log('Added import task')
try:
operator = PROCESSES[o.identifier]
except KeyError:
raise WPSError('Process "{name}" does not exist for Ophidia backend', name=o.identifier)
if axes == 'time':
reduce_task = OphidiaTask('reduce data', 'oph_reduce')
reduce_task.add_arguments(operation=operator, ncores=cores)
reduce_task.add_dependencies(import_task)
proc.log('Added reduction task over implicit axis')
else:
reduce_task = OphidiaTask('reduce data', 'oph_reduce2')
reduce_task.add_arguments(operation=operator, dim=axes, ncores=cores)
reduce_task.add_dependencies(import_task)
proc.log('Added reduction task over axes "{}"', axes)
output_name = '{}'.format(uuid.uuid4())
export_task = OphidiaTask('export data', 'oph_exportnc2')
export_task.add_arguments(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name, ncores=cores, force='yes')
export_task.add_dependencies(reduce_task)
proc.log('Added export task')
workflow.add_tasks(container_task, import_task, reduce_task, export_task)
proc.log('Added tasks to workflow')
workflow.submit()
proc.log('Submitted workflow to Ophidia backend')
workflow.check_error()
proc.log('No errors reported by Ophidia')
output_url = settings.WPS_OPHIDIA_OUTPUT_URL.format(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name)
output_var = cwt.Variable(output_url, inp.var_name, name=o.name)
return {o.name: output_var.parameterize()}
| [
"[email protected]"
] | |
1d299fc35a1f1aa5feca93086cb650a6d0e1c2f3 | 8842d6c864f12dc8853d22b8a986b01acdf0e446 | /27_12_15_Nico3/LDA.pyx | c15e5a4eef6680c7665544e3191ce137506966f6 | [] | no_license | yukkyo/ResearchSource | 0d701aa09d3cfc5aae80a022445ecf14c42f0a07 | db497d19aae41ea57d7d6dd245714a477a7a1d4c | refs/heads/master | 2021-01-18T20:01:20.427148 | 2019-06-20T05:17:54 | 2019-06-20T05:17:54 | 24,621,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | pyx | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# cython: profile=True, boundscheck=False, wraparound=False
from __future__ import division
cimport cython
from libc.stdlib cimport rand, RAND_MAX
from libcpp.vector cimport vector
from libc.math cimport log, exp
from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
# Latent Dirichlet Allocation + collapsed Gibbs sampling
# Applies LDA (collapsed Gibbs sampling) to the full document collection (roughly 500k documents)
# Returns the vocabulary-topic distribution (the topic-vocabulary count matrix with beta subtracted from each entry, transposed) and the perplexity history
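# Collapsed Gibbs sampling draws each token's topic from
#   p(z=k | rest) ~ (n_dk + alpha) * (n_kv + beta) / (n_k + V*beta)
# The alpha / beta / V*beta terms are folded into the count vectors at initialisation below,
# so the inner sampling loop only multiplies n_m_z and n_z_t and divides by n_z.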
class LDA:
@cython.cdivision(True)
def __init__(self, r_n_topics, r_alpha, r_beta, raw_docs, r_V, r_iteration):
print "init lda instance"
self.n_topics = r_n_topics
self.alpha = r_alpha # parameter of topics prior
self.beta = r_beta # parameter of words prior
self.V = r_V # size of vocabulary
self.perps = []
self.iteration = r_iteration
        print "initialize topics"
cdef vector[vector[int]] docs = raw_docs
# self.docs = docs
cdef int n_corpus, len_doc, m, n, new_z, v
n_corpus = 0
cdef int n_topics_int = self.n_topics
cdef int V_int = self.V
cdef double n_topics = self.n_topics
cdef double alpha = self.alpha
cdef double beta = self.beta
cdef double V = self.V
cdef double Vbeta = V * beta
n_topics_s = self.n_topics
v2 = self.V
# number of times topic z and word w co-occur
cdef int max_docs = 1
max_docs = docs.size()
# word count of each document and topic
cdef vector[vector[double]] n_m_z
n_m_z = vector[vector[double]](max_docs, vector[double](n_topics_int, alpha))
# word count of each topic and vocabulary
cdef vector[vector[double]] n_z_t
# n_z_t = vector[vector[double]](n_topics_int, vector[double](<int>V, beta))
n_z_t = vector[vector[double]](V_int, vector[double](n_topics_int, beta))
# word count of each topic
cdef vector[double] n_z
n_z = vector[double](n_topics_int, Vbeta)
cdef vector[vector[int]] z_m_n
cdef vector[int] z_n
for m in xrange(max_docs):
len_doc = docs[m].size()
n_corpus += len_doc
z_n.clear()
for n in xrange(len_doc):
v = docs[m][n]
new_z = int((rand()/(RAND_MAX +1.)) * n_topics)
z_n.push_back(new_z)
n_m_z[m][new_z] += 1.
n_z_t[v][new_z] += 1.
n_z[new_z] += 1
z_m_n.push_back(z_n)
print "end initialize topics"
"""learning once iteration"""
print "inference start"
cdef int j, ite, iteration
iteration = self.iteration
cdef vector[vector[double]] n_z_t_tmp
cdef vector[double] n_m_z_m
n_m_z_m.resize(n_topics_int)
cdef vector[int] z_m_n_m
cdef vector[double] p_z2
p_z2.resize(n_topics_int)
cdef double p_z2j, u, perp
# cdef long V = self.V
cdef vector[int] docs_m
cdef double n_z_j
cdef vector[double] theta
cdef double Kalpha = <double>n_topics * alpha
cdef double log_per, tmp_logper, len_doc_kalpha
print "calc first perp"
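        # Perplexity = exp( -(1/n_corpus) * sum over tokens of log( sum_k theta[d][k] * phi[k][v] ) ),
        # with theta and phi read off the current smoothed count matrices.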
n_z_t_tmp = n_z_t
log_per = 0.0
for v in xrange(V_int):
for j in xrange(n_topics_int):
n_z_t_tmp[v][j] /= n_z[j]
for m in xrange(max_docs):
len_doc = docs[m].size()
len_doc_kalpha = <double>len_doc + Kalpha
theta = n_m_z[m]
docs_m = docs[m]
for j in xrange(n_topics_int):
theta[j] = theta[j] / len_doc_kalpha
for n in xrange(len_doc):
v = docs_m[n]
tmp_logper = 0.0
for j in xrange(n_topics_int):
tmp_logper += (theta[j] * n_z_t_tmp[v][j])
log_per -= log(tmp_logper)
theta.clear()
n_z_t_tmp.clear()
log_per /= <double>n_corpus
perp = exp(log_per)
print "perp: " + str(perp)
self.perps.append(perp)
for ite in xrange(iteration):
print "ite: " + str(ite)
# sampling each word in corpus
for m in xrange(max_docs):
len_doc = docs[m].size()
n_m_z_m = n_m_z[m]
z_m_n_m = z_m_n[m]
for n in xrange(len_doc):
v = docs[m][n]
# discount for n-th word n with topic z
z = z_m_n_m[n]
n_m_z_m[z] -= 1
n_z_t[v][z] -= 1
n_z[z] -= 1
# sampling new_z
for j in xrange(n_topics_int):
p_z2j = n_z_t[v][j] * n_m_z_m[j]
p_z2j /= n_z[j]
if j != 0:
p_z2j += p_z2[j-1]
p_z2[j] = p_z2j
u = (rand()/(RAND_MAX +1.))
u *= p_z2[n_topics_int - 1]
new_z = n_topics_int - 1
for j in xrange(n_topics_int):
if u < p_z2[j]:
new_z = j
break
# set z the new topic and increment counters
z_m_n_m[n] = new_z
n_m_z_m[new_z] += 1
n_z_t[v][new_z] += 1
n_z[new_z] += 1
z_m_n[m] = z_m_n_m
n_m_z[m] = n_m_z_m
if (m + 1) % 100000 == 0:
print "end docs: " + str(m + 1)
print "calc perp"
log_per = 0.0
n_z_t_tmp = n_z_t
for v in xrange(V_int):
for j in xrange(n_topics_int):
n_z_t_tmp[v][j] /= n_z[j]
for m in xrange(max_docs):
len_doc = docs[m].size()
len_doc_kalpha = <double>len_doc + Kalpha
theta = n_m_z[m]
docs_m = docs[m]
for j in xrange(n_topics_int):
theta[j] = theta[j] / len_doc_kalpha
for n in xrange(len_doc):
v = docs_m[n]
tmp_logper = 0.0
for j in xrange(n_topics_int):
tmp_logper += (theta[j] * n_z_t_tmp[v][j])
log_per -= log(tmp_logper)
theta.clear()
n_z_t_tmp.clear()
log_per /= <double>n_corpus
perp = exp(log_per)
print "perp: " + str(perp)
self.perps.append(perp)
print "calc new alpha and beta"
self.n_z_t = n_z_t
self.z_m_n = z_m_n
return | [
"[email protected]"
] | |
9fc5b6e12ba33400052ec9e08c251ff1626f1477 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /testing/test_programs/numpy/basic_numpy/arrays/stypy_test_files/numpy_array_broadcasting_4__type_data.py | 99ec5d09a07932264cc57dd68828680219e497e5 | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from testing.code_generation_testing.codegen_testing_common import instance_of_class_name
test_types = {
'__main__': {
'r4': instance_of_class_name("ndarray"),
'r5': instance_of_class_name("ndarray"),
'__name__': instance_of_class_name("str"),
'r2': instance_of_class_name("ndarray"),
'r3': instance_of_class_name("ndarray"),
'__builtins__': instance_of_class_name("module"),
'__file__': instance_of_class_name("str"),
'__package__': instance_of_class_name("NoneType"),
'r': instance_of_class_name("ndarray"),
'w': instance_of_class_name("ndarray"),
'v': instance_of_class_name("ndarray"),
'np': instance_of_class_name("module"),
'x': instance_of_class_name("ndarray"),
'__doc__': instance_of_class_name("NoneType"),
},
}
| [
"[email protected]"
] | |
7c58362f81d2eebf86e77c4f52201dabd123be2d | e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1 | /AlgorithmCodeTemplates/algorithm/sliding_window_examples.py | 634c355c9efa3418e81eeafb9f04d218da1225cd | [] | no_license | sevenhe716/LeetCode | 41d2ef18f5cb317858c9b69d00bcccb743cbdf48 | 4a1747b6497305f3821612d9c358a6795b1690da | refs/heads/master | 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | from collections import Counter
from collections import defaultdict
# [3] https://leetcode.com/problems/longest-substring-without-repeating-characters/
# variation with no pattern
def lengthOfLongestSubstring(s):
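    # e.g. lengthOfLongestSubstring("abcabcbb") == 3 ("abc")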
# create a default dict to maintain state
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(s):
counter[s[end]] += 1
if counter[s[end]] > 1:
count += 1
end += 1
while count > 0:
counter[s[start]] -= 1
if counter[s[start]] > 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [76] https://leetcode.com/problems/minimum-window-substring/
# variation with finding minimum
def minWindow(s: str, t: str) -> str:
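    # e.g. minWindow("ADOBECODEBANC", "ABC") == "BANC"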
counter = Counter(t)
count, start, end, res = len(t), 0, 0, [float('inf'), 0]
while end < len(s):
counter[s[end]] -= 1
# consider duplicate char in t
if counter[s[end]] >= 0:
count -= 1
end += 1
# valid in while
while count == 0:
# update minimum here, inner while loop
if end - start < res[0]:
res = (end - start, start)
counter[s[start]] += 1
if counter[s[start]] > 0:
count += 1
start += 1
return s[res[1]:res[0] + res[1]] if res[0] != float('inf') else ''
# [904] https://leetcode.com/problems/fruit-into-baskets/
# variation with list
def totalFruit(tree: 'List[int]') -> int:
cnt = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(tree):
cnt[tree[end]] += 1
if cnt[tree[end]] == 1:
count += 1
end += 1
while count > 2:
cnt[tree[start]] -= 1
if cnt[tree[start]] == 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [438] https://leetcode.com/problems/find-all-anagrams-in-a-string/
# variation with restrict between start and end
def findAnagrams(s: str, p: str) -> 'List[int]':
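    # e.g. findAnagrams("cbaebabacd", "abc") == [0, 6]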
len_p, len_s = len(p), len(s)
if len_p > len_s:
return []
counter = Counter(p)
count, start, end, res = len_p, 0, 0, []
while end < len_s:
# only update counter when match char in p
counter[s[end]] -= 1
if counter[s[end]] >= 0:
count -= 1
end += 1
if count == 0:
res.append(start)
# not use a while, because restrict the length
if end - start == len_p:
counter[s[start]] += 1
# exclude char not in p, because always negative
if counter[s[start]] > 0:
count += 1
start += 1
return res
# [30] https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# variation with complex match policy
def findSubstring(s: str, words: 'List[str]') -> 'List[int]':
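    # e.g. findSubstring("barfoothefoobarman", ["foo", "bar"]) == [0, 9]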
if not words:
return []
word_len, res = len(words[0]), []
# start offset from 0 to word_len, and step is word_len
for i in range(word_len):
# reset state every epoch
counter = Counter(words)
start, end, count = i, i, len(words)
while end < len(s):
cur_word = s[end:end + word_len]
# check is not necessary here, just for performance
if cur_word in counter:
counter[cur_word] -= 1
if counter[cur_word] >= 0:
count -= 1
end += word_len
if count == 0:
res.append(start)
# ensure consecutive words
if end - start == word_len * len(words):
cur_word = s[start:start + word_len]
if cur_word in counter:
counter[cur_word] += 1
if counter[cur_word] > 0:
count += 1
start += word_len
# the order is not necessary here
return res | [
"[email protected]"
] | |
f52fb6152bba23a4c0a005ca2f666c5e95d07473 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /data/lmdbMaker.py | 0081b73ccebdd68d83571914b0342cb8bcb9817a | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 6,516 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Please execute the code with python2
'''
import os
import lmdb
import cv2
import numpy as np
def checkImageIsValid(imageBin):
if imageBin is None:
return False
try:
imageBuf = np.fromstring(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
    except:
        print("Image is invalid!")
        return False
return True
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert (len(imagePathList) == len(labelList))
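    # Key layout written below: 'image-%09d' -> raw image bytes, 'label-%09d' -> text label,
    # plus a single 'num-samples' entry -- the layout CRNN-style LMDB readers expect.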
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in range(nSamples):
imagePath = imagePathList[i]
label = labelList[i]
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
import codecs
with open(imagePath, 'r') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
def read_image_label(image_directory, label_address):
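    # Assumes each line of the label file is "<image path> <label>" and that the first
    # 10 characters are a fixed prefix, which is dropped via line[10:] when building the key.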
import os
image_lis = os.listdir(image_directory)
f = open(label_address)
dict = {}
i = 1
    # map each image (key) to its target label (value)
for line in f.readlines():
# TODO
dict[line[10:].split(" ")[0]] = line.split(' ')[1].replace('\n', '').replace('\r',
'') # arttrain-11.art/lsvttest10.lsvt12
'''
print(dict)
i+=1
if i==14:
break
print(dict)
'''
# print(dict)
result1 = []
result2 = []
# TODO
for image_path1 in image_lis:
for image_path2 in os.listdir(image_directory + '/' + image_path1):
try:
# image_path = image_path.replace('.jpg','')
# result1.append(image_directory+'/'+image_path1+'/'+image_path2)
result2.append(dict[image_path1 + '/' + image_path2])
result1.append(image_directory + '/' + image_path1 + '/' + image_path2)
except:
# pass
print("jianzhi")
return result1, result2
def extract_result_from_xml():
import re
f = open('../xml_test/word.xml', 'r')
string = ""
for line in f.readlines():
print(line)
string += line
print(string)
# 记录文件路径
result1 = re.findall(r'file=\"(.*?)\"', string)
for i in range(len(result1)):
result1[i] = '/home/chenjingye/datasets/ICDAR2003/WordR/TrialTest/' + result1[i]
print(result1)
result2 = re.findall(r'tag=\"(.*?)\"', string)
print(result2)
return result1, result2
def ic15():
f = open('/home/chenjingye/datasets/ICDAR2015/Word_recognition/Challenge4_Test_Task3_GT.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
# print(line)
# print(line.split())
a, b = line.split(', ')
print(a, b)
result1.append(
'/home/chenjingye/datasets/ICDAR2015/Word_recognition/ch4_test_word_images_gt/' + a.replace(',', ''))
result2.append(b.replace("\"", "").replace('\r\n', ''))
print(result1)
print(result2)
return result1, result2
def find_jpg():
import os
root = "/mnt/sdb1/zifuzu/chenjingye/datasets/mnt/ramdisk/max/90kDICT32px"
flag = True
    def findjpg(path, ret):
        """Find all *.jpg files under the given path."""
filelist = os.listdir(path)
for filename in filelist:
# if len(ret) > 500000 :
# return
de_path = os.path.join(path, filename)
if os.path.isfile(de_path):
                if de_path.endswith(".jpg"):  # Specify to find the jpg files.
print(de_path)
ret.append(de_path)
# if len(ret) > 500000:
# return
else:
                findjpg(de_path, ret)
ret = []
    findjpg(root, ret)
for path in ret:
print(path)
try:
os.remove('./temp.txt')
except:
pass
f = open('./temp.txt', 'a')
for element in ret:
f.write(element + '\n')
f.close()
def syn90():
import re
f = open('./temp.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
result1.append(line.replace('\n', ''))
target = re.findall(r'_(.*?)_', line)[0]
result2.append(target)
return result1, result2
if __name__ == '__main__':
'''
    Pass two lists into createDataset:
    list1: image file paths
    list2: image labels
    Entries at the same index of the two lists belong together.
'''
imgList, labelList = ic15()
print(imgList)
print(labelList)
print("The length of the list is ", len(imgList))
'''Input the address you want to generate the lmdb file.'''
createDataset('/mnt/sdb1/zifuzu/chenjingye/datasets/syn90_train_500000data_lmdb', imgList, labelList)
| [
"[email protected]"
] | |
25ea3ccf9694bbff46ace1ccdf8a44257540ba69 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/myfavouritekk_vdetlib/vdetlib-master/utils/log.py | 331282eee61fb52e30ff5a83431ec74d430c69e0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 175 | py | #!/usr/bin/env python
import logging
logging.basicConfig(
format='[%(asctime)s %(process)d %(filename)s:%(lineno)s %(levelname)s] %(message)s',
level=logging.DEBUG)
| [
"[email protected]"
] | |
e13fab0514aa87a22f4efac43760c2d877c23adb | 64a99161204051f6f2abb9e8d88a5508952c0115 | /examples/saveLoadV1/create_save.py | 61f2ff9f7627dd79c32b0c968455c4711de7a2ad | [
"MIT"
] | permissive | suny-downstate-medical-center/netpyne | d1ba5a258ba63c8ad8b0fa91a6d8bbd99f2e8d28 | 9d08867205b776bbb467554c49df9d8aba57dcf2 | refs/heads/development | 2023-08-23T22:48:26.020812 | 2023-08-16T14:20:23 | 2023-08-16T14:20:23 | 48,733,333 | 18 | 18 | MIT | 2023-09-11T16:01:19 | 2015-12-29T07:12:08 | Jupyter Notebook | UTF-8 | Python | false | false | 164 | py | from netpyne import sim
import params
# Create network and save
sim.create(netParams=params.netParams, simConfig=params.simConfig)
sim.gatherData()
sim.saveData()
| [
"[email protected]"
] | |
46c205a3f435959086389638a9fd7fefd957308c | 99fa82f29a5b50a5595985acc460a0afaa6099a8 | /app/shopdj/sale/migrations/0004_invoice_total.py | a4f08cde3a2ed9f2afa42d4898d917d64e08dcca | [] | no_license | nnocturnnn/university_rep | a47cce9e29f96e9cc33293c76321e298e7628a4d | 4a8cd42f53dd112640a37ad5ff815ecf09ce1c25 | refs/heads/master | 2023-04-20T09:44:24.144760 | 2021-05-11T16:16:07 | 2021-05-11T16:16:07 | 304,661,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 3.0.5 on 2020-07-02 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sale', '0003_auto_20200701_0535'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='total',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
0d3dcd3a21ccefd0f1a1dfbce2f6cea60b4365f9 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/test/test_checkers.py | 4111f2169895e93fbeeaf8e2819916a1a8a017e1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 31,498 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
from __future__ import absolute_import, division
try:
import crypt
except ImportError:
cryptSkip = 'cannot run without crypt module'
else:
cryptSkip = None
import os
from collections import namedtuple
from io import BytesIO
from zope.interface.verify import verifyObject
from twisted.python import util
from twisted.python.compat import _b64encodebytes
from twisted.python.failure import Failure
from twisted.python.reflect import requireModule
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase, ShadowDatabase
from twisted.test.test_process import MockOS
if requireModule('cryptography') and requireModule('pyasn1'):
dependencySkip = None
from twisted.conch.ssh import keys
from twisted.conch import checkers
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
else:
dependencySkip = "can't run without cryptography and PyASN1"
if getattr(os, 'geteuid', None) is None:
euidSkip = "Cannot run without effective UIDs (questionable)"
else:
euidSkip = None
class HelperTests(TestCase):
"""
Tests for helper functions L{verifyCryptedPassword}, L{_pwdGetByName} and
L{_shadowGetByName}.
"""
skip = cryptSkip or dependencySkip
def setUp(self):
self.mockos = MockOS()
def test_verifyCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{True} if the plaintext password
passed to it matches the encrypted password passed to it.
"""
password = 'secret string'
salt = 'salty'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %r' % (
crypted, password))
def test_verifyCryptedPasswordMD5(self):
"""
L{verifyCryptedPassword} returns True if the provided cleartext password
matches the provided MD5 password hash.
"""
password = 'password'
salt = '$1$salt'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %s' % (
crypted, password))
def test_refuteCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{False} if the plaintext password
passed to it does not match the encrypted password passed to it.
"""
password = 'string secret'
wrong = 'secret string'
crypted = crypt.crypt(password, password)
self.assertFalse(
checkers.verifyCryptedPassword(crypted, wrong),
'%r not supposed to be valid encrypted password for %s' % (
crypted, wrong))
def test_pwdGetByName(self):
"""
L{_pwdGetByName} returns a tuple of items from the UNIX /etc/passwd
database if the L{pwd} module is present.
"""
userdb = UserDatabase()
userdb.addUser(
'alice', 'secrit', 1, 2, 'first last', '/foo', '/bin/sh')
self.patch(checkers, 'pwd', userdb)
self.assertEqual(
checkers._pwdGetByName('alice'), userdb.getpwnam('alice'))
def test_pwdGetByNameWithoutPwd(self):
"""
If the C{pwd} module isn't present, L{_pwdGetByName} returns L{None}.
"""
self.patch(checkers, 'pwd', None)
self.assertIsNone(checkers._pwdGetByName('alice'))
def test_shadowGetByName(self):
"""
L{_shadowGetByName} returns a tuple of items from the UNIX /etc/shadow
database if the L{spwd} is present.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', userdb)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(util, 'os', self.mockos)
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutSpwd(self):
"""
L{_shadowGetByName} returns L{None} if C{spwd} is not present.
"""
self.patch(checkers, 'spwd', None)
self.assertIsNone(checkers._shadowGetByName('bob'))
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
class SSHPublicKeyDatabaseTests(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
skip = euidSkip or dependencySkip
def setUp(self):
self.checker = checkers.SSHPublicKeyDatabase()
self.key1 = _b64encodebytes(b"foobar")
self.key2 = _b64encodebytes(b"eggspam")
self.content = (b"t1 " + self.key1 + b" foo\nt2 " + self.key2 +
b" egg\n")
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.patch(util, 'os', self.mockos)
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser(
b'user', b'password', 1, 2, b'first last',
self.mockos.path.path, b'/bin/shell')
self.checker._userdb = userdb
def test_deprecated(self):
"""
L{SSHPublicKeyDatabase} is deprecated as of version 15.0
"""
warningsShown = self.flushWarnings(
offendingFunctions=[self.setUp])
self.assertEqual(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"twisted.conch.checkers.SSHPublicKeyDatabase "
"was deprecated in Twisted 15.0.0: Please use "
"twisted.conch.checkers.SSHPublicKeyChecker, "
"initialized with an instance of "
"twisted.conch.checkers.UNIXAuthorizedKeysFiles instead.")
self.assertEqual(len(warningsShown), 1)
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword(b"user", b"password")
user.blob = b"foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = b"eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = b"notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0o000)
self.addCleanup(keyFile.chmod, 0o777)
# And restore the right mode when seteuid is called
savedSeteuid = self.mockos.seteuid
def seteuid(euid):
keyFile.chmod(0o777)
return savedSeteuid(euid)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(self.mockos, "seteuid", seteuid)
self.patch(util, 'os', self.mockos)
user = UsernamePassword(b"user", b"password")
user.blob = b"foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEqual(self.mockos.seteuidCalls, [0, 1, 0, 2345])
self.assertEqual(self.mockos.setegidCalls, [2, 1234])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign(b'foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEqual(avatarId, b'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
        d = self.checker.requestAvatarId(None)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
L{SSHPublicKeyDatabase.requestAvatarId} to return a {UnauthorizedLogin}
failure
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateDSA_openssh).sign(b'foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(b'test', None, b'blob', b'sigData', b'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTests(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
skip = dependencySkip
def test_registerChecker(self):
"""
L{SSHProcotolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(), )
self.assertEqual(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
checkers.SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
If a specific interface is passed into
L{SSHProtocolChecker.registerChecker}, that interface should be
registered instead of what the checker specifies in
credentialIntefaces.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(),
IUsernamePassword)
self.assertEqual(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
checkers.SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
L{SSHProtocolChecker.requestAvatarId} should defer to one if its
registered checkers to authenticate a user.
"""
checker = checkers.SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser(b'test', b'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
def _callback(avatarId):
self.assertEqual(avatarId, b'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from _areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = checkers.SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser(b'test', b'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = checkers.SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
The default L{SSHProcotolChecker.areDone} should simply return True.
"""
self.assertTrue(checkers.SSHProtocolChecker().areDone(None))
class UNIXPasswordDatabaseTests(TestCase):
"""
Tests for L{UNIXPasswordDatabase}.
"""
skip = cryptSkip or dependencySkip
def assertLoggedIn(self, d, username):
"""
Assert that the L{Deferred} passed in is called back with the value
'username'. This represents a valid login for this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{Deferred}
"""
result = []
d.addBoth(result.append)
self.assertEqual(len(result), 1, "login incomplete")
if isinstance(result[0], Failure):
result[0].raiseException()
self.assertEqual(result[0], username)
def test_defaultCheckers(self):
"""
L{UNIXPasswordDatabase} with no arguments has checks the C{pwd} database
and then the C{spwd} database.
"""
checker = checkers.UNIXPasswordDatabase()
def crypted(username, password):
salt = crypt.crypt(password, username)
crypted = crypt.crypt(password, '$1$' + salt)
return crypted
pwd = UserDatabase()
pwd.addUser('alice', crypted('alice', 'password'),
1, 2, 'foo', '/foo', '/bin/sh')
# x and * are convention for "look elsewhere for the password"
pwd.addUser('bob', 'x', 1, 2, 'bar', '/bar', '/bin/sh')
spwd = ShadowDatabase()
spwd.addUser('alice', 'wrong', 1, 2, 3, 4, 5, 6, 7)
spwd.addUser('bob', crypted('bob', 'password'),
8, 9, 10, 11, 12, 13, 14)
self.patch(checkers, 'pwd', pwd)
self.patch(checkers, 'spwd', spwd)
mockos = MockOS()
self.patch(util, 'os', mockos)
mockos.euid = 2345
mockos.egid = 1234
cred = UsernamePassword(b"alice", b"password")
self.assertLoggedIn(checker.requestAvatarId(cred), b'alice')
self.assertEqual(mockos.seteuidCalls, [])
self.assertEqual(mockos.setegidCalls, [])
cred.username = b"bob"
self.assertLoggedIn(checker.requestAvatarId(cred), b'bob')
self.assertEqual(mockos.seteuidCalls, [0, 2345])
self.assertEqual(mockos.setegidCalls, [0, 1234])
def assertUnauthorizedLogin(self, d):
"""
Asserts that the L{Deferred} passed in is erred back with an
        L{UnauthorizedLogin} L{Failure}. This represents an invalid login for
this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{None}
"""
self.assertRaises(
checkers.UnauthorizedLogin, self.assertLoggedIn, d, 'bogus value')
def test_passInCheckers(self):
"""
L{UNIXPasswordDatabase} takes a list of functions to check for UNIX
user information.
"""
password = crypt.crypt('secret', 'secret')
userdb = UserDatabase()
userdb.addUser('anybody', password, 1, 2, 'foo', '/bar', '/bin/sh')
checker = checkers.UNIXPasswordDatabase([userdb.getpwnam])
self.assertLoggedIn(
checker.requestAvatarId(UsernamePassword(b'anybody', b'secret')),
b'anybody')
def test_verifyPassword(self):
"""
If the encrypted password provided by the getpwnam function is valid
(verified by the L{verifyCryptedPassword} function), we callback the
C{requestAvatarId} L{Deferred} with the username.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertLoggedIn(checker.requestAvatarId(credential), b'username')
def test_failOnKeyError(self):
"""
If the getpwnam function raises a KeyError, the login fails with an
L{UnauthorizedLogin} exception.
"""
def getpwnam(username):
raise KeyError(username)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_failOnBadPassword(self):
"""
If the verifyCryptedPassword function doesn't verify the password, the
login fails with an L{UnauthorizedLogin} exception.
"""
def verifyCryptedPassword(crypted, pw):
return False
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword(b'username', b'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_loopThroughFunctions(self):
"""
UNIXPasswordDatabase.requestAvatarId loops through each getpwnam
function associated with it and returns a L{Deferred} which fires with
the result of the first one which returns a value other than None.
        Earlier functions whose passwords do not verify are skipped.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam1(username):
return [username, 'not the password']
def getpwnam2(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam1, getpwnam2])
credential = UsernamePassword(b'username', b'username')
self.assertLoggedIn(checker.requestAvatarId(credential), b'username')
def test_failOnSpecial(self):
"""
If the password returned by any function is C{""}, C{"x"}, or C{"*"} it
is not compared against the supplied password. Instead it is skipped.
"""
pwd = UserDatabase()
pwd.addUser('alice', '', 1, 2, '', 'foo', 'bar')
pwd.addUser('bob', 'x', 1, 2, '', 'foo', 'bar')
pwd.addUser('carol', '*', 1, 2, '', 'foo', 'bar')
self.patch(checkers, 'pwd', pwd)
checker = checkers.UNIXPasswordDatabase([checkers._pwdGetByName])
cred = UsernamePassword(b'alice', b'')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword(b'bob', b'x')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword(b'carol', b'*')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
class AuthorizedKeyFileReaderTests(TestCase):
"""
Tests for L{checkers.readAuthorizedKeyFile}
"""
skip = dependencySkip
def test_ignoresComments(self):
"""
L{checkers.readAuthorizedKeyFile} does not attempt to turn comments
into keys
"""
fileobj = BytesIO(b'# this comment is ignored\n'
b'this is not\n'
b'# this is again\n'
b'and this is not')
result = checkers.readAuthorizedKeyFile(fileobj, lambda x: x)
self.assertEqual([b'this is not', b'and this is not'], list(result))
def test_ignoresLeadingWhitespaceAndEmptyLines(self):
"""
L{checkers.readAuthorizedKeyFile} ignores leading whitespace in
lines, as well as empty lines
"""
fileobj = BytesIO(b"""
# ignore
not ignored
""")
result = checkers.readAuthorizedKeyFile(fileobj, parseKey=lambda x: x)
self.assertEqual([b'not ignored'], list(result))
def test_ignoresUnparsableKeys(self):
"""
L{checkers.readAuthorizedKeyFile} does not raise an exception
when a key fails to parse (raises a
L{twisted.conch.ssh.keys.BadKeyError}), but rather just keeps going
"""
def failOnSome(line):
if line.startswith(b'f'):
raise keys.BadKeyError('failed to parse')
return line
fileobj = BytesIO(b'failed key\ngood key')
result = checkers.readAuthorizedKeyFile(fileobj,
parseKey=failOnSome)
self.assertEqual([b'good key'], list(result))
class InMemorySSHKeyDBTests(TestCase):
"""
Tests for L{checkers.InMemorySSHKeyDB}
"""
skip = dependencySkip
def test_implementsInterface(self):
"""
L{checkers.InMemorySSHKeyDB} implements
L{checkers.IAuthorizedKeysDB}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'key']})
verifyObject(checkers.IAuthorizedKeysDB, keydb)
def test_noKeysForUnauthorizedUser(self):
"""
If the user is not in the mapping provided to
L{checkers.InMemorySSHKeyDB}, an empty iterator is returned
by L{checkers.InMemorySSHKeyDB.getAuthorizedKeys}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'keys']})
self.assertEqual([], list(keydb.getAuthorizedKeys(b'bob')))
def test_allKeysForAuthorizedUser(self):
"""
If the user is in the mapping provided to
L{checkers.InMemorySSHKeyDB}, an iterator with all the keys
is returned by L{checkers.InMemorySSHKeyDB.getAuthorizedKeys}
"""
keydb = checkers.InMemorySSHKeyDB({b'alice': [b'a', b'b']})
self.assertEqual([b'a', b'b'], list(keydb.getAuthorizedKeys(b'alice')))
class UNIXAuthorizedKeysFilesTests(TestCase):
"""
Tests for L{checkers.UNIXAuthorizedKeysFiles}.
"""
skip = dependencySkip
def setUp(self):
mockos = MockOS()
mockos.path = FilePath(self.mktemp())
mockos.path.makedirs()
self.userdb = UserDatabase()
self.userdb.addUser(b'alice', b'password', 1, 2, b'alice lastname',
mockos.path.path, b'/bin/shell')
self.sshDir = mockos.path.child('.ssh')
self.sshDir.makedirs()
authorizedKeys = self.sshDir.child('authorized_keys')
authorizedKeys.setContent(b'key 1\nkey 2')
self.expectedKeys = [b'key 1', b'key 2']
def test_implementsInterface(self):
"""
L{checkers.UNIXAuthorizedKeysFiles} implements
L{checkers.IAuthorizedKeysDB}.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb)
verifyObject(checkers.IAuthorizedKeysDB, keydb)
def test_noKeysForUnauthorizedUser(self):
"""
If the user is not in the user database provided to
L{checkers.UNIXAuthorizedKeysFiles}, an empty iterator is returned
by L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys}.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual([], list(keydb.getAuthorizedKeys('bob')))
def test_allKeysInAllAuthorizedFilesForAuthorizedUser(self):
"""
If the user is in the user database provided to
L{checkers.UNIXAuthorizedKeysFiles}, an iterator with all the keys in
C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2} is returned
by L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys}.
"""
self.sshDir.child('authorized_keys2').setContent(b'key 3')
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys + [b'key 3'],
list(keydb.getAuthorizedKeys(b'alice')))
def test_ignoresNonexistantFile(self):
"""
L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys} returns only
the keys in C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2}
if they exist.
"""
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys,
list(keydb.getAuthorizedKeys(b'alice')))
def test_ignoresUnreadableFile(self):
"""
L{checkers.UNIXAuthorizedKeysFiles.getAuthorizedKeys} returns only
the keys in C{~/.ssh/authorized_keys} and C{~/.ssh/authorized_keys2}
if they are readable.
"""
self.sshDir.child('authorized_keys2').makedirs()
keydb = checkers.UNIXAuthorizedKeysFiles(self.userdb,
parseKey=lambda x: x)
self.assertEqual(self.expectedKeys,
list(keydb.getAuthorizedKeys(b'alice')))
_KeyDB = namedtuple('KeyDB', ['getAuthorizedKeys'])
class _DummyException(Exception):
"""
Fake exception to be used for testing.
"""
pass
class SSHPublicKeyCheckerTests(TestCase):
"""
Tests for L{checkers.SSHPublicKeyChecker}.
"""
skip = dependencySkip
def setUp(self):
self.credentials = SSHPrivateKey(
b'alice', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign(b'foo'))
self.keydb = _KeyDB(lambda _: [
keys.Key.fromString(keydata.publicRSA_openssh)])
self.checker = checkers.SSHPublicKeyChecker(self.keydb)
def test_credentialsWithoutSignature(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that do not have a signature fails with L{ValidPublicKey}.
"""
self.credentials.signature = None
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
ValidPublicKey)
def test_credentialsWithBadKey(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that have a bad key fails with L{keys.BadKeyError}.
"""
self.credentials.blob = b''
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
keys.BadKeyError)
def test_credentialsNoMatchingKey(self):
"""
If L{checkers.IAuthorizedKeysDB.getAuthorizedKeys} returns no keys
that match the credentials,
L{checkers.SSHPublicKeyChecker.requestAvatarId} fails with
L{UnauthorizedLogin}.
"""
self.credentials.blob = keydata.publicDSA_openssh
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
def test_credentialsInvalidSignature(self):
"""
Calling L{checkers.SSHPublicKeyChecker.requestAvatarId} with
credentials that are incorrectly signed fails with
L{UnauthorizedLogin}.
"""
self.credentials.signature = (
keys.Key.fromString(keydata.privateDSA_openssh).sign(b'foo'))
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
def test_failureVerifyingKey(self):
"""
If L{keys.Key.verify} raises an exception,
L{checkers.SSHPublicKeyChecker.requestAvatarId} fails with
L{UnauthorizedLogin}.
"""
def fail(*args, **kwargs):
raise _DummyException()
self.patch(keys.Key, 'verify', fail)
self.failureResultOf(self.checker.requestAvatarId(self.credentials),
UnauthorizedLogin)
self.flushLoggedErrors(_DummyException)
def test_usernameReturnedOnSuccess(self):
"""
L{checker.SSHPublicKeyChecker.requestAvatarId}, if successful,
callbacks with the username.
"""
d = self.checker.requestAvatarId(self.credentials)
self.assertEqual(b'alice', self.successResultOf(d))
| [
"[email protected]"
] | |
504b693788900fa8fe43fab87c1075ce5593cf3b | 6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4 | /dataInterSrvKm/bak/20200113版本半接口半直连/billBoli600.py | 729c1391bc030f8184f66d7eb48bc789ce7a4078 | [] | no_license | wildmanwang/proDataInter | 8c2b65fa96ad45b21165d997b1769a28e12fc42a | f5a1f1fb195c66bf586bd999465c7e3b16453369 | refs/heads/master | 2023-06-07T11:57:16.763251 | 2023-06-03T08:54:56 | 2023-06-03T08:54:56 | 157,559,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,164 | py | # -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
from superBillDCB import SuperBillDCB
from interMssql import MSSQL
import time, json
class BillBoli600(SuperBillDCB):
def __init__(self, sett):
super().__init__(sett)
        self.station = [1,2,3,4,5,6,7,8] # available base stations
self.db = MSSQL(self.sett.serverHost, self.sett.serverUser, self.sett.serverPwd, self.sett.serverDb)
def _getStation(self):
"""
        Acquire a base station number
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
try:
conn = self.db.GetConnect()
cur = conn.cursor()
if not cur:
rtnData["info"] = "基础数据获取失败:{name}数据库[{db}]连接失败".format(name=self.sett.serverName, db=self.sett.serverDb)
else:
lsSql = "select sys_var_value from sys_t_system where sys_var_id = 'dcb_stationList'"
cur.execute(lsSql)
rsData = cur.fetchall()
if len(rsData) > 0:
staStr = rsData[0][0]
else:
staStr = "[]"
staList = json.loads(staStr)
if len(staList) == 0:
rtnData["info"] = "基站繁忙,请稍后再试"
else:
rtnData["dataNumber"] = staList.pop(0)
lsSql = "update sys_t_system set sys_var_value = '{value}' where sys_var_id = 'dcb_stationList'".format(value=json.dumps(staList))
cur.execute(lsSql)
conn.commit()
rtnData["result"] = True
except Exception as e:
rtnData["dataNumber"] = 0
rtnData["info"] = str(e)
finally:
conn.close()
return rtnData
def _putStation(self, station):
"""
        Release a base station number
:param station:
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
try:
conn = self.db.GetConnect()
cur = conn.cursor()
if not cur:
rtnData["info"] = "基础数据获取失败:{name}数据库[{db}]连接失败".format(name=self.sett.serverName, db=self.sett.serverDb)
else:
lsSql = "select sys_var_value from sys_t_system where sys_var_id = 'dcb_stationList'"
cur.execute(lsSql)
rsData = cur.fetchall()
if len(rsData) > 0:
staStr = rsData[0][0]
staList = json.loads(staStr)
staList.append(station)
staList = list(set(staList))
staList.sort()
lsSql = "update sys_t_system set sys_var_value = '{value}' where sys_var_id = 'dcb_stationList'".format(value=json.dumps(staList))
cur.execute(lsSql)
conn.commit()
rtnData["result"] = True
else:
rtnData["info"] = "获取基站参数失败"
except Exception as e:
rtnData["info"] = str(e)
finally:
conn.close()
return rtnData
def userLogin(self, data):
"""
        Log in
        :param data:{
                "terminal":"", # ordering terminal number (3 chars)
                "factory":"", # factory serial number (10 chars)
                "user":"", # staff ID (4 chars)
                "password":"" # password (8 chars)
}
:return:
"""
rtnData = {
"result":True, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
        # Acquire a base station
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
            # Validate parameters
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])[-10:]
if len(self.sett.loginUser) > 0:
data["user"] = self.sett.loginUser
data["password"] = self.sett.loginPassword
elif "user" not in data:
raise Exception("请传入参数:用户及密码")
sUser = (data["user"] + chr(32) * 5)[:4]
sPassword = (data["password"] + chr(32) * 8)[:8]
            # Build the login request data
sCon = []
sCon.append("DL " + chr(32) + sTerminal)
sCon.append(sFactory + chr(32) + sUser + chr(32) + sPassword)
# sCon.append(sUser + chr(32) + sPassword)
            # Write the request to file and notify the catering service
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
            # Fetch the execution result
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "登录", sCon, 0, "", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
            # Release the base station
if "iStation" in locals():
self._putStation(iStation)
        # Return the execution result
return rtnData
def billOpen(self, data):
"""
        Open a table
        :param data:{
                "terminal":"", # ordering terminal number (3 chars)
                "table":"", # table number (4 chars)
                "waiter":"", # waiter number (5 chars)
                "guestNum":0, # number of guests (2 digits)
                "factory":"" # factory serial number (last 7 of 10 chars)
}
:return:
"""
rtnData = {
"result":True, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
        # Acquire a base station
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
            # Validate parameters
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if "table" in data:
sTable = (data["table"] + chr(32) * 4)[:4]
else:
rtnData["result"] = False
rtnData["info"] = "请传入桌台号"
if "waiter" in data:
sWaiter = (data["waiter"] + chr(32) * 5)[:5]
else:
sWaiter = chr(32) * 5
if "guestNum" in data:
sGuestNum = ("0" + str(int(data["guestNum"])))[-2:]
else:
sGuestNum = "01"
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])
            # Build the open-table request data
if rtnData["result"]:
sCon = []
sCon.append("KT " + chr(32) + sTerminal)
sCon.append(sTable + chr(32) + sGuestNum + chr(32) + sWaiter + chr(32) + sFactory[-7:] + chr(
32) + time.strftime("%H:%M:%S"))
            # Send the open-table request and get the response
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
            # Fetch the execution result
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "开台", sCon, 1, "开台成功", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
# 释放基站
if "iStation" in locals():
self._putStation(iStation)
        # Return the execution result
return rtnData
def billPut(self, data):
"""
        Order dishes
        :param data:{
                "terminal":"", # ordering terminal number (3 chars)
                "table":"", # table number + bill serial number (4+3=7 chars)
                "factory":"", # factory serial number (4+4+2=10 chars)
                "remark":"", # remark for the whole order (12 chars)
                "item":[{
                    "food":"", # dish number (5 chars)
                    "qty":1, # quantity (4 chars)
                    "made":"", # preparation method (12 chars)
                    "suit":"", # set-meal number (2 chars)
                    "waitUp":0 # wait-to-serve (hold) flag (1 char)
}]
}
:return:
"""
rtnData = {
"result":False, # 逻辑控制 True/False
"dataString":"", # 字符串
"dataNumber":0, # 数字
"info":"", # 信息
"entities": {}
}
        # Acquire a base station
rtnData = self._getStation()
if rtnData["result"]:
iStation = rtnData["dataNumber"]
try:
            # Validate parameters
if len(self.sett.softNumber) > 0:
data["terminal"] = self.sett.softNumber
elif "terminal" not in data:
raise Exception("请传入参数:点菜宝编号")
sTerminal = (data["terminal"] + chr(32) * 3)[:3]
if "table" in data:
sTable = (data["table"] + chr(32) * 7)[:7]
else:
rtnData["result"] = False
rtnData["info"] = "请传入桌台号"
if len(self.sett.serialNumber) > 0:
data["factory"] = self.sett.serialNumber
elif "factory" not in data:
raise Exception("请传入参数:点菜宝序列号")
sFactory = ("0" * 10 + data["factory"])
if "remark" in data:
sRemark = (data["remark"] + chr(32) * 12)[:12]
else:
sRemark = chr(32) * 12
sFlow = time.strftime("%H:%M:%S")
            # Build the ordering request data
if rtnData["result"]:
sCon = []
sCon.append("DC " + chr(32) + sTerminal)
sCon.append(sTable + chr(32) + sFactory[:4] + chr(32) + chr(32) * 6 + sRemark + chr(32) + chr(
32) * 4 + sFlow + chr(32) + sFactory[4:8] + chr(32) + sFactory[8:10])
for line in data["item"]:
sFood = (line["food"] + chr(32) * 5)[:5]
sQty = (chr(32) * 4 + str(line["qty"]))[-4:]
if "made" in line:
sMade = (line["made"] + chr(32) * 12)[:12]
else:
sMade = chr(32) * 12
if "suit" in line:
suit = (line["suit"] + chr(32) * 2)[:2]
else:
suit = chr(32) * 2
if "waitUp" in line:
waitUp = (str(line["waitUp"]) + "0")[-1:]
else:
waitUp = "0"
sCon.append(
sTable + chr(32) + sFood + chr(32) + sQty + chr(32) + sMade + chr(32) + suit + chr(32) + waitUp)
            # Write the request to file and notify the catering service
if rtnData["result"]:
rtnData = self._writeBusiData(iStation, sCon)
            # Fetch the execution result
if rtnData["result"]:
rtnData = self._readRtnData(iStation, "点菜", sCon, 1, "点菜成功", 1)
except Exception as e:
rtnData["result"] = False
rtnData["info"] = str(e)
finally:
# 释放基站
if "iStation" in locals():
self._putStation(iStation)
        # Return the execution result
return rtnData
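# A minimal usage sketch (not part of the original module; `sett` is assumed to be the
# settings object expected by SuperBillDCB, and _writeBusiData/_readRtnData are inherited
# from that parent class; field values below are placeholders):
#
#   bill = BillBoli600(sett)
#   if bill.userLogin({})["result"]:          # terminal/factory/user taken from sett
#       opened = bill.billOpen({"table": "A001", "waiter": "001", "guestNum": 2})
#       if opened["result"]:
#           bill.billPut({"table": "A001001", "item": [{"food": "10001", "qty": 1}]})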
| [
"[email protected]"
] | |
080121153af9a45d9084cd5f5233cdfb821defe7 | 23af1e2b1f29be62926ed6a8e39b4462f07f5f2b | /atcoder.jp/abc086/abc086_b/Main.py | 2caffef31e6e42003299d780de9ca6f6f538b840 | [] | no_license | IKDrocket/Atcoder | 8ef382577a377a8f35890b24a49f681f00f2f047 | fc19379de2ddf62a61b67eda33bf8aa29d503685 | refs/heads/main | 2023-02-09T11:58:00.353304 | 2021-01-02T12:06:20 | 2021-01-02T12:06:20 | 318,876,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | a,b = input().split()
num = int(a+b)
for i in range(1,10101):
ans = i*i
if num == ans:
print("Yes")
break
else:
print("No")
| [
"[email protected]"
] | |
c31db9e2643724ed66331b721d6a77560de6209a | 06167f625464c898ac95e752694a5931b9a55a55 | /src/admission/migrations/0001_initial.py | bacece5228ade3f6e66d8337c4fae54aa72fdb6d | [] | no_license | nazmul629/school_management_system | 16e2003b652b14174d6f59b4682ca366275f3207 | d0ff759645d9ba8f88d2aa63dbc867e7713455ed | refs/heads/master | 2021-06-19T18:06:56.539454 | 2019-04-20T12:35:24 | 2019-04-20T12:35:24 | 182,307,917 | 1 | 0 | null | 2021-03-20T08:15:23 | 2019-04-19T18:22:11 | CSS | UTF-8 | Python | false | false | 1,699 | py | # Generated by Django 2.0 on 2019-04-19 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='class_section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('section', models.CharField(max_length=10, unique=True)),
],
),
migrations.CreateModel(
name='Schoolallclass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Class', models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
name='StudentInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Student_name', models.CharField(max_length=50)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=10)),
('roll', models.IntegerField(unique=True)),
('fathers_name', models.CharField(max_length=50)),
('mothers_name', models.CharField(max_length=50)),
('address', models.TextField()),
('mobile', models.CharField(max_length=16)),
('Class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admission.Schoolallclass')),
],
),
]
| [
"[email protected]"
] | |
6e7a8849b45d4e7ef435085fefc41204dd11f94a | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Maxout_0.py | 34ed3c5b0baf796738184d4faee74db735487de9 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 608 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Maxout_0():
"""test Maxout_0"""
jit_case = JitTrans(case=yml.get_case_info("Maxout_0"))
jit_case.jit_run()
| [
"[email protected]"
] | |
0b923417f2c83d1b943f897a0e067b827cc724c3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/6d0b0f8338f7ffbc761ddc05cbdc620a99901074-<format_item>-fix.py | a42ce4e3bcf3cd32eb44b6e67fee46a95e4f787a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | def format_item(self, item):
d = item.as_dict()
containers = d['containers']
ports = d['ip_address']['ports']
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
for port_index in range(len(ports)):
ports[port_index] = ports[port_index]['port']
for container_index in range(len(containers)):
old_container = containers[container_index]
new_container = {
'name': old_container['name'],
'image': old_container['image'],
'memory': old_container['resources']['requests']['memory_in_gb'],
'cpu': old_container['resources']['requests']['cpu'],
'ports': [],
}
for port_index in range(len(old_container['ports'])):
new_container['ports'].append(old_container['ports'][port_index]['port'])
containers[container_index] = new_container
d = {
'id': d['id'],
'resource_group': resource_group,
'name': d['name'],
'os_type': d['os_type'],
'ip_address': ('public' if (d['ip_address']['type'] == 'Public') else 'none'),
'ports': ports,
'location': d['location'],
'containers': containers,
'tags': d.get('tags', None),
}
    return d
| [
"[email protected]"
] | |
96fdbd1d69014c062a573ce6737c753189550b8e | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-CoreServices/PyObjCTest/test_textutils.py | 2ff838ec467acf264133c95ae598c609539c4881 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | from PyObjCTools.TestSupport import *
import CoreServices
class TestTextUtils (TestCase):
def assert_not_wrapped(self, name):
self.assertTrue(not hasattr(CoreServices, name), "%r exposed in bindings"%(name,))
def test_not_wrapped(self):
self.assert_not_wrapped('ScriptRunStatus')
self.assert_not_wrapped('BreakTable')
self.assert_not_wrapped('NBreakTable')
self.assert_not_wrapped('Munger')
self.assert_not_wrapped('NewString')
self.assert_not_wrapped('SetString')
self.assert_not_wrapped('GetString')
self.assert_not_wrapped('GetIndString')
self.assert_not_wrapped('FindWordBreaks')
self.assert_not_wrapped('LowercaseText')
self.assert_not_wrapped('UppercaseText')
self.assert_not_wrapped('StripDiacritics')
self.assert_not_wrapped('UppercaseStripDiacritics')
self.assert_not_wrapped('FindScriptRun')
self.assert_not_wrapped('UpperString')
self.assert_not_wrapped('upperstring')
self.assert_not_wrapped('UprString')
self.assert_not_wrapped('c2pstrcpy')
self.assert_not_wrapped('p2cstrcpy')
self.assert_not_wrapped('CopyPascalStringToC')
self.assert_not_wrapped('CopyCStringToPascal')
self.assert_not_wrapped('c2pstr')
self.assert_not_wrapped('C2PStr')
self.assert_not_wrapped('p2cst')
self.assert_not_wrapped('P2CStr')
self.assert_not_wrapped('p2cstr')
self.assert_not_wrapped('c2pstr')
self.assert_not_wrapped('C2PStr')
self.assert_not_wrapped('P2CStr')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7226b9cda9c338dffe84746f1f19fd9278d6e255 | 15e818aada2b18047fa895690bc1c2afda6d7273 | /lib/python/h5log_loader.py | 5cce0d76a64f14935c96f0fea66de17adea3cff0 | [
"Apache-2.0"
] | permissive | ghomsy/makani | 4ee34c4248fb0ac355f65aaed35718b1f5eabecf | 818ae8b7119b200a28af6b3669a3045f30e0dc64 | refs/heads/master | 2023-01-11T18:46:21.939471 | 2020-11-10T00:23:31 | 2020-11-10T00:23:31 | 301,863,147 | 0 | 0 | Apache-2.0 | 2020-11-10T00:23:32 | 2020-10-06T21:51:21 | null | UTF-8 | Python | false | false | 27,053 | py | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a more user friendly interface to Makani HDF5 logs.
Typical use case:
h5 = H5LogLoader()
h5.Open(['file1.h5', 'file2.h5'])
# Dictionary interface.
plt.figure()
t = h5.capture_time['FcA/FlightComputerSensor']
d = h5['FcA/FlightComputerSensor/aux/mag']
plt.plot(t, d)
# Function call interface.
plt.figure()
for f in h5.GetFields(r'^Servo[^/]+/ServoStatus$'):
t = h5.GetCaptureTime(f)
d = h5.GetData(f + '/angle_measured')
plt.plot(t, d, label=h5.GetNodeName(f))
plt.legend()
"""
# NOTICE: To ease analysis, please do not depend on the Makani repository!
import collections
import re
import sys
import h5py
import numpy as np
def PrintProgress(count, total, suffix=''):
"""Print a progress bar to stdout."""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percent = 100.0 * count / float(total)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] % 4.1f%% ...%s\r' % (bar, percent, suffix))
if count == total:
sys.stdout.write('\nDone!\n')
sys.stdout.flush()
def _H5BuildDtypeTree(dtype, prefix=''):
"""Recursively build an array of paths, starting from an HDF5 dtype."""
values = [str(prefix).lstrip('/')]
if dtype.fields:
for key, item in dtype.fields.iteritems():
values += _H5BuildDtypeTree(item[0].base, prefix + '/' + key)
return values
def _H5BuildGroupTree(group, prefix=''):
"""Recursively build an array of paths to each HDF5 dataset."""
values = []
for key, item in group.iteritems():
if isinstance(item, h5py.Dataset):
values.append(str(prefix + '/' + key).lstrip('/'))
elif isinstance(item, h5py.Group):
values += _H5BuildGroupTree(item, prefix + '/' + key)
return values
def _H5BuildGroupAndDtypeTree(group, prefix=''):
"""Recursively build an array of paths, starting from an HDF5 group."""
values = []
for key, item in group.iteritems():
if isinstance(item, h5py.Dataset):
values += _H5BuildDtypeTree(item.dtype.base, prefix + '/' + key)
elif isinstance(item, h5py.Group):
values += _H5BuildGroupAndDtypeTree(item, prefix + '/' + key)
return values
def _NormalizeFileNames(filenames):
if filenames is None:
filenames = []
elif not isinstance(filenames, list):
filenames = [filenames]
return filenames
class _H5DataCache(object):
"""Provides a simple cache interface to H5*Cache classes."""
def __init__(self):
self._data_cache = {} # Map path to cached data.
def ClearCached(self):
"""Free cached memory."""
self._data_cache = {}
def GetCached(self, path):
"""Get data and cache the result."""
data = self._data_cache.get(path)
if data is None:
data = self.GetData(path)
self._data_cache[path] = data
return np.copy(data) # Copy data to prevent changes to cached values.
def GetData(self, path): # pylint: disable=unused-argument
"""Get data without caching the result."""
raise NotImplementedError
class _H5DataLog(object):
"""Load HDF5 files."""
def __init__(self):
# Abstract HDF5 interface to improve access performance.
self._data_logs = [] # A list of HDF5 file objects.
self._data_paths = {} # A mapping from path to HDF5 data path.
def __del__(self):
self.Close()
def Open(self, filenames):
"""Open HDF5 log files.
Args:
filenames: A list of log HDF5 files, sorted in time.
"""
# Python's garbage collection does not always work well with HDF5 data
# structures. Close all files before reopening them again.
self.Close()
# Load HDF5 files as read-only.
self._data_logs = [h5py.File(f, 'r') for f in filenames]
# Index HDF5 data structure to improve path lookup performance.
paths = set()
for d in self._data_logs:
paths |= set(_H5BuildGroupAndDtypeTree(d))
self._data_paths = {self._GetShortPath(p): p for p in paths}
def _GetShortPath(self, path):
"""Remove overly verbose prefixes."""
return path.replace('kAioNode', '').replace('kMessageType', '')
def Save(self, filename, verbose=False):
"""Save data from all input files to a single, merged HDF5."""
with h5py.File(filename, 'w') as fp:
# Build paths for each HDF5 dataset.
paths = set()
for d in self._data_logs:
paths |= set(_H5BuildGroupTree(d))
dataset_paths = {self._GetShortPath(p): p for p in paths}
dataset_count = len(dataset_paths)
i = 0
for short_path, path in dataset_paths.iteritems():
fp.create_dataset(path, data=self.GetData(short_path))
i += 1
if verbose:
PrintProgress(i, dataset_count, 'Concatenating HDF5 datasets')
def Close(self):
"""Close all HDF5 log files and free associated memory."""
while self._data_logs:
d = self._data_logs.pop()
d.close()
self.__init__()
def GetData(self, path):
"""Load data from HDF5 structure as data[field 0][field 1] ... [field N]."""
arrays = []
split_path = self._data_paths[path].split('/')
for d in self._data_logs:
try:
for p in split_path:
d = d[p]
arrays.append(d)
except KeyError:
pass
if len(arrays) == 1:
return arrays[0]
return np.concatenate(arrays, axis=0)
def GetPathsRegex(self, re_match, re_sub=r'\g<0>'):
"""Get a list of paths matching the given regex pattern."""
expr = re.compile(re_match)
paths = set()
for path in self._data_paths:
match = expr.match(path)
if match:
paths.add(match.expand(re_sub))
return sorted(list(paths))
class _H5AioTimeCache(_H5DataCache):
"""Load and cache local node time."""
def __init__(self, loader):
super(_H5AioTimeCache, self).__init__()
self._loader = loader
self._aio_time_offset = collections.defaultdict(float)
def GetData(self, path):
"""Get the local node time associated with the given path."""
path = self._loader.GetAioHeaderPath(path)
data = self._loader.GetData(path + '/timestamp').astype(long)
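    # Unwrap rollovers of the 32-bit microsecond timestamp counter before scaling to seconds.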
for i in np.where(np.diff(data) < 0)[0]:
data[i + 1:] += 2**32
return data.astype(float) * 1e-6 - self._aio_time_offset[path]
def GetOffset(self, path):
"""Get the local node time offset assicated with the given path."""
path = self._loader.GetAioHeaderPath(path)
return self._aio_time_offset[path]
def SetOffset(self, path, offset):
"""Set the local node time offset assicated with the given path."""
path = self._loader.GetAioHeaderPath(path)
if path in self._data_cache:
self._data_cache[path] += self._aio_time_offset[path] - offset
self._aio_time_offset[path] = offset
def ShiftOffset(self, path, delta):
"""Shift the local node time offset assicated with the given path."""
offset = self.GetOffset(path) + delta
self.SetOffset(path, offset)
class _H5CaptureTimeCache(_H5DataCache):
"""Load and cache capture time."""
def __init__(self, loader):
super(_H5CaptureTimeCache, self).__init__()
self._loader = loader
self._capture_time_offset = 0 # Offset common to all datasets.
def GetData(self, path):
"""Get the capture time associated with the given path."""
path = self._loader.GetCaptureHeaderPath(path)
tv_sec = self._loader.GetData(path + '/tv_sec').astype(float)
tv_usec = self._loader.GetData(path + '/tv_usec').astype(float)
return tv_sec + tv_usec * 1e-6 - self._capture_time_offset
def GetOffset(self):
"""Get the global capture time offset."""
return self._capture_time_offset
def SetOffset(self, offset):
"""Set the global capture time offset."""
for t in self._data_cache.itervalues():
t += self._capture_time_offset - offset
self._capture_time_offset = offset
def ShiftOffset(self, delta):
"""Shift the global capture time offset."""
offset = self.GetOffset() + delta
self.SetOffset(offset)
class _H5GpsTimeCache(_H5DataCache):
"""Load and cache GPS time."""
def __init__(self, loader):
super(_H5GpsTimeCache, self).__init__()
self._loader = loader
self._gps_time_offset = 0 # Offset common to all datasets.
self._gps_time_cal = None # Calibration from capture time to GPS time.
def GetData(self, path):
"""Get the GPS time associated with the given path."""
if self._gps_time_cal is None:
t_cap = []
t_gps = []
loader = self._loader
fields = loader.GetPathsRegex(
r'^messages/[^/]+/NovAtelObservations/message$')
for f in fields:
time_status = loader.GetData(f + '/range/timestamp/time_status')
time_of_week_ms = loader.GetData(f + '/range/timestamp/tow').astype(int)
pps_latency_us = loader.GetData(f + '/pps_latency_usec').astype(int)
# See NovAtel OEM6 docs for "GPS Reference Time Status" on page 35.
i = np.where(time_status >= 100)[0]
# Validate time-of-week range.
gps_week_ms = 7 * 24 * 3600 * 1000
i = i[np.where(time_of_week_ms[i] < gps_week_ms)[0]]
# Validate PPS latency range.
i = i[np.where(0 < pps_latency_us[i])[0]]
i = i[np.where(pps_latency_us[i] < 1000 * 1000)[0]]
# Remove invalid indices.
cap_time = loader.GetCaptureTime(f)[i]
time_of_week_ms = time_of_week_ms[i]
pps_latency_us = pps_latency_us[i]
# Handle GPS week rollovers.
for i in np.where(np.diff(time_of_week_ms) < 0)[0]:
time_of_week_ms[i + 1:] += gps_week_ms
# To communicate GPS time precisely, the GPS receiver provides a pulse
# per second interrupt signal that occurs on the GPS time-of-week [ms]
# second transition (i.e., when time-of-week % 1000 == 0). The GPS
# receiver also transmits the time-of-week value in each message. We
# can then relate the reception time of any message to the time of
# validity by measuring the time between the PPS interrupt and message
# reception.
# Data D2 valid
# PPS | Data D2 received PPS
# | | | |
# --D19!-D0-.-D1-.-D2-.-D3-. ... .-D18.-D19!-D0-.-D1-.-D2-- ...
# | |-| Transport delay
# |<--------->| PPS latency
#
transport_delay_us = pps_latency_us - (time_of_week_ms % 1000) * 1000
# Compute times.
t_cap.append(cap_time)
t_gps.append(time_of_week_ms * 1e-3 + transport_delay_us * 1e-6)
t_cap = np.concatenate(t_cap)
t_gps = np.concatenate(t_gps)
# Reject outliers. Loop multiple times to improve estimate.
for _ in range(3):
# Compute linear fit coefficients: (gps_time) = m * (capture_time) + b.
p = np.polyfit(t_cap, t_gps, 1)
# Compute error in linear fit: (delta) = (measurement) - (estimate).
delta = t_gps - np.polyval(p, t_cap)
# Find data with error less than 3 sigma.
i = np.where(np.abs(delta) < 3.0 * np.std(delta))
t_cap = t_cap[i]
t_gps = t_gps[i]
self._gps_time_cal = np.polyfit(t_cap, t_gps, 1)
# Evaluate linear fit: (gps_time) = m * (capture_time) + b.
return np.polyval(
self._gps_time_cal,
self._loader.GetCaptureTime(path)) - self._gps_time_offset
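  # Worked example of the transport-delay computation above (numbers are illustrative):
  # with time_of_week_ms % 1000 == 789 the last PPS fired 789 ms before the data's time of
  # validity; if pps_latency_us == 800000 (0.8 s from that PPS to message reception), then
  # transport_delay_us = 800000 - 789 * 1000 = 11000, i.e. the message arrived 11 ms after
  # its time of validity.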
def GetOffset(self):
"""Get the global GPS time offset."""
return self._gps_time_offset
def SetOffset(self, offset):
"""Set the global GPS time offset."""
for t in self._data_cache.itervalues():
t += self._gps_time_offset - offset
self._gps_time_offset = offset
def ShiftOffset(self, delta):
"""Shift the global GPS time offset."""
offset = self.GetOffset() + delta
self.SetOffset(offset)
class H5DataLoader(object):
"""Load and cache log data."""
def __init__(self, filenames=None):
self._filenames = _NormalizeFileNames(filenames)
self._data_log = _H5DataLog()
self._aio_time_cache = _H5AioTimeCache(self)
self._capture_time_cache = _H5CaptureTimeCache(self)
self._gps_time_cache = _H5GpsTimeCache(self)
self._relative_time_cache = _H5CaptureTimeCache(self)
def __enter__(self):
self.Open(self._filenames)
return self
def __exit__(self, *unused_args):
self.Close()
def Open(self, filenames=None):
"""Open HDF5 log files.
Args:
filenames: A list of log HDF5 files, sorted in time.
"""
self.Close()
if filenames is not None:
self._filenames = _NormalizeFileNames(filenames)
if self._filenames is not None:
self._data_log.Open(self._filenames)
if self._data_log.GetPathsRegex('^info/min_tv_[u]?sec'):
min_sec = self._data_log.GetData('info/min_tv_sec').astype(float)
min_usec = self._data_log.GetData('info/min_tv_usec').astype(float)
offset = min_sec + min_usec * 1e-6
self._relative_time_cache.SetOffset(offset)
def Save(self, filename, verbose=False):
"""Save data from all input files to a single, merged HDF5."""
self._data_log.Save(filename, verbose)
def Close(self):
"""Close all HDF5 log files and free associated memory."""
self._data_log.Close()
self._filenames = []
self.ClearCached()
def ClearCached(self):
"""Free cached memory."""
for t in self.__dict__.values():
if isinstance(t, _H5DataCache):
t.ClearCached()
def GetData(self, path):
"""Get data associated with the given path."""
return self._data_log.GetData(path)
def GetAioTime(self, path):
"""Get the local node time associated with the given path."""
return self._aio_time_cache.GetCached(self.GetAioHeaderPath(path))
def GetAioTimeOffset(self, path):
"""Get the local node time offset associated with the given path."""
return self._aio_time_cache.GetOffset(self.GetAioHeaderPath(path))
def SetAioTimeOffset(self, path, offset):
"""Set the local node time offset associated with the given path."""
self._aio_time_cache.SetOffset(self.GetAioHeaderPath(path), offset)
def ShiftAioTimeOffset(self, path, delta):
"""Shift the local node time offset associated with the given path."""
self._aio_time_cache.ShiftOffset(self.GetAioHeaderPath(path), delta)
def GetCaptureTime(self, path):
"""Get the capture time associated with the given path."""
return self._capture_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetCaptureTimeOffset(self):
"""Get the global capture time offset."""
return self._capture_time_cache.GetOffset()
def SetCaptureTimeOffset(self, offset):
"""Set the global capture time offset."""
self._capture_time_cache.SetOffset(offset)
def ShiftCaptureTimeOffset(self, delta):
"""Shift the global capture time offset."""
self._capture_time_cache.ShiftOffset(delta)
def GetGpsTime(self, path):
"""Get the GPS time associated with the given path."""
return self._gps_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetGpsTimeOffset(self):
"""Get the global GPS time offset."""
return self._gps_time_cache.GetOffset()
def SetGpsTimeOffset(self, offset):
"""Set the global GPS time offset."""
self._gps_time_cache.SetOffset(offset)
def ShiftGpsTimeOffset(self, delta):
"""Shift the global GPS time offset."""
self._gps_time_cache.ShiftOffset(delta)
def GetRelativeTime(self, path):
"""Get the relative time associated with the given path."""
return self._relative_time_cache.GetCached(self.GetCaptureHeaderPath(path))
def GetRelativeTimeOffset(self):
"""Get the global relative time offset."""
return self._relative_time_cache.GetOffset()
def SetRelativeTimeOffset(self, offset):
"""Set the global relative time offset."""
self._relative_time_cache.SetOffset(offset)
def ShiftRelativeTimeOffset(self, delta):
"""Shift the global relative time offset."""
self._relative_time_cache.ShiftOffset(delta)
def GetAioHeaderPath(self, path):
"""Get the AioHeader base path from the given path."""
if not self.IsMessagePath(path):
raise ValueError('Invalid path specified:', path)
return '/'.join(path.split('/')[0:3] + ['aio_header'])
def GetCaptureHeaderPath(self, path):
"""Get the CaptureHeader base path from the given path."""
if not self.IsMessagePath(path):
raise ValueError('Invalid path specified:', path)
return '/'.join(path.split('/')[0:3] + ['capture_header'])
def IsNodePath(self, path):
"""Determine if the path contains a valid node path."""
return re.match(r'^messages/[^/]+(/.+)?$', path)
def IsMessagePath(self, path):
"""Determine if the path contains a valid message path."""
return re.match(r'^messages/[^/]+/[^/]+(/.+)?$', path)
def IsDataPath(self, path):
"""Determine if the path contains a valid data path."""
return re.match(r'^messages/[^/]+/[^/]+/message/.+$', path)
def GetPathsRegex(self, re_match, re_sub=r'\g<0>'):
"""Get a list of paths matching the given regex pattern."""
return self._data_log.GetPathsRegex(re_match, re_sub)
def GetNodeName(self, path):
"""Get the node name associated with the given path."""
if not self.IsNodePath(path):
raise ValueError('Invalid path specified:', path)
return path.split('/')[1]
def GetMessageName(self, path):
"""Get the message name associated with the given path."""
if not self.IsMessagePath(path):
raise ValueError('Invalid path specified:', path)
return path.split('/')[2]
def GetDataName(self, path):
"""Get the data field name associated with the given path."""
if not self.IsDataPath(path):
raise ValueError('Invalid path specified:', path)
return path.split('/', 4)[4:]
@property
def filenames(self):
"""Get HDF5 file names."""
return self._filenames
class H5DataDict(object):
"""Creates a path abstraction to the H5DataLoader object."""
def __init__(self, loader, get_data_function, re_match_sub_dict):
"""Initialize the H5DataDict object.
Args:
loader: A H5DataLoader object.
get_data_function: A H5DataLoader function to map self.GetData().
re_match_sub_dict: A dict mapping path regex pattern to substitution.
"""
self._loader = loader
self._get_data_function = get_data_function
self._re_match_sub_dict = re_match_sub_dict
self._dict = {}
def BuildDict(self):
"""Build the dictionary of data paths."""
self._dict = {}
for re_match, re_sub in self._re_match_sub_dict.iteritems():
expr = re.compile(re_match)
for path in self._loader.GetPathsRegex(expr):
self._dict[expr.sub(re_sub, path)] = path
def GetPathsRegex(self, pattern):
"""Get a list of paths matching the given regex pattern."""
expr = re.compile(pattern)
return sorted([f for f in self._dict if expr.match(f)])
def GetPaths(self, prefix):
"""Get a list of paths with the given prefix."""
return self.GetPathsRegex(r'^(' + prefix + r')(/.+)?$')
def GetSubpaths(self, prefix, recursive=False):
"""Get a list of subpaths of the given prefix."""
if recursive:
return self.GetPathsRegex(r'^(' + prefix + r')/.+$')
else:
return self.GetPathsRegex(r'^(' + prefix + r')/[^/]+$')
def GetData(self, path):
"""Get data associated with the given path."""
return self._get_data_function(self._dict[path])
def GetAioTime(self, path):
"""Get the local node time associated with the given path."""
return self._loader.GetAioTime(self._dict[path])
def GetAioTimeOffset(self, path):
"""Get the local node time offset associated with the given path."""
return self._loader.GetAioTimeOffset(self._dict[path])
def SetAioTimeOffset(self, path, offset):
"""Set the local node time offset associated with the given path."""
self._loader.SetAioTimeOffset(self._dict[path], offset)
def ShiftAioTimeOffset(self, path, delta):
"""Shift the local node time offset associated with the given path."""
self._loader.ShiftAioTimeOffset(self._dict[path], delta)
def GetCaptureTime(self, path):
"""Get the capture time associated with the given path."""
return self._loader.GetCaptureTime(self._dict[path])
def GetCaptureTimeOffset(self):
"""Get the global capture time offset."""
return self._loader.GetCaptureTimeOffset()
def SetCaptureTimeOffset(self, offset):
"""Set the global capture time offset."""
self._loader.SetCaptureTimeOffset(offset)
def ShiftCaptureTimeOffset(self, delta):
"""Shift the global capture time offset."""
self._loader.ShiftCaptureTimeOffset(delta)
def GetGpsTime(self, path):
"""Get the GPS time associated with the given path."""
return self._loader.GetGpsTime(self._dict[path])
def GetGpsTimeOffset(self):
"""Get the global GPS time offset."""
return self._loader.GetGpsTimeOffset()
def SetGpsTimeOffset(self, offset):
"""Set the global GPS time offset."""
self._loader.SetGpsTimeOffset(offset)
def ShiftGpsTimeOffset(self, delta):
"""Shift the global GPS time offset."""
self._loader.ShiftGpsTimeOffset(delta)
def GetRelativeTime(self, path):
"""Get the relative time associated with the given path."""
return self._loader.GetRelativeTime(self._dict[path])
def GetRelativeTimeOffset(self):
"""Get the global relative time offset."""
return self._loader.GetRelativeTimeOffset()
def SetRelativeTimeOffset(self, offset):
"""Set the global relative time offset."""
self._loader.SetRelativeTimeOffset(offset)
def ShiftRelativeTimeOffset(self, delta):
"""Shift the global relative time offset."""
self._loader.ShiftRelativeTimeOffset(delta)
def GetNodeName(self, path):
"""Get the node name associated with the given path."""
return self._loader.GetNodeName(self._dict[path])
def GetMessageName(self, path):
"""Get the message name associated with the given path."""
return self._loader.GetMessageName(self._dict[path])
def GetDataName(self, path):
"""Get the data field name associated with the given path."""
return self._loader.GetDataName(self._dict[path])
def keys(self): # pylint: disable=invalid-name
"""Get all possible paths, used for dictionary self[] auto-completion."""
return sorted(self._dict.keys())
def __contains__(self, path):
"""Provide 'in' interface."""
return path in self._dict
def __getitem__(self, path):
"""Provide self[] dictionary access to data."""
return self.GetData(path)
class H5LogLoader(H5DataDict):
"""Abstract a HDF5 log files to simplify interface."""
def __init__(self, filenames=None):
self._data_loader = H5DataLoader(filenames)
super(H5LogLoader, self).__init__(
self._data_loader, self._data_loader.GetData,
{
r'^messages/([^/]+/[^/]+)$': r'\1',
r'^messages/([^/]+/[^/]+)/message/(.+)$': r'\1/\2',
})
self._aio_header_dict = H5DataDict(
self._data_loader, self._data_loader.GetData,
{
r'^messages/([^/]+/[^/]+)$': r'\1',
r'^messages/([^/]+/[^/]+)/aio_header/(.+)$': r'\1/\2',
})
self._aio_time_dict = H5DataDict(
self._data_loader, self._data_loader.GetAioTime,
{r'^messages/([^/]+/[^/]+)$': r'\1'})
self._bad_packets_dict = H5DataDict(
self._data_loader, self._data_loader.GetData,
{r'^bad_packets/(.+)$': r'\1'})
self._capture_header_dict = H5DataDict(
self._data_loader, self._data_loader.GetData,
{
r'^messages/([^/]+/[^/]+)$': r'\1',
r'^messages/([^/]+/[^/]+)/capture_header/(.+)$': r'\1/\2',
})
self._capture_time_dict = H5DataDict(
self._data_loader, self._data_loader.GetCaptureTime,
{r'^messages/([^/]+/[^/]+)$': r'\1'})
self._gps_time_dict = H5DataDict(
self._data_loader, self._data_loader.GetGpsTime,
{r'^messages/([^/]+/[^/]+)$': r'\1'})
self._info_dict = H5DataDict(
self._data_loader, self._data_loader.GetData,
{r'^info/(.+)$': r'\1'})
self._relative_time_dict = H5DataDict(
self._data_loader, self._data_loader.GetRelativeTime,
{r'^messages/([^/]+/[^/]+)$': r'\1'})
self._param_dict = H5DataDict(
self._data_loader, self._data_loader.GetData,
{r'^parameters/(.+)$': r'\1'})
def __enter__(self):
self.Open()
return self
def __exit__(self, *unused_args):
self.Close()
def Open(self, filenames=None):
"""Open HDF5 log files.
Args:
filenames: A list of log HDF5 files, sorted in time.
"""
self._data_loader.Open(filenames)
self.BuildDict()
def Save(self, filename, verbose=False):
"""Save data from all input files to a single, merged HDF5."""
self._data_loader.Save(filename, verbose)
def Close(self):
"""Close all HDF5 log files and free associated memory."""
self._data_loader.Close()
self.ClearCached()
self.BuildDict()
def ClearCached(self):
"""Free cached memory."""
self._data_loader.ClearCached()
def BuildDict(self):
"""Build the dictionaries of data paths."""
super(H5LogLoader, self).BuildDict()
for t in self.__dict__.values():
if isinstance(t, H5DataDict):
t.BuildDict()
def GetNodes(self):
"""Get a list of nodes found in the log file."""
pattern = r'^messages/([^/]+)/[^/]+/.+'
return self._data_loader.GetPathsRegex(pattern, r'\1')
def GetMessageTypes(self, node=r'[^/]+'):
"""Get a list of message types found in the log file."""
pattern = r'^messages/' + node + r'/([^/]+)/.+$'
return self._data_loader.GetPathsRegex(pattern, r'\1')
@property
def filenames(self):
"""Get HDF5 file names."""
return self._data_loader.filenames
@property
def aio_header(self):
return self._aio_header_dict
@property
def aio_time(self):
return self._aio_time_dict
@property
def bad_packets(self):
return self._bad_packets_dict
@property
def capture_header(self):
return self._capture_header_dict
@property
def capture_time(self):
return self._capture_time_dict
@property
def data(self):
return self
@property
def gps_time(self):
return self._gps_time_dict
@property
def info(self):
return self._info_dict
@property
def param(self):
return self._param_dict
@property
def relative_time(self):
return self._relative_time_dict
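# A short illustrative sketch in addition to the module docstring's example (the node and
# message names are hypothetical and depend on the log being loaded):
#
#   h5 = H5LogLoader()
#   h5.Open(['file1.h5'])
#   print(h5.GetNodes(), h5.GetMessageTypes())
#   t_gps = h5.gps_time['FcA/FlightComputerSensor']      # GPS time of validity
#   t_cap = h5.capture_time['FcA/FlightComputerSensor']  # network capture time
#   h5.Close()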
| [
"[email protected]"
] | |
fbeecaa4293179be24399fb4bb5c7eee64229141 | 50a8c057fd6d8cd0ec96ca9b79c9328432335650 | /ubisqsh.py | e945ad61a8de0c5092e765f01c13e8f9f6c84a5b | [
"MIT"
] | permissive | KurSh/qc_modem_tools | ee804b566f83e30dde13e4aaf2f55e1a95c74fda | fce2f00e226f0fce82f064d218bf6adb70ea8647 | refs/heads/master | 2023-07-07T19:16:43.556182 | 2020-12-25T20:25:52 | 2020-12-25T20:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | #!/usr/bin/env python3
from struct import unpack
import os,sys
def parse_ubihdr(rf):
curpos=rf.tell()
magic = rf.read(4)
if magic == b"UBI#":
rf.seek(curpos+0x10)
hdrsize = unpack(">I", rf.read(4))[0]
blksize = unpack(">I", rf.read(4))[0]
data = unpack(">I", rf.read(4))[0]
rf.seek(curpos+0x3C)
crc = unpack(">I", rf.read(4))[0]
rf.seek(curpos)
return [hdrsize,blksize,data,crc]
def parse_ubihdr2(rf):
curpos=rf.tell()
magic = rf.read(4)
if magic == b"UBI!":
flag = unpack("<I", rf.read(4))[0]
rf.seek(curpos+0xC)
blk = unpack(">I", rf.read(4))[0]
rf.seek(curpos + 0x3C)
crc = unpack(">I", rf.read(4))[0]
rf.seek(curpos)
return [flag,blk,crc]
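# Interpretation of the offsets above (an assumption, not stated in the original source):
# "UBI#" is the erase-counter header, where 0x10/0x14/0x18 hold vid_hdr_offset, data_offset
# and image_seq and 0x3C holds the header CRC; "UBI!" is the VID header, where 0x0C is the
# logical erase block number (lnum). The 0x40000 stride in main() assumes 256 KiB physical
# erase blocks.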
def main():
if len(sys.argv)<2:
print("Usage: ubisqsh.py <filename>")
sys.exit()
filename=sys.argv[1]
with open(filename,'rb') as rf:
with open(filename+".out","wb") as wf:
pos=0
while pos<os.stat(filename).st_size:
hdrsize,blksize,data,crc=parse_ubihdr(rf)
rf.seek(pos+hdrsize)
flag,blk,crc=parse_ubihdr2(rf)
if flag&0xF000000==0:
                    print(f"Blk {blk} Flag {flag:x} WR")
rf.seek(pos + blksize)
rdata=rf.read(0x40000-blksize)
wf.write(rdata)
else:
                    print(f"Blk {blk} Flag {flag:x} SK")
rf.seek(pos+0x40000)
pos+=0x40000
print("Done.")
if __name__=="__main__":
    main()
| [
"[email protected]"
] | |
781782dc9fc9bab7ca93ae38f17db36d6e004b67 | bae5f696b76af428fb5555c147c4f1bcff1bb62e | /metalearn/examples/evaluate_test_data_envs.py | 1f25c6c717085714ed0519ac4b1425fe888f373f | [
"MIT"
] | permissive | cosmicBboy/ml-research | 1e309f881f9810e7a82a262d625db5d684752705 | 04fd31f68e7a44152caf6eaaf66ab59f136dd8f5 | refs/heads/master | 2021-01-24T09:58:25.662826 | 2020-08-10T22:08:23 | 2020-08-10T22:08:23 | 123,030,133 | 8 | 4 | MIT | 2019-06-29T20:13:37 | 2018-02-26T21:03:02 | Jupyter Notebook | UTF-8 | Python | false | false | 2,129 | py | """Evaluate controller after training."""
import joblib
import pandas as pd
import os
import torch
from pathlib import Path
from metalearn.metalearn_controller import MetaLearnController
from metalearn.inference.inference_engine import CASHInference
from metalearn.task_environment import TaskEnvironment
from metalearn.data_environments import openml_api, sklearn_classification
build_path = Path(os.path.dirname(__file__)) / ".." / "floyd_outputs" / "225"
controller = MetaLearnController.load(build_path / "controller_trial_0.pt")
experiment_results = pd.read_csv(
build_path / "rnn_metalearn_controller_experiment.csv")
base_mlf_path = build_path / "metalearn_controller_mlfs_trial_0"
# get top 10 best mlfs for each data env across all episodes.
best_mlf_episodes = (
experiment_results
.groupby("data_env_names")
.apply(lambda df: (
df.sort_values("best_validation_scores", ascending=False).head(10)))
["episode"]
.reset_index(level=1, drop=True)
)
# a dict mapping datasets to the top 10 mlfs found for those datasets.
best_mlfs = (
best_mlf_episodes.map(
lambda x: joblib.load(base_mlf_path / ("best_mlf_episode_%d.pkl" % x)))
.groupby("data_env_names")
.apply(lambda x: list(x))
.to_dict()
)
sklearn_data_envs = sklearn_classification.envs()
openml_data_envs = openml_api.classification_envs()
torch.manual_seed(10)
task_env = TaskEnvironment(
env_sources=["OPEN_ML", "SKLEARN"],
test_set_config={"OPEN_ML": {"test_size": 0.8, "random_state": 100}},
random_state=100,
enforce_limits=True,
per_framework_time_limit=720,
per_framework_memory_limit=10000,
dataset_names=list(sklearn_data_envs.keys()),
test_dataset_names=list(openml_data_envs.keys()),
error_reward=0,
target_types=["BINARY", "MULTICLASS"])
inference_engine = CASHInference(controller, task_env)
# evaluate controller on test data environments
train_env_results = inference_engine.evaluate_training_data_envs(
n=1, datasets=sklearn_data_envs.keys(), verbose=True)
test_env_results = inference_engine.evaluate_test_data_envs(n=50, verbose=True)
| [
"[email protected]"
] | |
db4c205a1c301818753f25df685020906cb5d83c | 7dccf283800b0b47aece8dc7f0c209f5fea527a2 | /ROCC/fitted_Q_iteration/fitted_Q_agents.py | 20ceb89aad9f568488536eed458b0c1b942392ae | [
"MIT"
] | permissive | ucl-cssb/ROCC | 4e713f513a96390c64df23eb414d8a8e374431cb | e7491672bcafc0fac08fe750829e4fac2805d35a | refs/heads/master | 2021-07-10T14:53:31.150305 | 2020-11-17T09:58:01 | 2020-11-17T09:58:01 | 218,557,979 | 13 | 1 | null | 2020-11-17T09:57:37 | 2019-10-30T15:20:47 | Python | UTF-8 | Python | false | false | 10,912 | py | import sys
import os
import numpy as np
import tensorflow as tf
import math
import random
from tensorflow import keras
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
'''
import matplotlib.pyplot as plt
class FittedQAgent():
'''
abstract class for the Torch and Keras implimentations, dont use directly
'''
def get_action(self, state, explore_rate):
'''
        Chooses an action based on the environmental state, explore rate and current value estimates
Parameters:
state: environmental state
explore_rate
Returns:
action
'''
if np.random.random() < explore_rate:
action = np.random.choice(range(self.layer_sizes[-1]))
else:
values = self.predict(state)
self.values.append(values)
action = np.argmax(values)
assert action < self.n_actions, 'Invalid action'
return action
def get_inputs_targets(self):
'''
gets fitted Q inputs and calculates targets for training the Q-network for episodic training
'''
inputs = []
targets = []
# DO THIS WITH NUMPY TO MAKE IT FASTER
for trajectory in self.memory:
for transition in trajectory:
# CHEKC TARGET IS BUILT CORRECTLY
state, action, cost, next_state, done = transition
inputs.append(state)
# construct target
values = self.predict(state)
next_values = self.predict(next_state)
assert len(values) == self.n_actions, 'neural network returning wrong number of values'
assert len(next_values) == self.n_actions, 'neural network returning wrong number of values'
#update the value for the taken action using cost function and current Q
if not done:
values[action] = cost + self.gamma*np.max(next_values) # could introduce step size here, maybe not needed for neural agent
else:
values[action] = cost
targets.append(values)
# shuffle inputs and target for IID
inputs, targets = np.array(inputs), np.array(targets)
randomize = np.arange(len(inputs))
np.random.shuffle(randomize)
inputs = inputs[randomize]
targets = targets[randomize]
assert inputs.shape[1] == self.state_size, 'inputs to network wrong size'
assert targets.shape[1] == self.n_actions, 'targets for network wrong size'
return inputs, targets
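    # Worked numeric example of the target construction above (values are illustrative
    # only): with gamma = 0.9 and a transition (state, action=1, cost=1.0, next_state,
    # done=False), current values = [0.2, 0.5] and next_values = [0.3, 0.7], the target
    # row becomes [0.2, 1.0 + 0.9 * 0.7] = [0.2, 1.63]; only the taken action's entry changes.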
def fitted_Q_update(self, inputs = None, targets = None):
'''
Uses a set of inputs and targets to update the Q network
'''
if inputs is None and targets is None:
inputs, targets = self.get_inputs_targets()
#
#tf.initialize_all_variables() # resinitialise netowrk without adding to tensorflow graph
# try RMSprop and adam and maybe some from here https://arxiv.org/abs/1609.04747
self.reset_weights()
history = self.fit(inputs, targets)
#print('losses: ', history.history['loss'][0], history.history['loss'][-1])
return history
def run_episode(self, env, explore_rate, tmax, train = True, remember = True):
'''
Runs one fitted Q episode
Parameters:
            env: the environment to train on and control
            explore_rate: explore rate for this episode
            tmax: number of timesteps in the episode
            train: does the agent learn?
            remember: does the agent store experience in its memory?
Returns:
env.sSol: time evolution of environmental states
episode reward: total reward for this episode
'''
# run trajectory with current policy and add to memory
trajectory = []
actions = []
#self.values = []
state = env.get_state()
episode_reward = 0
self.single_ep_reward = []
for i in range(tmax):
action = self.get_action(state, explore_rate)
actions.append(action)
next_state, reward, done, info = env.step(action)
#cost = -cost # as cartpole default returns a reward
assert len(next_state) == self.state_size, 'env return state of wrong size'
self.single_ep_reward.append(reward)
if done:
print(reward)
# scale populations
transition = (state, action, reward, next_state, done)
state = next_state
trajectory.append(transition)
episode_reward += reward
if done: break
if remember:
self.memory.append(trajectory)
if train:
self.actions = actions
self.episode_lengths.append(i)
self.episode_rewards.append(episode_reward)
if len(self.memory[0]) * len(self.memory) < 100:
#n_iters = 4
n_iters = 4
elif len(self.memory[0]) * len(self.memory) < 200:
#n_iters = 5
n_iters = 5
else:
n_iters = 10
#n_iters = 0
for _ in range(n_iters):
self.fitted_Q_update()
#env.plot_trajectory()
#plt.show()
return env.sSol, episode_reward
def neural_fitted_Q(self, env, n_episodes, tmax):
'''
runs a whole neural fitted Q experiment
Parameters:
env: environment to train on
n_episodes: number of episodes
tmax: timesteps in each episode
'''
times = []
for i in range(n_episodes):
print()
print('EPISODE', i)
# CONSTANT EXPLORE RATE OF 0.1 worked well
explore_rate = self.get_rate(i, 0, 1, 2.5)
#explore_rate = 0.1
#explore_rate = 0
print('explore_rate:', explore_rate)
env.reset()
trajectory, reward = self.run_episode(env, explore_rate, tmax)
time = len(trajectory)
print('Time: ', time)
times.append(time)
print(times)
def plot_rewards(self):
'''
Plots the total reward gained in each episode on a matplotlib figure
'''
plt.figure(figsize = (16.0,12.0))
plt.plot(self.episode_rewards)
def save_results(self, save_path):
'''
saves numpy arrays of results of training
'''
np.save(save_path + '/survival_times', self.episode_lengths)
np.save(save_path + '/episode_rewards', self.episode_rewards)
def get_rate(self, episode, MIN_LEARNING_RATE, MAX_LEARNING_RATE, denominator):
'''
Calculates the logarithmically decreasing explore or learning rate
Parameters:
episode: the current episode
MIN_LEARNING_RATE: the minimum possible step size
MAX_LEARNING_RATE: maximum step size
denominator: controls the rate of decay of the step size
Returns:
step_size: the Q-learning step size
'''
# input validation
if not 0 <= MIN_LEARNING_RATE <= 1:
            raise ValueError("MIN_LEARNING_RATE needs to be between 0 and 1")
if not 0 <= MAX_LEARNING_RATE <= 1:
            raise ValueError("MAX_LEARNING_RATE needs to be between 0 and 1")
if not 0 < denominator:
raise ValueError("denominator needs to be above 0")
rate = max(MIN_LEARNING_RATE, min(MAX_LEARNING_RATE, 1.0 - math.log10((episode+1)/denominator)))
return rate
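    # Example decay (illustrative): with MIN=0, MAX=1 and denominator=2.5 as used in
    # neural_fitted_Q above, episode 0 gives min(1, 1 - log10(1/2.5)) = 1.0, episode 9
    # gives 1 - log10(10/2.5) ~= 0.40, and from episode 24 onwards the rate is clipped
    # to the minimum of 0.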
class KerasFittedQAgent(FittedQAgent):
def __init__(self, layer_sizes = [2,20,20,4]):
self.memory = []
self.layer_sizes = layer_sizes
self.network = self.initialise_network(layer_sizes)
self.gamma = 0.9
self.state_size = layer_sizes[0]
self.n_actions = layer_sizes[-1]
self.episode_lengths = []
self.episode_rewards = []
self.single_ep_reward = []
self.total_loss = 0
self.values = []
def initialise_network(self, layer_sizes):
'''
Creates Q network
'''
tf.keras.backend.clear_session()
initialiser = keras.initializers.RandomUniform(minval = -0.5, maxval = 0.5, seed = None)
positive_initialiser = keras.initializers.RandomUniform(minval = 0., maxval = 0.35, seed = None)
regulariser = keras.regularizers.l1_l2(l1=0.01, l2=0.01)
network = keras.Sequential([
keras.layers.InputLayer([layer_sizes[0]]),
keras.layers.Dense(layer_sizes[1], activation = tf.nn.relu),
keras.layers.Dense(layer_sizes[2], activation = tf.nn.relu),
keras.layers.Dense(layer_sizes[3]) # linear output layer
])
network.compile(optimizer = 'adam', loss = 'mean_squared_error') # TRY DIFFERENT OPTIMISERS
return network
def predict(self, state):
'''
Predicts value estimates for each action base on currrent states
'''
return self.network.predict(state.reshape(1,-1))[0]
def fit(self, inputs, targets):
'''
trains the Q network on a set of inputs and targets
'''
history = self.network.fit(inputs, targets, epochs = 300, verbose = 0) # TRY DIFFERENT EPOCHS
return history
    def reset_weights(self):
'''
Reinitialises weights to random values
'''
sess = tf.keras.backend.get_session()
sess.run(tf.global_variables_initializer())
def save_network(self, save_path):
'''
Saves current network weights
'''
self.network.save(save_path + '/saved_network.h5')
def save_network_tensorflow(self, save_path):
'''
        Saves current network weights using pure tensorflow, as the Keras saver seems to crash sometimes
'''
saver = tf.train.Saver()
sess = tf.keras.backend.get_session()
path = saver.save(sess, save_path + "/saved/model.cpkt")
def load_network_tensorflow(self, save_path):
'''
        Loads network weights from file using pure tensorflow, as the Keras saver seems to crash sometimes
'''
saver = tf.train.Saver()
sess = tf.keras.backend.get_session()
saver.restore(sess, save_path +"/saved/model.cpkt")
def load_network(self, load_path): #tested
'''
Loads network weights from file
'''
try:
self.network = keras.models.load_model(load_path + '/saved_network.h5') # sometimes this crashes, apparently a bug in keras
except:
self.network.load_weights(load_path + '/saved_network.h5') # this requires model to be initialised exactly the same
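# A minimal usage sketch (assumes a task environment exposing reset()/step()/get_state()
# and an sSol state trajectory, as relied on by FittedQAgent.run_episode above):
#
#   agent = KerasFittedQAgent(layer_sizes=[state_size, 20, 20, n_actions])
#   agent.neural_fitted_Q(env, n_episodes=20, tmax=100)
#   agent.plot_rewards()
#   agent.save_results('./results')
#   agent.save_network('./results')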
| [
"[email protected]"
] | |
5249a6811fad92b075afe3535e1eb24bef84ca78 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/dashboards/settings/user/panel.py | 9b9781eb0b3ae92466380018fce619077f697488 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 863 | py | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.settings import dashboard
class UserPanel(horizon.Panel):
name = _("User Settings")
slug = 'user'
dashboard.Settings.register(UserPanel)
| [
"[email protected]"
] | |
f8e584f21699ce5bf51c3992ef099f5f3548d4d1 | 52fb627ec952bf647c625f9372581bff4764da76 | /wo_websocket.py | 71f69201f610ea526be8c98ac46edded4b559f1b | [] | no_license | syyunn/smpc-dl | b89071d277347e28979973e734b329f51020a6b0 | 41bd40ef7866062a53fb20bcff994c51f38f38d5 | refs/heads/master | 2020-08-06T00:17:01.474179 | 2019-10-05T16:39:14 | 2019-10-05T16:39:14 | 212,768,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import syft as sy
hook = sy.TorchHook(torch)
class Arguments():
def __init__(self):
self.batch_size = 64
self.test_batch_size = 64
self.epochs = 10
self.lr = 0.02
self.seed = 1
self.log_interval = 1 # Log info at each batch
self.precision_fractional = 3
args = Arguments()
_ = torch.manual_seed(args.seed)
# simulation functions
def connect_to_workers(n_workers):
return [
sy.VirtualWorker(hook, id=f"worker{i+1}")
for i in range(n_workers)
]
def connect_to_crypto_provider():
return sy.VirtualWorker(hook, id="crypto_provider")
workers = connect_to_workers(n_workers=2)
crypto_provider = connect_to_crypto_provider()
# We don't use the whole dataset for efficiency purpose, but feel free to increase these numbers
n_train_items = 640
n_test_items = 640
def get_private_data_loaders(precision_fractional, workers, crypto_provider):
def one_hot_of(index_tensor):
"""
Transform to one hot tensor
Example:
[0, 3, 9]
=>
[[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]
"""
onehot_tensor = torch.zeros(*index_tensor.shape,
10) # 10 classes for MNIST
onehot_tensor = onehot_tensor.scatter(1, index_tensor.view(-1, 1), 1)
return onehot_tensor
def secret_share(tensor):
"""
Transform to fixed precision and secret share a tensor
"""
return (
tensor
.fix_precision(precision_fractional=precision_fractional)
.share(*workers, crypto_provider=crypto_provider,
requires_grad=True)
)
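    # Net effect of secret_share: each value is encoded as an integer scaled by
    # base**precision_fractional (base 10 by default, an assumption about
    # PySyft's fix_precision) and additively split between the two workers,
    # with gradients still tracked via requires_grad=True.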
transformation = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transformation),
batch_size=args.batch_size
)
private_train_loader = [
(secret_share(data), secret_share(one_hot_of(target)))
for i, (data, target) in enumerate(train_loader)
if i < n_train_items / args.batch_size
]
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download=True,
transform=transformation),
batch_size=args.test_batch_size
)
private_test_loader = [
(secret_share(data), secret_share(target.float()))
for i, (data, target) in enumerate(test_loader)
if i < n_test_items / args.test_batch_size
]
return private_train_loader, private_test_loader
private_train_loader, private_test_loader = get_private_data_loaders(
precision_fractional=args.precision_fractional,
workers=workers,
crypto_provider=crypto_provider)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def train(args, model, private_train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(
private_train_loader): # <-- now it is a private dataset
start_time = time.time()
optimizer.zero_grad()
output = model(data)
# loss = F.nll_loss(output, target) <-- not possible here
batch_size = output.shape[0]
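        # A squared error against the one-hot target stands in for nll_loss,
        # since log/softmax are not readily computable on secret-shared
        # fixed-precision tensors; refresh() is assumed to re-randomize the
        # shares of the summed loss before the division.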
loss = ((output - target) ** 2).sum().refresh() / batch_size
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
loss = loss.get().float_precision()
print(
'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime: {:.3f}s'.format(
epoch, batch_idx * args.batch_size,
len(private_train_loader) * args.batch_size,
100. * batch_idx / len(private_train_loader),
loss.item(), time.time() - start_time))
def test(args, model, private_test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in private_test_loader:
start_time = time.time()
output = model(data)
pred = output.argmax(dim=1)
correct += pred.eq(target.view_as(pred)).sum()
correct = correct.get().float_precision()
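        # Only this aggregated count is decrypted; the individual predictions
        # stay secret-shared for the whole evaluation loop.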
print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
correct.item(), len(private_test_loader) * args.test_batch_size,
100. * correct.item() / (len(
private_test_loader) * args.test_batch_size)))
model = Net()
model = model.fix_precision().share(*workers, crypto_provider=crypto_provider, requires_grad=True)
optimizer = optim.SGD(model.parameters(), lr=args.lr)
optimizer = optimizer.fix_precision()
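# The optimizer is put in fixed precision too so that its update step
# (learning rate included) operates on the same encoded integer scale as the
# shared model parameters.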
for epoch in range(1, args.epochs + 1):
train(args, model, private_train_loader, optimizer, epoch)
test(args, model, private_test_loader)
| [
"[email protected]"
] | |
60738160b15b49779d9eaf9e8d83139fd7afa508 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_0/Python/sleepingfire/d.py | ba96e3d60f9df7e644e9b38c0dc4523c1c6882bd | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | import sys, os, math
def main(K, C, S):
minimum = math.ceil(K / C)
if minimum > S:
return "IMPOSSIBLE"
cs = [1] * (C + 1)
for i in range(1, C+1):
cs[i] = C * cs[i-1]
tiles = []
idx = 1
depth = 0
for k in range(1, math.ceil(K / C) * C + 1):
idx = (idx - 1) * K + min(k, K)
#print(k, depth, idx)
depth += 1
if depth == C:
tiles.append(idx)
idx = 1
depth = 0
return tiles
if __name__ == "__main__":
in_path = "test.in" if len(sys.argv) == 1 else sys.argv[1]
in_file = open(in_path, 'r')
T = int(in_file.readline().rstrip())
for case_idx in range(T):
K, C, S = [int(z) for z in in_file.readline().rstrip().split()]
res = main(K, C, S)
if isinstance(res, list):
print("Case #{}: {}".format(case_idx + 1, " ".join([str(z) for z in res])))
else:
print("Case #{}: {}".format(case_idx + 1, res))
| [
"[email protected]"
] | |
a24387d89088254301d368ebf2e5e55d143a8c4c | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/addons/base/tests/test_translate.py | c61011c1179ece9a596ac5d562a30db71e6c1d7e | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,651 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import unittest
from dotop.tools.translate import quote, unquote, xml_translate, html_translate
from dotop.tests.common import TransactionCase
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_text(self):
""" Test xml_translate() on plain text. """
terms = []
source = "Blah blah blah"
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms, [source])
def test_translate_xml_text_entity(self):
""" Test xml_translate() on plain text with HTML escaped entities. """
terms = []
source = "Blah&nbsp;blah&nbsp;blah"
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms, [source])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div t-translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
def test_translate_html(self):
""" Test xml_translate() and html_translate() with <i> elements. """
source = """<i class="fa-check"></i>"""
result = xml_translate(lambda term: term, source)
self.assertEquals(result, """<i class="fa-check"/>""")
result = html_translate(lambda term: term, source)
self.assertEquals(result, source)
class TestTranslation(TransactionCase):
def setUp(self):
super(TestTranslation, self).setUp()
self.env['ir.translation'].load_module_terms(['base'], ['fr_FR'])
self.customers = self.env['res.partner.category'].create({'name': 'Customers'})
self.env['ir.translation'].create({
'type': 'model',
'name': 'res.partner.category,name',
'module':'base',
'lang': 'fr_FR',
'res_id': self.customers.id,
'value': 'Clients',
'state': 'translated',
})
def test_101_create_translated_record(self):
category = self.customers.with_context({})
self.assertEqual(category.name, 'Customers', "Error in basic name_get")
category_fr = category.with_context({'lang': 'fr_FR'})
self.assertEqual(category_fr.name, 'Clients', "Translation not found")
def test_102_duplicate_record(self):
category = self.customers.with_context({'lang': 'fr_FR'}).copy()
category_no = category.with_context({})
self.assertEqual(category_no.name, 'Customers', "Duplication did not set untranslated value")
category_fr = category.with_context({'lang': 'fr_FR'})
        self.assertEqual(category_fr.name, 'Clients', "Did not find translation for initial value")
def test_103_duplicate_record_fr(self):
category = self.customers.with_context({'lang': 'fr_FR'}).copy({'name': 'Clients (copie)'})
category_no = category.with_context({})
self.assertEqual(category_no.name, 'Customers', "Duplication erased original untranslated value")
category_fr = category.with_context({'lang': 'fr_FR'})
        self.assertEqual(category_fr.name, 'Clients (copie)', "Did not use default value for translated value")
def test_104_orderby_translated_field(self):
""" Test search ordered by a translated field. """
# create a category with a French translation
padawans = self.env['res.partner.category'].create({'name': 'Padawans'})
padawans_fr = padawans.with_context(lang='fr_FR')
padawans_fr.write({'name': 'Apprentis'})
# search for categories, and sort them by (translated) name
categories = padawans_fr.search([('id', 'in', [self.customers.id, padawans.id])], order='name')
self.assertEqual(categories.ids, [padawans.id, self.customers.id],
"Search ordered by translated name should return Padawans (Apprentis) before Customers (Clients)")
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
b0e17c91d87c7d7e5fcc3f873986d920f6918c16 | 21a561ec0d40554a43dc5a6dfab0f4f62ddb615d | /canteen/base/__init__.py | aaae304df48bbcbcb386327709ca4b1e4a9c8d98 | [
"MIT"
] | permissive | mindis/canteen | 2745a0ebec696d1fbfcc6c4c69582711a4a7e8e6 | a0cf38333417e879712394800a49eb9d0450f96f | refs/heads/master | 2020-12-25T19:15:21.612088 | 2014-02-24T11:29:59 | 2014-02-24T11:29:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
'''
canteen base
~~~~~~~~~~~~
:author: Sam Gammon <[email protected]>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# import all the things
from .page import *
from .logic import *
from .handler import *
__all__ = (
'page',
'logic',
'handler',
'Page',
'Logic',
'Handler'
)
| [
"[email protected]"
] |