Dataset schema (each record below is one row; fields are separated by `|`):
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 616)
- content_id: string (length 40)
- detected_licenses: sequence (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (3 to 10.2M)
- extension: string (188 classes)
- content: string (length 3 to 10.2M)
- authors: sequence (length 1)
- author_id: string (length 1 to 132)
cd5cc4556e6d1854330409d0157e50db0125950f | 3f9f7fe32c655e612f351302ad1945e92e514a31 | /ut/scrap/xgoogle/sponsoredlinks.py | e3b4515c1a2095d5cb8875038664aca09f932b8a | [
"MIT"
] | permissive | thorwhalen/ut | 12ea7e0fd9bc452d71b0cc3d8ecdb527335a3c17 | 72dbdf41b0250708ad525030128cc7c3948b3f41 | refs/heads/master | 2023-02-17T06:44:11.053826 | 2023-02-07T13:22:07 | 2023-02-07T13:22:07 | 32,152,452 | 6 | 4 | MIT | 2023-02-16T00:34:33 | 2015-03-13T11:32:31 | Python | UTF-8 | Python | false | false | 7,973 | py | #!/usr/bin/python
#
# Peteris Krumins ([email protected])
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import time
import urllib.request, urllib.parse, urllib.error
import random
from html.entities import name2codepoint
from .BeautifulSoup import BeautifulSoup
from .browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
""" Sponsored Links Error """
pass
class SLParseError(Exception):
"""
Parse error in Google results.
self.msg attribute contains explanation why parsing failed
self.tag attribute contains BeautifulSoup object with the most relevant tag that failed to parse
Thrown only in debug mode
"""
def __init__(self, msg, tag):
self.msg = msg
self.tag = tag
def __str__(self):
return self.msg
def html(self):
return self.tag.prettify()
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
""" a single sponsored link """
def __init__(self, title, url, display_url, desc):
self.title = title
self.url = url
self.display_url = display_url
self.desc = desc
class SponsoredLinks(object):
SEARCH_URL_0 = 'http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en'
NEXT_PAGE_0 = (
'http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en'
)
SEARCH_URL_1 = 'http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en'
NEXT_PAGE_1 = 'http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en'
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self._page = 0
self.eor = False
self.results_info = None
self._results_per_page = 10
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_results_per_page(self):
return self._results_per_page
    def _set_results_per_page(self, rpp):
        self._results_per_page = rpp
    results_per_page = property(_get_results_per_page, _set_results_per_page)
def get_results(self):
if self.eor:
return []
page = self._get_results_page()
info = self._extract_info(page)
if self.results_info is None:
self.results_info = info
if info['to'] == info['total']:
self.eor = True
results = self._extract_results(page)
if not results:
self.eor = True
return []
self._page += 1
return results
    def _get_all_results_sleep_fn(self):
        return random.random() * 5 + 1  # sleep from 1 - 6 seconds
    def get_all_results(self, sleep_function=None):
        if sleep_function is GET_ALL_SLEEP_FUNCTION:
            sleep_function = self._get_all_results_sleep_fn
        if sleep_function is None:
            sleep_function = lambda: None
        ret_results = []
        while True:
            res = self.get_results()
            if not res:
                return ret_results
            ret_results.extend(res)
            delay = sleep_function()  # honor the requested inter-page delay
            if delay:
                time.sleep(delay)
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _extract_info(self, soup):
empty_info = {'from': 0, 'to': 0, 'total': 0}
stats_span = soup.find('span', id='stats')
if not stats_span:
return empty_info
txt = ''.join(stats_span.findAll(text=True))
        txt = txt.replace(',', '').replace('\xa0', ' ')  # '\xa0' is a non-breaking space
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
if not matches:
return empty_info
return {
'from': int(matches.group(1)),
'to': int(matches.group(2)),
'total': int(matches.group(3)),
}
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = SponsoredLinks.SEARCH_URL_0
else:
url = SponsoredLinks.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = SponsoredLinks.NEXT_PAGE_0
else:
url = SponsoredLinks.NEXT_PAGE_1
safe_url = url % {
'query': urllib.parse.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page,
}
try:
page = self.browser.get_page(safe_url)
except BrowserError as e:
raise SLError('Failed getting %s: %s' % (e.url, e.error))
return BeautifulSoup(page)
def _extract_results(self, soup):
results = soup.findAll('div', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
display_url = self._extract_display_url(
result
) # Warning: removes 'cite' from the result
desc = self._extract_description(result)
if not title or not url or not display_url or not desc:
return None
return SponsoredLink(title, url, display_url, desc)
def _extract_title_url(self, result):
title_a = result.find('a')
if not title_a:
self._maybe_raise(
SLParseError, 'Title tag in sponsored link was not found', result
)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.search(r'q=(http[^&]+)&', url)
if not match:
self._maybe_raise(
SLParseError, 'URL inside a sponsored link was not found', result
)
return None, None
url = urllib.parse.unquote(match.group(1))
return title, url
def _extract_display_url(self, result):
cite = result.find('cite')
if not cite:
self._maybe_raise(SLParseError, '<cite> not found inside result', result)
return None
return ''.join(cite.findAll(text=True))
def _extract_description(self, result):
cite = result.find('cite')
if not cite:
return None
cite.extract()
desc_div = result.find('div', {'class': 'line23'})
if not desc_div:
self._maybe_raise(
                SLParseError, 'Description tag not found in sponsored link', result
)
return None
desc_strs = desc_div.findAll(text=True)[0:-1]
desc = ''.join(desc_strs)
desc = desc.replace('\n', ' ')
        desc = desc.replace('\xa0', ' ')  # '\xa0' is a non-breaking space
return self._html_unescape(desc)
    def _html_unescape(self, text):
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return chr(name2codepoint[entity])
            else:
                return m.group(0)
        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return chr(cp)
            else:
                return m.group(0)
        # The fourth positional argument of re.sub is `count`, not `flags`,
        # so re.U must be passed as a keyword argument.
        s = re.sub(r'&#(\d+);', ascii_replacer, text, flags=re.U)
        return re.sub(r'&([^;]+);', entity_replacer, s, flags=re.U)
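# A minimal usage sketch (the query string below is illustrative; assumes the
# sibling Browser/BeautifulSoup modules imported above are available):
#
#   sl = SponsoredLinks('some query', random_agent=True)
#   try:
#       for link in sl.get_all_results():
#           print('%s -> %s (%s)' % (link.title, link.url, link.display_url))
#   except SLError as e:
#       print('sponsored links search failed: %s' % e)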
| [
"[email protected]"
] | |
3a3f5eca94ff903e351eda079b55486e241fbaf2 | ee4c4c2cc6c663d4233d8145b01ae9eb4fdeb6c0 | /configs/FDDB/retinanet/cfgs_res50_fddb_v4.py | 3d62297eb03eef39a8f72de29894929e06afd4ac | [
"Apache-2.0"
] | permissive | yangcyz/RotationDetection | c86f40f0be1142c30671d4fed91446aa01ee31c1 | 82706f4c4297c39a6824b9b53a55226998fcd2b2 | refs/heads/main | 2023-09-01T23:25:31.956004 | 2021-11-23T13:57:31 | 2021-11-23T13:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SAVE_WEIGHTS_INTE = 2000 * 2
DECAY_EPOCH = [8, 11, 20]
MAX_EPOCH = 12
WARM_EPOCH = 1 / 16.
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'FDDB'
CLASS_NUM = 1
# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 1.5, 1.5]
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0 / 5.0
REG_LOSS_MODE = None
# eval
USE_07_METRIC = False
VERSION = 'RetinaNet_FDDB_2x_20211106'
"""
RetinaNet-H + 90
FLOPs: 830085163; Trainable params: 32159286
2007
cls : face|| Recall: 0.9648760330578512 || Precison: 0.5751231527093597|| AP: 0.9071560203590661
F1:0.9482526582400714 P:0.9697624190064795 R:0.9276859504132231
mAP is : 0.9071560203590661
2012
cls : face|| Recall: 0.9648760330578512 || Precison: 0.574887156339762|| AP: 0.959204678220418
F1:0.9482526582400714 P:0.9697624190064795 R:0.9276859504132231
mAP is : 0.959204678220418
AP50:95=0.5276534556388707
0.959204678220418 0.9301560772935049 0.8749958747257098 0.7844197465233099 0.683315839522552
0.558135300551797 0.3479441339258663 0.12669957890041392 0.011630901808271605 3.2424916862513164e-05
"""
| [
"[email protected]"
] | |
033a02153fd14c2d5475e0363c33629676f59c87 | 97ffb573b2f5f615c14347f9e2e8c12660c799a8 | /libs/ignite_utils.py | 26a021b900f7b27e00f4639a54305d87b4641b46 | [] | no_license | GOSSAN0602/OCR-Ancient-characters | d2745ea133b9d4595e860f03afa1d3eed7ee2104 | b118a9f40127af505f6e324aaabf0fccd2ce9d12 | refs/heads/master | 2020-12-22T21:10:30.266685 | 2020-01-31T07:20:36 | 2020-01-31T07:20:36 | 236,933,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,756 | py | import json
from logging import getLogger
import os
from time import perf_counter
import torch
import pandas as pd
from ignite.engine.engine import Engine, Events
from ignite.metrics import Average
from ignite.metrics.metric import Metric
def save_json(filepath, params):
with open(filepath, 'w') as f:
json.dump(params, f, indent=4)
class DictOutputTransform:
def __init__(self, key, index=0):
self.key = key
self.index = index
def __call__(self, x):
if self.index >= 0:
x = x[self.index]
return x[self.key]
def create_trainer(classifier, optimizer, device):
classifier.to(device)
def update_fn(engine, batch):
classifier.train()
optimizer.zero_grad()
# batch = [elem.to(device) for elem in batch]
x, y = [elem.to(device) for elem in batch]
loss, metrics, pred_y = classifier(x, y)
loss.backward()
optimizer.step()
return metrics, pred_y, y
trainer = Engine(update_fn)
for key in classifier.metrics_keys:
Average(output_transform=DictOutputTransform(key)).attach(trainer, key)
return trainer
def create_evaluator(classifier, device):
classifier.to(device)
def update_fn(engine, batch):
classifier.eval()
with torch.no_grad():
# batch = [elem.to(device) for elem in batch]
x, y = [elem.to(device) for elem in batch]
_, metrics, pred_y = classifier(x, y)
return metrics, pred_y, y
evaluator = Engine(update_fn)
for key in classifier.metrics_keys:
Average(output_transform=DictOutputTransform(key)).attach(evaluator, key)
return evaluator
class LogReport:
def __init__(self, evaluator=None, dirpath=None, logger=None):
self.evaluator = evaluator
self.dirpath = str(dirpath) if dirpath is not None else None
self.logger = logger or getLogger(__name__)
self.reported_dict = {} # To handle additional parameter to monitor
self.history = []
self.start_time = perf_counter()
def report(self, key, value):
self.reported_dict[key] = value
def __call__(self, engine):
elapsed_time = perf_counter() - self.start_time
elem = {'epoch': engine.state.epoch,
'iteration': engine.state.iteration}
elem.update({f'train/{key}': value
for key, value in engine.state.metrics.items()})
if self.evaluator is not None:
elem.update({f'valid/{key}': value
for key, value in self.evaluator.state.metrics.items()})
elem.update(self.reported_dict)
elem['elapsed_time'] = elapsed_time
self.history.append(elem)
if self.dirpath:
save_json(os.path.join(self.dirpath, 'log.json'), self.history)
self.get_dataframe().to_csv(os.path.join(self.dirpath, 'log.csv'), index=False)
# --- print ---
msg = ''
for key, value in elem.items():
if key in ['iteration']:
# skip printing some parameters...
continue
elif isinstance(value, int):
msg += f'{key} {value: >6d} '
else:
msg += f'{key} {value: 8f} '
# self.logger.warning(msg)
print(msg)
# --- Reset ---
self.reported_dict = {}
def get_dataframe(self):
df = pd.DataFrame(self.history)
return df
class SpeedCheckHandler:
def __init__(self, iteration_interval=10, logger=None):
self.iteration_interval = iteration_interval
self.logger = logger or getLogger(__name__)
self.prev_time = perf_counter()
def __call__(self, engine: Engine):
if engine.state.iteration % self.iteration_interval == 0:
cur_time = perf_counter()
spd = self.iteration_interval / (cur_time - self.prev_time)
self.logger.warning(f'{spd} iter/sec')
# reset
self.prev_time = cur_time
def attach(self, engine: Engine):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
class ModelSnapshotHandler:
def __init__(self, model, filepath='model_{count:06}.pt',
interval=1, logger=None):
self.model = model
self.filepath: str = str(filepath)
self.interval = interval
self.logger = logger or getLogger(__name__)
self.count = 0
def __call__(self, engine: Engine):
self.count += 1
if self.count % self.interval == 0:
filepath = self.filepath.format(count=self.count)
torch.save(self.model.state_dict(), filepath)
# self.logger.warning(f'save model to {filepath}...')
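# A minimal wiring sketch (loader/classifier names below are hypothetical;
# assumes a classifier exposing `metrics_keys` and returning
# (loss, metrics, pred_y) from its forward pass, as create_trainer expects):
#
#   trainer = create_trainer(classifier, optimizer, device)
#   evaluator = create_evaluator(classifier, device)
#
#   @trainer.on(Events.EPOCH_COMPLETED)
#   def run_evaluator(engine):
#       evaluator.run(valid_loader)
#
#   trainer.add_event_handler(Events.EPOCH_COMPLETED,
#                             LogReport(evaluator, dirpath='results'))
#   trainer.add_event_handler(Events.EPOCH_COMPLETED,
#                             ModelSnapshotHandler(classifier))
#   trainer.run(train_loader, max_epochs=10)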
| [
"[email protected]"
] | |
7be016d715c3f91b6565881bab25ad196f486f3e | 0266077854f9dd6e2b55f4e8e71f77f42bd20b21 | /apps/childcount/migrations/0021_auto__add_field_pregnancyregistrationreport_husband__chg_field_pregnan.py | b83efee908b56322835f5372190f738fffb7c778 | [] | no_license | techpub/rapidsms | 5af55880d4aa4c6035ea16d3fb1f6ea4524893d7 | b71f4c5ce93fe8ac7b1cf0b8522b698361904dbb | refs/heads/ccstable | 2020-12-03T09:19:56.397816 | 2013-05-15T07:37:46 | 2013-05-15T07:37:46 | 46,684,087 | 0 | 0 | null | 2015-11-22T22:41:45 | 2015-11-22T22:41:45 | null | UTF-8 | Python | false | false | 35,171 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PregnancyRegistrationReport.husband'
db.add_column('cc_pregregrpt', 'husband', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='husband', null=True, to=orm['childcount.Patient']), keep_default=False)
# Changing field 'PregnancyRegistrationReport.married'
db.alter_column('cc_pregregrpt', 'married', self.gf('django.db.models.fields.CharField')(max_length=1))
def backwards(self, orm):
# Deleting field 'PregnancyRegistrationReport.husband'
db.delete_column('cc_pregregrpt', 'husband_id')
# Changing field 'PregnancyRegistrationReport.married'
db.alter_column('cc_pregregrpt', 'married', self.gf('django.db.models.fields.BooleanField')(blank=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'childcount.antenatalvisitreport': {
'Meta': {'object_name': 'AntenatalVisitReport', 'db_table': "'cc_iavrpt'", '_ormbases': ['childcount.CCReport']},
'blood_drawn': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'expected_on': ('django.db.models.fields.DateTimeField', [], {}),
'hiv': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'pregnancy_week': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.appointmentreport': {
'Meta': {'object_name': 'AppointmentReport', 'db_table': "'cc_appointment'", '_ormbases': ['childcount.CCReport']},
'appointment_date': ('django.db.models.fields.DateTimeField', [], {}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'})
},
'childcount.bcpillreport': {
'Meta': {'object_name': 'BCPillReport', 'db_table': "'cc_bcprpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'pills': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.bednetissuedreport': {
'Meta': {'object_name': 'BednetIssuedReport', 'db_table': "'cc_bdnstc_rpt'", '_ormbases': ['childcount.CCReport']},
'bednet_received': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'})
},
'childcount.bednetreport': {
'Meta': {'object_name': 'BedNetReport', 'db_table': "'cc_bnrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'damaged_nets': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'earlier_nets': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'function_nets': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'sleeping_sites': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.bednetutilization': {
'Meta': {'object_name': 'BednetUtilization', 'db_table': "'cc_bdnutil_rpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'child_lastnite': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'child_underfive': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'hanging_bednet': ('django.db.models.fields.SmallIntegerField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
'childcount.birthreport': {
'Meta': {'object_name': 'BirthReport', 'db_table': "'cc_birthrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'clinic_delivery': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'childcount.case': {
'Meta': {'object_name': 'Case', 'db_table': "'cc_case'"},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Patient']"}),
'reports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.CCReport']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'childcount.ccreport': {
'Meta': {'object_name': 'CCReport', 'db_table': "'cc_ccrpt'"},
'encounter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Encounter']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_childcount.ccreport_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"})
},
'childcount.chw': {
'Meta': {'object_name': 'CHW', 'db_table': "'cc_chw'", '_ormbases': ['reporters.Reporter']},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stationed_chw'", 'null': 'True', 'to': "orm['childcount.Clinic']"}),
'manager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CHW']", 'null': 'True', 'blank': 'True'}),
'reporter_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['reporters.Reporter']", 'unique': 'True', 'primary_key': 'True'})
},
'childcount.chwhealthid': {
'Meta': {'object_name': 'CHWHealthId', 'db_table': "'cc_chwhealthid'"},
'chw': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CHW']", 'null': 'True', 'blank': 'True'}),
'health_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.HealthId']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'childcount.clinic': {
'Meta': {'object_name': 'Clinic', 'db_table': "'cc_clinic'", '_ormbases': ['locations.Location']},
'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['locations.Location']", 'unique': 'True', 'primary_key': 'True'})
},
'childcount.codeditem': {
'Meta': {'unique_together': "(('type', 'code'),)", 'object_name': 'CodedItem', 'db_table': "'cc_codeditem'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'childcount.configuration': {
'Meta': {'unique_together': "(('key', 'value'),)", 'object_name': 'Configuration', 'db_table': "'cc_config'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'childcount.dangersignsreport': {
'Meta': {'object_name': 'DangerSignsReport', 'db_table': "'cc_dsrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'danger_signs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.CodedItem']", 'symmetrical': 'False'})
},
'childcount.deadperson': {
'Meta': {'object_name': 'DeadPerson', 'db_table': "'cc_dead_person'"},
'chw': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CHW']"}),
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Clinic']", 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'dod': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deads_household_member'", 'null': 'True', 'to': "orm['childcount.Patient']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deads_resident'", 'null': 'True', 'to': "orm['locations.Location']"}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'childcount.deathreport': {
'Meta': {'object_name': 'DeathReport', 'db_table': "'cc_deathrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'death_date': ('django.db.models.fields.DateField', [], {})
},
'childcount.drinkingwaterreport': {
'Meta': {'object_name': 'DrinkingWaterReport', 'db_table': "'cc_drnkwater_rpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'treatment_method': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'water_source': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'childcount.encounter': {
'Meta': {'object_name': 'Encounter', 'db_table': "'cc_encounter'"},
'chw': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CHW']"}),
'encounter_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Patient']"}),
'sync_omrs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'childcount.familyplanningreport': {
'Meta': {'object_name': 'FamilyPlanningReport', 'db_table': "'cc_fprpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'women': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'women_using': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'childcount.familyplanningusage': {
'Meta': {'object_name': 'FamilyPlanningUsage', 'db_table': "'cc_fpusage'"},
'count': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'fp_report': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.FamilyPlanningReport']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CodedItem']"})
},
'childcount.feverreport': {
'Meta': {'object_name': 'FeverReport', 'db_table': "'cc_fevrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'rdt_result': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'childcount.followupreport': {
'Meta': {'object_name': 'FollowUpReport', 'db_table': "'cc_furpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'improvement': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'visited_clinic': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'childcount.formgroup': {
'Meta': {'object_name': 'FormGroup', 'db_table': "'cc_frmgrp'"},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reporters.PersistantBackend']"}),
'encounter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Encounter']", 'null': 'True', 'blank': 'True'}),
'entered_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'encounters_entered'", 'null': 'True', 'to': "orm['reporters.Reporter']"}),
'entered_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'forms': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'childcount.healthid': {
'Meta': {'object_name': 'HealthId', 'db_table': "'cc_healthid'"},
'generated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'health_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issued_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Patient']", 'null': 'True', 'blank': 'True'}),
'printed_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'revoked_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'G'", 'max_length': '1'})
},
'childcount.householdvisitreport': {
'Meta': {'object_name': 'HouseholdVisitReport', 'db_table': "'cc_hhvisitrpt'", '_ormbases': ['childcount.CCReport']},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'children': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'counseling': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.CodedItem']", 'symmetrical': 'False', 'blank': 'True'})
},
'childcount.immunizationnotification': {
'Meta': {'unique_together': "(('patient', 'immunization'),)", 'object_name': 'ImmunizationNotification', 'db_table': "'cc_immunnotif'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.ImmunizationSchedule']"}),
'notified_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Patient']"})
},
'childcount.immunizationschedule': {
'Meta': {'object_name': 'ImmunizationSchedule', 'db_table': "'cc_immunsched'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'period': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period_type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'childcount.medicinegivenreport': {
'Meta': {'object_name': 'MedicineGivenReport', 'db_table': "'cc_medsrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'medicines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.CodedItem']", 'symmetrical': 'False'})
},
'childcount.neonatalreport': {
'Meta': {'object_name': 'NeonatalReport', 'db_table': "'cc_neorpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'clinic_visits': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.nutritionreport': {
'Meta': {'object_name': 'NutritionReport', 'db_table': "'cc_nutrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'muac': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'oedema': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'childcount.patient': {
'Meta': {'object_name': 'Patient', 'db_table': "'cc_patient'"},
'chw': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CHW']"}),
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Clinic']", 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'estimated_dob': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'health_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'household_member'", 'null': 'True', 'to': "orm['childcount.Patient']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'resident'", 'null': 'True', 'to': "orm['locations.Location']"}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'mother': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'to': "orm['childcount.Patient']"}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'childcount.pregnancyregistrationreport': {
'Meta': {'object_name': 'PregnancyRegistrationReport', 'db_table': "'cc_pregregrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'husband': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'husband'", 'null': 'True', 'to': "orm['childcount.Patient']"}),
'married': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'number_of_children': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'pregnancies': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.pregnancyreport': {
'Meta': {'object_name': 'PregnancyReport', 'db_table': "'cc_pregrpt'", '_ormbases': ['childcount.CCReport']},
'anc_visits': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'pregnancy_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'weeks_since_anc': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'childcount.referral': {
'Meta': {'object_name': 'Referral', 'db_table': "'cc_referral'"},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'expires_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.Patient']"}),
'ref_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'reports': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.CCReport']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'childcount.referralreport': {
'Meta': {'object_name': 'ReferralReport', 'db_table': "'cc_refrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'urgency': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'childcount.sanitationreport': {
'Meta': {'object_name': 'SanitationReport', 'db_table': "'cc_sanitation_rpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'share_toilet': ('django.db.models.fields.SmallIntegerField', [], {}),
'toilet_lat': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'childcount.sickmembersreport': {
'Meta': {'object_name': 'SickMembersReport', 'db_table': "'cc_sickrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'on_treatment': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'positive_rdts': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'rdts': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'sick': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'childcount.spregnancy': {
'Meta': {'object_name': 'SPregnancy', 'db_table': "'cc_sauri_pregrpt'", '_ormbases': ['childcount.PregnancyReport']},
'cd4_count': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'folic_suppliment': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'iron_supplement': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'pmtc_arv': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['childcount.CodedItem']", 'null': 'True', 'blank': 'True'}),
'pregnancyreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.PregnancyReport']", 'unique': 'True', 'primary_key': 'True'}),
'tested_hiv': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'childcount.stillbirthmiscarriagereport': {
'Meta': {'object_name': 'StillbirthMiscarriageReport', 'db_table': "'cc_sbmcrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'incident_date': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'childcount.sunderone': {
'Meta': {'object_name': 'SUnderOne', 'db_table': "'cc_sauri_uonerpt'", '_ormbases': ['childcount.UnderOneReport']},
'underonereport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.UnderOneReport']", 'unique': 'True', 'primary_key': 'True'}),
'vaccine': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['childcount.Vaccine']", 'symmetrical': 'False'})
},
'childcount.underonereport': {
'Meta': {'object_name': 'UnderOneReport', 'db_table': "'cc_uonerpt'", '_ormbases': ['childcount.CCReport']},
'breast_only': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'immunized': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'childcount.vaccine': {
'Meta': {'object_name': 'Vaccine', 'db_table': "'cc_vaccine'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'childcount.verbalautopsyreport': {
'Meta': {'object_name': 'VerbalAutopsyReport', 'db_table': "'cc_autopsyrpt'", '_ormbases': ['childcount.CCReport']},
'ccreport_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['childcount.CCReport']", 'unique': 'True', 'primary_key': 'True'}),
'done': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'locations.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['locations.LocationType']"})
},
'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reporters.persistantbackend': {
'Meta': {'object_name': 'PersistantBackend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'reporters.reporter': {
'Meta': {'object_name': 'Reporter', '_ormbases': ['auth.User']},
'language': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reporters'", 'null': 'True', 'to': "orm['locations.Location']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['childcount']
| [
"[email protected]"
] | |
3cec55de5e490b496ba347b8d217cacfc2c13666 | b82057c77dd4d00ff9bca9a979a1a3075f0528c4 | /Exicom_gateway/checks/ec500_dg_run_hrs_status | f262c1d3c3aca62ea1088f680672de55d4d0d89f | [] | no_license | subhash-007/photography-blog | 7ee0c4f930fee29d76106c45b09e6b76cb19cf56 | b1ae66794b48bfe3862cb6e727a3a15a6ef79024 | refs/heads/master | 2020-03-31T04:33:00.276628 | 2019-07-12T06:00:39 | 2019-07-12T06:00:39 | 151,910,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | #!/usr/bin/python
"""
dgRunHrs of poller device.
This is part of device application.
Poller script determines the dgRunHrs of device.
poller script takes the snmp value of OID .1.3.6.1.4.1.38016.14.19.4 from snmp agent of device at specific interval.
Device dgRunHrs is sent to device application
"""
# ######################################################################
# Function: check_ec500_dg_run_hrs_status
#
# Parameters: info (SNMP Output), params (No Parameters)
#
# Output: device dg_run_hrs
# ######################################################################
ec500_dg_run_hrs_default_levels = ()
def check_ec500_dg_run_hrs_status(item, params, info):
"""
check_ec500_dg_run_hrs_status function fetches the dgRunHrs
Args:
item (str) Specific item on SNMP output on which we want to filter results
Kwargs:
params (tuple) Check parameters for critical and warning state of service
Returns:
state (int) :
0 : OK
1 : Warning
            2 : Critical
            3 : Unknown
infotext(string):
plugin output
Example : OK - ;;;;
Raises:
Exception
"""
state = 3
infotext = "unknown_value"
perf_data = ['']
if info:
try:
state = 0
try :
ec500_dg_run_hrs = float(info[0][0])
except Exception,e:
ec500_dg_run_hrs = str(info[0][0].replace(" ","@"))
perf_data = [("ec500_dg_run_hrs", ec500_dg_run_hrs)]
return (state, "ec500_dg_run_hrs=%s" % ec500_dg_run_hrs, perf_data)
except Exception,e:
return (3, "ec500_dg_run_hrs=%s" % infotext.replace(" ","@"), perf_data)
else:
return (state, "ec500_dg_run_hrs=%s" %"No data retrieved".replace(" ","@"), perf_data)
# This check works on all SNMP hosts
"""
Dictionary-based declaration of all check types
"""
check_info["ec500_dg_run_hrs_status"] = {
'check_function': check_ec500_dg_run_hrs_status,
'service_description': 'ec500_dg_run_hrs_status',
'has_perfdata': True,
}
#########################################################################
# SNMP OID for the device dgRunHrs
#########################################################################
snmp_info["ec500_dg_run_hrs_status"] = ('.1.3.6.1.4.1.38016.14', ['19.4'])
| [
"[email protected]"
] | ||
64c49ca2cb4c7c43f39de8540150f88edbcf456f | 09b22d1bd1263e4082e6bba7afa2f2b7a66afd4a | /2 Panda/Merging Joining and concatenating.py | 90dcad0a206666caa8afe348c070f41b30c17891 | [] | no_license | yogeshkushwahait/Machine-Learning-Using-Python | b70bc5334c4178fecc175451b8b7e04e50a60917 | 8102ce7b0cba5d48e923f979ae0a8e71c25857b1 | refs/heads/master | 2022-03-28T05:21:24.332537 | 2019-11-05T06:34:00 | 2020-01-09T16:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py |
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
import pandas as pd
# In[8]:
df1 = pd.DataFrame({'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']},
index=[0,1,2,3])
# In[9]:
df2 = pd.DataFrame({'A':['A4','A5','A6','A7'],
'B':['B4','B5','B6','B7'],
'C':['C4','C5','C6','C7'],
'D':['D4','D5','D6','D7']},
index=[4,5,6,7])
# In[10]:
df3 = pd.DataFrame({'A':['A8','A9','A10','A11'],
'B':['B8','B9','B10','B11'],
'C':['C8','C9','C10','C11'],
'D':['D8','D9','D10','D11']},
index=[8,9,10,11])
# In[11]:
df1
# In[12]:
df2
# In[13]:
df3
# In[14]:
pd.concat([df1,df2,df3])
# In[15]:
pd.concat([df1,df2,df3],axis=1)
# In[18]:
left = pd.DataFrame({'key':['K0','K1','K2','K3'],
'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3']})
# In[19]:
right = pd.DataFrame({'key':['K0','K1','K2','K3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']})
# In[20]:
left
# In[21]:
right
# In[22]:
pd.merge(left,right,how='inner',on='key') #By default inner
# In[27]:
left = pd.DataFrame({'key1':['K0','K0','K1','K2'],
'key2':['K0','K1','K0','K1'],
'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key1':['K0','K1','K1','K2'],
'key2':['K0','K0','K0','K0'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']})
# In[28]:
left
# In[29]:
right
# In[30]:
pd.merge(left,right,on=['key1','key2'])
# In[31]:
pd.merge(left,right,how='outer', on=['key1','key2'])
# In[32]:
pd.merge(left,right,how='right', on=['key1','key2'])
# In[34]:
pd.merge(left,right,how='left', on=['key1','key2'])
| [
"[email protected]"
] | |
86967f12db84d645c96b6fcc9ce73c7e7323e057 | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /ros/py_ros/ur/follow_q_traj2.py | aefc0a0a5ab2d97e9674f530bc544538ff3fa55e | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | #!/usr/bin/python
#\file follow_q_traj1.py
#\brief Follow a joint angle trajectory.
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
#\date Jun.12, 2018
#Based on: ../baxter/follow_q_traj1.py
import roslib
import rospy
import actionlib
import control_msgs.msg
import trajectory_msgs.msg
import time, math, sys, copy
from get_q1 import GetState
if __name__=='__main__':
rospy.init_node('ur_test')
joint_names= ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
client= actionlib.SimpleActionClient('/follow_joint_trajectory', control_msgs.msg.FollowJointTrajectoryAction)
  client.cancel_goal()  # Cancel any ongoing goal before sending a new one.
# Wait some seconds for the head action server to start or exit
if not client.wait_for_server(rospy.Duration(5.0)):
rospy.logerr('Exiting - Joint Trajectory Action Server Not Found')
rospy.signal_shutdown('Action Server not found')
sys.exit(1)
goal= control_msgs.msg.FollowJointTrajectoryGoal()
#goal.goal_time_tolerance= rospy.Time(0.1)
goal.trajectory.joint_names= joint_names
#NOTE: We need to specify velocities. Otherwise:
#error_code: -1
#error_string: "Received a goal without velocities"
def add_point(goal, time, positions, velocities):
point= trajectory_msgs.msg.JointTrajectoryPoint()
point.positions= copy.deepcopy(positions)
point.velocities= copy.deepcopy(velocities)
point.time_from_start= rospy.Duration(time)
goal.trajectory.points.append(point)
angles= GetState().position
add_point(goal, 0.0, angles, [0.0]*6)
add_point(goal, 1.0, [q+0.02 for q in angles], [0.0]*6)
add_point(goal, 3.0, [q-0.02 for q in angles], [0.0]*6)
add_point(goal, 4.0, angles, [0.0]*6)
goal.trajectory.header.stamp= rospy.Time.now()
client.send_goal(goal)
#client.cancel_goal()
#client.wait_for_result(timeout=rospy.Duration(20.0))
print client.get_result()
#rospy.signal_shutdown('Done.')
| [
"[email protected]"
] | |
1a1e06233c6c3a7141c912a01c8cfff5ff0c0416 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/url_object.py | 301b5c0fb38a39a4473ea350b7066e607b8d92f6 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,090 | py | # coding: utf-8
import re
import six
class UrlObject:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'url': 'str',
'status': 'str',
'create_time': 'int',
'task_id': 'str',
'process_reason': 'str'
}
attribute_map = {
'id': 'id',
'url': 'url',
'status': 'status',
'create_time': 'create_time',
'task_id': 'task_id',
'process_reason': 'process_reason'
}
def __init__(self, id=None, url=None, status=None, create_time=None, task_id=None, process_reason=None):
"""UrlObject - a model defined in huaweicloud sdk"""
self._id = None
self._url = None
self._status = None
self._create_time = None
self._task_id = None
self._process_reason = None
self.discriminator = None
if id is not None:
self.id = id
if url is not None:
self.url = url
if status is not None:
self.status = status
if create_time is not None:
self.create_time = create_time
if task_id is not None:
self.task_id = task_id
if process_reason is not None:
self.process_reason = process_reason
@property
def id(self):
"""Gets the id of this UrlObject.
        Task ID.
:return: The id of this UrlObject.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this UrlObject.
        Task ID.
:param id: The id of this UrlObject.
:type: str
"""
self._id = id
@property
def url(self):
"""Gets the url of this UrlObject.
        The URL address.
:return: The url of this UrlObject.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this UrlObject.
        The URL address.
:param url: The url of this UrlObject.
:type: str
"""
self._url = url
@property
def status(self):
"""Gets the status of this UrlObject.
        URL status: processing, succeed, or failed, meaning in progress, completed, or failed, respectively.
:return: The status of this UrlObject.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this UrlObject.
        URL status: processing, succeed, or failed, meaning in progress, completed, or failed, respectively.
:param status: The status of this UrlObject.
:type: str
"""
self._status = status
@property
def create_time(self):
"""Gets the create_time of this UrlObject.
        URL creation time, as the number of milliseconds elapsed since UTC 1970-01-01.
:return: The create_time of this UrlObject.
:rtype: int
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this UrlObject.
        URL creation time, as the number of milliseconds elapsed since UTC 1970-01-01.
:param create_time: The create_time of this UrlObject.
:type: int
"""
self._create_time = create_time
@property
def task_id(self):
"""Gets the task_id of this UrlObject.
        ID of the task this URL belongs to.
:return: The task_id of this UrlObject.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this UrlObject.
        ID of the task this URL belongs to.
:param task_id: The task_id of this UrlObject.
:type: str
"""
self._task_id = task_id
@property
def process_reason(self):
"""Gets the process_reason of this UrlObject.
        Reason recorded for the processing result.
:return: The process_reason of this UrlObject.
:rtype: str
"""
return self._process_reason
@process_reason.setter
def process_reason(self, process_reason):
"""Sets the process_reason of this UrlObject.
        Reason recorded for the processing result.
:param process_reason: The process_reason of this UrlObject.
:type: str
"""
self._process_reason = process_reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UrlObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
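# Minimal usage sketch (the field values below are hypothetical): construct a
# model and serialize it. to_dict() walks openapi_types, recursing into nested
# models and masking any attribute listed in sensitive_list.
# obj = UrlObject(id="1", url="http://example.com/logo.png", status="processing")
# obj.to_dict()  # -> {'id': '1', 'url': 'http://example.com/logo.png', 'status': 'processing', ...}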
| [
"[email protected]"
] | |
f91cda1a698a8414cfa6864362ce4e7d7c5b3a8e | 6a1eba9825c67782102972aee1759f0e59c9eef7 | /naeval/morph/models/__init__.py | 9b598a2c2e6add867c5e7b31ef389418ccf23090 | [] | no_license | buriy/naeval | 6d238592ba6c02a625ccf7b643af84350b913de8 | 455cfb07047140aff2e4700a1630db7682c4d06a | refs/heads/master | 2022-08-08T00:19:06.185029 | 2020-05-05T06:52:05 | 2020-05-05T06:52:05 | 264,369,308 | 0 | 0 | null | 2020-05-16T05:52:04 | 2020-05-16T05:52:03 | null | UTF-8 | Python | false | false | 338 | py |
from .udpipe import UDPipeModel # noqa
from .spacy import SpacyModel # noqa
from .maru import MaruModel # noqa
from .rnnmorph import RNNMorphModel # noqa
from .deeppavlov import DeeppavlovModel, DeeppavlovBERTModel # noqa
from .rupostagger import RuPosTaggerModel # noqa
from .slovnet import SlovnetModel, SlovnetBERTModel # noqa
| [
"[email protected]"
] | |
45393a6b428b0bd41d5ee54fb12cc5590f3ea26f | 7f761492df0e0d1ae0fb77811ad462941854b7de | /pytorch_translate/research/unsupervised_morphology/bilingual_bpe.py | 5ff4d17a838bffa980f82c424810400d95e8f426 | [
"BSD-3-Clause"
] | permissive | blufb/translate | f5f9ee19818592dd1591d8d556bbdce82ab989f2 | 4a3465b846927dab3d53e98e57b3da1c7085fa17 | refs/heads/master | 2020-05-22T20:01:41.723629 | 2019-05-11T06:41:52 | 2019-05-11T06:48:31 | 186,503,354 | 0 | 0 | null | 2019-05-13T22:17:52 | 2019-05-13T22:17:52 | null | UTF-8 | Python | false | false | 8,419 | py | #!/usr/bin/env python3
import logging
from collections import defaultdict
from optparse import OptionParser
from typing import Dict, List, Set, Tuple
from pytorch_translate.research.unsupervised_morphology.bpe import BPE
from pytorch_translate.research.unsupervised_morphology.char_ibm_model1 import (
Word2CharIBMModel1,
)
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
logger = logging.getLogger(__name__)
def get_arg_parser():
parser = OptionParser()
parser.add_option(
"--src-file",
dest="src_train_file",
help="Source raw text as training data.",
metavar="FILE",
default=None,
)
parser.add_option(
"--dst-file",
dest="dst_train_file",
help="Target raw text as training data.",
metavar="FILE",
default=None,
)
parser.add_option(
"--vocab-size",
type="int",
dest="vocab_size",
help="Source vocabulary Size.",
default=20000,
)
parser.add_option(
"--train-out",
dest="train_output_file",
help="BPE tokenized source train file.",
metavar="FILE",
default=None,
)
parser.add_option(
"--ibm-iters",
type="int",
dest="num_ibm_iters",
help="Number of training epochs for character IBM models.",
default=3,
)
parser.add_option("--model", type="str", dest="model_path", help="Model Path.")
return parser
class BilingualBPE(BPE):
"""
An extension of the BPE model that is cross-lingual wrt parallel data.
"""
def _init_params(self, ibm_model_path: str, src_txt_path: str, dst_txt_path: str):
"""
Args:
src_txt_path: Text path for source language in parallel data.
dst_txt_path: Text path for target language in parallel data.
            ibm_model_path: Path to the trained character IBM model on disk.
"""
logger.info("calculating alignment-based BPE type probs.")
self.bpe_probs_from_alignment = self._calc_bpe_prob_from_alignment(
ibm_model_path=ibm_model_path, dst_txt_path=dst_txt_path
)
        # Need to call this at the end, because this function calls the
        # _init_candidate_frequencies method (which needs bpe_probs_from_alignment).
self._init_vocab(txt_path=src_txt_path)
def _calc_word_probs(self, txt_path: str) -> Dict[str, float]:
"""
Calculates the probability of each word from raw counts in a text file.
"""
vocab = defaultdict(float)
with open(txt_path) as txt_file:
for line in txt_file:
for word in line.strip().split():
vocab[word] += 1
denom = sum(vocab.values())
for word in vocab.keys():
vocab[word] /= denom
return vocab
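    # Illustrative check (hypothetical corpus, not part of the pipeline): a text
    # file containing the single line "a a b" yields word probabilities
    # {"a": 2/3, "b": 1/3}, since raw counts are normalized by their sum.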
def _calc_bpe_prob_from_alignment(
self, ibm_model_path: str, dst_txt_path: str
) -> Dict[str, float]:
"""
p(subword=s) = sum_{t in target} p(s|t) p(t)
where p(t) is target_word_prob[t] from _calc_word_probs
and p(s|t) = self.dst2src_ibm_model.translation_prob[t][s]
"""
dst2src_ibm_model = Word2CharIBMModel1()
dst2src_ibm_model.load(file_path=ibm_model_path)
target_word_probs = self._calc_word_probs(txt_path=dst_txt_path)
bpe_alignment_prob = defaultdict(float)
for dst_word_id in list(dst2src_ibm_model.translation_prob.keys()):
dst_word = dst2src_ibm_model.int2str(dst_word_id)
target_word_prob = target_word_probs[dst_word]
alignment_probs = dst2src_ibm_model.translation_prob[dst_word_id]
for src_subword_id in list(alignment_probs.keys()):
src_subword = dst2src_ibm_model.int2str(src_subword_id)
bpe_alignment_prob[src_subword] += (
alignment_probs[src_subword] * target_word_prob
)
for src_subword in bpe_alignment_prob.keys():
bpe_alignment_prob[src_subword] = max(
bpe_alignment_prob[src_subword], 1e-30
)
return bpe_alignment_prob
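    # Worked example of the marginalization above (numbers are hypothetical):
    # with target words t1, t2 where p(t1) = 0.75 and p(t2) = 0.25, and
    # alignment probabilities p(s|t1) = 0.2 and p(s|t2) = 0.4, the subword
    # prior is p(s) = 0.2 * 0.75 + 0.4 * 0.25 = 0.25, floored at 1e-30 so no
    # subword ever gets exactly zero probability downstream.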
def _init_candidate_frequencies(self) -> None:
self.merge_candidate_indices: Dict[Tuple[str, str], Set[int]] = defaultdict(set)
        self.merge_candidate_freq: Dict[Tuple[str, str], float] = defaultdict(float)
for word_index, (seg, freq) in enumerate(self.current_train_data):
for i in range(len(seg) - 1):
bpe_key = (seg[i], seg[i + 1])
bpe_token = "".join(bpe_key)
self.merge_candidate_freq[bpe_key] += self.bpe_alignment_prob(
bpe_token, freq
)
self.merge_candidate_indices[bpe_key].add(word_index)
self.vocab[seg[i]] += freq
self.vocab[seg[-1]] += freq
def bpe_alignment_prob(self, bpe_token: str, freq: int):
if bpe_token in self.bpe_probs_from_alignment:
return self.bpe_probs_from_alignment[bpe_token]
else:
# In cases where the alignment model did not cover long character
# sequences in training data.
return freq * 1e-30
def update_candidate_frequencies(
self, data_index: int, old_tokens: List[str], new_tokens: List[str]
):
"""
After each merge operation, we have to update the frequencies of the BPE
candidates, including the ones that are deprecated (old_tokens), and the
new ones (new_tokens) with respect to a training word (in data_index).
"""
freq = self.current_train_data[data_index][1]
for i in range(len(new_tokens) - 1):
self.vocab[new_tokens[i]] += freq
bpe_candidate = (new_tokens[i], new_tokens[i + 1])
bpe_token = "".join(bpe_candidate)
self.merge_candidate_freq[bpe_candidate] += self.bpe_alignment_prob(
bpe_token, freq
)
self.merge_candidate_indices[bpe_candidate].add(data_index)
self.vocab[new_tokens[-1]] += freq
for i in range(len(old_tokens) - 1):
self.vocab[old_tokens[i]] -= freq
if self.vocab[old_tokens[i]] == 0:
del self.vocab[old_tokens[i]]
bpe_candidate = (old_tokens[i], old_tokens[i + 1])
bpe_token = "".join(bpe_candidate)
pfreq = self.bpe_alignment_prob(bpe_token, freq)
if pfreq > 0: # just in case there is an underflow in value.
self.merge_candidate_freq[bpe_candidate] -= pfreq
if self.merge_candidate_freq[bpe_candidate] == 0:
del self.merge_candidate_freq[bpe_candidate]
del self.merge_candidate_indices[bpe_candidate]
self.vocab[old_tokens[-1]] -= freq
if self.vocab[old_tokens[-1]] == 0:
del self.vocab[old_tokens[-1]]
def build_vocab(
self, ibm_model_path: str, src_txt_path: str, dst_txt_path: str, vocab_size: int
):
"""
Note that except initalization, other parts are the same as the
original bpe build_vocab method.
"""
self._init_params(
ibm_model_path=ibm_model_path,
src_txt_path=src_txt_path,
dst_txt_path=dst_txt_path,
)
return self._build_vocab_loop(vocab_size=vocab_size)
if __name__ == "__main__":
arg_parser = get_arg_parser()
options, args = arg_parser.parse_args()
# Note the reverse side of the model. Target is word based, that is why
# we give it a reverse order.
dst2src_ibm_model = Word2CharIBMModel1()
dst2src_ibm_model.learn_ibm_parameters(
src_path=options.dst_train_file,
dst_path=options.src_train_file,
num_iters=options.num_ibm_iters,
)
dst2src_ibm_model.save(file_path=options.model_path + ".ibm")
bpe_model = BilingualBPE()
bpe_model.build_vocab(
ibm_model_path=options.model_path + ".ibm",
src_txt_path=options.src_train_file,
dst_txt_path=options.dst_train_file,
vocab_size=options.vocab_size,
)
bpe_model.segment_txt(
input_path=options.src_train_file, output_path=options.train_output_file
)
bpe_model.save(file_path=options.model_path)
| [
"[email protected]"
] | |
bb46203f0ac1b4d9b9656df02a3f5910772f7b1a | 3017b7399869057a8be7fb11ee9341b9c8f97ba4 | /genfiles/legacysky_randoms.py | 12bc592ff3e947e2d5310237717d2e385ab9a3ea | [] | no_license | michaelJwilson/SV-QA | 8f486422eb71b3fbd0d395904fd654ba432bd777 | dd6095d570442852bb28ac9da0f18be7b83cddce | refs/heads/master | 2020-07-29T16:04:55.759155 | 2019-12-20T14:37:23 | 2019-12-20T14:37:23 | 209,872,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,717 | py | import os
import sys
import json
import glob
import fitsio
import matplotlib
import pylab as pl
import pandas as pd
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import numpy.lib.recfunctions as rfn
import healpy as hp
from mpl_toolkits.axes_grid1 import make_axes_locatable
from fast_scatter import fast_scatter
from matplotlib import rc
from astropy.table import Table, vstack
from desitarget.targets import encode_targetid
from desitarget.geomask import is_in_box
from desitarget.targetmask import desi_mask
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from make_mask import get_hone
## plt.style.use(['dark_background'])
rc('font', **{'family':'serif', 'serif':['Times']})
rc('text', usetex=True)
nside = np.int(sys.argv[1])
camera = str.encode(sys.argv[2]) ## [b'90prime', b'mosaic', b'decam']
band = str.encode(sys.argv[3]) ## [b'g', b'r', b'z']
recompute = False
noplot = False
plot_elgs = True
if camera == b'mosaic':
band = b'z'
nrandom = np.int(20000)
## Sky rms for the entire image (in counts).
## Our pipeline (not the CP) estimate of the sky level, average over the image, in ADU.
## Standard deviation of our sky level.
## Sky surface brightness (in AB mag/arcsec2).
## Min. of our sky level.
## Max. of our sky level.
## FWHM (in pixels) measured by the CP.
## Community pipeline number.
cols = ['expnum', 'camera', 'filter', 'ra_center', 'dec_center', 'ra0', 'dec0', 'ra1', 'dec1', 'ra2', 'dec2', 'ra3', 'dec3']
skies = ['exptime',\
'skyrms',\
'meansky',\
'stdsky',\
'ccdskysb',\
'minsky',\
'maxsky',\
'fwhm',\
'plver',\
'airmass']
def remap(x, printit=False):
uentries, cnts = np.unique(x, return_counts = True)
result = np.zeros(len(x), dtype=[('plverf', np.float32)])
for u in uentries:
entry = u.decode('UTF-8')[1:].replace('.', '').strip()
lentry = len(entry)
rentry = np.float(entry) / 10. ** (lentry - 1)
print('Remapping {}, {} ({}) {}.'.format(u, entry, lentry, rentry))
result[x == u] = rentry
## print(result['plverf'])
##
print(np.unique(result['plverf'], return_counts=True))
return result
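## Sketch of the remapping above (hypothetical tag): b'V4.8.2' decodes to
## "4.8.2", the dots are stripped to give "482" (3 characters), and the value
## becomes 482 / 10**2 = 4.82 -- turning CP version strings into comparable floats.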
##
randoms = fitsio.FITS('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randoms/randoms-inside-dr8-0.31.0-2.fits')
randoms = randoms[1]['RA', 'DEC'][:nrandom]
## randoms = Table(fits.open('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randoms/randoms-inside-dr8-0.31.0-2.fits')[1].data)
## randoms.pprint()
_file = '/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/skies/skies_{}_{}.txt'.format(camera.decode('UTF-8'), band.decode('UTF-8'))
nfail = 0
if (not os.path.exists(_file)) or recompute:
print('{} not found, recalculating.'.format(_file))
ccd = fitsio.FITS('/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/ccds/ccds-annotated-{}-dr8.fits'.format(camera.decode('UTF-8')))
dtype = ccd[1].get_rec_dtype()[0]
ccd = ccd[1].read(vstorage='object')
ccd = np.array(ccd, dtype=dtype)[cols + skies]
plverf = remap(ccd['plver']) ## np.array(remap(ccd['plver']), dtype=[('plverf', np.float32)])
##
ccd = rfn.merge_arrays([ccd, plverf], flatten = True, usemask = False)
skies.remove('plver')
skies = skies + ['plverf']
ccd = ccd[cols + skies]
exps = ccd[ccd['filter'] == band]
result = np.zeros(len(randoms) * len(skies)).reshape(len(randoms), len(skies))
count = np.zeros(len(randoms), dtype=np.int32)
for i, x in enumerate(exps):
try:
## http://legacysurvey.org/ccdordering/
if camera == b'decam':
inccd = is_in_box(randoms, [x['ra3'], x['ra1'], x['dec3'], x['dec1']])
elif camera == b'90prime':
## BASS
inccd = is_in_box(randoms, [x['ra2'], x['ra0'], x['dec2'], x['dec0']])
elif camera == b'mosaic':
## MzLS
inccd = is_in_box(randoms, [x['ra0'], x['ra2'], x['dec0'], x['dec2']])
else:
                raise ValueError('Invalid input for camera.')
except:
nfail += 1
print('Failed for {}'.format([x['ra0'], x['ra2'], x['dec0'], x['dec2']]))
continue
##
if len(inccd) > 0:
print('Solving for {} of {}.'.format(i, len(exps)))
toadd = np.array(list(x[skies]))
result[inccd] += toadd
count[inccd] += 1
##
result[count > 0] /= count[count > 0].astype(np.float)[:,None]
np.savetxt(_file, result, fmt='%.6le')
if noplot:
exit(0)
else:
result = np.loadtxt(_file)
## Make room for Airmass in plot.
skies.remove('plver')
skies = skies + ['plverf']
result = Table(data=result, names=skies)
##
del result['minsky']
del result['maxsky']
##
skies.remove('minsky')
skies.remove('maxsky')
result.pprint()
##
skies = skies + ['HI']
##
ncol = 2
nrow = np.int(np.ceil(len(skies) / 2))
##
fig, axarr = plt.subplots(nrows=np.int(np.ceil(len(skies) / 2)), ncols=2, figsize=(10, 10))
##
plt.subplots_adjust(left = 0.05, right = 0.95, hspace=0.6, wspace=0.4, top = 0.925, bottom = 0.05)
## Wrap randoms
randoms['RA'][randoms['RA'] > 300.] -= 360.
randoms['RA'] += 60.
## Cut to non-DES.
result = result[(-30. < randoms['DEC'])]
randoms = randoms[(-30. < randoms['DEC'])]
if camera == b'decam':
result, randoms = [result[(randoms['DEC'] < 30.)], randoms[(randoms['DEC'] < 30.)]]
else:
result, randoms = [result[(randoms['DEC'] > 35.)], randoms[(randoms['DEC'] > 35.)]]
##
for i, _ in enumerate(skies):
row = i % nrow
col = i % 2
print(i, row, col, _)
if _ == 'HI':
hpra, hpdec, values = get_hone()
## Cut to non-DES.
isin = hpdec > -30.
values = values[isin]
hpra = hpra[isin]
hpdec = hpdec[isin]
if camera == b'decam':
isin = hpdec < 30.
values = values[isin]
hpra = hpra[isin]
hpdec = hpdec[isin]
else:
isin = hpdec > 35.
values = values[isin]
hpra = hpra[isin]
hpdec = hpdec[isin]
## Wrap randoms
hpra[hpra > 300.] -= 360.
hpra += 60.
vmin = values.min()
vmax = values.max()
else:
nresult = result[_]
isin = np.isfinite(nresult)
nresult = nresult[isin]
## nresult = nresult - np.median(nresult)
## nresult /= np.std(nresult)
parea = hp.nside2pixarea(nside, degrees = True)
hppix = hp.ang2pix(nside, (90. - randoms['DEC'][isin]) * np.pi / 180., randoms['RA'][isin] * np.pi / 180., nest=False)
hpind, cnts = np.unique(hppix, return_counts=True)
theta,phi = hp.pix2ang(nside, hpind, nest=False)
hpra, hpdec = 180. / np.pi * phi, 90. -180. / np.pi * theta
values = np.array([np.mean(nresult[hppix == x]) for x in hpind])
vmin = np.quantile(values, 0.05)
vmax = np.quantile(values, 0.95)
##
fast_scatter(axarr[row][col], hpra, hpdec, values, vmin, vmax, 50, markersize=0.7, cmap='jet')
if i == 0:
ylims = axarr[row][col].get_ylim()
axarr[row][col].set_title(skies[i].upper())
axarr[row][col].set_xlim(360., 0.)
##
if plot_elgs:
nside = 512
binary = np.load('/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/healmaps/elg_tdensity_{}.npy'.format(nside))
hpind = binary[:,0].astype(np.int)
hpra = binary[:,1]
hpdec = binary[:,2]
tdensity = binary[:,3]
pix_mask = np.load('/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/elgs/pix_area.npy')
mhpind = pix_mask[:,0]
mhpra = pix_mask[:,1]
mhpdec = pix_mask[:,2]
mask = pix_mask[:,3]
##
## inmask = hpind[mask[hpind] > 0.0]
## tinmask = [x in inmask for x in hpind]
##
## tdensity[tinmask] /= mask[inmask]
## Cut to non-DES.
hpra = hpra[hpdec > -30.]
tdensity = tdensity[hpdec > -30.]
hpdec = hpdec[hpdec > -30.]
if camera == b'decam':
hpra = hpra[hpdec < 30.]
tdensity = tdensity[hpdec < 30.]
hpdec = hpdec[hpdec < 30.]
else:
hpra = hpra[hpdec > 35.]
tdensity = tdensity[hpdec > 35.]
hpdec = hpdec[hpdec > 35.]
## Wrap randoms.
hpra[hpra > 300.] -= 360.
hpra += 60.
## Digitize.
vmin = np.quantile(tdensity, 0.001)
vmax = np.quantile(tdensity, 0.999)
fast_scatter(axarr[-1][-1], hpra, hpdec, tdensity, vmin, vmax, 50, cmap='jet')
axarr[-1][-1].set_title('ELG DENSITY')
axarr[-1][-1].set_xlim(365., -5.)
axarr[-1][-1].set_ylim(ylims)
##
fig.suptitle(r'{} ${}$-band'.format(camera.decode('UTF-8').upper(), band.decode('UTF-8')), fontsize=14)
print('Number of failures: {}'.format(nfail))
pl.savefig('skydepths/skydepth_{}_{}.png'.format(camera.decode('UTF-8'), band.decode('UTF-8')))
print('\n\nDone.\n\n')
| [
"[email protected]"
] | |
e3c18ae427bff89a74c4a155958e3776f118fc9d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_142/828.py | 0dd233e9b038c694d7b9e783862aca4c210c8aa3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,573 | py | import sys,math
cases = int(raw_input())
plays = []
def skeleton(string):
last_char = string[0]
skel = last_char
for char in string[1:]:
if char != last_char:
skel += char
last_char = char
return skel
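# For example (hypothetical input), skeleton("aaabbab") collapses each run of
# repeated characters and returns "abab"; two strings can be equalized by
# inserting/deleting repeats iff their skeletons match.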
def is_possible(strings):
skel = skeleton(strings[0])
for string in strings[1:]:
if skeleton(string) != skel:
return False
return True
def mean_length(strings):
#print strings
cum = 0
for string in strings:
cum += len(string)
#print cum
m = float(cum) / len(strings)
m = int(round(m))
nearest_to_mean = 0
for i in range(len(strings)):
if (len(strings[i])-m)*(len(strings[i])-m) < (len(strings[nearest_to_mean])-m)*(len(strings[nearest_to_mean])-m):
nearest_to_mean = i
return nearest_to_mean
def numberLetter(string, letter, group):
n = 0
last_char = ''
curr = -1
for char in string:
if last_char != char and char == letter:
curr+=1
if char == letter and group == curr:
n+=1
last_char = char
return n
def vass(num):
if num > 0: return num
else: return -num
def moves(skeleton,string,target):
m = 0
for i in range(len(skeleton)):
letter = skeleton[i]
curr = 0
for j in range(i):
if skeleton[j] == letter:
curr+=1
m += vass(numberLetter(string,letter,curr)- numberLetter(target,letter,curr))
return m
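# Sketch with hypothetical inputs: for skeleton "ab", string "aab" and target
# "abb", the 'a' group differs by |2 - 1| and the 'b' group by |1 - 2|, so
# moves(...) returns 2 total single-character edits.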
def target(strings,skel):
target = ""
for i in range(len(skel)):
letter = skel[i]
curr = 0
for j in range(i):
if skel[j] == letter:
curr+=1
cum = 0
for string in strings:
cum += numberLetter(string,letter,curr)
num = int(round(float(cum)/len(strings)))
#print "letter " + letter + " : " + str(num)
for i in range(num):
target += letter
return target
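# Sketch with hypothetical inputs: for strings ["aab", "ab"] and skeleton "ab",
# the 'a' group has counts [2, 1], so round(3 / 2.0) = 2 copies of 'a' are
# emitted, and the 'b' group has counts [1, 1] -> 1 copy, giving target "aab".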
for i in range(cases):
n_strings = int(raw_input())
#print answer
strings = []
for j in range(n_strings):
strings.append(raw_input())
plays.append(strings)
case = 0
for strings in plays:
case += 1
sys.stdout.write("Case #"+str(case)+": ")
#print rows
if is_possible(strings):
skel = skeleton(strings[0])
tg = target(strings,skel)
#print strings
#print tg
tot_moves = 0
for string in strings:
tot_moves += moves(skel,string,tg)
print tot_moves
else:
print "Fegla Won";
| [
"[email protected]"
] | |
ac6c1b5d5cf591fdb17877696c50099d56778da8 | ae7b262ecd72f2fac76c7fe2cff3b8efd7224cb9 | /ContainsDuplicateII.py | cfe3bcb7c5be59b9be1922b3e736ae55d010407c | [] | no_license | FengFengHan/LeetCode | 02a8041f7413b14bed5ac17af1ba68237b159959 | c2d449f2a93815f31c432805a6b4b8008d09d3df | refs/heads/master | 2021-01-10T18:44:06.374304 | 2016-04-16T13:18:55 | 2016-04-16T13:18:55 | 56,384,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | class Solution(object):
def containsNearbyDuplicate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
indexs = {}
for i in range(len(nums)):
indexs.setdefault(nums[i], [])
indexs[nums[i]].append(i)
for lis in indexs.values():
if len(lis) > 1:
for j in range(1,len(lis)):
if (lis[j] - lis[j-1]) <= k:
return True
return False
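# Example usage (hypothetical input): nums = [1, 2, 3, 1] holds duplicates at
# indices 0 and 3, so with k = 3 the distance check 3 - 0 <= 3 passes:
# Solution().containsNearbyDuplicate([1, 2, 3, 1], 3)  # -> True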
| [
"[email protected]"
] | |
542da5ea76d78e1c3c42f517cd2c7ba1233314d2 | 43530c02696704af51742144638df037b151a259 | /apps/friend/migrations/0001_initial.py | f45900443d3e401f9a587a9677a99737475e2e2d | [] | no_license | LAdkins81/friends | 58160f4eb5096d96f5a59edc45de38ba5cd388f7 | 8f189965fede9fb13fb94ecbd6d0f9912a0162cf | refs/heads/master | 2021-01-17T08:00:08.866341 | 2017-03-03T18:05:48 | 2017-03-03T18:05:48 | 83,826,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 15:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login_reg', '0002_auto_20170303_0957'),
]
operations = [
migrations.CreateModel(
name='Friend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('friend', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userfriend', to='login_reg.User')),
],
),
]
| [
"[email protected]"
] | |
84fa63fb19d08df62b8211a7894d160f837f3aae | 2e0396c23d592338bec48daf73d5fd1e423b4f41 | /use_max_rssi_localization.py | 44ac3535ce9d92bfdafdf4445680de04dfbdea3f | [] | no_license | wystephen/localization-wifi | 4504e8fd819847e9b18641641769bf93c081c4f9 | 5dca0d0df6bced8a519f02711692c6ddfaa57e12 | refs/heads/master | 2016-09-11T13:01:17.162633 | 2015-12-29T01:32:29 | 2015-12-29T01:32:29 | 31,436,755 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,559 | py | __author__ = 'Administrator'
# -*- coding:utf-8 -*-
import numpy
import data_transfor
import data_preprocessing
import matplotlib.pyplot as plt
def pose_of_max_rssi(pose, wifi, max_rssi):
'''
找到某个ap达到最大值的第一个pose的序号
:param pose:
:param wifi:
:param max_rssi:
:return:
'''
    max_rssi_index = numpy.zeros(len(max_rssi), dtype=int)
for i in range(len(wifi[:, 1])):
for j in range(len(max_rssi)):
if max_rssi[j] == wifi[i, j]:
max_rssi_index[j] = i
print 'max_rssi_index_len:', len(max_rssi_index)
pose_array = numpy.zeros([len(max_rssi_index), 2])
for i in range(len(max_rssi_index)):
pose_array[i, :] = pose[max_rssi_index[i], :]
return max_rssi_index, pose_array
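# Each row of pose_array is the (x, y) recorded when that AP's RSSI peaked --
# a cheap stand-in for the AP's physical location, used by simple_location below.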
def simple_location(pose, wifi, pose_array):
'''
    Estimate our own position from the strongest APs (the four highest-RSSI ones).
    :param pose: actually appears unused here; to be revisited later
    :param wifi: input array of WiFi RSSI features
    :param pose_array: one point per AP, storing a pose close to (ideally closest to) that AP
    :return: the estimated coordinates
'''
out_pose = numpy.zeros([len(pose[:, 1]), 2])
max_rssi_tmp = numpy.zeros(2)
for i in range(len(pose[:, 1])):
#find max 4 index in the wifi
max_rssi = numpy.zeros([4, 2])
for j in range(len(wifi[i, :])):
if wifi[i, j] > max_rssi[3, 1]:
max_rssi[3,0] = j
max_rssi[3,1] = wifi[i,j]
for k in range(0,2):
k = 2-k
if max_rssi[k+1,1] > max_rssi[k,1]:
max_rssi_tmp[:] = max_rssi[k,:]
max_rssi[k,:] = max_rssi[k+1,:]
max_rssi[k+1,:] = max_rssi_tmp[:]
        # Average the poses associated with the four strongest APs
        # (cast the stored indices to int before fancy-indexing pose_array).
        idx = max_rssi[:, 0].astype(int)
        out_pose[i, 0] = (pose_array[idx[0], 0] + pose_array[idx[1], 0] +
                          pose_array[idx[2], 0] + pose_array[idx[3], 0]) / 4.0
        out_pose[i, 1] = (pose_array[idx[0], 1] + pose_array[idx[1], 1] +
                          pose_array[idx[2], 1] + pose_array[idx[3], 1]) / 4.0
        # Tried using only the single strongest AP and checked the error;
        # the results were worse.
        # out_pose[i, 0] = pose_array[idx[0], 0]
        # out_pose[i, 1] = pose_array[idx[0], 1]
return out_pose
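# Design note: averaging the poses of the four strongest APs smooths out
# single-AP noise; the commented-out strongest-AP-only variant above was
# tried and produced noticeably larger localization errors.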
if __name__ == '__main__':
pose, wifi = data_preprocessing.read_end_data('20153221527end_wifi.txt', '20153221527end_pose.txt')
pose2, wifi2 = data_preprocessing.read_end_data('20153141218end_wifi.txt', '20153141218end_pose.txt')
pose3, wifi3 = data_preprocessing.read_end_data('20153141231end_wifi.txt', '20153141231end_pose.txt')
pose4, wifi4 = data_preprocessing.read_end_data('20153221517end_wifi.txt', '20153221517end_pose.txt')
max_rssi = data_preprocessing.find_ap_pose(pose, wifi)
max_rssi2 = data_preprocessing.find_ap_pose(pose2, wifi2)
max_rssi3 = data_preprocessing.find_ap_pose(pose3, wifi3)
max_rssi4 = data_preprocessing.find_ap_pose(pose4, wifi4)
max_rssi_index, pose_array = pose_of_max_rssi(pose, wifi, max_rssi)
max_rssi_index2, pose_array2 = pose_of_max_rssi(pose2, wifi2, max_rssi2)
max_rssi_index3, pose_array3 = pose_of_max_rssi(pose3, wifi3, max_rssi3)
max_rssi_index4, pose_array4 = pose_of_max_rssi(pose4, wifi4, max_rssi4)
# print pose_array
plt.figure(1)
#plt.axis([-50, 200, -50, 200])
#plt.plot(pose_array[:,0],pose_array[:, 1], 'o')
plt.plot(pose_array2[:, 0], pose_array2[:, 1], 'o')
plt.plot(pose_array3[:, 0], pose_array3[:, 1], 'o')
#plt.plot(pose_array4[:,0],pose_array4[:,1], 'o')
plt.grid(1)
plt.figure(2)
source_pose_array = pose_array/4.0+pose_array2/4.0+pose_array3/4.0+pose_array4/4.0
#source_pose_array = pose_array3
out_pose1 = simple_location(pose,wifi,source_pose_array)
err1 = data_preprocessing.pose_dis(out_pose1,pose)
plt.plot(err1,'r')
out_pose2 = simple_location(pose2,wifi2,source_pose_array)
err2 = data_preprocessing.pose_dis(out_pose2,pose2)
plt.plot(err2,'b')
out_pose3 = simple_location(pose3,wifi3,source_pose_array)
err3 = data_preprocessing.pose_dis(out_pose3,pose3)
plt.plot(err3,'y')
out_pose4 = simple_location(pose4,wifi4,source_pose_array)
err4 = data_preprocessing.pose_dis(out_pose4,pose4)
plt.plot(err4,'g')
plt.grid(2)
plt.figure(3)
ok_times = 0
for i in range(len(err1)):
if err1[i] < 5:
ok_times+=1
print 'acc:', ok_times*1.0/len(err1)
plt.show()
| [
"[email protected]"
] | |
7e1d7f9452e893ef89f39038ead722d31fe328a4 | 3481356e47dcc23d06e54388153fe6ba795014fa | /swig_test/pybuffer/pybuffer.py | b1375b20698a70cb441ca9be087f2906c4867c61 | [] | no_license | Chise1/pyhk | c09a4c5a06ce93e7fe50c0cc078429f7f63fcb2f | 44bdb51e1772efad9d0116feab1c991c601aa68a | refs/heads/master | 2021-01-03T08:24:47.255171 | 2020-02-29T04:05:30 | 2020-02-29T04:05:30 | 239,998,705 | 1 | 0 | null | 2020-02-28T07:35:46 | 2020-02-12T11:40:39 | C | UTF-8 | Python | false | false | 3,056 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_pybuffer')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_pybuffer')
_pybuffer = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pybuffer', [dirname(__file__)])
except ImportError:
import _pybuffer
return _pybuffer
try:
_mod = imp.load_module('_pybuffer', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_pybuffer = swig_import_helper()
del swig_import_helper
else:
import _pybuffer
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
# This file is compatible with both classic and new-style classes.
| [
"[email protected]"
] | |
cb58f01bb96d29b45a0501db61d84089565b32e1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02853/s487102081.py | d4f290c3634e67a7bf9d48e374c08fa37acc1935 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | x,y = map(int,input().split())
def point(a):
if a == 1:
return 300000
elif a == 2:
return 200000
elif a == 3:
return 100000
else:
return 0
c = point(x)
b = point(y)
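# Placing first in both contests earns an extra 400,000 bonus on top of
# 300,000 + 300,000, hence the special-cased total of 1,000,000 below.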
if x == 1 and y == 1:
print(1000000)
else:
print(c+b) | [
"[email protected]"
] | |
ff177921548f852db1a384ec33200275af66728e | 94c1805df5a09c39159d502f420d19ad54b567fc | /runtime/deps/gyp/test/configurations/invalid/gyptest-configurations.py | bd844b95dd8a330a237123acbaf741c1c816187d | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tmikov/jscomp | 9805a5a4d06520549c57380f0df4a1c0aa0dab56 | 83828441cb38ec96603a6a60be06977d4852940a | refs/heads/develop | 2021-01-19T02:56:35.102659 | 2016-04-12T06:19:30 | 2016-04-12T06:19:30 | 36,981,674 | 237 | 13 | Apache-2.0 | 2018-10-14T09:48:12 | 2015-06-06T13:49:26 | C | UTF-8 | Python | false | false | 923 | py | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
test = TestGyp.TestGyp()
for test_key in invalid_configuration_keys:
test.run_gyp('%s.gyp' % test_key, status=1, stderr=None)
expect = ['%s not allowed in the Debug configuration, found in target '
'%s.gyp:configurations#target' % (test_key, test_key)]
test.must_contain_all_lines(test.stderr(), expect)
test.pass_test()
| [
"[email protected]"
] | |
867fbd9385ee1f515de8c6bdcfc4433562c0711f | 5e324af46c554b88b97ee26886b05c88457ff0f5 | /clients/api/client_list.py | c29315aecdbe43ce46d49900a6fc6013ae660bf1 | [] | no_license | doubleclickdetroit/dindintonight | 1bda8851e49782d4dc16ca77d46e4b1f431c2b52 | 9769e1a96730b02511d25af8828b075dff5c35b5 | refs/heads/master | 2016-08-04T22:01:08.083566 | 2014-07-26T18:58:58 | 2014-07-26T18:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | from clients.models import Client, ClientUser
from clients.serializers import ClientSerializer
from core.api import RESTView
class ClientList(RESTView):
"""
Client List API Class
Example URLs:
/api/v1/clients/
"""
URL_NAME = 'api-v1-client-list'
def _handle_get(self, request, *args, **kwargs):
results = Client.objects.all()
user = request.GET.get('user', None)
if user is not None:
results = results.filter(users__pk=user)
return self.list_results(request, results, ClientSerializer, use_cache=True,
cache_time=self.CACHE_30_DAYS, cache_version=1)
def _handle_post(self, request, *args, **kwargs):
"""
Sample post data:
{
"name": "Test by Rob"
}
"""
serializer = ClientSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
# create the user to link them to the client they just created
ClientUser.objects.create(client=serializer.object, user=request.user)
return serializer.data
return self.raise_bad_request(serializer.errors)
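    # Example interaction (hypothetical host/auth): POSTing the sample body
    # above to /api/v1/clients/ creates the Client, then links request.user to
    # it through a ClientUser row before the serialized data is returned.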
| [
"[email protected]"
] | |
450b6295dff2b84499d8e5a4ad95db6e63d3b811 | 6c8305ea1df9687df1c0d2b0ace56733516c6322 | /readthedocs/builds/migrations/0044_alter_version_documentation_type.py | 86ad50cfe4c8429849bfd08a5e13b50d29d23d61 | [
"MIT"
] | permissive | readthedocs/readthedocs.org | 9806083aa744c2308267919480a692e1e003e45d | bf88ce6d1085d922322a5fadce63a22c5544c830 | refs/heads/main | 2023-09-05T20:22:34.281891 | 2023-09-05T12:41:52 | 2023-09-05T12:41:52 | 841,835 | 2,894 | 1,509 | MIT | 2023-09-14T20:36:00 | 2010-08-16T19:18:06 | Python | UTF-8 | Python | false | false | 958 | py | # Generated by Django 3.2.13 on 2022-05-30 10:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("builds", "0043_add_cancelled_state"),
]
operations = [
migrations.AlterField(
model_name="version",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
("mkdocs_html", "Mkdocs Html Pages"),
("generic", "Generic"),
],
default="sphinx",
help_text="Type of documentation the version was built with.",
max_length=20,
verbose_name="Documentation type",
),
),
]
| [
"[email protected]"
] | |
63b02058dd9984a335a7f72ff4650f3fda2d6879 | b9bc60cca34c6b4f8a750af6062f357f18dfcae2 | /tensorflow/contrib/copy_graph/python/util/copy_test.py | 68865fab497d3b72ff411643e196c193ac79df2e | [
"Apache-2.0"
] | permissive | lidenghui1110/tensorflow-0.12.0-fpga | 7c96753aafab5fe79d5d0c500a0bae1251a3d21b | f536d3d0b91f7f07f8e4a3978d362cd21bad832c | refs/heads/master | 2022-11-20T11:42:11.461490 | 2017-07-28T09:28:37 | 2017-07-28T09:28:37 | 98,633,565 | 3 | 2 | Apache-2.0 | 2022-11-15T05:22:07 | 2017-07-28T09:29:01 | C++ | UTF-8 | Python | false | false | 3,323 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import tensor_util
graph1 = tf.Graph()
graph2 = tf.Graph()
class CopyVariablesTest(tf.test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = tf.Variable(2)
#Initialize session
sess1 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess1)
      #Make a copy of some_var in the default scope in graph2
copy1 = tf.contrib.copy_graph.copy_variable_to_graph(
some_var, graph2)
#Make another copy with different scope
copy2 = tf.contrib.copy_graph.copy_variable_to_graph(
some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = tf.Session()
#Initialize the Variables
tf.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, tf.Variable)
assert isinstance(copy2, tf.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(tf.test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = tf.placeholder("float")
a = tf.Variable(3.0)
b = tf.constant(4.0)
ax = tf.mul(x, a)
y = tf.add(ax, b)
#Initialize session
sess1 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = tf.contrib.copy_graph.copy_variable_to_graph(
a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = tf.contrib.copy_graph.copy_op_to_graph(
y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = tf.contrib.copy_graph.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
9392be60bb332ad98a912eadab328e3f523a5a0c | 200ec10b652f9c504728890f6ed7d20d07fbacae | /views.py | d236861e828287d0c761fe880d0bf9fc996219ee | [] | no_license | Ks-Ksenia/flask_shop | f4edc17669c29ae02a89e836c3c48230147ae84f | 9eb44fd22bf99913c9824ea35e3922cb14ef2451 | refs/heads/master | 2023-03-01T13:55:20.749127 | 2021-02-14T09:29:04 | 2021-02-14T09:29:04 | 338,767,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | from app import app, db
from flask import render_template, redirect, request, flash, url_for
from flask_login import login_required, login_user, logout_user, current_user
from models import User, Product
from forms import LoginForm, RegistrationForm
@app.route('/')
def index():
return redirect(url_for('menu.catalog'))
@app.route('/search/')
def search():
q = request.args.get('q')
page = request.args.get('page')
if page and page.isdigit():
page = int(page)
else:
page = 1
pages, products = [], []
if q:
        products = Product.query.filter(Product.product_name.contains(q))
pages = products.paginate(page=page, per_page=1)
return render_template('search.html', products=products, pages=pages, q=q)
@app.login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
@app.route('/login', methods=['POST', 'GET'])
def login():
if not current_user.is_authenticated:
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter(User.email == form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember.data)
return redirect(request.args.get('next') or url_for('index'))
else:
                flash('Invalid email or password')
        return render_template('login.html', form=form, title="Sign In")
return redirect(url_for('index'))
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/register', methods=['POST', 'GET'])
def registration():
form = RegistrationForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if not user:
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password1.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('login'))
else:
            flash('A user with this email already exists')
    return render_template('singup.html', form=form, title='Registration')
| [
"[email protected]"
] | |
c7100455f984252260f69c417f013297d223251a | 5147c18763fcc9624c5796e332c8fa1b2b91e063 | /project_name/settings.py | 520d956f350443aade6ce68126c37c03a9da2c87 | [
"MIT"
] | permissive | upeu-2018-pro/heroku-django-template-upeu | c0b259fdde50b702750066a3eea4bb3bf38010e5 | e7917ac14476b4dcd043be692ea5578bd5955804 | refs/heads/master | 2021-08-29T14:40:06.786471 | 2017-12-14T04:55:43 | 2017-12-14T04:55:43 | 113,873,492 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,115 | py | """
Django settings for {{ project_name }} project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template-upeu
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "{{ secret_key }}"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'social_django',
'core', # if AUTH_USER_MODEL = 'core.User' and SOCIAL_AUTH_USER_MODEL = 'core.User'
'accounts',
'django.contrib.admin',
'bootstrap3',
'django.contrib.admindocs',
'oauth2_provider',
'rest_framework',
'corsheaders',
# Add your apps here.
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DATABASESx = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'd6rvmpj68r6l4n',
'USER': 'vouyocdcjggiwt',
'PASSWORD': 'a59df75e117f358691a9e2b460c2d41fc9d2371286e840791a9cafb8219b6463',
'HOST': 'ec2-54-221-204-213.compute-1.amazonaws.com',
'PORT': '5432',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# add vars
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
AUTH_USER_MODEL = 'core.User' # if 'core' app in INSTALLED_APPS
SOCIAL_AUTH_USER_MODEL = 'core.User' # if 'core' app in INSTALLED_APPS
LOGIN_REDIRECT_URL = reverse_lazy('dashboard')
LOGIN_URL = reverse_lazy('login')
LOGOUT_URL = reverse_lazy('logout')
# Configure Django to print e-mail content to the console instead of sending it via SMTP
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = '12345678a'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = '[email protected]'
AUTHENTICATION_BACKENDS = (
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
'accounts.authentication.EmailAuthBackend',
#'oauth2_provider.backends.OAuth2Backend',
)
AUTHENTICATION_BACKENDSx = (
'oauth2_provider.backends.OAuth2Backend',
# Uncomment following if you want to access the admin
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_FACEBOOK_KEY = '1505995752817741'
SOCIAL_AUTH_FACEBOOK_SECRET = 'a1b671143334bf268f0881655dd7f08c'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
SOCIAL_AUTH_GITHUB_KEY = 'ddcde373489a4be6deec'
SOCIAL_AUTH_GITHUB_SECRET = '39319ff7120c0f1cb57af7130f323ebf7cb35669'
SOCIAL_AUTH_GITHUB_SCOPE = ['email']
SOCIAL_AUTH_GITHUB_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '1082996671890-fbu61vmp0gfehh7tg0tgs7feenqr95qj.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'kYj_RFjlDyDQbt6SnscPrC1j'
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['email']
SOCIAL_AUTH_GOOGLE_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
#SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['upeu.edu.pe', ]
SOCIAL_AUTH_TWITTER_KEY = 'SYaVSEKyHbeoFrUJm6zEU1XVa'
SOCIAL_AUTH_TWITTER_SECRET = 'deltB2apIjLqThoJXDRPkPVI8ure7x9Ik5RD3g6mF6H64gOrnJ'
SOCIAL_AUTH_TWITTER_OAUTH2_SCOPE = ['email']
SOCIAL_AUTH_TWITTER_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
#CSRF_USE_SESSIONS = False
CORS_ORIGIN_ALLOW_ALL = True # False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ('127.0.0.1:9001', '127.0.0.1:9003',
'localhost:9003', 'localhost:8003')
# Also
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {
'read': 'Read scope',
'write': 'Write scope',
'openid': 'Access to your openid connect',
'home': 'Access to your home page',
'backend': 'Access to your backend app',
'catalogo': 'Access to your catalogo app',
'ubigeo': 'Access to your ubigeo app',
'eventos': 'Access to your admision app',
'acuerdos': 'Access to your admision app',
'admision': 'Access to your admision app',
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
#'rest_framework.authentication.SessionAuthentication',
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
)
}
import datetime
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose0': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'verbose': {
'format': "[%(asctime)s] [%(levelname)s] [%(name)s:%(lineno)s] [%(path)s] [%(ip)s] [%(user)s] [%(method)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'verbose_dj': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
},
'handlers': {
'file_django': {
'level': 'DEBUG',
#'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'class': 'logging.FileHandler',
'filename': os.path.join(
BASE_DIR, 'temp/logs',
'dj%s.txt' % (datetime.datetime.now().strftime("%Y-%m-%d"))
),
'formatter': 'verbose_dj'
},
'file_log': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(
BASE_DIR, 'temp/logs',
'log%s.txt' % (datetime.datetime.now().strftime("%Y-%m-%d"))
),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose0'
},
},
'loggers': {
'django': {
'handlers': ['file_django'],
'propagate': False,
'level': 'DEBUG',
},
'apps': {
'handlers': ['file_log'],
'level': 'DEBUG',
},
},
'root': {
'handlers': ['console', 'file_log', ],
'level': 'INFO'
},
}
# Backup/restore database https://code.djangoproject.com/wiki/Fixtures
FIXTURE_DIRS = (
os.path.join(BASE_DIR, 'fixtures'),
)
| [
"[email protected]"
] | |
c4565b937eb19cdaaa5ebe8be80861d3cd91b7e1 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/container/v1beta1/container-v1beta1-py/google/container_v1beta1/services/cluster_manager/__init__.py | 84b9194002cdc5f4efa095c5b0833921af9178e8 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import ClusterManagerClient
from .async_client import ClusterManagerAsyncClient
__all__ = (
'ClusterManagerClient',
'ClusterManagerAsyncClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
5f8498060a4d282003fadb26e8a0b61c79616b80 | deb31ab5397c8a669e30bea0f428afaf8a2ebd30 | /web/migrations/0024_auto_20190412_0008.py | fe783346d9264b6a461c6b470fdf443e9bf853dc | [] | no_license | zgd0228/product_base | 1b8bcc43f0a96e5bac09e77f363ed97b582b48cc | 83948b0b929c852c52503bca3c66b55f3f352f1c | refs/heads/master | 2020-05-15T03:59:17.727156 | 2019-04-18T13:07:05 | 2019-04-18T13:07:05 | 182,077,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2019-04-11 16:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0023_auto_20190411_2317'),
]
operations = [
migrations.CreateModel(
name='ScoreRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='理由')),
('score', models.IntegerField(help_text='违纪扣分写负值,表现邮寄加分写正值', verbose_name='分值')),
],
),
migrations.AddField(
model_name='student',
name='score',
field=models.IntegerField(default=100, verbose_name='积分'),
),
migrations.AddField(
model_name='scorerecord',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Student', verbose_name='学生'),
),
migrations.AddField(
model_name='scorerecord',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.UserInfo', verbose_name='执行人'),
),
]
| [
"[email protected]"
] | |
10a026637a4917ff43db50d14672d19b4e50d5ef | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /xlsxwriter/test/comparison/test_merge_cells01.py | a5c44bfd4b9d19ec1b384c2313ced7464f9620a3 | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('merge_cells01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.add_format({'align': 'center'})
worksheet.set_selection('A4')
worksheet.merge_range('A1:A2', 'col1', format)
worksheet.merge_range('B1:B2', 'col2', format)
worksheet.merge_range('C1:C2', 'col3', format)
worksheet.merge_range('D1:D2', 'col4', format)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
fa053c5bed1b6394b9372bb95a76023cf9f6d614 | 2d0da5d8f45e1906bb2a2eee0901e7fddd5dc7ad | /scripts/python_scripts/lgreader/lgreader.py | 9eee17bdc185300229171f768f5ea820ada4d9ac | [
"MIT"
] | permissive | akazachk/pha | 09afd2fa6764ef9133a8ae91bb189e2896e076c6 | 4120f70554cb0a149d5ab52e04409302e78059fa | refs/heads/master | 2021-09-25T01:02:42.488470 | 2021-09-15T17:51:34 | 2021-09-15T17:51:34 | 194,751,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,878 | py | from os import environ
PHA_DIR = environ['PHA_DIR']
ADD_TO_PATH = PHA_DIR + '/scripts/python_scripts'
from sys import path
path.append(ADD_TO_PATH)
from processcsv import ProcessCSV
from processcsv import StatEnum
import sys
import utility as util
import copy
import csv
import numpy as np
from itertools import izip
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from scipy import stats
import shutil
import os
class LGReader(ProcessCSV):
"""
class LGReader
Performs various functions related to the output file from PHA code
"""
default_data_dir = os.environ['PHA_DIR'] + '/data'
default_results_dir = os.environ['PHA_DIR'] + '/results'
default_ip_opt_fname = default_data_dir + '/' + "ip_opt.csv"
default_in_fname = default_results_dir + '/' + "pha.csv"
max_num_dec_places = 10
# Column names
colname_inst = "INSTANCE"
colname_lpobj = "LP OBJ"
colname_lpopt = "LP OBJ"
colname_lpbound = "LP OBJ"
colname_ipopt = "IP OBJ"
colname_ipobj = "IP OBJ"
colname_ipbound = "IP OBJ"
colname_numrows = "NUM ROWS"
colname_numcols = "NUM COLS"
colname_fraccore = "FRAC CORE"
colname_numpoints = "NUM POINTS (TOTAL)"
colname_numfinalpoints = "NUM FINAL POINTS (TOTAL)"
colname_numsics = "NUM SIC"
colname_numpha = "NUM PHA"
colname_sicbound = "SIC BOUND"
colname_phabound = "PHA BOUND"
colname_allcutsbound = "ALL CUTS (WITH SIC)"
colname_activesics = "ACTIVE SIC"
colname_activegics = "ACTIVE PHA"
colname_objtried = "OBJ"
colname_dupsicfails = "DUPLICATE_SIC_FAILS"
colname_dupgicfails = "DUPLICATE_GIC_FAILS"
colname_totaltime = "TOTAL_TIME"
colname_cutfails = [
"DUAL_CUT_SOLVER_FAILS",
"DUPLICATE_SIC_FAILS",
"DUPLICATE_GIC_FAILS",
"ORTHOGONALITY_FAILS",
"TIMELIMIT_FAILS",
"ITERATION_FAILS",
"ABANDONED_FAILS",
"NON_CUTTING_FAILS",
"SCALING_FAILS",
"UNSPEC_FAILS",
"CUT_LIMIT_FAILS",
"PRIMAL_CUT_SOLVER_FAILS",
"PRIMAL_CUT_SOLVER_NO_OBJ_FAILS",
"NUMERICAL_ISSUES_WARNING_NO_OBJ",
"NUMERICAL_ISSUES_NO_OBJ"
]
colname_numcuts = [
"CUT_VERTICES_CUT_HEUR",
# "DUMMY_OBJ_CUT_HEUR",
"ITER_BILINEAR_CUT_HEUR",
"UNIT_VECTORS_CUT_HEUR",
"TIGHT_POINTS_CUT_HEUR",
"SPLIT_SHARE_CUT_HEUR"
]
colname_activecuts = [
"ACTIVE CUT_VERTICES_CUT_HEUR",
"ACTIVE DUMMY_OBJ_CUT_HEUR",
"ACTIVE ITER_BILINEAR_CUT_HEUR",
"ACTIVE UNIT_VECTORS_CUT_HEUR",
"ACTIVE TIGHT_POINTS_CUT_HEUR",
"ACTIVE SPLIT_SHARE_CUT_HEUR"
]
def __init__(self, in_fname = None, hasipval = None, fill_ip_vals = None,
ip_opt_fname = None, out_fname = None,
inst_name = None, col_info = None, num_header_lines = None,
compute_gap_closed = True):
""" Constructor, sets reader object and instances to all instances """
if (in_fname is None):
self._in_fname = copy.deepcopy(self.default_in_fname)
else:
self._in_fname = copy.deepcopy(in_fname)
if __debug__:
print( "\n## In LGReader(): Opening file %s for reading ##" % self._in_fname )
# If requested, set IP values if they are not available
if (fill_ip_vals is None):
fill_ip_vals = False
if (hasipval is None):
self._hasipval = True # This is so we do not needlessly recreate the ip file
else:
self._hasipval = hasipval
if (ip_opt_fname is not None):
self._ip_opt_fname = copy.deepcopy(ip_opt_fname)
else:
self._ip_opt_fname = copy.deepcopy(self.default_ip_opt_fname)
if ((not self._hasipval) and fill_ip_vals):
self.fill_ip_opt(out_fname = self._in_fname) # Do overwrite the input file
self._hasipval = True
super(LGReader, self).__init__(self._in_fname, inst_name, col_info, num_header_lines, compute_gap_closed)
# If the IP values exist, calculate gap closed
# Note that we check the hasipval boolean, since otherwise gap_closed()
# will grab the IP values whether or not fill_ip_vals is True
if ((inst_name is not None) and (self._hasipval) and (not hasattr(self, '_ip_opt'))):
self.get_ip_opt()
if (compute_gap_closed):
self.gap_closed()
def set_inst(self, inst_name = None, compute_gap_closed = True):
""" Override set_inst from parent class so class values will be reset """
super(LGReader, self).set_inst(inst_name, compute_gap_closed)
if ((inst_name is not None) and (self._hasipval)):
self._ip_opt = None
self.get_ip_opt()
if (compute_gap_closed):
self.gap_closed()
def get_ip_opt(self, ip_opt_fname = None):
"""
Grabs IP optimum values from ip_opt file, only for relevant instances
Saves the values internally as self._ip_opt, a numpy array
"""
        # NOTE: the cached self._ip_opt is returned even if ip_opt_fname has changed since the last call
if ((hasattr(self, '_ip_opt')) and (self._ip_opt is not None)):
return self._ip_opt
if (ip_opt_fname is None):
ip_opt_fname = copy.deepcopy(self._ip_opt_fname)
if __debug__:
print( "\n## Reading IP file: %s ##" % ip_opt_fname )
# Read IP opt file in
ip_opt_reader = ProcessCSV(ip_opt_fname, num_header_lines = 1)
ip_opt_reader._num_dec_places = self.max_num_dec_places
inst_names = super(LGReader, self).get_param(self.colname_inst)
self._ip_opt = ['' for i in range(len(inst_names))]
for inst in range(len(inst_names)):
curr_inst = inst_names[inst]
# find_first_val returns a table, with a header row
# The first row contains all the column information
val_str = ip_opt_reader.find_first_val(col_info = self.colname_ipobj, inst_name = curr_inst)[1][1]
if (len(val_str) > 0):
curr_inst_ip_obj = float(val_str)
self._ip_opt[inst] = curr_inst_ip_obj
if __debug__:
print( "Instance: %s\tIP obj: %f" % (curr_inst, curr_inst_ip_obj) )
elif __debug__:
print( "Instance %s not found in IP file" % curr_inst )
del ip_opt_reader
self._ip_opt = np.asarray(self._ip_opt)
return self._ip_opt
def inst_info(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
cut_presolve = None):
""" Get instance basic information (rows, columns, avg time) """
stat = [StatEnum.FIRST, StatEnum.FIRST, StatEnum.AVG]
col_info = [self.colname_numrows, self.colname_numcols, self.colname_totaltime]
typecol = [int, int, float]
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, cut_presolve)
# Remove the columns that are not relevant
tab = [[tab[i][j] for j in [0,1,3,5]] for i in range(len(tab))]
tab[0] = ["Instance", "Rows", "Cols", "Time"]
# num_rows
col_index = 1
self._num_rows = np.asarray([int(round(float(tab[i][col_index]))) for i in range(1,len(tab))])
# num_cols
col_index += 1
self._num_cols = np.asarray([int(round(float(tab[i][col_index]))) for i in range(1,len(tab))])
util.print_pretty_table(tab)
return tab
def get_best_row(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None):
"""
Get best-performing row per instance
"""
stat = [StatEnum.MAX]
col_info = [self.colname_allcutsbound] #[self.colname_activesics, self.colname_numpha, self.colname_activegics]
typecol = [float] #[int, int, int]
secondary_stat = [StatEnum.MAXRATIO]
secondary_col_info = [[self.colname_activegics, self.colname_numpha]]
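        # Best run = max ALL CUTS bound; ties are broken by the largest ratio
        # of active GICs to GICs generated.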
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear,
use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve,
secondary_stat = secondary_stat,
secondary_col_info = secondary_col_info)
# Get best row for each instance
self._best_row = [int(tab[i][2]) for i in range(1,len(tab))]
def obj_fails_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
                        use_cut_vert_heur = None,
                        use_tight_points_heur = None,
                        cut_presolve = None):
"""
Analyze number of cuts generated in relation to maximum possible,
as well as some potential common problems (e.g., duplicate SICs in the cut LP)
Returns 2D list with the information
[instance, num splits, num sics, num active sics, num gics, num active gics, obj tried, fails...]
"""
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
num_inst = len(inst_name)
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut,
max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear,
use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
#stat = [StatEnum.MAX]
#col_info = [self.colname_activesics, self.colname_numpha, self.colname_activegics]
#typecol = [int, int, int]
#tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
# num_alg2_rounds, num_rays_cut, cut_limit,
# use_split_share, num_cuts_iter_bilinear,
# use_cut_vert_heur, cut_presolve)
out_tab = [[
"Instance", "Splits", "SICs", "Active SICs", "GICs", "Active GICs", "Obj",
#"Duplicate tilt", "Unbounded tilt",
"Unbounded", "Dup SIC", "Dup GIC", "Orth", "Time limit", "Iter limit", "Abandoned",
"Non-cutting", "Scaling", "Unspec", "Cut limit", "Primal infeas", "Primal infeas (setup)",
"Numerics warnings", "Numerics errors"]]
outcol_obj_tried = out_tab[0].index("Obj");
numcols_out_tab = len(out_tab[0]);
np_out_tab = np.zeros(shape = (num_inst, numcols_out_tab), dtype=int)
# Get column indices for each of the relevant stats
index_numsplits = super(LGReader, self).get_col_index(self.colname_fraccore)
index_numsics = super(LGReader, self).get_col_index(self.colname_numsics)
index_activesics = super(LGReader, self).get_col_index(self.colname_activesics)
index_numpha = super(LGReader, self).get_col_index(self.colname_numpha)
index_activegics = super(LGReader, self).get_col_index(self.colname_activegics)
index_objtried = super(LGReader, self).get_col_index(self.colname_objtried)
index_cutfail = [-1 for i in range(len(self.colname_cutfails))]
for i in range(len(self.colname_cutfails)):
index_cutfail[i] = super(LGReader, self).get_col_index(self.colname_cutfails[i])
for i in range(len(np_out_tab)):
if __debug__:
print( "## Obj_fails_table: Filling in information for instance %d with name %s ##" % (i,inst_name[i]) )
if self._best_row[i] >= 0:
row = super(LGReader, self).get_row(self._best_row[i])
np_out_tab[i,1] = int(row[index_numsplits])
np_out_tab[i,2] = int(row[index_numsics])
np_out_tab[i,3] = int(row[index_activesics])
np_out_tab[i,4] = int(row[index_numpha])
np_out_tab[i,5] = int(row[index_activegics])
obj_tried = int(row[index_objtried])
np_out_tab[i,outcol_obj_tried] = obj_tried
                for j in range(len(index_cutfail)):
                    curr_fail = int(row[index_cutfail[j]])
                    # Guard against division by zero when no objectives were tried
                    np_out_tab[i,outcol_obj_tried+1+j] = (100. * curr_fail / obj_tried) if obj_tried > 0 else 0
else:
for j in range(len(index_cutfail)):
np_out_tab[i,outcol_obj_tried+1+j] = 0
out_tab.extend(np_out_tab.tolist())
for i in range(1,num_inst+1):
out_tab[i][0] = inst_name[i-1]
util.print_pretty_table(out_tab)
return out_tab
def active_cuts_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None):
"""
Analyze which heuristics led to most active cuts
Returns 2D list with the information
[instance, num sics, num active sics, num gics, num active gics, active from which heur...]
"""
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
num_inst = len(inst_name)
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
out_tab = [[
"Instance", "SICs", "Active SICs", "GICs", "Active GICs",
"V", "Active V", #
"B", "Active B",
"R", "Active R",
"T", "Active T",
"S", "Active S"
]]
numcols_out_tab = len(out_tab[0]);
np_out_tab = np.zeros(shape = (num_inst, numcols_out_tab), dtype=int)
# Get column indices for number of each type of cut
# Number active of that type is the subsequent column (be aware could change)
colindex_first = [super(LGReader, self).get_col_index(self.colname_numsics),
super(LGReader, self).get_col_index(self.colname_activesics),
super(LGReader, self).get_col_index(self.colname_numpha),
super(LGReader, self).get_col_index(self.colname_activegics)]
colindex_numcuts = [super(LGReader, self).get_col_index(self.colname_numcuts[i]) for i in range(len(self.colname_numcuts))]
for i in range(len(np_out_tab)):
if __debug__:
print( "## Active_cuts_table: Filling in information for instance %d with name %s ##" % (i,inst_name[i]) )
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
for j in range(len(colindex_first)):
np_out_tab[i,j+1] = curr_row[colindex_first[j]]
for j in range(len(colindex_numcuts)):
np_out_tab[i,2*j+1+len(colindex_first)] = curr_row[colindex_numcuts[j]]
np_out_tab[i,2*j+1+len(colindex_first)+1] = curr_row[colindex_numcuts[j]+1]
else:
for j in range(len(colindex_first)):
np_out_tab[i,j+1] = 0
for j in range(len(colindex_numcuts)):
np_out_tab[i,2*j+1+len(colindex_first)] = 0
np_out_tab[i,2*j+1+len(colindex_first)+1] = 0
out_tab.extend(np_out_tab.tolist())
for i in range(1,num_inst+1):
out_tab[i][0] = inst_name[i-1]
util.print_pretty_table(out_tab)
return out_tab
def write_best_params(self, out_dir = None):
""" Writes best parameters for each instance to file """
if (out_dir is None):
out_dir = self.default_data_dir + "/params"
try:
os.makedirs(out_dir)
except OSError: # for the race condition, however unlikely
if not os.path.isdir(out_dir):
raise
#num_params = super(LGReader, self)._param_container.num_params
num_params = self._param_container.num_params
inst_names = super(LGReader, self).get_param(self.colname_inst)
if (not hasattr(self, '_best_row')):
            self.get_best_row(inst_names, cut_limit=[1000], cut_presolve=[0]) # current defaults we report for; note this silently filters the runs considered
out_tab = [copy.deepcopy(self._header[0])]
out_tab.append(copy.deepcopy(self._header[1]))
for i in range(len(inst_names)):
inst = inst_names[i]
row = super(LGReader, self).get_row(self._best_row[i]) if self._best_row[i] >= 0 else [0 for i in range(num_params)]
out_tab.append(row)
curr_fname = out_dir + '/' + inst + "_params.txt"
with open(curr_fname, 'wb') as out_f:
if __debug__:
print( "## Writing parameters for %s ##" % inst )
for p in range(1,num_params):
out_f.write(str(self._param_container.param_names[p]).lower() + ' ')
curr_val = self._param_container.type_param[p](row[p])
out_f.write(str(curr_val) + '\n')
# Save parameter information
with open(out_dir + '/' + "best_runs.csv", 'wb') as out_f:
out_writer = csv.writer(out_f)
out_writer.writerows(out_tab)
del out_writer
def gap_closed(self, inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None, recompute_best_row = None):
"""
Adds gap closed information to the instance of LGReader
In addition, keeps lp opt, ip opt, osic best, and all cuts best
Defaults to all instances, unless inst_name is specified
"""
# Make sure that ip values are available
# (i.e., reread the file if necessary with ip values filled)
if (not self._hasipval):
self.fill_ip_opt(out_fname = self._in_fname)
self._hasipval = True
# Get best_row
if (not hasattr(self, '_best_row')) or recompute_best_row:
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
# Set defaults
col_info = [self.colname_lpobj, self.colname_sicbound, self.colname_allcutsbound]
stat = [StatEnum.FIRST, StatEnum.MAX, StatEnum.MAX]
typecol = [float, float, float]
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
# Save the current number of output decimal places, and set current outputted decimal places
saved_num_dec_places = self._num_dec_places
self._num_dec_places = self.max_num_dec_places
if __debug__:
print( "\n## Calculating gap_closed ##" )
# tab will have columns
# [inst, lp opt, row, osic opt, row, all cut opt, row, num rows]
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
self._num_dec_places = saved_num_dec_places
#self._best_row = [int(tab[i][6]) for i in range(1,len(tab))]
# ip opt
if (not hasattr(self, '_ip_opt')):
self.get_ip_opt()
# Add information from instances
lp_col_index = super(LGReader, self).get_col_index(self.colname_lpobj)
sics_col_index = super(LGReader, self).get_col_index(self.colname_sicbound)
allcuts_col_index = super(LGReader, self).get_col_index(self.colname_allcutsbound)
self._lp_opt = []
self._sic_opt= []
self._sics_gap = []
self._allcuts_opt = []
self._allcuts_gap = []
self._gap_closed = []
for i in range(len(inst_name)):
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
self._lp_opt.append(float(curr_row[lp_col_index]))
self._sic_opt.append(float(curr_row[sics_col_index]))
self._sics_gap.append(100 * (self._sic_opt[i] - self._lp_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
self._allcuts_opt.append(float(curr_row[allcuts_col_index]))
self._allcuts_gap.append(100 * (self._allcuts_opt[i] - self._lp_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
self._gap_closed.append(100 * (self._allcuts_opt[i] - self._sic_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
else:
self._lp_opt.append(0.0)
self._sic_opt.append(0.0)
self._sics_gap.append(0.0)
self._allcuts_opt.append(0.0)
self._allcuts_gap.append(0.0)
self._gap_closed.append(0.0)
return self._gap_closed
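    # Illustrative sketch (not called elsewhere): the gap-closed metric from
    # gap_closed() in isolation; assumes ip_opt != lp_opt.
    @staticmethod
    def _percent_gap_closed(lp_opt, ip_opt, bound):
        """ e.g. lp_opt = 10., ip_opt = 20., bound = 15. -> 50.0 """
        return 100. * (bound - lp_opt) / (ip_opt - lp_opt)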
def gap_closed_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None,
recompute = False):
""" Create table with gap closed information """
if (not hasattr(self, '_gap_closed')) or recompute:
self.gap_closed(inst_name, pha_act_option, num_alg2_rounds,
num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur, use_cut_vert_heur,
use_tight_points_heur, cut_presolve, recompute)
if (not hasattr(self, '_num_rows') or not hasattr(self, '_num_cols')):
self.inst_info()
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
append_average = True
eps = 1e-5
nonzero_gap_indices = [i for i in range(len(self._gap_closed)) if (self._allcuts_gap[i] > eps)]
zero_gap_indices = [i for i in range(len(self._gap_closed)) if (self._allcuts_gap[i] <= eps)]
# Set up transpose of out_table
out_tab_tr = [[super(LGReader, self).get_param(self.colname_inst)[i] for i in nonzero_gap_indices]]
out_tab_tr.append(np.around([self._num_rows[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._num_cols[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._lp_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._ip_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._sic_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._allcuts_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._sics_gap[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._allcuts_gap[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._gap_closed[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
# Also add num SICs, num active SICs, num GICs, num active GICs
numsics_col_index = super(LGReader, self).get_col_index(self.colname_numsics)
activesics_col_index = super(LGReader, self).get_col_index(self.colname_activesics)
numpha_col_index = super(LGReader, self).get_col_index(self.colname_numpha)
activegics_col_index = super(LGReader, self).get_col_index(self.colname_activegics)
num_sics_tab = []
num_active_sics_tab = []
num_gics_tab = []
num_active_gics_tab = []
percent_active_tab = []
for i in nonzero_gap_indices:
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
num_sics_tab.append(int(curr_row[numsics_col_index]))
num_active_sics_tab.append(int(curr_row[activesics_col_index]))
num_gics_tab.append(int(curr_row[numpha_col_index]))
num_active_gics_tab.append(int(curr_row[activegics_col_index]))
num_pha = float(curr_row[numpha_col_index])
percent_active_tab.append(
(100. * float(curr_row[activegics_col_index]) / num_pha) if num_pha > 0 else 0)
else:
num_sics_tab.append(0)
num_active_sics_tab.append(0)
num_gics_tab.append(0)
num_active_gics_tab.append(0)
percent_active_tab.append(0.0)
out_tab_tr.append(num_sics_tab) # num SICs
out_tab_tr.append(num_active_sics_tab) # active SICs
out_tab_tr.append(num_gics_tab) # num GICs
out_tab_tr.append(num_active_gics_tab) # active GICs
out_tab_tr.append(percent_active_tab) # % active
# Header
out_tab = [
[
'', '', '',
"Opt", "Opt", "Opt", "Opt",
"Best % gap closed", "Best % gap closed", "Best % gap closed",
"# cuts", "# cuts", "# cuts", "# cuts",
''
],
[
"Instance", "Rows", "Cols",
"LP", "IP", "SIC", "GIC+SIC",
"SIC", "GIC", "Diff",
"SICs", "Active SICs", "GICs", "Active GICs",
"% active"
]
]
out_tab.extend([list(t) for t in izip(*out_tab_tr)])
if (append_average):
out_tab.append(
[
"Average",'','','','','','',
np.around(np.mean([self._sics_gap[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
np.around(np.mean([self._allcuts_gap[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
np.around(np.mean([self._gap_closed[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
'','','','',
np.around(np.mean(percent_active_tab, dtype=np.float64), decimals=self._num_dec_places)
]
)
out_tab_tr_zero = [[super(LGReader, self).get_param(self.colname_inst)[i] for i in zero_gap_indices]]
out_tab_tr_zero.append(np.around([self._num_rows[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._num_cols[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._lp_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._ip_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._sic_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._allcuts_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._sics_gap[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._allcuts_gap[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._gap_closed[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
num_sics_tab = []
num_active_sics_tab = []
num_gics_tab = []
num_active_gics_tab = []
percent_active_tab = []
for i in zero_gap_indices:
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
num_sics_tab.append(int(curr_row[numsics_col_index]))
num_active_sics_tab.append(int(curr_row[activesics_col_index]))
num_gics_tab.append(int(curr_row[numpha_col_index]))
num_active_gics_tab.append(int(curr_row[activegics_col_index]))
num_pha = float(curr_row[numpha_col_index])
percent_active_tab.append(
(100. * float(curr_row[activegics_col_index]) / num_pha) if num_pha > 0 else 0)
else:
num_sics_tab.append(0)
num_active_sics_tab.append(0)
num_gics_tab.append(0)
num_active_gics_tab.append(0)
percent_active_tab.append(0.0)
out_tab_tr_zero.append(num_sics_tab) # num SICs
out_tab_tr_zero.append(num_active_sics_tab) # active SICs
out_tab_tr_zero.append(num_gics_tab) # num GICs
out_tab_tr_zero.append(num_active_gics_tab) # active GICs
out_tab_tr_zero.append(percent_active_tab) # % active
out_tab.extend([list(t) for t in izip(*out_tab_tr_zero)])
util.print_pretty_table(out_tab)
return out_tab
def hplane_analysis(self, hh_start = 0, hh_end = 2, num_act_start = 0, num_act_end = 4):
"""
Sees effect of the various hplane selection heuristics,
as well as the number of activated hyperplanes
Outputs 2D list with the data
"""
if (not hasattr(self, '_gap_closed')):
self.gap_closed()
col_name = [self.colname_allcutsbound]
stat = StatEnum.MAX
make_int = False
inst_names = super(LGReader, self).get_param(self.colname_inst)
numpoints_col_index = super(LGReader, self).get_col_index(self.colname_numpoints)
numfinalpoints_col_index = super(LGReader, self).get_col_index(self.colname_numfinalpoints)
points_tab_tr = []
finalpoints_tab_tr = []
out_tab_tr = [super(LGReader, self).get_param(self.colname_inst)]
out_tab_tr.append(np.around(self._sics_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._allcuts_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._gap_closed, self._num_dec_places).tolist())
append_average = True
if (append_average):
out_tab_tr[0].append("Average")
out_tab_tr[1].append(np.around(np.mean(self._sics_gap, dtype=np.float64), self._num_dec_places))
out_tab_tr[2].append(np.around(np.mean(self._allcuts_gap, dtype=np.float64), self._num_dec_places))
out_tab_tr[3].append(np.around(np.mean(self._gap_closed, dtype=np.float64), self._num_dec_places))
saved_num_dec_places = self._num_dec_places
# First, best results for Alg 3 on its own
for hh in range(hh_start,hh_end+1):
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, pha_act_option = hh, stat = stat, num_alg2_rounds = 0)
self._num_dec_places = saved_num_dec_places
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
tab_col_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
tab_col_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
curr_gap = self.helper_col_analysis(col_name, stat,
tab = tab, tab_col_index = tab_col_index,
append_average = append_average)
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
# Now, best results for Alg 3 w/Alg 2, best among all hyperplane activation choices
for num_act in range(num_act_start,num_act_end+1):
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, num_alg2_rounds = num_act, stat = stat)
self._num_dec_places = saved_num_dec_places
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
tab_col_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
tab_col_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
curr_gap = self.helper_col_analysis(col_name, stat,
tab = tab, tab_col_index = tab_col_index,
append_average = append_average)
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
# Lastly, best results for Alg 2 on its own
for num_act in range(num_act_start+1,num_act_end+1): # +1 because we do not want 0 alg 2 activations
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, num_alg2_rounds = -1*num_act, stat = stat)
self._num_dec_places = saved_num_dec_places
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
tab_col_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
tab_col_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
curr_gap = self.helper_col_analysis(col_name, stat,
tab = tab, tab_col_index = tab_col_index,
append_average = append_average)
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
#out_tab_tr.extend(points_tab_tr)
#out_tab_tr.extend(finalpoints_tab_tr)
out_tab = [
[ '' ]
+ 3 * [ '' ]
+ 3 * [ 'Only Alg. 3' ]
+ 5 * [ 'Alg. 3 with Alg. 2' ]
+ 4 * [ 'Only Alg. 2' ],
# + (hh_end-hh_start+1 + num_act_end-num_act_start+1 + 4) * ['Points']
# + (hh_end-hh_start+1 + num_act_end-num_act_start+1 + 4) * ['Final Points'],
["Instance", "SIC", "Best", "Diff"]
+ ["H"+str(hh) for hh in range(hh_start+1,hh_end+2)]
+ [ "T+"+str(act) for act in range(num_act_start,num_act_end+1)]
+ [ "+"+str(act) for act in range(1,4+1)]
# + ["H"+str(hh) for hh in range(hh_start+1,hh_end+2)]
# + [ "T+"+str(act) for act in range(num_act_start,num_act_end+1)]
# + [ "+"+str(act) for act in range(1,4+1)]
# + ["H"+str(hh) for hh in range(hh_start+1,hh_end+2)]
# + [ "T+"+str(act) for act in range(num_act_start,num_act_end+1)]
# + [ "+"+str(act) for act in range(1,4+1)]
]
out_tab.extend([list(x) for x in izip(*out_tab_tr)])
util.print_pretty_table(out_tab)
return out_tab
def hplane_point_analysis(self, hh_start = 0, hh_end = 2, num_act_start = 0, num_act_end = 3):
"""
Sees effect of the various hplane selection heuristics,
as well as the number of activated hyperplanes,
on the number of points and final points
Outputs 2D list with the data
"""
if (not hasattr(self, '_gap_closed')):
self.gap_closed()
col_name = [self.colname_allcutsbound]
stat = StatEnum.MAX
make_int = False
numpoints_col_index = super(LGReader, self).get_col_index(self.colname_numpoints)
numfinalpoints_col_index = super(LGReader, self).get_col_index(self.colname_numfinalpoints)
inst_names = super(LGReader, self).get_param(self.colname_inst)
out_tab_tr = [inst_names]
points_tab_tr = []
finalpoints_tab_tr = []
saved_num_dec_places = self._num_dec_places
for hh in range(hh_start,hh_end+1):
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, pha_act_option = hh, stat = stat, num_alg2_rounds = 0)
self._num_dec_places = saved_num_dec_places
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
for num_act in range(num_act_start,num_act_end+1):
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, num_alg2_rounds = num_act, stat = stat)
self._num_dec_places = saved_num_dec_places
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
for num_act in range(1,4+1):
self._num_dec_places = self.max_num_dec_places
tab = super(LGReader, self).stat_col(col_info = col_name, num_alg2_rounds = -1*num_act, stat = stat)
self._num_dec_places = saved_num_dec_places
curr_col = []
for inst in range(len(inst_names)):
curr_row = int(tab[inst+1][2])
curr_col.append(super(LGReader, self).get_entry(curr_row, [numpoints_col_index, numfinalpoints_col_index]))
points_tab_tr.append([int(curr_col[i][0]) for i in range(len(inst_names))])
finalpoints_tab_tr.append([int(curr_col[i][1]) for i in range(len(inst_names))])
out_tab_tr.extend(points_tab_tr)
out_tab_tr.extend(finalpoints_tab_tr)
out_tab = [
[ '' ]
+ (hh_end-hh_start+1 + num_act_end-num_act_start+1 + 4) * ['Points']
+ (hh_end-hh_start+1 + num_act_end-num_act_start+1 + 4) * ['Final Points'],
["Instance"]
+ ["HH"+str(hh) for hh in range(hh_start+1,hh_end+2)]
+ [ "+"+str(act)+"H" for act in range(num_act_start,num_act_end+1)]
+ [ "-"+str(act)+"H" for act in range(1,4+1)]
+ ["HH"+str(hh) for hh in range(hh_start+1,hh_end+2)]
+ [ "+"+str(act)+"H" for act in range(num_act_start,num_act_end+1)]
+ [ "-"+str(act)+"H" for act in range(1,4+1)]]
out_tab.extend([list(x) for x in izip(*out_tab_tr)])
util.print_pretty_table(out_tab)
return out_tab
def cut_heur_analysis(self):
"""
Analyzes effect of different cut selection heuristics that were used
Outputs 2D list with the data
"""
col_name = [self.colname_allcutsbound]
stat = StatEnum.MAX
out_tab_tr = [super(LGReader, self).get_param(self.colname_inst)]
append_average = True
if (append_average):
out_tab_tr[0].append("Average")
saved_num_dec_places = self._num_dec_places
# Best
out_tab_tr.append(np.around(self._allcuts_gap, self._num_dec_places).tolist())
if (append_average):
out_tab_tr[1].append(np.around(np.mean(self._allcuts_gap, dtype=np.float64), self._num_dec_places))
# R
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# S
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
if False:
# B
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+R
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+S
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = [], num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+R+S
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = [], num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+R+T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# B+R+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = [], use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+S
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# S+T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# S+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# T+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+S+T
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = 0, use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+S+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = 0)
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# R+T+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = [], use_split_share = 0, num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# S+T+V
self._num_dec_places = self.max_num_dec_places
curr_gap = self.helper_col_analysis(col_name, stat, append_average = append_average,
use_unit_vectors_heur = 0, use_split_share = [], num_cuts_iter_bilinear = 0, use_cut_vert_heur = [], use_tight_points_heur = [])
self._num_dec_places = saved_num_dec_places
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
# Set header and transpose
out_tab = [["Instance", "Best",
"R", "S", "T", "V",
#"B",
#"B+R", "B+S", "B+T", "B+V",
#"B+R+S", "B+R+T", "B+R+V", "B+V+S", "B+V+T", "B+S+T",
"R+S", "R+T", "R+V",
"S+T", "S+V", "T+V",
"R+S+T", "R+S+V", "R+T+V",
"S+T+V"]]
out_tab.extend([list(x) for x in izip(*out_tab_tr)])
#util.print_pretty_table(out_tab_tr)
util.print_pretty_table(out_tab)
return out_tab
def point_analysis(self):
"""
Analyzes number of points and final points
Outputs 2D list with the data
"""
col_name = [self.colname_numpoints, self.colname_numfinalpoints]
stat = StatEnum.MAX
make_int = True
out_tab_tr = [super(LGReader, self).get_param(self.colname_inst)]
total_num_act = 4
for num_act in range(0,total_num_act):
tab = super(LGReader, self).stat_col(col_info = col_name, num_alg2_rounds = num_act, stat = stat)
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
curr_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
curr_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
if (make_int):
curr_col = [int(float(tab[i][curr_index])) for i in range(1,len(tab))] # one header row
else:
curr_col = [float(tab[i][curr_index]) for i in range(1,len(tab))] # one header row
out_tab_tr.append(curr_col)
# Rearrange (same columns should be next to each other)
out_tab_tr = [
out_tab_tr[0],
out_tab_tr[1],
out_tab_tr[3],
out_tab_tr[5],
out_tab_tr[7],
out_tab_tr[2],
out_tab_tr[4],
out_tab_tr[6],
out_tab_tr[8]
]
nInst = len(out_tab_tr[0])
percent_final_points = [
[
round(100. * float(out_tab_tr[j][i]) / out_tab_tr[j-total_num_act][i], self._num_dec_places)
if out_tab_tr[j-total_num_act][i] != 0
else 0
for j in range(5,9)
]
for i in range(nInst)
]
max_index = [percent_final_points[i].index(max(percent_final_points[i])) for i in range(nInst)]
#percent_final_points =\
# [
# round(max(
# [100. * float(out_tab_tr[j][i]) / out_tab_tr[j-4][i]
# if out_tab_tr[j-4][i] != 0
# else 0
# for j in range(5,9)]
# ), self._num_dec_places)
# for i in range(nInst)
# ]
out_tab = [["Instance", "Points", "Final", "% final"]]
out_tab.extend([
[out_tab_tr[0][i],
out_tab_tr[1+max_index[i]][i],
out_tab_tr[1+max_index[i]+total_num_act][i],
percent_final_points[i][max_index[i]]]
for i in range(nInst)
])
#out_tab = [
# # ['']+4*["Points"]+4*["Final points"]+[''],
# ["Instance", "+0H", "+1H", "+2H", "+3H",
# "+0H", "+1H", "+2H", "+3H", "% final"]
# ]
#out_tab.extend([list(x) for x in izip(*tmp_out_tab_tr)])
util.print_pretty_table(out_tab)
return out_tab
def helper_col_analysis(self, col_name, stat, tab = None,
tab_col_index = None, make_int = None, return_gap_closed = None, append_average = None,
use_unit_vectors_heur = None,
use_split_share = None, num_cuts_iter_bilinear = None,
use_cut_vert_heur = None, use_tight_points_heur = None, cut_limit = None):
"""
Helper function (saves repeating of same lines)
Calculates gap closed from best result for each instance using the given options
If tab is given, then do not recalculate it
Returns the numpy array with the gap closed for each instance or the column as a row
"""
# Defaults
if (tab_col_index is None):
tab_col_index = 1
if (make_int is None):
make_int = False
if (return_gap_closed is None):
return_gap_closed = True
if (append_average is None):
append_average = True
if (tab is None):
tab = super(LGReader, self).stat_col(col_info = col_name, stat = stat,
use_unit_vectors_heur = use_unit_vectors_heur,
use_split_share = use_split_share,
num_cuts_iter_bilinear = num_cuts_iter_bilinear,
use_cut_vert_heur = use_cut_vert_heur,
use_tight_points_heur = use_tight_points_heur,
cut_limit = cut_limit)
nonzero_indices = [i-1 for i in range(1,len(tab)) if tab[i][3] > 0]
if (make_int):
curr_col = [
int(round(float(tab[i][tab_col_index])))
if tab[i][3] > 0
else 0
for i in range(1,len(tab))
] # one header row
else:
curr_col = [
float(tab[i][tab_col_index])
if tab[i][3] > 0
else 0
for i in range(1,len(tab))
] # one header row
if (return_gap_closed):
#out_col = 100 * np.true_divide(np.asarray(curr_col) - self._lp_opt, self._ip_opt - self._lp_opt) - self._sics_gap
out_col = [
100 * (curr_col[i] - self._lp_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]) #- self._sics_gap[i]
if tab[i+1][3] > 0
else 0
for i in range(0,len(tab)-1)
]
else:
out_col = curr_col
if (append_average):
if (isinstance(out_col, np.ndarray)):
#avg = np.around(np.mean(out_col, dtype=np.float64), self._num_dec_places)
avg = np.around(np.mean([out_col[i] for i in nonzero_indices], dtype=np.float64), self._num_dec_places)
out_col = np.append(out_col, avg)
elif (type(out_col) is list):
if (len(nonzero_indices) > 0):
avg = float(sum([out_col[i] for i in nonzero_indices])) / len(nonzero_indices)
else:
avg = 0
out_col.append(avg)
else:
raise TypeError("Somehow out_col is not list or numpy.ndarray. Has type %s." % type(out_col))
if (make_int is not True):
out_col = np.around(out_col, self._num_dec_places)
return out_col
def param_analysis(self, cut_limit = None):
"""
CURRENTLY NON FUNCTIONING
Checks effect of # rays cut, limit cuts per split, and cut presolve
Outputs 2D list with the data
"""
if (not hasattr(self, '_gap_closed')):
self.gap_closed()
col_name = [self.colname_allcutsbound]
stat = StatEnum.MAX
make_int = False
out_tab_tr = [super(LGReader, self).get_param(self.colname_inst)]
out_tab_tr.append(np.around(self._sics_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._allcuts_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._gap_closed, self._num_dec_places).tolist())
append_average = True
if (append_average):
out_tab_tr[0].append("Average")
out_tab_tr[1].append(np.around(np.mean(self._sics_gap, dtype=np.float64), self._num_dec_places))
out_tab_tr[2].append(np.around(np.mean(self._allcuts_gap, dtype=np.float64), self._num_dec_places))
out_tab_tr[3].append(np.around(np.mean(self._gap_closed, dtype=np.float64), self._num_dec_places))
saved_num_dec_places = self._num_dec_places
self._num_dec_places = self.max_num_dec_places
for val in [0,1]:
tab = super(LGReader, self).stat_col(col_info = col_name, stat = stat, limit_cuts_per_cgs = val, cut_limit = cut_limit)
self._num_dec_places = saved_num_dec_places
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
tab_col_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
tab_col_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
curr_gap = self.helper_col_analysis(col_name, stat,
tab = tab, tab_col_index = tab_col_index,
append_average = append_average)
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
for val in [0,1]:
tab = super(LGReader, self).stat_col(col_info = col_name, stat = stat, cut_presolve = val, cut_limit = cut_limit)
self._num_dec_places = saved_num_dec_places
for c in range(len(col_name)):
if (stat in [StatEnum.MIN, StatEnum.MAX, StatEnum.FIRST]):
tab_col_index = 1 + 2 * c # tab cols are [inst, stat, row, stat, row, ..., num_inst]
elif (stat in [StatEnum.AVG]):
tab_col_index = 1 + c # tab cols are [inst, stat, stat, ..., num_inst]
curr_gap = self.helper_col_analysis(col_name, stat,
tab = tab, tab_col_index = tab_col_index,
append_average = append_average)
out_tab_tr.append(np.around(curr_gap, self._num_dec_places).tolist())
out_tab = [["Instance", "SIC", "GIC", "Best", "Limit 0", "Limit 1", "Presolve 0", "Presolve 1"]]
out_tab.extend([list(x) for x in izip(*out_tab_tr)])
util.print_pretty_table(out_tab)
return out_tab
#def plotScatter(self, data_x, data_y, xTitle, yTitle, plotTitle, saveAsFile, folder):
# """
# Makes scatter plot
# """
# plt.plot(data_x, data_y, 'o')
# plt.xlabel(xTitle)
# plt.ylabel(yTitle)
# #plt.axis([0,100,0,100])
# plt.grid(False)
# plt.title(plotTitle + ' scatter')
# plt.savefig('/home/akazachk/repos/pointcuts4/data/figs/' + folder + '/' + saveAsFile + '_scatter.png')
# #plt.savefig('./figs/'+saveAsFile + '_scatter.png')
# plt.clf()
def run_param_regression(self,
inst_list = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
cut_limit = None,
limit_cuts_per_cgs = None, use_split_share = None,
num_cuts_iter_bilinear = None, use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None):
"""
Reports correlation and p-val of gap closed vs. various statistics
(1) # FP, (2) # points, (3) % FP, (4) depth, (5) # cuts
"""
if (not hasattr(self, '_gap_closed')):
            self.gap_closed(inst_name = inst_list, pha_act_option = pha_act_option,
                            num_alg2_rounds = num_alg2_rounds, num_rays_cut = num_rays_cut,
                            cut_limit = cut_limit, use_split_share = use_split_share,
                            num_cuts_iter_bilinear = num_cuts_iter_bilinear,
                            use_cut_vert_heur = use_cut_vert_heur,
                            use_tight_points_heur = use_tight_points_heur,
                            cut_presolve = cut_presolve)
if (inst_list is None):
inst_list = super(LGReader, self).get_param("INSTANCE")
for inst_index in range(len(inst_list)):
inst_name = inst_list[inst_index]
if __debug__:
print( "## Regressions for %s ##" % inst_name )
# Points regression
tab = super(LGReader, self).create_table([inst_name],['NUM FINAL POINTS (TOTAL)', 'NUM POINTS (TOTAL)', 'ALL CUTS (WITH SIC)', 'NUM PHA', 'SIC DEPTH POINTS (AVG)'])
x_final_points = [(int)(tab[i][0]) for i in range(len(tab))]
x_total_points = [(int)(tab[i][1]) for i in range(len(tab))]
x_percent_fp = [100 * (float) (x_final_points[i])/x_total_points[i] for i in range (len(x_final_points))]
x_num_pha = [(int)(tab[i][3]) for i in range(len(tab))]
x_depth_points = [(float)(tab[i][4]) for i in range(len(tab))]
curr_lp_opt = self._lp_opt[inst_index]
curr_sic_opt = self._sic_opt[inst_index]
curr_ip_opt = self._ip_opt[inst_index]
y_gap = [100. * ((float)(tab[i][2]) - curr_lp_opt)/(curr_ip_opt - curr_lp_opt) for i in range(len(tab))]
y_diff_gap = [100. * ((float)(tab[i][2]) - curr_sic_opt)/(curr_ip_opt - curr_lp_opt) for i in range(len(tab))]
x = x_final_points
y = y_gap
self.plotScatter(x,y,'Num Final Points','% Gap Closed','Num FP vs Gap',inst_name,'NumFP')
y = y_diff_gap
self.plotScatter(x,y,'Num Final Points','Diff % Gap Closed','Num FP vs Diff Gap',inst_name + '_Diff','Diff/NumFP')
x = x_total_points
y = y_gap
self.plotScatter(x,y,'Num Total Points','% Gap Closed','Num Points vs Gap',inst_name,'NumPoints')
y = y_diff_gap
self.plotScatter(x,y,'Num Total Points','Diff % Gap Closed','Num Points vs Diff Gap',inst_name + '_Diff','Diff/NumPoints')
x = x_percent_fp
y = y_gap
self.plotScatter(x,y,'% Final Points','% Gap Closed','% FP vs Gap',inst_name,'PercentFP')
y = y_diff_gap
self.plotScatter(x,y,'% Final Points','Diff % Gap Closed','% FP vs Diff Gap',inst_name + '_Diff','Diff/PercentFP')
x = x_num_pha
y = y_gap
self.plotScatter(x,y,'Num PHA','% Gap Closed','Num PHA vs Gap',inst_name,'NumCuts')
y = y_diff_gap
self.plotScatter(x,y,'Num PHA','Diff % Gap Closed','Num PHA vs Diff Gap',inst_name + '_Diff','Diff/NumCuts')
x = x_depth_points
y = y_gap
self.plotScatter(x,y,'SIC Depth Points (Avg)','% Gap Closed','SIC Depth vs Gap',inst_name,'SICDepth')
y = y_diff_gap
self.plotScatter(x,y,'SIC Depth Points (Avg)','Diff % Gap Closed','SIC Depth vs Diff Gap',inst_name + '_Diff','Diff/SICDepth')
#(slope,intercept,rval,pval,stderr)=stats.linregress(x,y)
#print('inst=%s regression: slope=%f intercept=%f, rval=%f, pval=%f, std error= %f' % (inst_name,slope,intercept,rval,pval,stderr))
def fill_ip_opt(self, ip_opt_fname = None, out_fname = None, overwrite = None):
"""
Fills in IP opt for each instance in the relevant column
Creates a processcsv.ProcessCSV instance for lg_info and ip_opt
Finds IP opt for each row in lg_info, and creates a new file (out_f) with all info
"""
if __debug__:
print( "## Filling in IP values ##" )
if (overwrite is None):
overwrite = False
# Process parameters
assert self._in_fname is not None
find_dot = util.index_of('.', self._in_fname)
if (find_dot >= 0):
in_fname_stub = self._in_fname[:find_dot]
else:
in_fname_stub = copy.deepcopy(self._in_fname)
if (out_fname is None):
out_fname = in_fname_stub + "_ip.csv"
elif (out_fname == self._in_fname):
overwrite = True
out_fname = in_fname_stub + "_ip.csv"
else: # In case user mistakenly set overwrite = True
overwrite = False
if __debug__:
print( "Infile: %s, Outfile: %s" % (self._in_fname, out_fname) )
# Read IP opt file in
self.get_ip_opt(ip_opt_fname)
# Open out file
out_f = open(out_fname, 'wb')
output = csv.writer(out_f)
# Write file line by line
lg_ip_obj_col = super(LGReader, self).get_col_index(self.colname_ipobj)
assert lg_ip_obj_col >= 0
# Write header
#for i in range(len(super(LGReader, self)._header)):
# output.writerow(super(LGReader, self)._header[i])
for i in range(len(self._header)):
output.writerow(self._header[i])
# Write each row with filled-in value
super(LGReader, self).restart_reader()
#for row in super(LGReader, self)._reader:
for row in self._reader:
curr_inst = row[super(LGReader, self).inst_col]
curr_inst_index = super(LGReader, self).get_inst(curr_inst)
# find_first_val returns a table, with a header row
# The first row contains all the column information
#val_str = ip_opt_reader.find_first_val(col_info = self.colname_ipobj, inst_name = curr_inst)[1][1]
print( "Curr inst: %s, curr_inst_index: %d" % (curr_inst, curr_inst_index) )
val_str = str(self._ip_opt[curr_inst_index]) # Might be a float already, but might be the '' string
if (len(val_str) > 0):
curr_inst_ip_obj = float(val_str)
if __debug__:
print( "Instance: %s\tIP obj: %f" % (curr_inst, curr_inst_ip_obj) )
row[lg_ip_obj_col] = curr_inst_ip_obj
elif __debug__:
print( "Instance %s not found in IP file" % curr_inst )
output.writerow(row)
# Close
out_f.close()
del output
# Overwrite if out_fname and in_fname coincide
if (overwrite):
# Necessarily will need to recreate it later
super(LGReader, self).close_f()
# Overwite in_fname
shutil.move(out_fname, self._in_fname)
# Restart the file
super(LGReader, self).reopen_reader()
def get_command(self, row_info, inst_dir = None, out_dir = None):
"""
Get PointCuts command from row
"""
if (inst_dir is None):
inst_dir = "../"
        elif (inst_dir[-1] != '/'):
inst_dir = inst_dir + '/'
if (out_dir is None):
out_dir = "."
if (type(row_info) is int):
row = super(LGReader, self).get_row(row_info)[0:self._param_container.num_params-2]
elif (type(row_info) is list):
assert(len(row_info) >= self._param_container.num_params-2)
row = row_info[0:self._param_container.num_params-2]
else:
raise TypeError("Type of row is not int (for row number) or list (the row itself), but is %s." % type(row_info))
return "$POINTCUTS" + " " + inst_dir + row[0] + ".mps" + " " + out_dir + " " + " ".join(row[1:])
def __del__(self):
""" Destructor """
super(LGReader, self).__del__()
def main(argv):
    HOME_DIR = os.environ['HOME']
test_dir = HOME_DIR + "/repos/pha2/results/saved_results"
test_name_stub = "pha.csv"
test_name = test_dir + '/' + test_name_stub
    reader = LGReader(test_name)
    #reader = LGReader("/Users/akazachk/repos/pha2/results/pha.csv")
    reader.set_param("CUT_LIMIT", 1000)
    reader.set_param("CUT_PRESOLVE", 0)
    reader.set_inst("mas74")
#tab = reader.gap_closed_table()
#reader.hplane_point_analysis()
#tab = reader.inst_info()
#tab = reader.point_analysis()
#tab = reader.hplane_analysis()
#tab = reader.cut_heur_analysis()
#reader.write_best_params()
#tab = reader.obj_fails_table()
if __name__ == "__main__":
main(sys.argv)
| [
"None"
] | None |
77564f1e0f6f6a59e0bc5ce08ac4e1446e6b1360 | 8ba62e9ceb9307f2fe81db0cbfaed79fee12f51a | /Baekjoon/Dynamic Programming - New/타일 채우기.py | 29441bfd83cbaa189fa5b02b88ba0feaa001b0e0 | [] | no_license | Seoyoung2/Algorithm_Study | 9478e2ef183eed60c9670a05688cd835a2f69acd | ea2073e788f3c67a03b1168bbeaa9609e5e6e1bf | refs/heads/master | 2023-02-13T05:04:46.045416 | 2021-01-14T08:01:37 | 2021-01-14T08:01:37 | 199,292,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # 3×N 크기의 벽을 2×1, 1×2 크기의 타일로 채우는 경우의 수를 구해보자.
# Input: the first line contains n (1 ≤ n ≤ 30).
# Output: print the number of tilings on the first line.
# dp[n] = 3 * dp[n-2] + 2 * (dp[n-4] + dp[n-6] + ... + dp[0])
# If n is odd, the wall cannot be tiled.
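# (The Korean comment above states the problem: count the tilings of a 3xN wall with 2x1 and 1x2 tiles.)
# Sanity checks from the recurrence: dp[2] = 3, dp[4] = 11, dp[6] = 41.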
import sys
n = int(sys.stdin.readline())
dp = [0 for _ in range(31)]
dp[0], dp[1], dp[2] = 1, 0, 3
for i in range(4, n+1, 2):
dp[i] = 3 * dp[i-2]
for j in range(4, i+1, 2):
dp[i] += 2 * dp[i-j]
print(dp[n]) | [
"[email protected]"
] | |
d64a3757a6473ecc106814852095fdc3456b4424 | d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b | /espresso/vinil/content/actors/test/index.odb | 20b348324b0ce513e258e924725f89b2f08481d1 | [
"Apache-2.0"
] | permissive | danse-inelastic/AbInitio | 6f1dcdd26a8163fa3026883fb3c40f63d1105b0c | 401e8d5fa16b9d5ce42852b002bc2e4274afab84 | refs/heads/master | 2021-01-10T19:16:35.770411 | 2011-04-12T11:04:52 | 2011-04-12T11:04:52 | 34,972,670 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | odb | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Alex Dementsov
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from luban.content import select
from luban.content.Paragraph import Paragraph
from luban.content.Document import Document
from luban.content import load
from luban.content.Link import Link
from vinil.components.Actor import Actor as base
class Actor(base):
class Inventory(base.Inventory):
import pyre.inventory
id = pyre.inventory.str('id')
def content(self, director):
document = Document()
# Implement tests for jmd:
# 1. Echo
# 2. Simple calculator
document.add(Link(label="Test jmd",
onclick=select(id='test-jmd').replaceContent(Paragraph(text="World", id="test-jmd")) )
)
document.add(Paragraph(text=director.blah, id="test-jmd")) # "Hello"director.blah
return document
def pathlist(self, director):
self.pathlist = (["Home", None, None],)
return self.pathlist
def __init__(self, *args, **kwds):
super(Actor, self).__init__(*args, **kwds)
return
def _configure(self):
super(Actor, self)._configure()
self.id = self.inventory.id
return
def _init(self):
super(Actor, self)._init()
return
def actor():
return Actor('test/index')
__date__ = "$Nov 12, 2009 1:26:34 PM$"
| [
"[email protected]"
] | |
7f75e3a4e524969dd57adf3d766bf1a31c84bf50 | 9709a98a04285d86acad6112bc335e8f2995e9b1 | /Widgets/RearrangeMod/DialogMod.py | dcfb00442446b74f3667febe598df5138812543d | [] | no_license | redhog/webwidgets | 37eb56d92c7421207f78f73961a8b58dc7592ebb | cd0094db9f9f1348ab380d7aef40ab23a7b8b1ba | refs/heads/master | 2021-01-17T02:59:45.566859 | 2013-04-06T21:14:34 | 2013-04-06T21:14:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,056 | py | #! /bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# Webwidgets web developement framework
# Copyright (C) 2006 uAnywhere, Egil Moeller <[email protected]>
# Copyright (C) 2007 Egil Moeller <[email protected]>
# Copyright (C) 2007 FreeCode AS, Egil Moeller <[email protected]>
# Copyright (C) 2007 FreeCode AS, Axel Liljencrantz <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Widgets for user input.
"""
import types
import Webwidgets.Utils
import Webwidgets.Constants
import Webwidgets.Widgets.Base
import Webwidgets.Widgets.ApplicationMod.WindowMod
import Webwidgets.Widgets.InputMod.BaseInput
import Webwidgets.Widgets.FormattingMod.BaseFormatting
class InfoFrame(Webwidgets.Widgets.Base.StaticComposite):
def draw_head(self, children, output_options):
if 'Head' not in children:
children['Head'] = children['Body'].title
return """<div class="%(html_head_classes)s" id="%(ww_untranslated__html_id)s-head">
%(Head)s
</div>""" % children
def draw_body(self, children, output_options):
return """<div class="%(html_body_classes)s" id="%(ww_untranslated__html_id)s-body">
%(Body)s
</div>""" % children
def draw_foot(self, children, output_options):
return ""
def draw(self, output_options):
children = self.draw_children(
output_options,
invisible_as_empty = True,
include_attributes = True)
children['html_head_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['head'])
children['html_body_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['body'])
children['html_foot_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['foot'])
children['head'] = self.draw_head(children, output_options)
children['body'] = self.draw_body(children, output_options)
children['foot'] = self.draw_foot(children, output_options)
return """
<div %(html_attributes)s>
%(head)s
%(body)s
%(foot)s
</div>
""" % children
class StaticDialog(InfoFrame):
"""Dialogs provides an easy way to let the user select one of a
few different options, while providing the user with some longer
explanation/description of the options. Options are described
using a dictionary of description-value pairs."""
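    # A hypothetical usage sketch (the names below are invented, following the
    # pattern of the ConfirmationDialog subclasses further down in this module):
    #
    #     class SaveDialog(StaticDialog):
    #         buttons = {'Discard': '0', 'Save': '1'}
    #         class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
    #             html = """Save changes?"""
    #         class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
    #             html = """Do you want to save your changes before closing?"""
    #
    # The clicked button's value is delivered through the 'selected' notification.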
__wwml_html_override__ = False
buttons = {'Cancel': '0', 'Ok': '1'}
def draw_foot(self, children, output_options):
return """<div class="%(html_foot_classes)s" id="%(ww_untranslated__html_id)s-foot">
%(Buttons)s
</div>""" % children
class Buttons(Webwidgets.Widgets.InputMod.BaseInput.ButtonArray):
def selected(self, path, value):
self.parent.notify('selected', value)
raise StopIteration
class Buttons(object):
def __get__(self, instance, owner):
if not instance.parent: return None
return instance.parent.buttons
buttons = Buttons()
class AbstractDialog(StaticDialog, Webwidgets.Widgets.Base.DirectoryServer):
remove_on_close = False
def draw(self, output_options):
Webwidgets.Widgets.ApplicationMod.WindowMod.HtmlWindow.register_script_link(
self,
self.calculate_url_to_directory_server(
'Webwidgets.Dialog',
['Dialog','dialog_iefix.js'],
output_options))
return StaticDialog.draw(self, output_options)
def close(self):
if self.remove_on_close:
del self.parent[self.name]
else:
self.visible = False
def selected(self, path, value):
if path != self.path: return
self.close()
class Dialog(AbstractDialog):
pass
class AbstractInfoDialog(AbstractDialog):
pass
class InfoDialog(AbstractInfoDialog):
buttons = {'Ok': '1'}
class ConfirmationDialog(AbstractInfoDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really perform action?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to perform this action?"""
class DisableConfirmationDialog(ConfirmationDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really disable this item?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to disable this item?"""
class DeleteConfirmationDialog(ConfirmationDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really delete this item?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to delete this item?"""
class DialogContainer(Webwidgets.Widgets.FormattingMod.BaseFormatting.Div):
is_dialog_container = True
__wwml_html_override__ = False
html = "%(Dialogs)s%(Body)s"
class Dialogs(Webwidgets.Widgets.FormattingMod.BaseFormatting.ReplacedList): pass
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html): pass
def add_dialog(self, dialog, name = None):
if name is None: name = str(len(self['Dialogs'].children))
self['Dialogs'][name] = dialog
dialog.remove_on_close = True
def add_dialog_to_nearest(cls, widget, dialog, name = None):
widget.get_ansestor_by_attribute(
"is_dialog_container", True
).add_dialog(dialog, name)
add_dialog_to_nearest = classmethod(add_dialog_to_nearest)
class Hide(Webwidgets.Widgets.Base.StaticComposite):
"""
A hide/show widget
Change the value of the title variable to change the text in the button.
TODO:
Implement an alternative javascript implementation for faster
update at the expense of longer reloads
"""
class HideButton(Webwidgets.Widgets.InputMod.BaseInput.ToggleButton):
true_title = "Hide"
false_title = "Show"
def draw(self, path):
self['Child'].visible = self['HideButton'].value
children = self.draw_children(path, invisible_as_empty=True, include_attributes=True)
return """<div %(html_attributes)s>%(HideButton)s %(Child)s</div>""" % children
| [
"[email protected]"
] | |
dac6b6d0e619f601a6ec338da6db45412c183f49 | 6191bad7750404bc0bcaec43a8dea51b52980f04 | /Seção_06/Exercício_51.py | 0228181ef57c32ca0fe87335b02289edbf78a35c | [] | no_license | Lehcs-py/guppe | abfbab21c1b158b39251fa6234a4a98ce5f31c2a | 2ff007bce88e065e6d3020971efd397ec7f7084b | refs/heads/main | 2023-02-26T18:43:06.052699 | 2021-02-07T18:22:53 | 2021-02-07T18:22:53 | 330,180,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | print("""
51. An employee receives an annual raise. In 1995 he was hired at 2000 reais. In 1996 he received a raise of 1.5%.
From 1997 on, each raise is always double the previous year's. Write a program that determines the employee's current salary.
""")
salario_variavel = 2000
salario_final = 0
porcento = 0.75
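# porcento starts at 0.75 so that the first doubling inside the loop yields the 1.5% raise of 1996;
# each following year's percentage is then double the previous year's.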
for num in range((2021 - 1995) + 1):
porcento *= 2
salario_final = (salario_variavel + ((salario_variavel / 100) * porcento))
salario_variavel = salario_final
print(f'Final salary: {salario_final}')
| [
"[email protected]"
] | |
2193b859ac3796c8cbc52c1b23cf377f2ce0eeb6 | bb150497a05203a718fb3630941231be9e3b6a32 | /inference/python_api_test/test_int8_model/base_mkldnn_int8.py | 36875b65f27555bcf814ad671a8399dc9eea69a8 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 5,869 | py | """
mkldnn_int8 base values: per-model baselines for accuracy ("jingdu") and latency ("xingneng"), each with a relative tolerance "th"
"""
mkldnn_int8 = {
"PPYOLOE": {
"model_name": "PPYOLOE",
"jingdu": {
"value": 0.008505799229272469,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 284.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PicoDet": {
"model_name": "PicoDet",
"jingdu": {
"value": 0.29576267147717544,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 15.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv5s": {
"model_name": "YOLOv5s",
"jingdu": {
"value": 0.337513986405508,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 41.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv6s": {
"model_name": "YOLOv6s",
"jingdu": {
"value": 0.38167538696759734,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 36.3,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv7": {
"model_name": "YOLOv7",
"jingdu": {
"value": 0.4599616751537943,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 101.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"ResNet_vd": {
"model_name": "ResNet_vd",
"jingdu": {
"value": 0.78542,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 6.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"MobileNetV3_large": {
"model_name": "MobileNetV3_large",
"jingdu": {
"value": 0.70114,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 4.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PPLCNetV2": {
"model_name": "PPLCNetV2",
"jingdu": {
"value": 0.75986,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 3.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PPHGNet_tiny": {
"model_name": "PPHGNet_tiny",
"jingdu": {
"value": 0.77626,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 8.0,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"EfficientNetB0": {
"model_name": "EfficientNetB0",
"jingdu": {
"value": 0.75366,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 9.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PP-HumanSeg-Lite": {
"model_name": "PP-HumanSeg-Lite",
"jingdu": {
"value": 0.9596980417424789,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 42.2,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PP-Liteseg": {
"model_name": "PP-Liteseg",
"jingdu": {
"value": 0.6646508698054427,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 375.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"HRNet": {
"model_name": "HRNet",
"jingdu": {
"value": 0.7899464457999261,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 532.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"UNet": {
"model_name": "UNet",
"jingdu": {
"value": 0.6434970135618086,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 1105.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"Deeplabv3-ResNet50": {
"model_name": "Deeplabv3-ResNet50",
"jingdu": {
"value": 0.7900994083314681,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 861.7,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"ERNIE_3.0-Medium": {
"model_name": "ERNIE_3.0-Medium",
"jingdu": {
"value": 0.6809545875810936,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 102.71,
"unit": "ms",
"batch_size": 32,
"th": 0.05,
},
},
"PP-MiniLM": {
"model_name": "PP-MiniLM",
"jingdu": {
"value": 0.6899907321594069,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 115.12,
"unit": "ms",
"batch_size": 32,
"th": 0.05,
},
},
"BERT_Base": {
"model_name": "BERT_Base",
"jingdu": {
"value": 0.051546658541685234,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 18.94,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
}
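# A minimal consumer sketch (not part of the original module; the helper name is
# invented for illustration): check a measured metric against its stored
# baseline within the relative tolerance "th".
def within_baseline(model_name, kind, measured, table=mkldnn_int8):
    """Return True if `measured` lies within the relative tolerance of the
    baseline; `kind` is "jingdu" (accuracy) or "xingneng" (latency)."""
    entry = table[model_name][kind]
    base, th = entry["value"], entry["th"]
    return abs(measured - base) <= th * abs(base)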
| [
"[email protected]"
] | |
8210ae56f9b2440b7514fbf2b9ffd550ffa01dbd | b9481ebae49cf19de3b5718c69b84f1b59a8e421 | /apps/quotes/migrations/0001_initial.py | feb7681a9e81c817dc6330890a45b12644142612 | [] | no_license | arun-skaria/eracks | 06db7e3715afa2c6992fe09f05d6546520c65459 | 532d8a2be31199e7b78ca5e29944deb0a1400753 | refs/heads/master | 2023-01-08T01:40:10.036585 | 2017-07-13T13:10:42 | 2017-07-13T13:10:42 | 97,123,722 | 0 | 0 | null | 2022-12-26T20:16:17 | 2017-07-13T13:08:12 | HTML | UTF-8 | Python | false | false | 3,357 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('customers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quote_number', models.CharField(help_text=b'eRacks quote id - letters/numbers/underscore/dashes ok, no spaces', unique=True, max_length=20)),
('valid_for', models.IntegerField(default=10, help_text=b'Number of days the quote is valid for')),
('purchase_order', models.CharField(help_text=b'Customer Purchase Order number, etc', max_length=20, blank=True)),
('customer_reference', models.CharField(help_text=b'Other customer reference number, RFQ, contact name, etc', max_length=30, blank=True)),
('terms', models.CharField(default=b'ccard', help_text=b'Net 5, Wire Transfer, ccard, etc', max_length=20)),
('discount', models.FloatField(default=0, help_text=b'Dollars or percent, according to type', blank=True)),
('discount_type', models.CharField(default=b'$', max_length=1, blank=True, choices=[(b'$', b'Dollars'), (b'%', b'Percent')])),
('shipping', models.FloatField(help_text=b'Estimated weight - lbs', blank=True)),
('shipping_method', models.CharField(help_text=b'UPS, FedEx, Freight, 3-Day, etc', max_length=40, blank=True)),
('target', models.FloatField(help_text=b"The customer's budget, or where the customer would like the quote to be")),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('approved_by', models.ForeignKey(default=2, to=settings.AUTH_USER_MODEL, help_text=b'Manager or admin person approving quote')),
('customer', models.ForeignKey(blank=True, to='customers.Customer', help_text=b'click "+" to create new', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='QuoteLineItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('model', models.CharField(help_text=b'eRacks Model name, eg "OPTERNATOR", or make one up for custom quotes', max_length=60)),
('quantity', models.IntegerField()),
('description', models.TextField(help_text=b'Start with a line for general description, then one config item per line for components')),
('cost', models.FloatField(help_text=b'our cost')),
('price', models.FloatField(help_text=b'customer price')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('quote', models.ForeignKey(to='quotes.Quote')),
],
options={
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
6a5765f8581e4dd9031765969d6f25ba5aa1ed0b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_butterfly.py | 59efacbed543d8dcd7c2d0e9cc28ef78436400d2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py |
# class header
class _BUTTERFLY():
def __init__(self,):
self.name = "BUTTERFLY"
self.definitions = [u'a type of insect with large, often brightly coloured wings', u'a person who is not responsible or serious, and who is likely to change activities easily or only be interested in pleasure: ', u'the small metal part put on the back of a stud (= piece of jewellery worn in the ear) that keeps it in place', u'a way of swimming on your front by kicking with your legs while raising your arms together out of the water and then bringing them down in front of you ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
8c98a7a678031de0e2fecb6ae93884089c5bd54c | b3699724907850fd26cbce4509fec83a33b89760 | /python/ray/tests/test_stress_failure.py | 83d9f40f24ed0cf0cf2dfd01e93fedf4bb1ecd74 | [
"Apache-2.0",
"MIT"
] | permissive | BonsaiAI/ray | 5e2f26a81d865a795261d11f9182aca7f07c7b97 | 941d30f082fe879ea30618af14327c25b5a21a74 | refs/heads/master | 2023-06-12T05:15:29.370188 | 2021-05-06T07:03:53 | 2021-05-06T07:03:53 | 233,708,687 | 3 | 5 | Apache-2.0 | 2023-05-27T08:06:37 | 2020-01-13T22:41:47 | Python | UTF-8 | Python | false | false | 11,936 | py | import numpy as np
import pytest
import sys
import time
import ray
from ray.cluster_utils import Cluster
import ray.ray_constants as ray_constants
from ray.test_utils import get_error_message
@pytest.fixture(params=[1, 4])
def ray_start_reconstruction(request):
num_nodes = request.param
plasma_store_memory = int(0.5 * 10**9)
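    # Split one fixed plasma-store budget evenly across the nodes so the 1-node
    # and 4-node fixture variants expose the same aggregate object-store capacity.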
cluster = Cluster(
initialize_head=True,
head_node_args={
"num_cpus": 1,
"object_store_memory": plasma_store_memory // num_nodes,
"redis_max_memory": 10**8,
"_system_config": {
"object_timeout_milliseconds": 200
}
})
for i in range(num_nodes - 1):
cluster.add_node(
num_cpus=1, object_store_memory=plasma_store_memory // num_nodes)
ray.init(address=cluster.address)
yield plasma_store_memory, num_nodes, cluster
# Clean up the Ray cluster.
ray.shutdown()
cluster.shutdown()
@pytest.mark.skip(reason="Failing with new GCS API on Linux.")
def test_simple(ray_start_reconstruction):
plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
# Define the size of one task's return argument so that the combined
# sum of all objects' sizes is at least twice the plasma stores'
# combined allotted memory.
num_objects = 100
size = int(plasma_store_memory * 1.5 / (num_objects * 8))
# Define a remote task with no dependencies, which returns a numpy
# array of the given size.
@ray.remote
def foo(i, size):
array = np.zeros(size)
array[0] = i
return array
# Launch num_objects instances of the remote task.
args = []
for i in range(num_objects):
args.append(foo.remote(i, size))
# Get each value to force each task to finish. After some number of
# gets, old values should be evicted.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get each value again to force reconstruction.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get values sequentially, in chunks.
num_chunks = 4 * num_nodes
chunk = num_objects // num_chunks
for i in range(num_chunks):
values = ray.get(args[i * chunk:(i + 1) * chunk])
del values
assert cluster.remaining_processes_alive()
def sorted_random_indexes(total, output_num):
random_indexes = [np.random.randint(total) for _ in range(output_num)]
random_indexes.sort()
return random_indexes
@pytest.mark.skip(reason="Failing with new GCS API on Linux.")
def test_recursive(ray_start_reconstruction):
plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
# Define the size of one task's return argument so that the combined
# sum of all objects' sizes is at least twice the plasma stores'
# combined allotted memory.
num_objects = 100
size = int(plasma_store_memory * 1.5 / (num_objects * 8))
# Define a root task with no dependencies, which returns a numpy array
# of the given size.
@ray.remote
def no_dependency_task(size):
array = np.zeros(size)
return array
# Define a task with a single dependency, which returns its one
# argument.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
# Launch num_objects instances of the remote task, each dependent on
# the one before it.
arg = no_dependency_task.remote(size)
args = []
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get each value to force each task to finish. After some number of
# gets, old values should be evicted.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get each value again to force reconstruction.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get 10 values randomly.
random_indexes = sorted_random_indexes(num_objects, 10)
for i in random_indexes:
value = ray.get(args[i])
assert value[0] == i
# Get values sequentially, in chunks.
num_chunks = 4 * num_nodes
chunk = num_objects // num_chunks
for i in range(num_chunks):
values = ray.get(args[i * chunk:(i + 1) * chunk])
del values
assert cluster.remaining_processes_alive()
@pytest.mark.skip(reason="This test often hangs or fails in CI.")
def test_multiple_recursive(ray_start_reconstruction):
plasma_store_memory, _, cluster = ray_start_reconstruction
# Define the size of one task's return argument so that the combined
# sum of all objects' sizes is at least twice the plasma stores'
# combined allotted memory.
num_objects = 100
size = plasma_store_memory * 2 // (num_objects * 8)
# Define a root task with no dependencies, which returns a numpy array
# of the given size.
@ray.remote
def no_dependency_task(size):
array = np.zeros(size)
return array
# Define a task with multiple dependencies, which returns its first
# argument.
@ray.remote
def multiple_dependency(i, arg1, arg2, arg3):
arg1 = np.copy(arg1)
arg1[0] = i
return arg1
# Launch num_args instances of the root task. Then launch num_objects
# instances of the multi-dependency remote task, each dependent on the
# num_args tasks before it.
num_args = 3
args = []
for i in range(num_args):
arg = no_dependency_task.remote(size)
args.append(arg)
for i in range(num_objects):
args.append(multiple_dependency.remote(i, *args[i:i + num_args]))
# Get each value to force each task to finish. After some number of
# gets, old values should be evicted.
args = args[num_args:]
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get each value again to force reconstruction.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get 10 values randomly.
random_indexes = sorted_random_indexes(num_objects, 10)
for i in random_indexes:
value = ray.get(args[i])
assert value[0] == i
assert cluster.remaining_processes_alive()
def wait_for_errors(p, error_check):
# Wait for errors from all the nondeterministic tasks.
errors = []
time_left = 100
while time_left > 0:
errors.extend(get_error_message(p, 1))
if error_check(errors):
break
time_left -= 1
time.sleep(1)
# Make sure that enough errors came through.
assert error_check(errors)
return errors
@pytest.mark.skip("This test does not work yet.")
def test_nondeterministic_task(ray_start_reconstruction, error_pubsub):
p = error_pubsub
plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
# Define the size of one task's return argument so that the combined
# sum of all objects' sizes is at least twice the plasma stores'
# combined allotted memory.
num_objects = 1000
size = plasma_store_memory * 2 // (num_objects * 8)
# Define a nondeterministic remote task with no dependencies, which
# returns a random numpy array of the given size. This task should
# produce an error on the driver if it is ever reexecuted.
@ray.remote
def foo(i, size):
array = np.random.rand(size)
array[0] = i
return array
# Define a deterministic remote task with no dependencies, which
# returns a numpy array of zeros of the given size.
@ray.remote
def bar(i, size):
array = np.zeros(size)
array[0] = i
return array
# Launch num_objects instances, half deterministic and half
# nondeterministic.
args = []
for i in range(num_objects):
if i % 2 == 0:
args.append(foo.remote(i, size))
else:
args.append(bar.remote(i, size))
# Get each value to force each task to finish. After some number of
# gets, old values should be evicted.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get each value again to force reconstruction.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
def error_check(errors):
if num_nodes == 1:
# In a single-node setting, each object is evicted and
# restarted exactly once, so exactly half the objects will
# produce an error during reconstruction.
min_errors = num_objects // 2
else:
# In a multinode setting, each object is evicted zero or one
# times, so some of the nondeterministic tasks may not be
# reexecuted.
min_errors = 1
return len(errors) >= min_errors
errors = wait_for_errors(p, error_check)
# Make sure all the errors have the correct type.
assert all(error.type == ray_constants.HASH_MISMATCH_PUSH_ERROR
for error in errors)
assert cluster.remaining_processes_alive()
@pytest.mark.skip(reason="Failing with new GCS API on Linux.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**9], indirect=True)
def test_driver_put_errors(ray_start_object_store_memory, error_pubsub):
p = error_pubsub
plasma_store_memory = ray_start_object_store_memory
# Define the size of one task's return argument so that the combined
# sum of all objects' sizes is at least twice the plasma stores'
# combined allotted memory.
num_objects = 100
size = plasma_store_memory * 2 // (num_objects * 8)
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
# Launch num_objects instances of the remote task, each dependent on
# the one before it. The first instance of the task takes a numpy array
# as an argument, which is put into the object store.
args = []
arg = single_dependency.remote(0, np.zeros(size))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get each value to force each task to finish. After some number of
# gets, old values should be evicted.
for i in range(num_objects):
value = ray.get(args[i])
assert value[0] == i
# Get each value starting from the beginning to force reconstruction.
# Currently, since we're not able to reconstruct `ray.put` objects that
# were evicted and whose originating tasks are still running, this
# for-loop should hang on its first iteration and push an error to the
# driver.
ray.wait([args[0]], timeout=30)
def error_check(errors):
return len(errors) > 1
errors = wait_for_errors(p, error_check)
assert all(error.type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
or "ray.exceptions.ObjectLostError" in error.error_messages
for error in errors)
# NOTE(swang): This test tries to launch 1000 workers and breaks.
# TODO(rkn): This test needs to be updated to use pytest.
# class WorkerPoolTests(unittest.TestCase):
#
# def tearDown(self):
# ray.shutdown()
#
# def testBlockingTasks(self):
# @ray.remote
# def f(i, j):
# return (i, j)
#
# @ray.remote
# def g(i):
# # Each instance of g submits and blocks on the result of another remote
# # task.
# object_refs = [f.remote(i, j) for j in range(10)]
# return ray.get(object_refs)
#
# ray.init(num_workers=1)
# ray.get([g.remote(i) for i in range(1000)])
# ray.shutdown()
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
0e5b50178ecfb886cf916bcefd9725dbee4c4d43 | cfa08425d0a457e0c673543b6e16f0a02effe05f | /projects/admm_cigre/source/admm_agent.py | 06bba2bf24c0fa1d7eb29fa83d61e894e209d329 | [] | no_license | missinglpf/Distributed_optimization | 5b3dfea8b2a29225761537531322e421be83d7a8 | 84040eebd3f04acf4c09e5e4ff2e59e752bf3fae | refs/heads/master | 2020-08-01T03:42:36.455932 | 2018-06-25T15:59:44 | 2018-06-25T15:59:44 | 210,850,421 | 1 | 0 | null | 2019-09-25T13:20:11 | 2019-09-25T13:20:10 | null | UTF-8 | Python | false | false | 32,623 | py | import threading
import time
from concurrent import futures
import grpc
import admm_pb2
import admm_pb2_grpc
import admin_pb2
import admin_pb2_grpc
import json
from optparse import OptionParser
from acopf_nlopt_agent import ACopf_nlopt
from acopf_pips_agent import ACopf_pips
import os
import sys
import logging
import csv
import datetime
import re
from numpy import *
from ReadConfigFile import ReadConfiguration
import datetime
import urllib2
# sys.path.append('D:/phd/These_asys/source/nlopt_test/admm/opf_pypower')
# admm events
start_event = threading.Event()
all_beta_event = threading.Event()
all_z_event = threading.Event()
# prim_ctrl_finished_event = threading.Event()
# system events
# agent_enabled_event = threading.Event()
# reconfiguration_event = threading.Event()
# system locks
measurement_lock = threading.Lock() # manages the access to the measurement signals
references_lock = threading.Lock() # manages the access to the reference signals
beta_lock = threading.Lock() # manages the access to the beta & beta_received variables
z_lock = threading.Lock() # manages the access to the z & z_received variables
x0_lock = threading.Lock() # manages the access to the x0 variable
# the grpc server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
# flag for stopping the agent
running = True
# variables for timing the performance of the agent
rpc_counter = 0
rpc_total_time = 0
opal_total_time = 0
opal_counter = 0
opt_total_time = 0
opt_counter = 0
trigger_counter = 0
# measurement signals of the agent
v_real_meas = 0.0
v_imag_meas = 0.0
p_meas = 0.0
q_meas = 0.0
# reference signals of the agent computed by the secondary control
p_ref = 0.0
q_ref = 0.0
# in case the communication with OPAL will have some errors, count them
opal_com_error_count = 0
admin_ip = "169.254.35.100"
admin_port = 8000
MAX_ADMIN_COMM_RETRY = 5 # number of communication retries, in case of failing to contact the admin
MAX_OPAL_COMM_RETRY = 10 # number of communication retries, in case of failing to contact OPAL-RT
MEASUREMENTS_TO_ADMIN_DOWNSAMPLE = 500 # send to the admin each 500th measurement taken from OPAL-RT
MEASUREMENTS_TO_NEIGH_DOWNSAMPLE = 20 # send to the neighbours each 20th measurement taken from OPAL-RT
TRIGGER_SAMPLES = 50 # number of samples that have to meet a condition before taking any decision
NON_TRIVIAL_DV = 0.001 # voltage deviation that is considered serious and that should trigger a system wide
# recalculation of set-points
RAMP_TIME_OF_PRIMARY = 15.0 # apply the reference from the secondary control as a ramp lasting 15 seconds
DATA_LOG_PERIOD = 1 # write a line to the data_log_file every second
# global variables and price vectors for the ADMM algorithm. we keep the history of all the iterations
# for debugging reasons
admm_it = 0
admm_running = False
x = {}
beta = {}
z = {}
nu = {}
z_received = {}
beta_received = {}
x0 = []
lb = []
ub = []
class AgentServer(admm_pb2_grpc.ADMMAgentServicer):
# ========================== Functions for the RPC server that can be called remotely
# starts the admm algorithm
# def start_admm(self, request, context):
# try:
# start_event.set()
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
# except Exception as exc:
# logging.critical(exc.message)
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# sets a beta value in the beta vector
# function called by the neighbours to set their corresponding beta value in the beta vector
def set_beta_element(self, request, context):
try:
set_local_beta_element(request.value_real, request.value_imag, request.agent_id, request.admm_it)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
except Exception as exc:
logging.critical(exc.message)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# function called by the neighbours to set their corresponding z value in the z vector
def set_z_element(self, request, context):
try:
set_local_z_element(request.value_real, request.value_imag, request.agent_id, request.admm_it)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
except Exception as exc:
logging.critical(exc.message)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# set a x0 value in the x0 vector
def set_x0_element(self, request, context):
try:
set_local_x0_element(request.value_real, request.value_imag, request.agent_id)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
except Exception as exc:
logging.critical(exc.message)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# sets the step size for the admm algorithm
def set_admm_rho(self, request, context):
global config
        try:
            config.rho = request.value
            return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
        except Exception as exc:
            logging.critical(exc.message)
            return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# sets the number of iterations for the admm algorithm
def set_admm_max_iter(self, request, context):
global config
        try:
            config.max_iter = request.value
            return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
        except Exception as exc:
            logging.critical(exc.message)
            return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# function called by the administrator to set the configuration of the communication links
# def set_comm_link_to_neigh(self, request, context):
# try:
# set_local_comm_link_to_neigh(request.neigh_id, request.delay, request.loss)
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
# except Exception as exc:
# logging.critical(exc.message)
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
def set_measurement_webserver(self, request, context):
try:
config.url_opal = "http://" + request.server_ip + ":" + str(request.server_port) + "/asyncsrv/"
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
except Exception as exc:
logging.critical(exc.message)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# shuts down the agent remotely
def remote_shutdown(self, request, context):
global running
try:
running = False
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
except Exception as exc:
logging.critical(exc.message)
return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# enable the agent
# def enable(self, request, context):
# try:
# agent_enabled_event.set()
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('SUCCESS'))
# except Exception as exc:
# logging.critical(exc.message)
# return admm_pb2.CommReply(status=admm_pb2.OperationStatus.Value('FAILED'), message=exc.message)
# def configure_comm_links():
# if config.running_on_wireless:
# device = "wlan0"
# else:
# device = "eth0"
# config_script = "sudo tc qdisc del dev " + device + " root;" \
# "sudo tc qdisc add dev " + device + " handle 1: root htb;" \
# "sudo tc class add dev " + device + " parent 1: classid 1:1 htb rate 100Mbps;"
# for p in config.partners:
# tmpl = "sudo tc class add dev " + device + " parent 1:1 classid 1:1{0} htb rate 100Mbps;" \
# "sudo tc qdisc add dev " + device + " parent 1:1{0} handle lamda10{0}: netem delay {3}s loss {4}%;" \
# "sudo tc filter add dev " + device + " protocol ip parent 1:0 prio 3 u32 match ip dport {2} 0xffff match ip dst {1} flowid 1:1{0};"
# # id, ip, port, delay, loss
# config_script += tmpl.format(p[0], p[1], p[2], p[3], p[4])
# #print(config_script)
# os.system(config_script)
# def set_local_comm_link_to_neigh(neighbour, delay, loss):
# if config.running_on_wireless:
# device = "wlan0"
# else:
# device = "eth0"
# for p in config.partners:
# if p[0] == neighbour:
# command = "sudo tc qdisc change dev " + device + " handle lamda10{0}: netem delay {1}s loss {2}%;".format(p[0], delay, loss)
# os.system(command)
# def pool_opal():
# # if not agent_enabled_event.isSet():
# # logging.info("Waiting for the agent to be enabled")
# # agent_enabled_event.wait() # blocking call until the enable event is detected
# global opal_com_error_count
# global q_meas, v_meas, p_meas # measurement signals
# # make access to shared resources thread safe
# # key_p = "valout" + str(config.opal_get_ids["P"])
# # key_q = "valout" + str(config.opal_get_ids["Q"])
# # key_v = "valout" + str(config.opal_get_ids["V"])
# # # compose the URL for the webserver
# # get_url = config.url_opal + 'get?' + 'name0=' + key_p + '&' + 'name1=' + key_q + \
# # '&' + 'name2=' + key_v
# #
# # req = urllib2.Request(url=get_url)
# # try:
# # tic = datetime.datetime.now()
# # f = urllib2.urlopen(req, timeout=1)
# # toc = datetime.datetime.now()
# # response = f.read()
# # delta = (toc - tic).total_seconds()
# # get_opal_statistics(delta)
# # d = json.loads(response)
# # with measurement_lock: # measurements are accessed from several threads, therefore they need to be protected
# # p_meas = float(d[key_p])
# # q_meas = float(d[key_q])
# # v_meas = float(d[key_v])
# # # test_if_secondary_control_should_start()
# # except Exception as exc:
# # opal_com_error_count += 1
# # # if opal_com_error_count >= MAX_OPAL_COMM_RETRY:
# # # notify_administrator("There seems to be a problem with the WEB-SERVER")
# # # notify_administrator(exc.message)
# # # opal_com_error_count = 0
# # logging.critical(exc.message)
#
# # reschedule the function to start again
# t = threading.Timer(config.ts_opal, pool_opal)
# # t = threading.Timer(config.ts_opal, pool_opal, args=(agent_enabled_event,))
# t.name = "measurement-thread"
# t.daemon = True
# if running:
# t.start()
# set the initial voltage and power values in OPAL's web server
# ATTENTION! remember the sign change for the power reference. in opal the load power is with positive sign while in
# the optimization problem is with a negative sign
def init_opal():
set_url = config.url_opal + 'set?valin' + \
str(config.opal_set_ids["P"]) + '=' + str(-config.opal_default_set["P"]) + \
'&valin' + str(config.opal_set_ids["Q"]) + '=' + str(-config.opal_default_set["Q"])
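    # The composed request has the form <url_opal>set?valin<P_id>=<-P>&valin<Q_id>=<-Q>
    # (the valin ids and values depend on the agent's configuration).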
global p_ref, q_ref
with references_lock:
p_ref = config.opal_default_set["P"]
q_ref = config.opal_default_set["Q"]
# req = urllib2.Request(url=set_url)
# f = urllib2.urlopen(req)
# response = f.read()
# if 'Ok' not in response:
# notify_administrator("Cannot set initial voltage & power references in OPAL-RT")
def get_optimization_statistics(delta):
global opt_total_time, opt_counter
opt_total_time += delta
opt_counter += 1
def get_rpc_statistics(delta):
global rpc_total_time, rpc_counter
rpc_total_time += delta
rpc_counter += 1
def get_opal_statistics(delta):
global opal_total_time, opal_counter
opal_total_time += delta
opal_counter += 1
# if opal_counter % MEASUREMENTS_TO_ADMIN_DOWNSAMPLE == 0:
# notify_administrator("measurements")
# ========================== ADMM Algorithm
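# Consensus-ADMM steps implemented by opf() below (k = iteration counter):
#   x-update : x[k+1] solves the local AC-OPF subproblem penalized toward the
#              consensus (the z, nu and rho arguments passed to ACopf_pips)
#   share    : beta = x[k+1] + nu[k]/rho is exchanged with the neighbours over gRPC
#   z-update : each agent averages the betas it received, z[k+1] = mean(beta)
#   nu-update: nu[k+1] = nu[k] + rho * (x[k+1] - z[k+1])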
def opf(all_beta_event, all_z_event):
global admm_it, x, nu, x, z, admm_running, x0, config, lb, ub, p_ref, q_ref
while running:
logging.debug('Waiting for admm_start event')
start_event.wait() # blocking wait call until the event is detected
admm_running = True
# notify_administrator("admm_started")
admm_it = 0
for i in range(1, config.max_iter+1):
# set the iteration number
# admm step 1 - update the local variables
# run the local optimization and compute the local variables
# =========================non-linear approach=======================
# problem = ACopf_nlopt(x0, config.z_pk, config.z_qk, lb, ub, config.pd, config.qd,
# config.p_max, config.p_min, config.q_max, config.q_min, config.node_type,
# config.n, z[admm_it], nu[admm_it], config.rho)
# ====================================================================
# print(nu[admm_it],z[admm_it])
problem = ACopf_pips(x0, config.z_pk, config.z_qk, lb, ub, config.pd, config.qd,
config.p_max, config.p_min, config.q_max, config.q_min, config.node_type,
config.n, z[admm_it], nu[admm_it], config.rho)
# ====================================================================
tic = datetime.datetime.now()
results = problem.solve_opf()
toc = datetime.datetime.now()
delta = (toc - tic).total_seconds()
# print(delta)
get_optimization_statistics(delta)
xopt = results[0]
logging.info(
"Agent " + str(config.me[0][0]) + "========ADMM iteration " + str(admm_it) + " solved in " + str(delta)
+ " [s]")
# save the local results of the optimization
admm_it += 1
x[admm_it] = xopt
# compute the beta variable that is shared with the neighbours
beta2distr = (1 / config.rho) * nu[admm_it-1] + xopt
# admm step 2 - update the global variables
# distribute the beta values to the neighbours
distribute_beta(beta2distr)
# wait to receive all the betas from the neighbours
logging.info(" Waiting to receive all betas")
all_beta_event.wait()
# compute the z value corresponding to this agent
z_real2distr = float(sum(beta[admm_it][0:config.n]) / float(config.n))
z_imag2distr = float(sum(beta[admm_it][config.n:(2*config.n)]) / float(config.n))
# distribute the z value to the neighbours
distribute_z(z_real2distr, z_imag2distr)
# wait to receive all the z values from the neighbours
logging.info(" Waiting to receive all z's")
all_z_event.wait()
# admm step 3 - update the lagrange multipliers
nu[admm_it] = nu[admm_it - 1] + config.rho * (x[admm_it] - z[admm_it])
data_snapshot_to_file(include_admm_data=True)
all_beta_event.clear()
all_z_event.clear()
logging.info("Agent " + str(config.me[0][0]) + ": algorithm finished " + str(config.max_iter) + " iterations")
admm_running = False
idx = config.all_ids.index(config.me[0][0])
x1 = asmatrix(x[admm_it])
p = x1 * config.z_pk * x1.T + config.pd
q = x1 * config.z_qk * x1.T + config.qd
p_ref = float(p[idx])
q_ref = float(q[idx])
# notify_administrator("admm_finished")
logging.info("Agent " + str(config.me[0][0]) + ": waiting for the primary to finish")
# send_references_to_opal()
# prim_ctrl_finished_event.wait()
# re-initialize the buffers
init_admm_buffers()
# start_event.clear()
all_beta_event.clear()
all_z_event.clear()
def set_local_beta_element(value_real, value_imag, agent_id, admm_it):
idx = config.all_ids.index(agent_id) # get the position in the vector where this value should go
with beta_lock:
try:
if admm_it not in beta:
beta[admm_it] = zeros(2*config.n, dtype=float_)
beta_received[admm_it] = zeros(config.n, dtype=float_)
beta[admm_it][idx] = value_real
beta[admm_it][idx + config.n] = value_imag
beta_received[admm_it][idx] = 1.0
logging.debug("Agent " + str(config.me[0][0]) + ": Beta[" + str(admm_it) + "]=" + str(beta[admm_it]) +
"-> from Agent " + str(agent_id))
# received all the information
if sum(beta_received[admm_it]) == config.n:
logging.info("Agent " + str(config.me[0][0]) + ": Received all beta info for iteration " +
str(admm_it) + ". Updating z.")
all_beta_event.set()
except KeyError as exc:
logging.critical(
"Agent " + str(config.me[0][0]) + ": WTFFF!!! Iteration:" + str(admm_it) + " Beta:" + str(beta))
logging.critical(exc.message)
def distribute_beta(beta2distr):
# distribute the beta variable
# first locally
# idx = config.all_ids.index(config.me[0])
value_real = float(beta2distr[0])
value_imag = float(beta2distr[config.n])
set_local_beta_element(value_real, value_imag, config.me[0][0], admm_it)
logging.info(" finish set local beta")
# and then to the neighbours
for p in config.partners:
idx = config.all_ids.index(p[0]) # get the index of the neighbour
value_real = float(beta2distr[idx]) # get the value of beta to be sent
value_imag = float(beta2distr[idx + config.n]) # get the value of beta to be sent
try:
req = admm_pb2.SetBetaRequest(value_real=value_real, value_imag=value_imag, agent_id=config.me[0][0], admm_it=admm_it)
tic = datetime.datetime.now()
p[-1].set_beta_element(req) # call RPC for each neighbour
toc = datetime.datetime.now()
delta = (toc - tic).total_seconds()
get_rpc_statistics(delta)
except Exception as exc:
logging.critical(
"Agent " + str(config.me[0][0]) + ": Can't contact agent " + str(p[0]) + " for setting beta = ")
logging.exception(exc.message)
logging.info("Agent " + str(config.me[0][0]) + ": I finished distributing all betas")
def distribute_z(z_real2distr, z_imag2distr):
global rpc_total_time, rpc_counter
# distribute the z variable
# first locally
set_local_z_element(z_real2distr, z_imag2distr, config.me[0][0], admm_it)
# and then to the neighbours
for p in config.partners:
try:
req = admm_pb2.SetZRequest(value_real=z_real2distr, value_imag=z_imag2distr, agent_id=config.me[0][0], admm_it=admm_it)
tic = datetime.datetime.now()
p[-1].set_z_element(req)
toc = datetime.datetime.now()
delta = (toc - tic).total_seconds()
get_rpc_statistics(delta)
except Exception as exc:
logging.critical(
"Agent " + str(config.me[0][0]) + ": can't contact agent " + str(p[0]) + " for setting z = ")
logging.exception(exc.message)
def set_local_z_element(z_value_real, z_value_imag, agent_id, admm_it):
try:
idx = config.all_ids.index(agent_id) # get the position in the vector where this value should go
# access the z variable in a thread safe manner
with z_lock:
if admm_it not in z:
z[admm_it] = zeros(2*config.n, dtype=float_)
z_received[admm_it] = zeros(config.n, dtype=float_)
z[admm_it][idx] = z_value_real
z[admm_it][idx + config.n] = z_value_imag
z_received[admm_it][idx] = 1.0
logging.debug("Agent " + str(config.me[0][0]) + ": Z[" + str(admm_it) + "]=" + str(z[admm_it]))
# received all the information
if sum(z_received[admm_it]) == config.n:
logging.info("Agent " + str(config.me[0][0]) + ": Received all z info for iteration " + str(
admm_it) + ". Updating nu.")
all_z_event.set()
except KeyError as exc:
logging.critical(
"Agent " + str(config.me[0][0]) + ": WTFFF!!! Iteration:" + str(admm_it) + " Z:" + str(z))
logging.critical(exc.message)
def set_local_x0_element(x0_value, agent_id):
global x0
idx = config.all_ids.index(agent_id)
with x0_lock:
x0[idx] = x0_value
logging.debug("Agent " + str(config.me[0][0]) + ": X0 = " + str(x0))
def init_admm_buffers():
global admm_it, z, nu, x, beta, beta_received, z_received, x0, lb, ub
if admm_it == 0: # cold start of the ADMM
logging.info("Agent " + str(config.me[0][0]) +
" initialized the ADMM buffers. First run. Populating first iteration with a cold start.")
# global variables and price vectors for the ADMM algorithm. we keep the history of all the iterations
# for debugging reasons
z = {}
nu = {}
z[admm_it] = ones(2*config.n, dtype=float_) * 1.1
nu[admm_it] = zeros(2*config.n, dtype=float_)
else: # warm start of the ADMM. Using the last values for nu and z
logging.info("Agent " + str(config.me[0][0]) +
" re-initialized the ADMM buffers. Populating first iteration with a warm start")
z_in = z[admm_it]
nu_in = nu[admm_it]
admm_it = 0
z = {}
nu = {}
z[admm_it] = z_in
nu[admm_it] = nu_in
x = {}
beta = {}
z_received = {}
beta_received = {}
beta[admm_it] = zeros(2*config.n, dtype=float_)
    beta_received[admm_it] = zeros(config.n, dtype=float_)  # one completion flag per agent
    z_received[admm_it] = zeros(config.n, dtype=float_)
x0 = ones(2*config.n, dtype=float_)
x0[config.n:(2*config.n)] = zeros(config.n, dtype=float)
lb = ones(2*config.n, dtype=float_) * (-1.1)
ub = ones(2*config.n, dtype=float_) * 1.1
if config.node_type == 3:
lb[0] = 1.0
ub[0] = 1.0 + 1e-5
lb[config.n] = 0.0
ub[config.n] = 0.0 + 1e-5
def data_snapshot_to_file(include_admm_data):
global x, nu, z, admm_it, config
try:
if not os.path.isfile(dataFile):
header = ['Id', 'Time', 'ADMM_IT']
header += ['X_real']
header += ['X_imag']
header += ['Nu_real']
header += ['Nu_imag']
header += ['Z_real']
header += ['Z_imag']
header += ['P']
header += ['Q']
with open(dataFile, 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
fields = []
if include_admm_data:
fields += [admm_it]
fields += [x[admm_it][0]]
fields += [x[admm_it][config.n]]
fields += [nu[admm_it][0]]
fields += [nu[admm_it][config.n]]
fields += [z[admm_it][0]]
fields += [z[admm_it][config.n]]
# P = V.*(G*V)
# pz = np.multiply(z[admm_it], np.dot(config.G, z[admm_it]))
x1 = asmatrix(x[admm_it])
p = x1 * config.z_pk * x1.T + config.pd
q = x1 * config.z_qk * x1.T + config.qd
fields += [p[0,0]]
fields += [q[0,0]]
else:
fields += [0] * 9 # add zeros to the file in order to create a consistent .csv table
with open(dataFile, 'a') as f:
writer = csv.writer(f)
time_stamp = time.time()
line = [config.me[0][0], time_stamp]
line += ['{:3.4f}'.format(xval) for xval in fields]
writer.writerow(line)
except Exception as ex:
print(ex.message)
# def send_references_to_opal():
# global p_ref, q_ref, z
# # change the references only if you are a generator. don't forget about the sign change for the power
# if config.node_type > 1:
# set_url = config.url_opal + 'set?valin' + \
# str(config.opal_set_ids["P"]) + '=' + str(-p_ref) + '&valin' + \
# str(config.opal_set_ids["Q"]) + '=' + str(-q_ref)
# req = urllib2.Request(url=set_url)
# f = urllib2.urlopen(req)
# response = f.read()
# if 'Ok' not in response:
# notify_administrator("Cannot send the new references to the OPAL-RT")
# notify the secondary when the primary is finished ramping towards the new reference
# time.sleep(RAMP_TIME_OF_PRIMARY) # just wait the amount of time required by the primary
# in a real system the primary would notify the secondary when done
# prim_ctrl_finished_event.set() # notify the secondary
# ========================== Functions for communicating with the administrator and data logging
# handles the communication with the administrator
# def notify_administrator(topic):
# comm_error_count = 0
# while comm_error_count < MAX_ADMIN_COMM_RETRY:
# try:
# if topic is "online":
# req = admin_pb2.AgentRequest(agent_id=config.me[0])
# admin_stub.agent_online(req)
# elif topic is "offline":
# req = admin_pb2.AgentRequest(agent_id=config.me[0])
# admin_stub.agent_offline(req)
# elif topic is "admm_started":
# req = admin_pb2.AgentRequest(agent_id=config.me[0])
# admin_stub.agent_started_admm(req)
# elif topic is "admm_finished":
# req = admin_pb2.ADMMResults(agent_id=config.me[0], avg_opt_time = opt_total_time * 1000 / opt_counter,
# avg_rpc_time=rpc_total_time * 1000 / rpc_counter, p_ref=p_ref, q_ref = q_ref)
# admin_stub.agent_finished_admm(req)
# elif topic is "measurements":
# req = admin_pb2.Measurements(agent_id=config.me[0], avg_opal_time=opal_total_time * 1000 / opal_counter,
# v_meas=v_meas, p_meas=p_meas, trip=t_meas)
# admin_stub.agent_measurements(req)
# else: # if topic not in list send a general message to the admin
# req = admin_pb2.GenericMessage(agent_id=config.me[0], text=topic)
# admin_stub.agent_general_use_message(req)
# break
# except Exception as exc:
# logging.error("Agent " + str(config.me[0]) + ": Can't contact the administrator for sending data")
# comm_error_count += 1
# if comm_error_count >= MAX_ADMIN_COMM_RETRY:
# logging.critical("Agent " + str(config.me[0]) + ": Something is definetly wrong. ABORTING!")
# logging.exception(exc.message)
# else:
# logging.info(
# "Agent " + str(config.me[0]) + ": The communication might be busy. I will retry in lamda10 ms!")
# time.sleep(0.01)
def log_experiment_data_loop():
# if not agent_enabled_event.isSet():
# logging.info("Waiting for the agent to be enabled")
# agent_enabled_event.wait()
if not admm_running:
data_snapshot_to_file(include_admm_data=False)
t = threading.Timer(DATA_LOG_PERIOD, log_experiment_data_loop)
t.daemon = True
t.name = "log-thread"
if running:
t.start()
optp = OptionParser()
optp.add_option("-f", "--filename", dest="jsonFile",
help="json file containing the configuration of the agent")
opts, args = optp.parse_args()
if opts.jsonFile is None:
opts.jsonFile = raw_input("Name of the json file containing the configuration of the agent:")
# log = "logs/log_A_" + re.search(r'\d+', opts.jsonFile).group() + ".txt"
log = "logs/log_A_" + re.search(r'\d+', "agent32").group() + ".txt"
print(log)
dataFile = "data/data_A_" + re.search(r'\d+', opts.jsonFile).group() + ".csv"
# log to file
logging.basicConfig(level=logging.DEBUG, filename=log, filemode="w",
format='%(asctime)s (%(threadName)-9s) %(levelname)s: %(message)s')
logging.info("Reading the configuration file")
# read the configuration of the agent
# print(opts.jsonFile)
config = ReadConfiguration(opts.jsonFile)
logging.info("Setting the initial values in OPAL-RT")
# set the voltage in the opal
init_opal()
# logging.info("Configuring the communication links")
# # configure the communication links
# configure_comm_links()
logging.info("Initializing the ADMM buffers")
# initialize the ADMM buffers
init_admm_buffers()
time.sleep(5)
logging.info("Opening communication channels to neighbours")
# open communication channels towards the neighbours
for p in config.partners:
channel = grpc.insecure_channel(p[1] + ":" + str(p[2]))
stub = admm_pb2_grpc.ADMMAgentStub(channel)
p += [stub] # store the rpc stub in the config.partners collection
# logging.info("Opening the communication channels to the admin")
# open the communication channel to the admin
# admin_channel = grpc.insecure_channel(admin_ip + ":" + str(admin_port))
# admin_stub = admin_pb2_grpc.AdminStub(channel=admin_channel)
# logging.info("Starting the measurement thread")
# # configure and start the program threads
# # meas_thread = threading.Thread(name='measurement-thread', target=pool_opal, args=(agent_enabled_event,))
# meas_thread = threading.Thread(name='measurement-thread', target=pool_opal)
# meas_thread.daemon = True
# meas_thread.start() # start the measurement thread
logging.info("Starting the data loging thread")
log_thread = threading.Thread(name='log-thread', target=log_experiment_data_loop)
log_thread.daemon = True
log_thread.start() # start the log thread
logging.info("Starting the opf thread")
admm_ctrl_thread = threading.Thread(name='opf-thread', target=opf,
args=(all_beta_event, all_z_event))
admm_ctrl_thread.daemon = True
admm_ctrl_thread.start() # start the admm thread
# create the RPC server for the agent
logging.info("Starting the agent's RPC server")
admm_pb2_grpc.add_ADMMAgentServicer_to_server(AgentServer(), server)
server.add_insecure_port(config.me[0][1] + ":" + str(config.me[0][2]))
server.start()
logging.info("Agent " + str(config.me[0][0]) + " starting at:" + config.me[0][1] + ":" + str(config.me[0][2]))
# notify the administrator that I am online
# notify_administrator("online")
# time.sleep(lamda10)
while running:
try:
time.sleep(1)
except KeyboardInterrupt:
running = False
# notify_administrator("offline")
server.stop(0) | [
"[email protected]"
] | |
f5aed57a1d491a9acda13887ef8b3b19ee883a79 | 4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e | /scadparser/commands/cmd_deps.py | 121d7db81fe8369ef421c06396e918c8d957676c | [] | no_license | rblack42/ScadParser | 71081adb99ec03e78bc78b4101562b7fa1bab134 | a9cc10b23c6515a53065dfb58b23881d0145f88d | refs/heads/master | 2023-07-11T03:51:53.434534 | 2021-08-27T02:03:37 | 2021-08-27T02:03:37 | 397,718,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from scadparser import __version__
import click
from scadparser.cli import pass_environment
@click.command("deps", help="Display dependency versions.")
@pass_environment
def cli(ctx):
"""Display current dependency versions."""
click.echo(f"scadparser: {__version__}")
| [
"[email protected]"
] | |
90cd71d7b8c6e81838f40845f2a33e8dd698090e | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 10496 - Collecting Beepers/main.py | bee7a62d1be3255db3c7266bb98c3130f4d8cc08 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | '''
Created on Jul 15, 2013
@author: Yubin Bai
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
def solve(par):
N, M, startI, startJ, nBeepers, beepers = par
minCost = [INF]
path = set()
def backtrack(step, i, j, cost):
        # minCost is a one-element list so the nested backtrack can update it
        # in place (a common workaround predating `nonlocal`).
        if cost > minCost[0]:
            return
if step == nBeepers:
cost += abs(i - startI) + abs(j - startJ)
minCost[0] = min(minCost[0], cost)
return
for i1, j1 in beepers:
if (i1, j1) not in path:
dist = abs(i1 - i) + abs(j1 - j)
path.add((i1, j1))
backtrack(step + 1, i1, j1, cost + dist)
path.remove((i1, j1))
backtrack(0, startI, startJ, 0)
return 'The shortest path has length %d' % minCost[0]
class Solver:
def getInput(self):
self.numOfTests = int(self.fIn.readline().strip())
self.input = []
for iterTest in range(self.numOfTests):
N, M = map(int, self.fIn.readline().strip().split())
startI, startJ = map(int, self.fIn.readline().strip().split())
nBeepers = int(self.fIn.readline())
beepers = []
for i in range(nBeepers):
beepers.append(tuple(map(int, self.fIn.readline().split())))
self.input.append((N, M, startI, startJ, nBeepers, beepers))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("%s\n" % self.results[test])
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
| [
"[email protected]"
] | |
77a03933e3ee5d0b0c5d43fbcc2f04d949a0cbd2 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/nltk/chat/zen.py | 69114e88353e699c061f61489ea174946a3acd32 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 10,281 | py | # Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Amy Holland <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Zen Chatbot talks in gems of Zen wisdom.
This is a sample conversation with Zen Chatbot:
ZC: Welcome, my child.
me: Good afternoon.
ZC: Ask the question you have come to ask.
me: How can I achieve enlightenment?
ZC: How do you suppose?
me: Through meditation.
ZC: Form is emptiness, and emptiness form.
me: How can I empty my mind of worldly troubles?
ZC: Will an answer to that really help in your search for enlightenment?
me: Yes.
ZC: It is better to be right than to be certain.
me: I seek truth and wisdom.
ZC: The search for truth is a long journey.
me: Are you sure?
ZC: Maybe sure, maybe not sure.
The chatbot structure is based on that of chat.eliza. Thus, it uses
a translation table to convert from question to response
i.e. "I am" --> "you are"
Of course, since Zen Chatbot does not understand the meaning of any words,
responses are very limited. Zen Chatbot will usually answer very vaguely, or
respond to a question by asking a different question, in much the same way
as Eliza.
"""
from __future__ import print_function
from nltk.chat.util import Chat, reflections
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
responses = (
# Zen Chatbot opens with the line "Welcome, my child." The usual
# response will be a greeting problem: 'good' matches "good morning",
# "good day" etc, but also "good grief!" and other sentences starting
# with the word 'good' that may not be a greeting
(r'(hello(.*))|(good [a-zA-Z]+)',
( "The path to enlightenment is often difficult to see.",
"Greetings. I sense your mind is troubled. Tell me of your troubles.",
"Ask the question you have come to ask.",
"Hello. Do you seek englightenment?")),
# "I need" and "I want" can be followed by a thing (eg 'help')
# or an action (eg 'to see you')
#
# This is a problem with this style of response -
# person: "I need you"
# chatbot: "me can be achieved by hard work and dedication of the mind"
# i.e. 'you' is not really a thing that can be mapped this way, so this
# interpretation only makes sense for some inputs
#
(r'i need (.*)',
( "%1 can be achieved by hard work and dedication of the mind.",
"%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
"Focus your mind on%1, and you will find what you need.")),
(r'i want (.*)',
( "Desires of the heart will distract you from the path to enlightenment.",
"Will%1 help you attain enlightenment?",
"Is%1 a desire of the mind, or of the heart?")),
# why questions are separated into three types:
# "why..I" e.g. "why am I here?" "Why do I like cake?"
# "why..you" e.g. "why are you here?" "Why won't you tell me?"
# "why..." e.g. "Why is the sky blue?"
# problems:
# person: "Why can't you tell me?"
# chatbot: "Are you sure I tell you?"
# - this style works for positives (e.g. "why do you like cake?")
# but does not work for negatives (e.g. "why don't you like cake?")
(r'why (.*) i (.*)\?',
( "You%1%2?",
"Perhaps you only think you%1%2")),
(r'why (.*) you(.*)\?',
( "Why%1 you%2?",
"%2 I%1",
"Are you sure I%2?")),
(r'why (.*)\?',
( "I cannot tell you why%1.",
"Why do you think %1?" )),
# e.g. "are you listening?", "are you a duck"
(r'are you (.*)\?',
( "Maybe%1, maybe not%1.",
"Whether I am%1 or not is God's business.")),
# e.g. "am I a duck?", "am I going to die?"
(r'am i (.*)\?',
( "Perhaps%1, perhaps not%1.",
"Whether you are%1 or not is not for me to say.")),
# what questions, e.g. "what time is it?"
# problems:
# person: "What do you want?"
# chatbot: "Seek truth, not what do me want."
(r'what (.*)\?',
( "Seek truth, not what%1.",
"What%1 should not concern you.")),
# how questions, e.g. "how do you do?"
(r'how (.*)\?',
( "How do you suppose?",
"Will an answer to that really help in your search for enlightenment?",
"Ask yourself not how, but why.")),
# can questions, e.g. "can you run?", "can you come over here please?"
(r'can you (.*)\?',
( "I probably can, but I may not.",
"Maybe I can%1, and maybe I cannot.",
"I can do all, and I can do nothing.")),
# can questions, e.g. "can I have some cake?", "can I know truth?"
(r'can i (.*)\?',
( "You can%1 if you believe you can%1, and have a pure spirit.",
"Seek truth and you will know if you can%1.")),
# e.g. "It is raining" - implies the speaker is certain of a fact
(r'it is (.*)',
( "How can you be certain that%1, when you do not even know yourself?",
"Whether it is%1 or not does not change the way the world is.")),
# e.g. "is there a doctor in the house?"
(r'is there (.*)\?',
( "There is%1 if you believe there is.",
"It is possible that there is%1.")),
# e.g. "is it possible?", "is this true?"
(r'is(.*)\?',
( "%1 is not relevant.",
"Does this matter?")),
# non-specific question
(r'(.*)\?',
( "Do you think %1?",
"You seek the truth. Does the truth seek you?",
"If you intentionally pursue the answers to your questions, the answers become hard to see.",
"The answer to your question cannot be told. It must be experienced.")),
# expression of hate of form "I hate you" or "Kelly hates cheese"
(r'(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)',
( "Perhaps it is not about hating %2, but about hate from within.",
"Weeds only grow when we dislike them",
"Hate is a very strong emotion.")),
# statement containing the word 'truth'
(r'(.*) truth(.*)',
( "Seek truth, and truth will seek you.",
"Remember, it is not the spoon which bends - only yourself.",
"The search for truth is a long journey.")),
# desire to do an action
# e.g. "I want to go shopping"
(r'i want to (.*)',
( "You may %1 if your heart truly desires to.",
"You may have to %1.")),
# desire for an object
# e.g. "I want a pony"
(r'i want (.*)',
( "Does your heart truly desire %1?",
"Is this a desire of the heart, or of the mind?")),
# e.g. "I can't wait" or "I can't do this"
(r'i can\'t (.*)',
( "What we can and can't do is a limitation of the mind.",
"There are limitations of the body, and limitations of the mind.",
"Have you tried to%1 with a clear mind?")),
# "I think.." indicates uncertainty. e.g. "I think so."
# problem: exceptions...
# e.g. "I think, therefore I am"
(r'i think (.*)',
( "Uncertainty in an uncertain world.",
"Indeed, how can we be certain of anything in such uncertain times.",
"Are you not, in fact, certain that%1?")),
# "I feel...emotions/sick/light-headed..."
(r'i feel (.*)',
( "Your body and your emotions are both symptoms of your mind."
"What do you believe is the root of such feelings?",
"Feeling%1 can be a sign of your state-of-mind.")),
    # exclamation mark indicating emotion
# e.g. "Wow!" or "No!"
(r'(.*)!',
( "I sense that you are feeling emotional today.",
"You need to calm your emotions.")),
# because [statement]
# e.g. "because I said so"
(r'because (.*)',
( "Does knowning the reasons behind things help you to understand"
" the things themselves?",
"If%1, what else must be true?")),
# yes or no - raise an issue of certainty/correctness
(r'(yes)|(no)',
( "Is there certainty in an uncertain world?",
"It is better to be right than to be certain.")),
# sentence containing word 'love'
(r'(.*)love(.*)',
( "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
"Free love!")),
    # sentence containing word 'understand'
(r'(.*)understand(.*)',
( "If you understand, things are just as they are;"
" if you do not understand, things are just as they are.",
"Imagination is more important than knowledge.")),
# 'I', 'me', 'my' - person is talking about themself.
# this breaks down when words contain these - eg 'Thyme', 'Irish'
(r'(.*)(me )|( me)|(my)|(mine)|(i)(.*)',
( "'I', 'me', 'my'... these are selfish expressions.",
"Have you ever considered that you might be a selfish person?",
"Try to consider others, not just yourself.",
"Think not just of yourself, but of others.")),
# 'you' starting a sentence
# e.g. "you stink!"
(r'you (.*)',
( "My path is not of conern to you.",
"I am but one, and you but one more.")),
# say goodbye with some extra Zen wisdom.
(r'exit',
( "Farewell. The obstacle is the path.",
"Farewell. Life is a journey, not a destination.",
"Good bye. We are cups, constantly and quietly being filled."
"\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.")),
# fall through case -
# when stumped, respond with generic zen wisdom
#
(r'(.*)',
( "When you're enlightened, every word is wisdom.",
"Random talk is useless.",
"The reverse side also has a reverse side.",
"Form is emptiness, and emptiness is form.",
"I pour out a cup of water. Is the cup empty?"))
)
zen_chatbot = Chat(responses, reflections)
def zen_chat():
print('*'*75)
print("Zen Chatbot!".center(75))
print('*'*75)
print('"Look beyond mere words and letters - look into your mind"'.center(75))
print("* Talk your way to truth with Zen Chatbot.")
print("* Type 'quit' when you have had enough.")
print('*'*75)
print("Welcome, my child.")
zen_chatbot.converse()
def demo():
zen_chat()
if __name__ == "__main__":
demo()
| [
"[email protected]"
] | |
c2f985cc04425adca0e7b65ce826d18b573f9ce1 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/flair/models/tars_tagger_model.py | ab659471cf8a9dab55842904ddabec9cb67d5ac5 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0d1594783dc93d010b71f1a23fcc38fb70ad5f99f0cf54f87f6b88c38486bc74
size 30192
| [
"[email protected]"
] | |
f44282bae37f63740ff1b8c780ad60d944f81ef9 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/asa/plugins/terminal/asa.py | 83f339186d356b8421c95976a5f8e5b3f2cf6792 | [
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 2,532 | py | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
]
terminal_stderr_re = [
re.compile(br"error:", re.I),
re.compile(br"Removing.* not allowed, it is being used"),
re.compile(br"^Command authorization failed\r?$", re.MULTILINE),
]
def on_open_shell(self):
if self._get_prompt().strip().endswith(b"#"):
self.disable_pager()
def disable_pager(self):
try:
self._exec_cli_command(u"no terminal pager")
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure("unable to disable terminal pager")
def on_become(self, passwd=None):
if self._get_prompt().strip().endswith(b"#"):
return
cmd = {u"command": u"enable"}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u"prompt"] = to_text(
r"[\r\n]?[Pp]assword: $", errors="surrogate_or_strict"
)
cmd[u"answer"] = passwd
try:
self._exec_cli_command(
to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure(
"unable to elevate privilege to enable mode"
)
self.disable_pager()
| [
"[email protected]"
] | |
1d2133d364a2de163a1af19cd4f8ebc83f4cb579 | a84dfa25c827a2979a811513ac888288d378b980 | /OpenCV/list15_8.py | 314a875e0f200a5038a1a8889d9cceca9c0ed694 | [] | no_license | sunho-park/study1 | d49b9d27b0069dbeb7cc31199177f6771a84d3be | 0386fbea0282c2135407cad608b4ffa84b02d298 | refs/heads/master | 2022-12-16T23:17:14.746575 | 2020-09-11T06:04:06 | 2020-09-11T06:04:06 | 264,140,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | import numpy as np
import cv2
img = cv2.imread("./OpenCV/sample.jpg")
size = img.shape
# Taking a slice of the matrix that represents the image crops (trims) it.
# To split the image into n equal parts, divide the width and height by n.
my_img = img[: size[0]//2, : size[1]//3]
# Here the width and height are each doubled while keeping the original aspect ratio. Note that the size is specified in (width, height) order.
my_img = cv2.resize(my_img, (my_img.shape[1]*2, my_img.shape[0]*2))
cv2.imshow("sample", my_img)
cv2.imwrite("list15_8.jpg", my_img) | [
"[email protected]"
] | |
a8dbeb3447ef7323b9161b5593319f0d7991ffac | c71af56951d1c661a5819db72da1caccd9130df2 | /javascript/cpp-libraries/test1/binding.gyp | d1876d8625fb7543597db12f5121ca3f738e8656 | [] | no_license | adrianpoplesanu/personal-work | 2940a0dc4e4e27e0cc467875bae3fdea27dd0d31 | adc289ecb72c1c6f98582f3ea9ad4bf2e8e08d29 | refs/heads/master | 2023-08-23T06:56:49.363519 | 2023-08-21T17:20:51 | 2023-08-21T17:20:51 | 109,451,981 | 0 | 1 | null | 2022-10-07T04:53:24 | 2017-11-03T23:36:21 | Python | UTF-8 | Python | false | false | 406 | gyp | {
"targets": [
{
"target_name": "greet",
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"sources": [
"./src/greeting.cpp",
"./src/search.cpp",
"./src/index.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
}
]
}
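
# Typical build flow for this addon (assuming node-gyp is installed):
#   node-gyp configure && node-gyp build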
| [
"[email protected]"
] | |
8039568ad432cafc9c4e614d96115a4addf76f96 | dd6ee732613966b899df8a514f2907084e433c3f | /setup.py | 4358797461aff42c90277e9271152ad16a5e1ec1 | [] | no_license | rixx/ramble | 410588025f0cfae04d75078c5c007a53538b526e | f838171517035edfa03c1afacb0bd8cb157eb90a | refs/heads/master | 2023-05-12T17:20:50.744796 | 2023-04-29T13:30:19 | 2023-04-29T13:30:19 | 244,879,387 | 1 | 0 | null | 2022-07-22T05:34:06 | 2020-03-04T11:08:16 | Python | UTF-8 | Python | false | false | 424 | py | from setuptools import setup
setup(
name="ramble-rixx-de",
author="Tobias Kunze",
author_email="[email protected]",
url="https://github.com/rixx/ramble.rixx.de",
packages=["scripts"],
entry_points="""
[console_scripts]
posts=scripts.cli:cli
""",
install_requires=[
"click",
"inquirer==2.6.*",
"python-frontmatter==0.5.*",
"unidecode==1.1.*",
],
)
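
# With the console_scripts entry point above, an editable install
# (`pip install -e .`) exposes a `posts` command that runs scripts.cli:cli.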
| [
"[email protected]"
] | |
9b0ec8b0123f758441aa60a40a32b9f3d96346c3 | b3330bd3365767b89afb9c432f4deb722b39ac1c | /python/last_nth_element.py | 899c425e6c5110fa5844947eec110e1d1acb856e | [] | no_license | hguochen/algorithms | 944df332d5b39220bd59cbd62dc74b12e335fb9e | 703e71a5cd9e002d800340df879ed475a404d092 | refs/heads/master | 2022-02-27T12:11:10.607042 | 2022-02-18T21:04:00 | 2022-02-18T21:04:00 | 13,767,503 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | ##################################
### Title: Last Nth element ######
### Author: GuoChen Hou ########
##################################
# Implement an algorithm to find the nth to last element of a
# singly linked list.
from ADT.LinkedList import LinkedList
class NthLinkedList(LinkedList):
def nth_to_last(self, position):
        if self.size == 0:
return
# get the node position counting from head
node_position = self.size - position - 1 # offset since node starts at 1 instead of 0
trav = self.head
        while trav is not None and node_position != 0:
trav = trav.next
node_position -= 1
return trav.data
if __name__ == "__main__":
test_list = NthLinkedList()
test_list.insert(1)
test_list.insert(2)
test_list.insert(3)
test_list.insert(4)
test_list.print_list()
    print(test_list.nth_to_last(2))
| [
"[email protected]"
] | |
e0410615b113f2b713aca3503c38d512f4309812 | 941c1bfd4edf4619c4b66391453abe8994ccc0bc | /src/api/admin.py | 1ca790712ac550adfc72a4853ebdc86d2bb83b80 | [
"MIT"
] | permissive | websiteinspiration/back-end | 11a0da9fb1b252557305b56867b1adc82c5da66b | e9762149aaa3ce08278e357950b35ac168122d95 | refs/heads/master | 2020-06-08T20:39:36.513962 | 2019-06-21T14:38:15 | 2019-06-21T14:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | from django.contrib import admin
from api.models import (
CodeSchool,
Location,
Scholarship,
ScholarshipApplication,
TeamMember,
)
@admin.register(Scholarship)
class ScholarshipAdmin(admin.ModelAdmin):
list_display = (
"name",
"location",
"open_time",
"close_time",
"created_at",
"updated_at",
)
@admin.register(ScholarshipApplication)
class ScholarshipApplicationAdmin(admin.ModelAdmin):
list_display = ("user", "scholarship", "terms_accepted", "created_at", "updated_at")
@admin.register(TeamMember)
class TeamMemberAdmin(admin.ModelAdmin):
list_display = ("name", "email", "role", "group", "image_src")
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
list_display = (
"code_school",
"va_accepted",
"address1",
"address2",
"city",
"state",
"zip",
)
@admin.register(CodeSchool)
class CodeSchoolAdmin(admin.ModelAdmin):
list_display = (
"name",
"url",
"full_time",
"hardware_included",
"has_online",
"online_only",
"has_housing",
"mooc",
"is_partner",
"rep_name",
"rep_email",
)
list_filter = (
"full_time",
"hardware_included",
"has_online",
"online_only",
"has_housing",
"mooc",
"is_partner",
)
search_fields = ("name", "rep_name", "rep_email", "url")
| [
"[email protected]"
] | |
486c7da5a8c0c5378fe0a03acb83e39ba404cc7c | ad16b0c0178e4543d0c44ad3d90f90c6beeb4f5a | /filter_array_by_column.py | f45a6bb704a3f3db5b46886e74be352c30600a4b | [] | no_license | timmonspatrick/HemoDub | 09cb61e8e33ee8b64c9e6011d4ae8679d07950d9 | 4e6cceb44456c498cc1d6d55f8369099d0d5d947 | refs/heads/master | 2021-04-27T09:34:40.935684 | 2018-05-31T08:29:04 | 2018-05-31T08:29:04 | 122,491,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 2 12:20:31 2018
@author: Patrick
"""
import numpy as np
def filter_array_by_column(X, cut_off=0.95):
    """Drop near-constant columns: any column where a single value accounts
    for at least `cut_off` of the rows is removed."""
    n_cols = X.shape[1]
bad_cols = set()
for n in range(n_cols):
Y = X[:,n]
unique, counts = np.unique(Y, return_counts=True)
counts_sum = sum(counts)
counts = [i / counts_sum for i in counts]
if len([i for i in counts if i >= cut_off]) > 0:
bad_cols.add(n)
good_cols = [i for i in range(n_cols) if i not in bad_cols]
X_new = X[:,good_cols]
    return X_new
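
# Minimal sanity check with made-up data (illustrative, not part of the
# original module): the second column is constant, so it should be dropped.
if __name__ == "__main__":
    X = np.array([[1, 7], [2, 7], [3, 7], [4, 7]])
    print(filter_array_by_column(X))  # -> the [1, 2, 3, 4] column only
 | [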
"[email protected]"
] | |
6caaa7467d19c252f251757d6eb6c91863cc3273 | fb124e51024917d6479fa626d9607ff10f7a3aba | /storm-control/storm_control/steve/qtdesigner/steve_ui.py | 221af7d78ac059e59e624ce48a2a75c603c577dc | [
"MIT"
] | permissive | BehnamAbaie/storm-control | 054bd7bbd903ed9635e4d1121c30544f58473c4f | 0c686321142eccad62ce3365eae22c3b69229b0d | refs/heads/main | 2023-06-18T08:04:01.108874 | 2021-07-14T00:51:15 | 2021-07-14T00:51:15 | 342,049,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,764 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'steve.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1148, 831)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.mosaicTab = QtWidgets.QWidget()
self.mosaicTab.setObjectName("mosaicTab")
self.tabWidget.addTab(self.mosaicTab, "")
self.sectionsTab = QtWidgets.QWidget()
self.sectionsTab.setObjectName("sectionsTab")
self.tabWidget.addTab(self.sectionsTab, "")
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1148, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuMosaic = QtWidgets.QMenu(self.menubar)
self.menuMosaic.setObjectName("menuMosaic")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionConnect = QtWidgets.QAction(MainWindow)
self.actionConnect.setObjectName("actionConnect")
self.actionDisconnect = QtWidgets.QAction(MainWindow)
self.actionDisconnect.setObjectName("actionDisconnect")
self.actionSave_Positions = QtWidgets.QAction(MainWindow)
self.actionSave_Positions.setObjectName("actionSave_Positions")
self.actionSave_Mosaic = QtWidgets.QAction(MainWindow)
self.actionSave_Mosaic.setObjectName("actionSave_Mosaic")
self.actionSet_Working_Directory = QtWidgets.QAction(MainWindow)
self.actionSet_Working_Directory.setObjectName("actionSet_Working_Directory")
self.actionLoad_Mosaic = QtWidgets.QAction(MainWindow)
self.actionLoad_Mosaic.setObjectName("actionLoad_Mosaic")
self.actionDelete_Images = QtWidgets.QAction(MainWindow)
self.actionDelete_Images.setObjectName("actionDelete_Images")
self.actionLoad_Positions = QtWidgets.QAction(MainWindow)
self.actionLoad_Positions.setObjectName("actionLoad_Positions")
self.actionSave_Snapshot = QtWidgets.QAction(MainWindow)
self.actionSave_Snapshot.setObjectName("actionSave_Snapshot")
self.actionLoad_Movies = QtWidgets.QAction(MainWindow)
self.actionLoad_Movies.setObjectName("actionLoad_Movies")
self.actionLoad_Dax_By_Pattern = QtWidgets.QAction(MainWindow)
self.actionLoad_Dax_By_Pattern.setObjectName("actionLoad_Dax_By_Pattern")
self.actionAdjust_Contrast = QtWidgets.QAction(MainWindow)
self.actionAdjust_Contrast.setObjectName("actionAdjust_Contrast")
self.menuFile.addAction(self.actionSet_Working_Directory)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionDelete_Images)
self.menuFile.addAction(self.actionLoad_Movies)
self.menuFile.addAction(self.actionLoad_Mosaic)
self.menuFile.addAction(self.actionLoad_Positions)
self.menuFile.addAction(self.actionSave_Mosaic)
self.menuFile.addAction(self.actionSave_Positions)
self.menuFile.addAction(self.actionSave_Snapshot)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuMosaic.addAction(self.actionAdjust_Contrast)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuMosaic.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Steve"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.mosaicTab), _translate("MainWindow", "Mosaic"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.sectionsTab), _translate("MainWindow", "Sections"))
self.menuFile.setTitle(_translate("MainWindow", "Fi&le"))
self.menuMosaic.setTitle(_translate("MainWindow", "Mosaic"))
self.actionQuit.setText(_translate("MainWindow", "&Quit (Ctrl+Q)"))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.actionConnect.setText(_translate("MainWindow", "Connect"))
self.actionDisconnect.setText(_translate("MainWindow", "Disconnect"))
self.actionSave_Positions.setText(_translate("MainWindow", "Sav&e Positions"))
self.actionSave_Positions.setShortcut(_translate("MainWindow", "Ctrl+T"))
self.actionSave_Mosaic.setText(_translate("MainWindow", "Sa&ve Mosaic"))
self.actionSave_Mosaic.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.actionSet_Working_Directory.setText(_translate("MainWindow", "&Set Working Directory"))
self.actionLoad_Mosaic.setText(_translate("MainWindow", "Load &Mosaic"))
self.actionLoad_Mosaic.setShortcut(_translate("MainWindow", "Ctrl+M"))
self.actionDelete_Images.setText(_translate("MainWindow", "&Delete Images"))
self.actionDelete_Images.setShortcut(_translate("MainWindow", "Ctrl+D"))
self.actionLoad_Positions.setText(_translate("MainWindow", "Load &Positions"))
self.actionLoad_Positions.setShortcut(_translate("MainWindow", "Ctrl+P"))
self.actionSave_Snapshot.setText(_translate("MainWindow", "Save S&napshot"))
self.actionSave_Snapshot.setShortcut(_translate("MainWindow", "Ctrl+I"))
self.actionLoad_Movies.setText(_translate("MainWindow", "&Load Movie(s)"))
self.actionLoad_Movies.setShortcut(_translate("MainWindow", "Ctrl+L"))
self.actionLoad_Dax_By_Pattern.setText(_translate("MainWindow", "Load Dax By Pattern"))
self.actionAdjust_Contrast.setText(_translate("MainWindow", "Adjust Contrast"))
| [
"[email protected]"
] | |
7b970c8b63cc83b8e1c803325c3ed9db37f2dbf2 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/ListFaceEntitiesRequest.py | ca3071dad2dcdc723906cf8526313cf463ec6eb4 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,351 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class ListFaceEntitiesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'facebody', '2019-12-30', 'ListFaceEntities','facebody')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EntityIdPrefix(self):
return self.get_body_params().get('EntityIdPrefix')
def set_EntityIdPrefix(self,EntityIdPrefix):
self.add_body_params('EntityIdPrefix', EntityIdPrefix)
def get_Limit(self):
return self.get_body_params().get('Limit')
def set_Limit(self,Limit):
self.add_body_params('Limit', Limit)
def get_Order(self):
return self.get_body_params().get('Order')
def set_Order(self,Order):
self.add_body_params('Order', Order)
def get_Offset(self):
return self.get_body_params().get('Offset')
def set_Offset(self,Offset):
self.add_body_params('Offset', Offset)
def get_Token(self):
return self.get_body_params().get('Token')
def set_Token(self,Token):
self.add_body_params('Token', Token)
def get_Labels(self):
return self.get_body_params().get('Labels')
def set_Labels(self,Labels):
self.add_body_params('Labels', Labels)
def get_DbName(self):
return self.get_body_params().get('DbName')
def set_DbName(self,DbName):
self.add_body_params('DbName', DbName) | [
"[email protected]"
] | |
e7f9ef879ca1ae30a2bd11327b902db8fc44b076 | 3f5a1ef51620fd8c35ef38064ca5aa00776ab6f4 | /ds_and_algo_educative/Doubly_LinkedList/Reverse.py | 80a3dfd6673c3b0a39f02c490c0536c87db82c1b | [] | no_license | poojagmahajan/python_exercises | 1b290a5c0689f703538caf89bca5bc6c1fdb392a | 65539cf31c5b2ad5768d652ed5fe95054ce5f63f | refs/heads/master | 2022-11-12T03:52:13.533781 | 2020-07-04T20:50:29 | 2020-07-04T20:54:46 | 263,151,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py |
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self):
self.head = None
def append(self, data):
if self.head is None:
new_node = Node(data)
new_node.prev = None
self.head = new_node
else:
new_node = Node(data)
cur = self.head
while cur.next:
cur = cur.next
cur.next = new_node
new_node.prev = cur
new_node.next = None
def print_list(self):
cur = self.head
while cur:
print(cur.data)
cur = cur.next
    def reverse(self):
        # Swap each node's prev/next pointers in place; after the swap,
        # cur.prev holds the old next node, so step through it.
        tmp = None
        cur = self.head
        while cur:
            tmp = cur.prev
            cur.prev = cur.next
            cur.next = tmp
            cur = cur.prev
        if tmp:
            # tmp ends up as the node just before the old tail; its already
            # swapped prev pointer is the old tail, which becomes the new head.
            self.head = tmp.prev
dllist = DoublyLinkedList()
dllist.append(1)
dllist.append(2)
dllist.append(3)
dllist.append(4)
dllist.print_list()
print("\n Reverse list is:")
dllist.reverse()
dllist.print_list() | [
"[email protected]"
] | |
d1cece45846995c7aadd790e8a6b01fc5cea7f56 | 7dc65b6d2e857c807bd2f75e2586af5f8e933fe5 | /tcutils/parsers/pingparse.py | 51a28244cc66ce56730f37eb0238cc8ccf493219 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | vkolli/contrail-test-perf | d6fdc20f4a2004066c5a6316afd915ecdc9366c2 | db04b8924a2c330baabe3059788b149d957a7d67 | refs/heads/master | 2021-01-18T15:36:18.120487 | 2017-03-30T19:19:30 | 2017-03-30T19:19:30 | 86,661,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | """Parser to parse the ping output."""
import re
class PingParser(object):
"""Parser to parse the ping output."""
def __init__(self, output):
self.output = output
self.parsed_output = {}
self.parse()
def parse(self):
match = re.search(
"rtt\s+(min/avg/max/mdev)\s+=\s+(\d+.\d+/\d+.\d+/\d+.\d+/\d+.\d+)\s+(\w+)", self.output)
output_req = []
output_req.append(match.group(1))
output_req.append(match.group(2))
self.parsed_output = dict(
zip(output_req[0].split('/'), output_req[1].split('/')))
self.parsed_output['unit'] = match.group(3)
def get_ping_latency(self):
ping_output=self.parsed_output['avg']+" "+self.parsed_output['unit']
return ping_output
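
if __name__ == "__main__":
    # Quick demo with a typical Linux ping summary line (made-up numbers):
    sample = "rtt min/avg/max/mdev = 0.045/0.049/0.056/0.007 ms"
    parser = PingParser(sample)
    print(parser.get_ping_latency())  # -> "0.049 ms"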
| [
"[email protected]"
] | |
cef019d360cd07dab58312341d87ea996f7a6c32 | abc1a497c41ddd8669c8c41da18af65d08ca54e4 | /Analysis2gamma/fit/admin.py | 230d2f8df1a1fe4c5666176d8b51793ebc0ade35 | [] | no_license | gerakolt/direxeno_privet | fcef5e3b654720e277c48935acc168472dfd8ecc | 75e88fb1ed44fce32fce02677f64106121259f6d | refs/heads/master | 2022-12-20T22:01:30.825891 | 2020-10-04T06:01:07 | 2020-10-04T06:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | import multiprocessing
import numpy as np
import sys
import time
from memory_profiler import profile
# Rec=np.recarray(5000, dtype=[
# ('Q', 'f8', len(pmts)),
# ('T', 'f8', len(pmts)),
# ('St', 'f8', len(pmts)),
# ('mu', 'f8', 1),
# ('N', 'f8', 1),
# ('F', 'f8', 1),
# ('Tf', 'f8', 1),
# ('Ts', 'f8', 1),
# ('R', 'f8', 1),
# ('a', 'f8', 1),
# ('eta', 'f8', 1),
# ])
n=6
def make_glob_array(p):
Q=multiprocessing.Array('d', p[:n])
T=multiprocessing.Array('d', p[n:2*n])
St=multiprocessing.Array('d', p[2*n:3*n])
mu=multiprocessing.Array('d', [p[3*n]])
W=multiprocessing.Array('d', [p[3*n+1]])
g=multiprocessing.Array('d', [p[3*n+2]])
F=multiprocessing.Array('d', [p[3*n+3]])
Tf=multiprocessing.Array('d', [p[3*n+4]])
Ts=multiprocessing.Array('d', [p[3*n+5]])
R=multiprocessing.Array('d', [p[3*n+6]])
a=multiprocessing.Array('d', [p[3*n+7]])
return Q, T, St, mu, W, g, F, Tf, Ts, R, a
def make_iter(N, Q, T, St, F, Tf, Ts, R, a, v):
for i in range(len(N)):
np.random.seed(int(i*time.time()%2**32))
yield [Q, T, St, N[i], F, Tf, Ts, R, a, v[i]]
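
if __name__ == '__main__':
    # Illustrative smoke test only: fill the expected 3*n + 8 parameter
    # slots with dummy values to show how the shared arrays are unpacked.
    p = list(range(3 * n + 8))
    Q, T, St, mu, W, g, F, Tf, Ts, R, a = make_glob_array(p)
    print(list(Q), mu[0], a[0])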
| [
"[email protected]"
] | |
4f359ebcc8ffaef000df67e034006f85c9765a5f | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /samples/v2/pipeline_with_volume.py | 47b9099c6e00ae80368dc3da8796a43c37d26cab | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 1,890 | py | # Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline with volume creation, mount and deletion in v2 engine pipeline."""
from kfp import compiler  # needed by the __main__ block below
from kfp import dsl
from kfp import kubernetes
@dsl.component
def producer() -> str:
with open('/data/file.txt', 'w') as file:
file.write('Hello world')
with open('/data/file.txt', 'r') as file:
content = file.read()
print(content)
return content
@dsl.component
def consumer() -> str:
with open('/data/file.txt', 'r') as file:
content = file.read()
print(content)
return content
@dsl.pipeline
def pipeline_with_volume():
pvc1 = kubernetes.CreatePVC(
pvc_name_suffix='-my-pvc',
access_modes=['ReadWriteOnce'],
size='5Mi',
storage_class_name='standard',
)
task1 = producer()
task2 = consumer().after(task1)
kubernetes.mount_pvc(
task1,
pvc_name=pvc1.outputs['name'],
mount_path='/data',
)
kubernetes.mount_pvc(
task2,
pvc_name=pvc1.outputs['name'],
mount_path='/data',
)
delete_pvc1 = kubernetes.DeletePVC(
pvc_name=pvc1.outputs['name']).after(task2)
if __name__ == '__main__':
# execute only if run as a script
compiler.Compiler().compile(
pipeline_func=pipeline_with_volume,
package_path='pipeline_with_volume.json') | [
"[email protected]"
] | |
505d724b709f9a90e03e85a4ff7a185472bcbe00 | 2c22736309a50968896b4724df4a7a1d1a150d88 | /0x0F-python-object_relational_mapping/12-model_state_update_id_2.py | 0c0445e0c1b773c8f23caefc87d4363ffd43d9b0 | [] | no_license | gcifuentess/holbertonschool-higher_level_programming | ce9f263c0eef07facc1e02b719a8ae7193233d6d | 75e405ec7f1aa9138aa54e86f7b41aa08ead7f2a | refs/heads/master | 2023-06-18T08:36:22.580908 | 2021-07-18T20:46:40 | 2021-07-18T20:46:40 | 291,871,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/python3
'''
Query with SQLAlchemy, changes the name of a State object
from the database hbtn_0e_6_usa
'''
from sys import argv
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
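    # Expected invocation (arguments are placeholders):
    #   ./12-model_state_update_id_2.py <mysql_user> <mysql_password> hbtn_0e_6_usa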
engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'
.format(argv[1], argv[2], argv[3]),
encoding='utf-8', pool_pre_ping=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
to_update = session.query(State).filter_by(id=2).first()
to_update.name = "New Mexico"
session.commit()
session.close()
| [
"[email protected]"
] | |
036fd1629ac4f8c3c9ece01c9b37124bd5d8a92b | 759f52976ad2cd9236da561ca254e11e08003487 | /part5/ex32/proc/guess_number_core.py | 1cfa8a70b0f502de992f5608518efcd3bacb1b16 | [] | no_license | mbaeumer/fiftyseven | 57b571c3e09640a2ab0ed41e5d06643c12b48001 | d79b603d5b37bf1f4127d9253f8526ea3897dc08 | refs/heads/master | 2020-06-10T20:52:25.311992 | 2017-11-15T18:28:38 | 2017-11-15T18:28:38 | 75,877,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | #!/usr/bin/python
import random
from level import Difficulty
from comparison_result import ComparisonResult
def generate_my_number(difficulty_level):
max = 10
if difficulty_level == Difficulty.MEDIUM:
max = 50
elif difficulty_level == Difficulty.HARD:
max = 100
number = random.randint(0,max)
return number
# get the user's input as string
def get_user_guess(attempts):
message = "What is your next guess: "
if attempts == 1:
message = "Make the first guess: "
guess = input(message)
return guess
def get_difficulty_level():
choice = 0
while choice < 1 or choice > 3:
print("Select difficulty")
print("Easy \t - 1")
print("Medium \t - 2")
print("Hard \t - 3")
try:
choice = int(input("Your choice: "))
except ValueError:
print("ERROR: Please enter a valid choice!")
choice = 0
difficulty_level = Difficulty(choice)
print(difficulty_level)
return difficulty_level
def get_play_again():
user_input = ''
while user_input != 'y' and user_input != 'n':
user_input = input("Play again? ")
return user_input == 'y'
def isValidGuess(guess_as_string):
try:
guess = int(guess_as_string)
return True
except ValueError:
return False
def validateGuess(guess, my_number):
if my_number > guess:
return ComparisonResult.HIGHER
elif my_number < guess:
return ComparisonResult.LOWER
return ComparisonResult.EQUAL
def get_validation_message(comparison_result):
message = "You got it"
if comparison_result == ComparisonResult.HIGHER:
message = "The number is higher"
elif comparison_result == ComparisonResult.LOWER:
message = "The number is lower"
return message
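
if __name__ == "__main__":
    # Small self-check of the pure helpers (no user interaction):
    assert validateGuess(3, 7) == ComparisonResult.HIGHER
    assert validateGuess(9, 7) == ComparisonResult.LOWER
    print(get_validation_message(validateGuess(7, 7)))  # -> "You got it"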
| [
"[email protected]"
] | |
12a2a28e54708661a3440be67a2a67a961697c4d | fb98249ee3dece1f3ec5a7b2ba541a5ca07d170b | /python/developer.usr.py | 86e797cfd9a3b14372b77b83c10b4ecd3db8ec43 | [] | no_license | polikashechkin/developer | 5d4f50783c96fbe2078423ff689d91ab47408f05 | f9c6193440457ba4e78d4b5430a1d21c34cd9af1 | refs/heads/main | 2023-02-10T03:39:40.175655 | 2021-01-04T23:30:09 | 2021-01-04T23:30:09 | 326,832,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,115 | py | #!/usr/bin/python3.6
import sys, os, shutil, json
#os.chdir(os.dirname(__file__))
from domino.core import log, Version, Server, ProductInfo, VersionInfo, DOMINO_ROOT
from domino.jobs import Job, Задача
from domino.globalstore import GlobalStore
from domino.cli import Console, print_error, print_comment, print_warning, print_help
def arg(n):
try:
return sys.argv[n].lower()
except:
return None
class PRODUCT:
@staticmethod
def create(product):
draft = max(Server.get_drafts('domino'))
product_folder = Server.product_folder(product)
version_folder = Server.version_folder(product, draft)
os.makedirs(version_folder, exist_ok=True)
os.makedirs(os.path.join(version_folder, 'web'), exist_ok=True)
os.makedirs(os.path.join(version_folder, 'python'), exist_ok=True)
with open(os.path.join(product_folder, 'info.json'), 'w') as f:
json.dump({'id':product},f)
info = VersionInfo()
info.version = draft
info.product = product
info.write(version_folder)
#manifest = {'', 'products' : []}
def help():
print(os.path.abspath(__file__))
print('')
    print('product.create\tCreate a new product')
print('')
def последняя_версия_продукта(product):
последняя_версия = None
for name in os.listdir(f'/DOMINO/products/{product}'):
try:
версия = Version.parse(name)
if версия is not None and версия.is_draft:
if последняя_версия is None or последняя_версия < версия:
последняя_версия = версия
except:
pass
return последняя_версия
def hard_link(file_name, common_folder, product_folder):
common_file = os.path.join(common_folder, file_name)
product_file = os.path.join(product_folder, file_name)
if not os.path.isfile(common_file):
        print_warning(f'Missing file "{common_file}"')
return
os.makedirs(os.path.dirname(product_file), exist_ok=True)
if os.path.isfile(product_file):
os.remove(product_file)
os.link(common_file, product_file)
print_help(file_name)
print_comment(f'{common_file} => {product_file}')
def copy_folder(product, draft, folder):
draft = f'{draft}'
product_dir = os.path.join(DOMINO_ROOT, 'products', product, f'{draft}', 'python', 'domino', folder)
common_dir = os.path.join(DOMINO_ROOT,'products','_system','python', folder)
for name0 in os.listdir(common_dir):
dir0 = os.path.join(common_dir, name0)
if os.path.isdir(dir0):
for name1 in os.listdir(dir0):
dir1 = os.path.join(dir0, name1)
if os.path.isdir(dir1):
for name2 in os.listdir(dir1):
dir2 = os.path.join(dir1, name2)
if os.path.isdir(dir2):
pass
else:
hard_link(os.path.join(name0, name1, name2), common_dir, product_dir)
else:
hard_link(os.path.join(name0, name1), common_dir, product_dir)
else:
hard_link(name0, common_dir, product_dir)
if __name__ == "__main__":
dir = os.path.dirname(os.path.abspath(__file__))
gs = GlobalStore()
action = arg(1)
if action is None:
help()
elif action == 'create_next':
product = arg(2)
#draft = max(Server.get_drafts(product), default = None)
draft = последняя_версия_продукта(product)
if draft is None:
            raise Exception(f'No latest version found for product "{product}"')
#if draft is None:
        #    print (f'No working draft found for "{product}"')
proc = os.path.join(dir, 'create_next_version.py')
os.system(f'python3.6 {proc} {product} {draft}')
elif action == 'download':
path = arg(2)
file = arg(3)
if file is None:
file = os.path.basename(path)
gs.download(path, file)
elif action == 'upload':
gs.upload(arg(2), arg(3))
elif action == 'upload_distro':
gs.upload_distro(arg(2), arg(3), arg(4))
elif action == 'listdir':
for name in gs.listdir(arg(2)):
print(name)
elif action == 'get_versions':
for version in gs.get_versions(arg(2)):
print(version.id)
elif action == 'download_distro':
gs.download_distro(arg(2), arg(3), arg(4))
elif action == 'include':
c = Console()
product = arg(2)
if product is None:
            print_warning('Usage: domino include <product> <module>')
            print_warning(' <module> := <module name> | domino | templates | exists ')
sys.exit()
module = arg(3)
if module is None:
            c.error('No module specified')
sys.exit()
else:
print_comment(f'{module}')
product_draft = max(Server.get_drafts(product), default = None)
if product_draft is None:
            c.error(f'No draft found for "{product}"')
print_comment(f'{product}.{product_draft}')
filenames = []
        # figure out the source and target directories
if module == 'templates':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/templates'
common_folder = '/DOMINO/products/_system/python/templates'
            # build the list of files to link
for filename in os.listdir(common_folder):
filenames.append(filename)
elif module == 'tables':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino/tables'
common_folder = '/DOMINO/products/_system/python/tables'
for database_name in os.listdir(common_folder):
database_folder = os.path.join(common_folder, database_name)
if os.path.isdir(database_folder):
for table_name in os.listdir(database_folder):
hard_link(os.path.join(database_name, table_name), common_folder, product_folder)
elif module in ['components', 'responses', 'pages', 'databases', 'dicts', 'enums']:
copy_folder(product, product_draft, module)
elif module == 'all':
for module in ['components', 'responses', 'pages', 'databases', 'dicts', 'enums', 'tables']:
copy_folder(product, product_draft, module)
elif module == 'domino':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino'
common_folder = '/DOMINO/products/_system/python/domino'
for filename in os.listdir(common_folder):
filenames.append(filename)
else:
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino'
common_folder = '/DOMINO/products/_system/python/domino'
            # build the list of files to link
if module == 'exists':
filenames = os.listdir(product_folder)
else:
for filename in os.listdir(common_folder):
#print(filename)
if filename.startswith(module):
filenames.append(filename)
for address, dirs, files in os.walk(common_folder):
print('------------')
print(address, dirs, files)
#print(f'From "{common_folder}"')
#print(f'To "{product_folder}"')
#print(f'{filenames}')
#print(f'{filenames}')
for filename in filenames:
hard_link(filename, common_folder, product_folder)
else:
        print(f'Unknown command {action}')
| [
"[email protected]"
] | |
8ea7cda1c1ee33ae5c76fa40f4c8bb4b8f4314c3 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/cloudformation/checks/resource/aws/test_AmazonMQBrokerPublicAccess.py | 37710ba33578e981c9f2a0828d48921faa7ce386 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,444 | py | import os
import unittest
from checkov.cloudformation.checks.resource.aws.AmazonMQBrokerPublicAccess import check
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAmazonMQBrokerPublicAccess(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/example_AmazonMQBrokerPublicAccess"
report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
passing_resources = {
"AWS::AmazonMQ::Broker.PrivateBroker0",
"AWS::AmazonMQ::Broker.PrivateBroker1",
}
failing_resources = {
"AWS::AmazonMQ::Broker.PublicBroker0",
"AWS::AmazonMQ::Broker.PublicBroker1",
}
passed_check_resources = set([c.resource for c in report.passed_checks])
failed_check_resources = set([c.resource for c in report.failed_checks])
self.assertEqual(summary['passed'], 2)
self.assertEqual(summary['failed'], 2)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
self.assertEqual(passing_resources, passed_check_resources)
self.assertEqual(failing_resources, failed_check_resources)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7f41a27952caaadddc38b8c99ca3115a8f56128a | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2common/st2common/persistence/trigger.py | 9a01acf09952ed40a2b9784989224410b6b9a9bd | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 3,200 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common import log as logging
from st2common import transport
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.trigger import triggertype_access, trigger_access, triggerinstance_access
from st2common.persistence.base import (Access, ContentPackResource)
from st2common.transport import utils as transport_utils
LOG = logging.getLogger(__name__)
class TriggerType(ContentPackResource):
impl = triggertype_access
@classmethod
def _get_impl(cls):
return cls.impl
class Trigger(ContentPackResource):
impl = trigger_access
publisher = None
@classmethod
def _get_impl(cls):
return cls.impl
@classmethod
def _get_publisher(cls):
if not cls.publisher:
cls.publisher = transport.reactor.TriggerCUDPublisher(
urls=transport_utils.get_messaging_urls())
return cls.publisher
@classmethod
def delete_if_unreferenced(cls, model_object, publish=True, dispatch_trigger=True):
# Found in the innards of mongoengine.
# e.g. {'pk': ObjectId('5609e91832ed356d04a93cc0')}
delete_query = model_object._object_key
delete_query['ref_count__lte'] = 0
cls._get_impl().delete_by_query(**delete_query)
        # Since delete_by_query cannot tell if the delete actually happened, check with a get call
        # whether the trigger was deleted. Unfortunately, this opens up to races on delete.
confirmed_delete = False
try:
cls.get_by_id(model_object.id)
except (StackStormDBObjectNotFoundError, ValueError):
confirmed_delete = True
# Publish internal event on the message bus
if confirmed_delete and publish:
try:
cls.publish_delete(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if confirmed_delete and dispatch_trigger:
try:
cls.dispatch_delete_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return model_object
class TriggerInstance(Access):
impl = triggerinstance_access
@classmethod
def _get_impl(cls):
return cls.impl
@classmethod
def delete_by_query(cls, *args, **query):
return cls._get_impl().delete_by_query(*args, **query)
| [
"[email protected]"
] | |
88a459054fe086e6f2a96d15ab8e887f827114b3 | 1180c0bfe29959d95f3c131e6e839950e528d4ee | /42/pgmilenkov/regex.py | db3e891349a6a6ed5fab6a8521b5513867ebd992 | [] | no_license | pybites/challenges | e3e461accd8e7f890aee8007ba5070086ef983fc | 02b77652d0901e6e06cb9b1e7cb3e59c675445c2 | refs/heads/community | 2023-08-20T18:19:02.982214 | 2022-11-17T09:23:31 | 2022-11-17T09:23:31 | 78,264,928 | 764 | 3,115 | null | 2023-07-21T05:58:19 | 2017-01-07T07:17:50 | Jupyter Notebook | UTF-8 | Python | false | false | 2,457 | py | import re
def extract_course_times():
'''Use re.findall to capture all mm:ss timestamps in a list'''
flask_course = ('Introduction 1 Lecture 01:47'
'The Basics 4 Lectures 32:03'
'Getting Technical! 4 Lectures 41:51'
'Challenge 2 Lectures 27:48'
'Afterword 1 Lecture 05:02')
return re.findall(r'\d{2}:\d{2}', flask_course)
def split_on_multiple_chars():
'''Use re.split to split log line by ; , .
but not on the last ... so list should have len of 4
(hint check re.split docs for extra switches)'''
logline = ('2017-11-03T01:00:02;challenge time,regex!.'
'hope you join ... soon')
return re.split(r';|,|\.',logline, maxsplit=3)
def get_all_hashtags_and_links():
'''Use re.findall to extract the URL and 2 hashtags of this tweet'''
tweet = ('New PyBites article: Module of the Week - Requests-cache '
'for Repeated API Calls - http://pybit.es/requests-cache.html '
'#python #APIs')
# return re.findall(r'#\S+',tweet)
return re.findall(r'(http\S+|#\S+)',tweet)
def match_first_paragraph():
'''Use re.sub to extract the content of the first paragraph (excl tags)'''
html = ('<p>pybites != greedy</p>'
'<p>not the same can be said REgarding ...</p>')
return re.sub(r'<p>(.*?)</p>.*',r'\1',html)
def find_double_words():
'''Use re.search(regex, text).group() to find the double word'''
text = 'Spain is so nice in the the spring'
result = re.search(r''
r'\b' # begin of word
r'(' # start group
r'[a-z' # lower case letters
r'A-Z' # upper case letters
r'0-9]' # digits
                       r'+' # one or more occurrences
r')' # end of group
r'\s+' # whitespaces
r'\1' # match group
r'\b', # end of word
text,re.VERBOSE)
    # result.span() could be used to splice the duplicate word out of `text`,
    # but the challenge only asks for the match itself:
    return result
def match_ip_v4_address(ip):
'''Use re.match to match an ip v4 address (no need for exact IP ranges)'''
return re.match(r'(\d{1,3}\.){3}\d{1,3}',ip)
if __name__ == '__main__':
print(match_first_paragraph())
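    # A few more spot checks; expected values follow from the docstrings:
    print(extract_course_times())                 # ['01:47', '32:03', ...]
    print(find_double_words().group(1))           # 'the'
    print(bool(match_ip_v4_address('10.0.0.1')))  # True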
| [
"[email protected]"
] | |
b02027bdd2169431f6313f70a90f250241b4c30e | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/aio/operations_async/_virtual_machine_extension_images_operations_async.py | 296bc94769725b14078d4c3f9f390e439d4c9718 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 10,925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineExtensionImagesOperations:
"""VirtualMachineExtensionImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
location: str,
publisher_name: str,
type: str,
version: str,
**kwargs
) -> "models.VirtualMachineExtensionImage":
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtensionImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtensionImage"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtensionImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'} # type: ignore
async def list_types(
self,
location: str,
publisher_name: str,
**kwargs
) -> List["models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineExtensionImage"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.list_types.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_types.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'} # type: ignore
async def list_versions(
self,
location: str,
publisher_name: str,
type: str,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs
) -> List["models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineExtensionImage"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.list_versions.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_versions.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'} # type: ignore
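# Hedged usage sketch -- in the azure-mgmt client pattern these operations are
# reached through a generated client attribute; the names below are illustrative:
#   images = await compute_client.virtual_machine_extension_images.list_types(
#       location="westus", publisher_name="Microsoft.Azure.Extensions")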
| [
"[email protected]"
] | |
00c4adf04fdb31e76a8271a3d839b907cf5d21fd | a508ffe0942f75721d4623fcda9e57808f93f07d | /input_test/s.py | a5aa5c9bcccbb47b880e6950bb649080e34c2a96 | [] | no_license | ag8/magic | 3a14a81f3c06fa67cd77de07045ee3dc3899ca7f | 2768fc7490e6cc55b522be68926ad24d3caa939c | refs/heads/master | 2021-01-22T06:49:29.561849 | 2017-10-30T23:34:57 | 2017-10-30T23:34:57 | 102,300,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import pickle
with open('/data/affinity/2d/overlap_micro/OVERLAP_AREAS', 'rb') as fp:  # pickle.load needs a binary-mode file on Python 3
overlap_areas = pickle.load(fp)
print(overlap_areas)
| [
"[email protected]"
] | |
2fb669fc4597c7fbbb01eb650d16264493c9fb0f | 33c1c5d0f48ad952776fe546a85350a441d6cfc2 | /ABC/102/D.py | 65484cf137a326b30baf30c052b7b04eae161ddc | [] | no_license | hisyatokaku/Competition | 985feb14aad73fda94804bb1145e7537b057e306 | fdbf045a59eccb1b2502b018cab01810de4ea894 | refs/heads/master | 2021-06-30T18:48:48.256652 | 2020-11-16T11:55:12 | 2020-11-16T11:55:12 | 191,138,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,310 | py | import math,string,itertools,fractions,heapq,collections,re,array,bisect,sys,random,time,copy,functools
from collections import deque
sys.setrecursionlimit(10**7)
inf = 10**20
mod = 10**9 + 7
DR = [1, -1, 0, 0]
DC = [0, 0, 1, -1]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def S(): return input()
def main():
N = I()
A = LI()
cumsum = [0 for _ in range(N+1)]
for i in range(N):
cumsum[i+1] = cumsum[i] + A[i]
def get_cumsum(a, b):
# get cumsum from [a, b] element
return cumsum[b+1] - cumsum[a]
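    # e.g. A = [2, 1, 3] gives cumsum = [0, 2, 3, 6], so get_cumsum(1, 2) == 4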
def separate_opt_sum(l, r, offset):
tot_sum = get_cumsum(l, r)
targ = offset + tot_sum / 2
left_i = bisect.bisect_left(cumsum, targ)
diff1 = inf
diff2 = inf
diff3 = inf
left_sum1, right_sum1, left_sum2, right_sum2 = inf, inf, inf, inf
left_sum3, right_sum3 = inf, inf
if l <= left_i - 2:
left_sum3 = get_cumsum(l, left_i - 2)
right_sum3 = tot_sum - left_sum3
diff3 = abs(right_sum3 - left_sum3)
if l <= left_i - 1:
left_sum1 = get_cumsum(l, left_i - 1)
right_sum1 = tot_sum - left_sum1
diff1 = abs(right_sum1 - left_sum1)
if left_i < r:
left_sum2 = get_cumsum(l, left_i)
right_sum2 = tot_sum - left_sum2
diff2 = abs(right_sum2 - left_sum2)
if min(diff1, diff2, diff3) == diff1:
return left_sum1, right_sum1
elif min(diff1, diff2, diff3) == diff2:
return left_sum2, right_sum2
return left_sum3, right_sum3
def _separate_opt_sum(l, r):
# find arr1, arr2 s.t. |arr1 - arr2| = min
# arr1 = get_cumsum(l, k), arr2 = get_cumsum(k+1, r)
tot_sum = get_cumsum(l, r)
m = (l + r) // 2
m_min = l - 1
m_max = r + 1
cur_min_diff = abs(2 * get_cumsum(l, m) - tot_sum)
cur_min_m = m
while m_min < m and m < m_max:
left_sum = get_cumsum(l, m)
right_sum = tot_sum - left_sum
cur_diff = abs(left_sum - right_sum)
if cur_diff < cur_min_diff:
cur_min_diff = cur_diff
cur_min_m = m
if left_sum < right_sum:
m_min = m
m = (m + r) // 2
elif left_sum > right_sum:
m_max = m
m = (l + m) // 2
else:
break
l_ans = get_cumsum(l, cur_min_m)
r_ans = get_cumsum(cur_min_m + 1, r)
return l_ans, r_ans
ans = inf
for sep in range(1, N-2):
left_S = get_cumsum(0, sep)
right_S = get_cumsum(sep+1, N-1)
# import pdb
# pdb.set_trace()
p, q = separate_opt_sum(0, sep, 0)
r, s = separate_opt_sum(sep+1, N-1, left_S)
# print('sep:', sep, ' ', p, q, r, s)
# print('\tleft_S:', left_S, ' right_S:', right_S)
ans = min(ans, max(p, q, r, s) - min(p, q, r, s))
print(ans)
main()
| [
"[email protected]"
] | |
bcd820a2e7d5a5a1cc617759d8a7456ea44b3f69 | 7e8cee08e8a583cfcefbf86f9272a65bca4dd2e4 | /Test/Wx/GridCustEditor.py | 90505b7730f05f209dc030190c349d8991dff6f1 | [] | no_license | PREM1980/ecomstore | 01adb86b8423100421806097a518df08ab30c4c8 | 0a01e1826699c8656fdb2502741f8b638948a6e4 | refs/heads/master | 2016-09-05T16:58:30.396618 | 2013-05-02T23:15:35 | 2013-05-02T23:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,967 | py |
import string
import wx
import wx.grid as gridlib
#---------------------------------------------------------------------------
class MyCellEditor(gridlib.PyGridCellEditor):
"""
This is a sample GridCellEditor that shows you how to make your own custom
grid editors. All the methods that can be overridden are shown here. The
ones that must be overridden are marked with "*Must Override*" in the
docstring.
"""
def __init__(self, log):
self.log = log
self.log.write("MyCellEditor ctor\n")
gridlib.PyGridCellEditor.__init__(self)
def Create(self, parent, id, evtHandler):
"""
Called to create the control, which must derive from wx.Control.
*Must Override*
"""
self.log.write("MyCellEditor: Create\n")
self._tc = wx.TextCtrl(parent, id, "")
self._tc.SetInsertionPoint(0)
self.SetControl(self._tc)
if evtHandler:
self._tc.PushEventHandler(evtHandler)
def SetSize(self, rect):
"""
Called to position/size the edit control within the cell rectangle.
If you don't fill the cell (the rect) then be sure to override
PaintBackground and do something meaningful there.
"""
self.log.write("MyCellEditor: SetSize %s\n" % rect)
self._tc.SetDimensions(rect.x, rect.y, rect.width+2, rect.height+2,
wx.SIZE_ALLOW_MINUS_ONE)
def Show(self, show, attr):
"""
Show or hide the edit control. You can use the attr (if not None)
to set colours or fonts for the control.
"""
self.log.write("MyCellEditor: Show(self, %s, %s)\n" % (show, attr))
super(MyCellEditor, self).Show(show, attr)
def PaintBackground(self, rect, attr):
"""
Draws the part of the cell not occupied by the edit control. The
base class version just fills it with background colour from the
attribute. In this class the edit control fills the whole cell so
don't do anything at all in order to reduce flicker.
"""
self.log.write("MyCellEditor: PaintBackground\n")
def BeginEdit(self, row, col, grid):
"""
Fetch the value from the table and prepare the edit control
to begin editing. Set the focus to the edit control.
*Must Override*
"""
self.log.write("MyCellEditor: BeginEdit (%d,%d)\n" % (row, col))
self.startValue = grid.GetTable().GetValue(row, col)
self._tc.SetValue(self.startValue)
self._tc.SetInsertionPointEnd()
self._tc.SetFocus()
# For this example, select the text
self._tc.SetSelection(0, self._tc.GetLastPosition())
def EndEdit(self, row, col, grid):
"""
Complete the editing of the current cell. Returns True if the value
has changed. If necessary, the control may be destroyed.
*Must Override*
"""
self.log.write("MyCellEditor: EndEdit (%d,%d)\n" % (row, col))
changed = False
val = self._tc.GetValue()
if val != self.startValue:
changed = True
grid.GetTable().SetValue(row, col, val) # update the table
self.startValue = ''
self._tc.SetValue('')
return changed
def Reset(self):
"""
Reset the value in the control back to its starting value.
*Must Override*
"""
self.log.write("MyCellEditor: Reset\n")
self._tc.SetValue(self.startValue)
self._tc.SetInsertionPointEnd()
def IsAcceptedKey(self, evt):
"""
Return True to allow the given key to start editing: the base class
version only checks that the event has no modifiers. F2 is special
and will always start the editor.
"""
self.log.write("MyCellEditor: IsAcceptedKey: %d\n" % (evt.GetKeyCode()))
## We can ask the base class to do it
#return super(MyCellEditor, self).IsAcceptedKey(evt)
# or do it ourselves
return (not (evt.ControlDown() or evt.AltDown()) and
evt.GetKeyCode() != wx.WXK_SHIFT)
def StartingKey(self, evt):
"""
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
"""
self.log.write("MyCellEditor: StartingKey %d\n" % evt.GetKeyCode())
key = evt.GetKeyCode()
ch = None
if key in [ wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
wx.WXK_NUMPAD8, wx.WXK_NUMPAD9
]:
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
elif key < 256 and key >= 0 and chr(key) in string.printable:
ch = chr(key)
if ch is not None:
# For this example, replace the text. Normally we would append it.
#self._tc.AppendText(ch)
self._tc.SetValue(ch)
self._tc.SetInsertionPointEnd()
else:
evt.Skip()
def StartingClick(self):
"""
If the editor is enabled by clicking on the cell, this method will be
called to allow the editor to simulate the click on the control if
needed.
"""
self.log.write("MyCellEditor: StartingClick\n")
def Destroy(self):
"""final cleanup"""
self.log.write("MyCellEditor: Destroy\n")
super(MyCellEditor, self).Destroy()
def Clone(self):
"""
Create a new object which is the copy of this one
*Must Override*
"""
self.log.write("MyCellEditor: Clone\n")
return MyCellEditor(self.log)
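# Per the "*Must Override*" markers above: Create, BeginEdit, EndEdit, Reset
# and Clone are the required overrides; the remaining hooks are optional.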
#---------------------------------------------------------------------------
class GridEditorTest(gridlib.Grid):
def __init__(self, parent, log):
gridlib.Grid.__init__(self, parent, -1)
self.log = log
self.CreateGrid(10, 3)
# Somebody changed the grid so the type registry takes precedence
# over the default attribute set for editors and renderers, so we
# have to set null handlers for the type registry before the
# default editor will get used otherwise...
#self.RegisterDataType(wxGRID_VALUE_STRING, None, None)
#self.SetDefaultEditor(MyCellEditor(self.log))
# Or we could just do it like this:
#self.RegisterDataType(wx.GRID_VALUE_STRING,
# wx.GridCellStringRenderer(),
# MyCellEditor(self.log))
# )
# but for this example, we'll just set the custom editor on one cell
self.SetCellEditor(1, 0, MyCellEditor(self.log))
self.SetCellValue(1, 0, "Try to edit this box")
# and on a column
attr = gridlib.GridCellAttr()
attr.SetEditor(MyCellEditor(self.log))
self.SetColAttr(2, attr)
self.SetCellValue(1, 2, "or any in this column")
self.SetColSize(0, 150)
self.SetColSize(1, 150)
self.SetColSize(2, 150)
#---------------------------------------------------------------------------
class TestFrame(wx.Frame):
def __init__(self, parent, log):
wx.Frame.__init__(self, parent, -1, "Custom Grid Cell Editor Test",
size=(640,480))
grid = GridEditorTest(self, log)
#---------------------------------------------------------------------------
if __name__ == '__main__':
import sys
app = wx.PySimpleApp()
frame = TestFrame(None, sys.stdout)
frame.Show(True)
app.MainLoop()
| [
"[email protected]"
] | |
172a466d5f80f6441ed6b874517a024f13c5aa06 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_123/656.py | d804f5541e5fb0795836e9f5a04694acf4147beb | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,797 | py | #
# By Allan Douglas R. de Oliveira
# This program is under public license
#
########################
# makes python 2.x behave like python 3k
from __future__ import print_function, unicode_literals, division
# common imports
import sys
import operator
import math
from io import StringIO
from itertools import chain, combinations, product, permutations, islice
from collections import namedtuple
from functools import reduce
if sys.version_info[0] >= 3:
#import numpy as np
pass
else:
# for pypy until it doesn't support py3k
from itertools import izip as zip, ifilter as filter, imap as map
range = xrange
# numpypy may not work well on windows, yet
#import numpypy as np
# integer square root
def isqrt(n):
x = n
y = (x + n // x) // 2
while y < x:
x = y
y = (x + n // x) // 2
return x
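# Integer Newton iteration; returns floor(sqrt(n)), e.g. isqrt(10) == 3.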
# Parallel map with switch to serial map
def pmap(f, iterable, parallel=True):
if parallel:
from multiprocessing import Pool
p = Pool(7)
return p.imap(f, iterable, chunksize=1)
else:
return map(f, iterable)
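# e.g. pmap(str, range(3), parallel=False) behaves like the builtin map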
def dot_product(v1, v2, sum=sum, map=map, mul=operator.mul):
return sum(map(mul, v1, v2))
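# e.g. dot_product([1, 2, 3], [4, 5, 6]) == 32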
# some linalg utility definitions
Line = namedtuple('Line', 'A, B, C') # Line is Ax + By = C
Point = namedtuple('Point', 'x, y')
def iszero(n):
return abs(n) < 1e-12
def point_to_line(p1, m):
A, B, C = (m, -1, m*p1.x - p1.y)
return Line(A, B, C)
def points_to_line(p1, p2):
L = p2.y - p1.y
K = p2.x - p1.x
A, B, C = (L, -K, L * p1.x - K * p1.y)
return Line(A, B, C)
def line_intersection2D(line1, line2):
A1, B1, C1 = line1
A2, B2, C2 = line2
det = A1*B2 - A2*B1
if iszero(det): # parallel
return None
else:
x = (B2*C1 - B1*C2) / det
y = (A1*C2 - A2*C1) / det
return Point(x, y)
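# e.g. the diagonals of the unit square cross at (0.5, 0.5):
#   l1 = points_to_line(Point(0, 0), Point(1, 1))
#   l2 = points_to_line(Point(0, 1), Point(1, 0))
#   line_intersection2D(l1, l2)  # -> Point(x=0.5, y=0.5)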
def calc_coord_y(line, x):
y = (line.C - line.A * x) / line.B
return y
# end of standard stuff
########################
sample_input = StringIO('''4
2 2
2 1
2 4
2 1 1 6
10 4
25 20 9 100
1 4
1 1 1 1''')
def check(motes_ordered, A):
for i, mote in enumerate(motes_ordered):
if A > mote:
A += mote
else:
return (i, A)
return None
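# check() simulates the absorb loop: e.g. check([1, 5], 2) returns (1, 3) --
# A eats the 1-mote and grows to 3, which cannot eat the 5 at index 1.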
def try_delete(i, motes_ordered, A, changes):
#print ('try delete', A, changes, motes_ordered)
motes_deleted = motes_ordered[i:-1]
return process(motes_deleted, A, changes + 1)
def try_add(i, motes_ordered, A, changes):
#print ('try add', A, changes, motes_ordered)
#minimum_new_mote_size = motes_ordered[i] - A + 1
#assert minimum_new_mote_size > 0
if A <= 1:
return None
maximum_new_mote_size = A - 1
results = []
for new_mote_size in range(maximum_new_mote_size, maximum_new_mote_size+1):
motes_added_new = [new_mote_size] + motes_ordered[i:]
process_result = process(motes_added_new, A, changes + 1)
if process_result is not None:
results.append(process_result)
return None if len(results) == 0 else min(results)
def process(motes_ordered, A, changes):
#print (A, changes, motes_ordered)
if len(motes_ordered) == 0:
#print ('empty list, returning')
return changes
result = check(motes_ordered, A)
if result is None:
return changes
else:
i, a = result
result_delete = try_delete(i, motes_ordered, a, changes)
result_add = try_add(i, motes_ordered, a, changes)
assert result_delete is not None or result_add is not None
if result_delete is None:
return result_add
elif result_add is None:
return result_delete
else:
return min(result_add, result_delete)
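# At the sticking point process() tries both repairs -- drop the largest
# remaining mote (try_delete) or insert a new mote of size A-1 (try_add) --
# and returns whichever recursion needs fewer changes.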
def process_test_case(inputs):
A, N, motes = inputs
motes_ordered = sorted(motes)
changes = 0
process_result = process(motes_ordered, A, changes)
assert process_result is not None
return process_result
def read_test_case(f):
A, N = [int(x) for x in f.readline().split()]
motes = [int(x) for x in f.readline().split()]
return (A, N, motes)
def print_result(i, result):
if result is None:
print('Case #%d: %s' % (i+1, 'Error'))
else:
print('Case #%d: %d' % (i+1, result))
##########################
# basic test case reading and processing skeleton
def read_test_cases(f):
T = int(f.readline())
return [read_test_case(f) for t in range(T)]
def main(stream, parallel):
for i, result in enumerate(pmap(process_test_case, read_test_cases(stream), parallel=parallel)):
print_result(i, result)
if __name__ == '__main__':
if len(sys.argv) > 1:
main(open(sys.argv[1]), True)
else:
main(sample_input, False)
########################## | [
"[email protected]"
] | |
39d1e033b8bb386f1fc6b9d116ec599b624a8828 | dd4d1a61ec680a86d4b569490bf2a898ea0d7557 | /appengine/predator/analysis/test/clusterfuzz_data_test.py | d1d1f2e8d5719c1e04bee66fb5c8919fa642a706 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-infra | f1a68914b47bcbe3cd8a424f43741dd74fedddf4 | 09064105713603f7bf75c772e8354800a1bfa256 | refs/heads/master | 2022-10-29T23:21:46.894543 | 2017-05-16T06:22:50 | 2017-05-16T06:22:50 | 91,423,078 | 1 | 1 | BSD-3-Clause | 2022-10-01T18:48:03 | 2017-05-16T06:23:34 | Python | UTF-8 | Python | false | false | 5,279 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from analysis.analysis_testcase import AnalysisTestCase
from analysis.clusterfuzz_data import ClusterfuzzData
from analysis.clusterfuzz_parser import ClusterfuzzParser
from analysis.stacktrace import CallStack
from analysis.stacktrace import StackFrame
from analysis.stacktrace import Stacktrace
from analysis.type_enums import CrashClient
from analysis.type_enums import SanitizerType
from libs.deps.chrome_dependency_fetcher import ChromeDependencyFetcher
from libs.deps.dependency import Dependency
from libs.deps.dependency import DependencyRoll
class CusterfuzzDataTest(AnalysisTestCase):
"""Tests ``ClusterfuzzData`` class."""
def testProperties(self):
"""Tests ``ClusterfuzzData`` specific properties."""
raw_crash_data = self.GetDummyClusterfuzzData(sanitizer='ASAN')
crash_data = ClusterfuzzData(raw_crash_data)
self.assertEqual(crash_data.crashed_address,
raw_crash_data['customized_data']['crashed_address'])
self.assertEqual(crash_data.crashed_type,
raw_crash_data['customized_data']['crashed_type'])
self.assertEqual(crash_data.sanitizer,
SanitizerType.ADDRESS_SANITIZER)
self.assertEqual(crash_data.job_type,
raw_crash_data['customized_data']['job_type'])
self.assertEqual(crash_data.regression_range,
raw_crash_data['customized_data']['regression_range'])
self.assertEqual(crash_data.testcase,
raw_crash_data['customized_data']['testcase'])
@mock.patch('analysis.clusterfuzz_parser.ClusterfuzzParser.Parse')
def testParseStacktraceFailed(self, mock_parse):
"""Tests that ``stacktrace`` is None when failed to pars stacktrace."""
mock_parse.return_value = None
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
self.assertIsNone(crash_data.stacktrace)
def testParseStacktraceSucceeded(self):
"""Tests parsing ``stacktrace``."""
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
stack = CallStack(0)
stacktrace = Stacktrace([stack], stack)
with mock.patch(
'analysis.clusterfuzz_parser.ClusterfuzzParser.Parse') as mock_parse:
mock_parse.return_value = stacktrace
self._VerifyTwoStacktracesEqual(crash_data.stacktrace, stacktrace)
def testParseStacktraceReturnsCache(self):
"""Tests that ``stacktrace`` returns cached ``_stacktrace`` value."""
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
stack = CallStack(1)
stacktrace = Stacktrace([stack], stack)
crash_data._stacktrace = stacktrace
self._VerifyTwoStacktracesEqual(crash_data.stacktrace, stacktrace)
def testDependencies(self):
"""Tests ``dependencies`` property."""
dep = Dependency('src/', 'https://repo', 'rev1')
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData(
dependencies=[{'dep_path': dep.path,
'repo_url': dep.repo_url,
'revision': dep.revision}]))
self.assertEqual(len(crash_data.dependencies), 1)
self.assertTrue(dep.path in crash_data.dependencies)
self.assertEqual(crash_data.dependencies[dep.path].path, dep.path)
self.assertEqual(crash_data.dependencies[dep.path].repo_url, dep.repo_url)
self.assertEqual(crash_data.dependencies[dep.path].revision, dep.revision)
def testDependenciesReturnsCache(self):
"""Tests that ``dependencies`` returns cached ``_dependencies`` value."""
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
deps = {'src/': Dependency('src/', 'https://repo', 'rev')}
crash_data._dependencies = deps
self.assertEqual(crash_data.dependencies, deps)
def testDependencyRollsReturnsCache(self):
"""Tests that ``dependency_rolls`` returns cached ``_dependency_rolls``."""
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
dep_roll = {'src/': DependencyRoll('src/', 'https://repo', 'rev0', 'rev3')}
crash_data._dependency_rolls = dep_roll
self.assertEqual(crash_data.dependency_rolls, dep_roll)
def testDependencyRolls(self):
"""Tests ``regression_rolls`` property."""
dep_roll = DependencyRoll('src/', 'https://repo', 'rev1', 'rev6')
crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData(
dependency_rolls=[{'dep_path': dep_roll.path,
'repo_url': dep_roll.repo_url,
'old_revision': dep_roll.old_revision,
'new_revision': dep_roll.new_revision}]))
self.assertEqual(len(crash_data.dependency_rolls), 1)
self.assertTrue(dep_roll.path in crash_data.dependency_rolls)
self.assertEqual(crash_data.dependency_rolls[dep_roll.path].path,
dep_roll.path)
self.assertEqual(crash_data.dependency_rolls[dep_roll.path].repo_url,
dep_roll.repo_url)
self.assertEqual(crash_data.dependency_rolls[dep_roll.path].old_revision,
dep_roll.old_revision)
self.assertEqual(crash_data.dependency_rolls[dep_roll.path].new_revision,
dep_roll.new_revision)
| [
"[email protected]"
] | |
35d07eeece96e275825fbdd83830f5c389cb5f9c | 6b8b6e5c7a31342b781909623d4fe60a563482d4 | /sensor/DS18B20.py | ec091e5f56edd07f7219f5a7edd77bf711220b0d | [] | no_license | ariebroere/sensor | 7d3d4cd6ef5348bf5f3e3bdc6731c7b237200447 | 1ede75d51d67622352cb4a3e918255f5cae3c061 | refs/heads/master | 2021-01-18T22:50:02.361613 | 2017-04-02T06:29:40 | 2017-04-02T06:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | # The MIT License (MIT)
#
# Copyright (c) 2015 Nick Lee
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Include the sensor directory, so this file may be run as a test script.
if __name__ == "__main__" and __package__ is None:
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
import time   # used by the retry loop in _update_sensor_data
import sensor
from sensor.util import Temperature
class DS18B20(sensor.SensorBase):
def __init__(self, addr):
super(DS18B20, self).__init__(self._update_sensor_data)
self._device = '/sys/bus/w1/devices/%s/w1_slave' % addr
self._temperature = None
def temperature(self):
self._update()
return Temperature(C=self._temperature) if self._temperature is not None else None
@sensor.w1_lock
def _update_sensor_data(self):
# Try at most 3 times
for i in range(0,3):
# Split output into separate lines.
lines = subprocess.check_output(['cat', self._device]).split('\n')
# If the first line does not end with 'YES', try again.
if lines[0][-3:] != 'YES':
time.sleep(0.2)
continue
# If the second line does not have a 't=', try again.
pos = lines[1].find('t=')
if pos < 0:
time.sleep(0.2)
continue
# Extract the temperature.
self._temperature = float(lines[1][pos+2:]) / 1000.0
return
# Failed reading
self._temperature = None
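# Typical w1_slave output that _update_sensor_data parses (hedged example):
#   4b 46 7f ff 02 10 1c : crc=1c YES
#   4b 46 7f ff 02 10 1c t=23562    -> 23.562 degrees C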
""" Run this file as a test script
1. Find the sensor's 1-wire address
$ cd /sys/bus/w1/devices
$ ls
Look for '28-..........'. That is the address.
Then:
$ python DS18B20.py <address>
"""
if __name__ == '__main__':
import sys, time
ds = DS18B20(sys.argv[1])
for i in range(0, 30):
print ds.temperature()
time.sleep(1)
| [
"[email protected]"
] | |
4fddac42ee4bc07489ef1b4e274f85afb21e0de4 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/azuredata/v20170301preview/get_sql_server_registration.py | 212ce2e8e70f2e01cb921271035d0102a9a6da0b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,406 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSqlServerRegistrationResult',
'AwaitableGetSqlServerRegistrationResult',
'get_sql_server_registration',
]
@pulumi.output_type
class GetSqlServerRegistrationResult:
"""
A SQL server registration.
"""
def __init__(__self__, id=None, location=None, name=None, property_bag=None, resource_group=None, subscription_id=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if property_bag and not isinstance(property_bag, str):
raise TypeError("Expected argument 'property_bag' to be a str")
pulumi.set(__self__, "property_bag", property_bag)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="propertyBag")
def property_bag(self) -> Optional[str]:
"""
Optional Properties as JSON string
"""
return pulumi.get(self, "property_bag")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[str]:
"""
Resource Group Name
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
Subscription Id
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlServerRegistrationResult(GetSqlServerRegistrationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlServerRegistrationResult(
id=self.id,
location=self.location,
name=self.name,
property_bag=self.property_bag,
resource_group=self.resource_group,
subscription_id=self.subscription_id,
tags=self.tags,
type=self.type)
def get_sql_server_registration(resource_group_name: Optional[str] = None,
sql_server_registration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlServerRegistrationResult:
"""
A SQL server registration.
:param str resource_group_name: Name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str sql_server_registration_name: Name of the SQL Server registration.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['sqlServerRegistrationName'] = sql_server_registration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:azuredata/v20170301preview:getSqlServerRegistration', __args__, opts=opts, typ=GetSqlServerRegistrationResult).value
return AwaitableGetSqlServerRegistrationResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
property_bag=__ret__.property_bag,
resource_group=__ret__.resource_group,
subscription_id=__ret__.subscription_id,
tags=__ret__.tags,
type=__ret__.type)
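# Hedged usage sketch (resource names are illustrative):
#   reg = get_sql_server_registration(resource_group_name="my-rg",
#                                     sql_server_registration_name="my-sql-reg")
#   pulumi.export("registrationId", reg.id)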
| [
"[email protected]"
] | |
610b9ee40927adccd43e1fe32dcc8a11699359b4 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/442497_Pattern_List/recipe-442497.py | 7a7e212a314061dc34d2a57bd9d425d6b849bcf9 | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 800 | py | import re
class PatternList( object ):
"""A Patternlist is a list of regular expressions. the 'in' operator
allows a string to be compared against each expression (using search
NOT match)"""
def __init__(self , patterns = []):
self.patterns = []
for p in patterns:
self.add( p )
def add( self , pattern ):
pat = re.compile( pattern )
self.patterns.append( pat )
def __contains__(self , item ):
ret = False
for p in self.patterns:
if p.search( item ):
ret= True
break
return ret
if __name__=="__main__":
examplelist = PatternList( [ ".*txt$" , ".*doc$" ])
assert( "test.txt" in examplelist )
assert( "test.xls" not in examplelist )
| [
"[email protected]"
] | |
96038f0fdec7f1c6c7c9e3d1da8063fe493d6e40 | 1674e40a5dab691961ae676b3d6752870df1c60b | /.cache/JetBrains/PyCharm2020.2/python_stubs/-988789078/_multibytecodec.py | fdb881ed202160e3f7d75a9bd8465130cfb97a9a | [] | no_license | AiperiAkhumbai/group_project | a7c0efacbdcfc4a35d62b6321b255e3ed9e3436c | 9c62b9964776306ab85901b501536eb667d3c337 | refs/heads/main | 2023-01-23T08:12:47.433544 | 2020-11-24T18:57:49 | 2020-11-24T18:57:49 | 313,209,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,428 | py | # encoding: utf-8
# module _multibytecodec
# from /usr/lib/python3.8/lib-dynload/_multibytecodec.cpython-38-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# functions
def __create_codec(*args, **kwargs): # real signature unknown
pass
# classes
class MultibyteIncrementalDecoder(object):
# no doc
def decode(self, *args, **kwargs): # real signature unknown
pass
def getstate(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def setstate(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how to treat errors"""
class MultibyteIncrementalEncoder(object):
# no doc
def encode(self, *args, **kwargs): # real signature unknown
pass
def getstate(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def setstate(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how to treat errors"""
class MultibyteStreamReader(object):
# no doc
def read(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def readlines(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how to treat errors"""
stream = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class MultibyteStreamWriter(object):
# no doc
def reset(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def writelines(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how to treat errors"""
stream = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
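# These classes back the CJK codecs; a hedged sketch of the usual route, via
# the codecs module rather than this C module directly:
#   import codecs
#   dec = codecs.getincrementaldecoder('gb2312')()
#   dec.decode(b'\xc4\xe3\xba\xc3')  # -> u'\u4f60\u597d'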
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f4bd3248460>'
__spec__ = None # (!) real value is "ModuleSpec(name='_multibytecodec', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f4bd3248460>, origin='/usr/lib/python3.8/lib-dynload/_multibytecodec.cpython-38-x86_64-linux-gnu.so')"
| [
"[email protected]"
] | |
f6342e270e1ecba03ce71951ba4143ce52143b96 | 8d753bb8f19b5b1f526b0688d3cb199b396ed843 | /osp_sai_2.1.8/system/apps/openconfig/python_module/oc_lr/local_routes/static_routes/static/next_hops/next_hop/config/__init__.py | 25073b2cc082c56c4acfc713abb347fbea0fd845 | [] | no_license | bonald/vim_cfg | f166e5ff650db9fa40b564d05dc5103552184db8 | 2fee6115caec25fd040188dda0cb922bfca1a55f | refs/heads/master | 2023-01-23T05:33:00.416311 | 2020-11-19T02:09:18 | 2020-11-19T02:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,369 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-local-routing - based on the path /local-routes/static-routes/static/next-hops/next-hop/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the next-hop
entry
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__index','__next_hop','__metric','__recurse',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__index = YANGDynClass(base=unicode, is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='uint32', is_config=True)
self.__next_hop = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}},),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='union', is_config=True)
self.__recurse = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="recurse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'local-routes', u'static-routes', u'static', u'next-hops', u'next-hop', u'config']
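  # Hedged usage sketch (values illustrative; the generated setters enforce
  # the YANG types declared above):
  #   cfg = config()
  #   cfg._set_index('NH1')
  #   cfg._set_next_hop('192.0.2.1')
  #   cfg._set_metric(10)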
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/index (string)
YANG Description: An user-specified identifier utilised to uniquely reference
the next-hop entry in the next-hop list. The value of this
index has no semantic meaning other than for referencing
the entry.
"""
return self.__index
def _set_index(self, v, load=False):
"""
Setter method for index, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/index (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_index() directly.
YANG Description: An user-specified identifier utilised to uniquely reference
the next-hop entry in the next-hop list. The value of this
index has no semantic meaning other than for referencing
the entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """index must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)""",
})
self.__index = t
if hasattr(self, '_set'):
self._set()
def _unset_index(self):
self.__index = YANGDynClass(base=unicode, is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)
def _get_next_hop(self):
"""
Getter method for next_hop, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/next_hop (union)
YANG Description: The next-hop that is to be used for the static route
- this may be specified as an IP address, an interface
or a pre-defined next-hop type - for instance, DROP or
LOCAL_LINK. When this leaf is not set, and the interface-ref
value is specified for the next-hop, then the system should
treat the prefix as though it is directly connected to the
interface.
"""
return self.__next_hop
def _set_next_hop(self, v, load=False):
"""
Setter method for next_hop, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/next_hop (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop() directly.
YANG Description: The next-hop that is to be used for the static route
- this may be specified as an IP address, an interface
or a pre-defined next-hop type - for instance, DROP or
LOCAL_LINK. When this leaf is not set, and the interface-ref
value is specified for the next-hop, then the system should
treat the prefix as though it is directly connected to the
interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}},),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop must be of a type compatible with union""",
'defined-type': "openconfig-local-routing:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}},),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='union', is_config=True)""",
})
self.__next_hop = t
if hasattr(self, '_set'):
self._set()
def _unset_next_hop(self):
self.__next_hop = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'oc-loc-rt:DROP': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}, u'LOCAL_LINK': {'@namespace': u'http://openconfig.net/yang/local-routing', '@module': u'openconfig-local-routing'}},),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='union', is_config=True)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/metric (uint32)
YANG Description: A metric which is utilised to specify the preference of
the next-hop entry when it is injected into the RIB. The
lower the metric, the more preferable the prefix is. When
this value is not specified the metric is inherited from
the default metric utilised for static routes within the
network instance in which the static routes are being
instantiated. When multiple next-hops are specified for a
static route, the metric is utilised to determine which of
the next-hops is to be installed in the RIB. When multiple
next-hops have the same metric (be it specified, or simply
the default) then these next-hops should all be installed
in the RIB
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: A metric which is utilised to specify the preference of
the next-hop entry when it is injected into the RIB. The
lower the metric, the more preferable the prefix is. When
this value is not specified the metric is inherited from
the default metric utilised for static routes within the
network instance in which the static routes are being
instantiated. When multiple next-hops are specified for a
static route, the metric is utilised to determine which of
the next-hops is to be installed in the RIB. When multiple
next-hops have the same metric (be it specified, or simply
the default) then these next-hops should all be installed
in the RIB
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='uint32', is_config=True)""",
})
self.__metric = t
if hasattr(self, '_set'):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='uint32', is_config=True)
def _get_recurse(self):
"""
Getter method for recurse, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/recurse (boolean)
YANG Description: Determines whether the next-hop should be allowed to
be looked up recursively - i.e., via a RIB entry which has
been installed by a routing protocol, or another static route
- rather than needing to be connected directly to an
interface of the local system within the current network
instance. When the interface reference specified within the
next-hop entry is set (i.e., is not null) then forwarding is
restricted to being via the interface specified - and
recursion is hence disabled.
"""
return self.__recurse
def _set_recurse(self, v, load=False):
"""
Setter method for recurse, mapped from YANG variable /local_routes/static_routes/static/next_hops/next_hop/config/recurse (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_recurse is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_recurse() directly.
YANG Description: Determines whether the next-hop should be allowed to
be looked up recursively - i.e., via a RIB entry which has
been installed by a routing protocol, or another static route
- rather than needing to be connected directly to an
interface of the local system within the current network
instance. When the interface reference specified within the
next-hop entry is set (i.e., is not null) then forwarding is
restricted to being via the interface specified - and
recursion is hence disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="recurse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """recurse must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="recurse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='boolean', is_config=True)""",
})
self.__recurse = t
if hasattr(self, '_set'):
self._set()
def _unset_recurse(self):
self.__recurse = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="recurse", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='boolean', is_config=True)
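  # pyangbind-generated accessors: expose each YANG leaf as a Python property.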
index = __builtin__.property(_get_index, _set_index)
next_hop = __builtin__.property(_get_next_hop, _set_next_hop)
metric = __builtin__.property(_get_metric, _set_metric)
recurse = __builtin__.property(_get_recurse, _set_recurse)
_pyangbind_elements = {'index': index, 'next_hop': next_hop, 'metric': metric, 'recurse': recurse, }
| [
"[email protected]"
] | |
cec0a70d5e0bcd8539d350cd0653bff447945982 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_natural.py | 32e26c958383409ba88754ec0a96fc02003e41a2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
#class header
class _NATURAL():
def __init__(self,):
self.name = "NATURAL"
self.definitions = [u'someone who was born with the right characteristics or abilities for doing a particular thing: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
669697f1390a3820e77ef4494a2cc4471ac77157 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/list_set_element_oob_1-9.py | cd195199cb937479e44117a2d333d4b0b0041d98 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | x:[int] = $Literal
x = [1, 2, 3]
x[-1] = 4
print(x[0])
print(x[1])
print(x[2])
| [
"[email protected]"
] | |
88de148d9408767a1d93796ed6d7be6a97acbda6 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/django/views/generic/__init__.py | 00119bf785b030b880c58b3e822cbc718741626b | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5939f31179cac89ab31e52eefd5b17227620f86a240cd042518355fd4e94fa89
size 822
| [
"[email protected]"
] | |
e9b75525afbde18fac325b06becac1b37aafe034 | 9c85d132b2ed8c51f021f42ed9f20652827bca45 | /source/res/scripts/client/gui/Scaleform/framework/tooltip_mgr.py | 541c0f410e6c97aa6b1628f62d9bec9ec2b250a1 | [] | no_license | Mododejl/WorldOfTanks-Decompiled | 0f4063150c7148184644768b55a9104647f7e098 | cab1b318a58db1e428811c41efc3af694906ba8f | refs/heads/master | 2020-03-26T18:08:59.843847 | 2018-06-12T05:40:05 | 2018-06-12T05:40:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,025 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/framework/tooltip_mgr.py
import logging
import Keys
from gui.Scaleform.framework.entities.abstract.ToolTipMgrMeta import ToolTipMgrMeta
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.shared import events
from gui.shared.tooltips import builders
from gui.app_loader import g_appLoader
from gui import InputHandler
_logger = logging.getLogger(__name__)
class ToolTip(ToolTipMgrMeta):
def __init__(self, settings, advComplexSettings, *noTooltipSpaceIDs):
super(ToolTip, self).__init__()
self._areTooltipsDisabled = False
self._isAllowedTypedTooltip = True
self._noTooltipSpaceIDs = noTooltipSpaceIDs
self._complex = builders.ComplexBuilder(TOOLTIPS_CONSTANTS.DEFAULT, TOOLTIPS_CONSTANTS.COMPLEX_UI, advComplexSettings)
self._builders = builders.LazyBuildersCollection(settings)
self._builders.addBuilder(builders.SimpleBuilder(TOOLTIPS_CONSTANTS.DEFAULT, TOOLTIPS_CONSTANTS.COMPLEX_UI))
self._dynamic = {}
self.__fastRedraw = False
self.__isAdvancedKeyPressed = False
self.__isComplex = False
self.__tooltipID = None
self.__args = None
self.__stateType = None
return
def show(self, data, linkage):
self.as_showS(data, linkage, self.__fastRedraw)
def handleKeyEvent(self, event):
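        # While a tooltip is visible, pressing/releasing ALT rebuilds it from the
        # cached tooltip id/args (advanced variant while ALT is held) with a fast redraw.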
tooltipType = self.__tooltipID
altPressed = event.key == Keys.KEY_LALT or event.key == Keys.KEY_RALT
self.__isAdvancedKeyPressed = event.isKeyDown() and altPressed
if tooltipType is None or not altPressed:
return
else:
args = self.__args
isSupportAdvanced = self.isSupportAdvanced(tooltipType, *args)
if isSupportAdvanced:
self.__fastRedraw = True
if self.__isComplex:
self.onCreateComplexTooltip(tooltipType, self.__stateType)
else:
self.onCreateTypedTooltip(tooltipType, args, self.__stateType)
return
def onCreateTypedTooltip(self, tooltipType, args, stateType):
if self._areTooltipsDisabled:
return
elif not self._isAllowedTypedTooltip:
return
else:
builder = self._builders.getBuilder(tooltipType)
if builder is not None:
data = builder.build(self, stateType, self.__isAdvancedKeyPressed, *args)
else:
_logger.warning('Tooltip can not be displayed: type "%s" is not found', tooltipType)
return
self.__cacheTooltipData(False, tooltipType, args, stateType)
if data is not None and data.isDynamic():
data.changeVisibility(True)
if tooltipType not in self._dynamic:
self._dynamic[tooltipType] = data
return
def onCreateComplexTooltip(self, tooltipID, stateType):
if self._areTooltipsDisabled:
return
self._complex.build(self, stateType, self.__isAdvancedKeyPressed, tooltipID)
self.__cacheTooltipData(True, tooltipID, tuple(), stateType)
def onHideTooltip(self, tooltipId):
if not self._areTooltipsDisabled and tooltipId in self._dynamic:
self._dynamic[tooltipId].changeVisibility(False)
self.__tooltipID = None
self.__fastRedraw = False
return
def _populate(self):
super(ToolTip, self)._populate()
g_appLoader.onGUISpaceEntered += self.__onGUISpaceEntered
self.addListener(events.AppLifeCycleEvent.CREATING, self.__onAppCreating)
InputHandler.g_instance.onKeyDown += self.handleKeyEvent
InputHandler.g_instance.onKeyUp += self.handleKeyEvent
def _dispose(self):
self._builders.clear()
g_appLoader.onGUISpaceEntered -= self.__onGUISpaceEntered
self.removeListener(events.AppLifeCycleEvent.CREATING, self.__onAppCreating)
while self._dynamic:
_, data = self._dynamic.popitem()
data.stopUpdates()
InputHandler.g_instance.onKeyDown -= self.handleKeyEvent
InputHandler.g_instance.onKeyUp -= self.handleKeyEvent
super(ToolTip, self)._dispose()
def __onGUISpaceEntered(self, spaceID):
self._isAllowedTypedTooltip = spaceID not in self._noTooltipSpaceIDs
def __onAppCreating(self, appNS):
if self.app.appNS != appNS:
self._areTooltipsDisabled = True
def isSupportAdvanced(self, tooltipType, *args):
builder = self._complex if self.__isComplex else self._builders.getBuilder(tooltipType)
return False if builder is None else builder.supportAdvanced(tooltipType, *args)
def __cacheTooltipData(self, isComplex, tooltipID, args, stateType):
self.__isComplex = isComplex
self.__tooltipID = tooltipID
self.__args = args
self.__stateType = stateType
| [
"[email protected]"
] | |
91bde4973cd20d507e01377170f7db767aba6dc2 | ca59d18e503ef22fbc920c6de48ffc8eac5a1443 | /tools/Polygraphy/tests/util/test_misc.py | fc3978c829d4d1d3d5bd3621ac14202fb4aa9d29 | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | boh-inspur/TensorRT | 9fc0ae0ad4e31da040d10728b63d9dc284852b67 | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | refs/heads/master | 2023-04-13T21:24:13.912673 | 2021-04-23T09:55:18 | 2021-04-23T09:55:18 | 265,431,588 | 0 | 0 | Apache-2.0 | 2021-04-23T09:55:19 | 2020-05-20T02:49:58 | null | UTF-8 | Python | false | false | 3,213 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.util import misc
import numpy as np
import pytest
VOLUME_CASES = [
((1, 1, 1), 1),
((2, 3, 4), 24),
(tuple(), 1),
]
@pytest.mark.parametrize("case", VOLUME_CASES)
def test_volume(case):
it, vol = case
assert misc.volume(it) == vol
class FindInDictCase(object):
def __init__(self, name, map, index, expected):
self.name = name
self.map = map
self.index = index
self.expected = expected
FIND_IN_DICT_CASES = [
FindInDictCase("resnet50_v1.5/output/Softmax:0", map={"resnet50_v1.5/output/Softmax:0": "x"}, index=None, expected="resnet50_v1.5/output/Softmax:0"),
FindInDictCase("resnet50_v1.5/output/Softmax:0", map={"resnet50_v1.5/output/softmax:0": "x"}, index=None, expected="resnet50_v1.5/output/softmax:0"),
]
@pytest.mark.parametrize("case", FIND_IN_DICT_CASES)
def test_find_in_dict(case):
actual = misc.find_in_dict(case.name, case.map, case.index)
assert actual == case.expected
SHAPE_OVERRIDE_CASES = [
((1, 3, 224, 224), (None, 3, 224, 224), True),
]
@pytest.mark.parametrize("case", SHAPE_OVERRIDE_CASES)
def test_is_valid_shape_override(case):
override, shape, expected = case
assert misc.is_valid_shape_override(new_shape=override, original_shape=shape) == expected
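# Each case is (input_array, target_shape, expected_result_shape) for try_match_shape.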
SHAPE_MATCHING_CASES = [
(np.zeros((1, 1, 3, 3)), (3, 3), (3, 3)), # Squeeze array shape
(np.zeros((1, 3, 3, 1)), (1, 1, 3, 3), (1, 1, 3, 3)), # Permute
(np.zeros((3, 3)), (1, 1, 3, 3), (3, 3)), # Squeeze specified shape
(np.zeros((3, 3)), (-1, 3), (3, 3)), # Infer dynamic
(np.zeros((3 * 224 * 224)), (None, 3, 224, 224), (1, 3, 224, 224)), # Reshape and Permute
(np.zeros((1, 3, 224, 224)), (None, 224, 224, 3), (1, 224, 224, 3)), # Permute
]
@pytest.mark.parametrize("case", SHAPE_MATCHING_CASES)
def test_shape_matching(case):
out, shape, expected_shape = case
out = misc.try_match_shape(out, shape)
assert out.shape == expected_shape
UNPACK_ARGS_CASES = [
((0, 1, 2), 3, (0, 1, 2)), # no extras
((0, 1, 2), 4, (0, 1, 2, None)), # 1 extra
((0, 1, 2), 2, (0, 1)), # 1 fewer
]
@pytest.mark.parametrize("case", UNPACK_ARGS_CASES)
def test_unpack_args(case):
args, num, expected = case
assert misc.unpack_args(args, num) == expected
UNIQUE_LIST_CASES = [
([], []),
([3, 1, 2], [3, 1, 2]),
([1, 2, 3, 2, 1], [1, 2, 3]),
([0, 0, 0, 0, 1, 0, 0], [0, 1]),
([5, 5, 5, 5, 5], [5]),
]
@pytest.mark.parametrize("case", UNIQUE_LIST_CASES)
def test_unique_list(case):
lst, expected = case
assert misc.unique_list(lst) == expected
| [
"[email protected]"
] | |
280bb1419d313f14f695994d51b7b7c91de537e3 | 834d7ea5179414f17d37f3bb58164b8f6ac11b24 | /python/ThirteenTeV/DisappTrksAMSB/createPoints.py | 579c1c5a331e3b70880d28547c2fb1073e64627e | [] | no_license | diracyoon/genproductions | aa9ee41ac1dde9e14ed039496c3259328ece7073 | a7740f4d28c7bfff4e71827dc807d57d974e29b7 | refs/heads/master | 2021-01-11T11:22:44.685243 | 2018-11-23T14:05:36 | 2018-11-23T14:05:36 | 72,719,084 | 1 | 0 | null | 2016-11-03T07:21:29 | 2016-11-03T07:21:29 | null | UTF-8 | Python | false | false | 2,089 | py | #!/usr/bin/env python
import os
def insertSLHA(outputName, massValue):
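    # Splice the SLHA mass spectrum into the config fragment right after its
    # first five lines, then append the rest of the original file unchanged.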
with open(outputName, 'r+') as f:
for x in range(5):
f.readline()
pos = f.tell()
f_remainder = f.read()
f.seek(pos)
with open('slha/AMSB_chargino_%dGeV_Isajet780.slha' % massValue, 'r') as slhaFile:
f.write(slhaFile.read())
f.write(f_remainder)
def findMassValue(fileName, particleName):
inputFile = open(fileName, 'r')
for line in inputFile:
if particleName in line:
return line.split()[1]
baseConfigFile = 'AMSB_chargino_M-XXXGeV_CTau-YYYcm_TuneCP5_13TeV_pythia8_cff.py'
baseParticleFile = 'geant4_AMSB_chargino.slha'
c = 299792458.0 * 100.0 # cm/s
# xsecs[mass in GeV] = xsec (pb)
xsecs = {
100 : 34.282,
200 : 2.709959,
300 : 0.577095,
400 : 0.179644,
500 : 0.06848,
600 : 0.029636,
700 : 0.013949,
800 : 0.0069704,
900 : 0.00364968
}
ctaus = [10, 100, 1000, 10000] # cm
for mass in xsecs:
for ctau in ctaus:
outputConfigFile = 'test/AMSB_chargino_M-%dGeV_CTau-%dcm_TuneCP5_13TeV_pythia8_cff.py' % (mass, ctau)
outputParticleFile = 'test/geant4_AMSB_chargino_%dGeV_ctau%dcm.slha' % (mass, ctau)
os.system('sed "s/XXX/' + str(mass) + '/g" ' + baseConfigFile + ' > ' + outputConfigFile)
os.system('sed -i "s/YYY/' + str(int(ctau * 10.0)) + '/g" ' + outputConfigFile) # mm
os.system('sed -i "s/ZZZ/' + str(xsecs[mass]) + '/g" ' + outputConfigFile)
insertSLHA(outputConfigFile, mass)
mW1ss = findMassValue(outputConfigFile, 'w1ss')
mZ1ss = findMassValue(outputConfigFile, 'z1ss')
tau = ctau / c * 1.e9 # ns
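        # hbar*c = 1.97326979e-14 GeV*cm, so the decay width in GeV is
        # hbar*c divided by the proper decay length ctau in cm.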
width = (1.97326979e-14 / ctau) # GeV
os.system('sed "s/_MW1SS/' + str(mW1ss) + '/g" ' + baseParticleFile + ' > ' + outputParticleFile)
os.system('sed -i "s/_MZ1SS/' + str(mZ1ss) + '/g" ' + outputParticleFile)
os.system('sed -i "s/_CTAU/' + str(ctau) + '/g" ' + outputParticleFile)
os.system('sed -i "s/_TAU/' + str(tau) + '/g" ' + outputParticleFile)
os.system('sed -i "s/_WIDTH/' + str(width) + '/g" ' + outputParticleFile)
print 'Created configuration fragments and particle files in directory: ' + os.getcwd() + '/test/'
| [
"[email protected]"
] | |
675ef227f915e22fa1d680d51d7ffb08fa130735 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/pliabl.py | 157b7fecb973a84a5b8b21fe598713d03a5ba164 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 158 | py | ii = [('RennJIT.py', 2), ('AubePRP.py', 1), ('AdamWEP.py', 3), ('CarlTFR.py', 1), ('LandWPA.py', 2), ('MedwTAI.py', 2), ('WilkJMC.py', 1), ('MartHRW2.py', 1)] | [
"[email protected]"
] | |
673f619abea67c616bf67a61024d4c5ad5f2befe | 698176804e16c7ae59f66ccebdff746f74998662 | /python/piketty/generatetmslice.py | 62a1d462742ae093a247c9e9e1d7dd2c61d159c3 | [
"MIT"
] | permissive | tedunderwood/GenreProject | a7e30883123523b967214af28f4a137c60f3564b | 7577f39f0cc89b9e85d0fbe67ae3e7797033588f | refs/heads/master | 2021-01-23T19:12:59.786921 | 2016-03-17T16:11:36 | 2016-03-17T16:11:36 | 10,231,792 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | with open('/Users/tunder/Dropbox/GenreProject/metadata/topicmodelingsample.tsv', encoding = 'utf-8') as f:
filelines = f.readlines()
with open('/Users/tunder/Dropbox/GenreProject/metadata/tmslice.txt', mode = 'w', encoding = 'utf-8') as f:
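    # Skip the header row and keep only the first tab-separated column (the label).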
for line in filelines[1:]:
label = line.split('\t')[0]
f.write(label + '\n')
| [
"[email protected]"
] | |
223b84417602ea6d36d639b9d8f1ed616d760904 | fd55b40f4279c25c0d06dccd6bcd77b9a7dbff28 | /bivariate2/plotting/plot_dkgrid.py | b3d619043556f39ab6cdb029873b011d26915a84 | [] | no_license | albertfxwang/mypytools | 0e30420e2a570a3e18564fd28fc56eb55ecccbf9 | c6cdc8f1914cbc3a5b94dd25501782a3daf542f5 | refs/heads/master | 2020-04-10T19:06:54.193920 | 2015-04-20T01:36:37 | 2015-04-20T01:36:37 | 30,038,302 | 1 | 1 | null | 2015-01-29T19:55:09 | 2015-01-29T19:55:09 | null | UTF-8 | Python | false | false | 8,296 | py | #!/usr/bin/env python
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
import pickle_util as pu
from pygoods import *
from my_mplfonts import Helvetica
from bivariate2 import dropout_kernels as dk
from scipy.interpolate import interp1d, RectBivariateSpline, UnivariateSpline
import pickle_util as pu
import os
import pmscolors
class PlotDKGrid(object):
"""
A class for making plots of the dropout-selection kernel grid.
"""
def __init__(self, dkgrid, field):
self.dkgrid = dkgrid
self.field = field
# self.grid are for showing things like selection/detection completeness
# in each (M, logR) bin
self.grid = np.zeros((dkgrid.Nx, dkgrid.Ny))
if dkgrid.interpolated:
self.zlimits = dkgrid.zlimits_old
self.dz = dkgrid.dz_old
self.Nz = dkgrid.Nz_old
else:
self.zlimits = dkgrid.zlimits
self.dz = dkgrid.dz
self.Nz = dkgrid.Nz
self.grid_zedges = np.linspace(self.zlimits[0], self.zlimits[1],
num=self.Nz+1)
self.grid_shape = self.grid.shape
self.vmin = 0.
self.vmax = 1.
self.xname = dkgrid.xname
self.yname = dkgrid.yname
self.xticks = dkgrid.xedges()[::2]
self.yticks = dkgrid.yedges()[::2]
self.title = ""
def combine_kernels(self, m1500_lim, logr_lim):
"""
Combine the statistics (Ninput, Ndetect, Nselect) from all kernels
within the specified range.
"""
assert (m1500_lim[1] > m1500_lim[0])
assert (logr_lim[1] > logr_lim[0])
# Find the range of kernel indices
i0 = self.dkgrid.find_bin(m1500_lim[0], logr_lim[0])
i1 = self.dkgrid.find_bin(m1500_lim[1], logr_lim[1])
# Now add all kernels within the range to klist
klist = []
for ix in range(i0[0], i1[0]+1):
for iy in range(i0[1], i1[1]+1):
klist += [self.dkgrid.kernels[(ix, iy)]]
# Figure out what the redshift range is
# Assume that self.dkgrid.interpolated == True
zrange = np.linspace(*self.zlimits, num=self.Nz)
zcenters = zrange + self.dz / 2.
n_zbins = len(zcenters)
# Add the statistics from all kernels
Ninput = np.zeros(n_zbins, 'int')
Ndetect = np.zeros(n_zbins, 'int')
Nselect = np.zeros(n_zbins, 'int')
for k in klist:
Ninput = Ninput + k.Ninput
Ndetect = Ndetect + k.Ndetect
Nselect = Nselect + k.Nselect
return zcenters, Ninput, Ndetect, Nselect
def plot_Pz_single(self, m1500_lim, logr_lim, ax=None, **plot_kwargs):
"""
Plot P(z) for a combination of kernels within the specified limits.
"""
zcenters0, N_in, N_det, N_sel = self.combine_kernels(m1500_lim, logr_lim)
# interpolate
Pz0 = N_sel / np.maximum(N_in, 1).astype('float')
f = interp1d(zcenters0, Pz0, kind='cubic')
# evaluate at the new redshift values
Pz_new = f(self.dkgrid.zcenters())
Pz_new = np.maximum(Pz_new, 0.)
# Now plot
        if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.dkgrid.zcenters(), Pz_new, **plot_kwargs)
ax.set_ylim(0., 1.)
return ax
def show_grid(self, ax=None, vmin=-1., vmax=-1.):
"""
Show self.grid
"""
        if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if vmin < 0: vmin = self.vmin
if vmax < 0: vmax = self.vmax
m = ax.imshow(self.grid.T,
extent=(0,self.grid_shape[0],0,self.grid_shape[1]),
vmin=vmin, vmax=vmax)
ax.set_title(self.title, size=20)
ax.set_xticks(np.arange(self.dkgrid.Nx)[::2])
ax.set_xticklabels(map(lambda x:'%.1f'%x, self.xticks))
ax.set_yticks(np.arange(self.dkgrid.Ny)[::2])
ax.set_yticklabels(map(lambda y:'%.1f'%y, self.yticks))
ax.set_xlabel(self.xname, size=16)
ax.set_ylabel(self.yname, size=16)
plt.colorbar(m)
def selection_completeness(self, z0=None, z1=None, detect_only=False, show=True, ax=None, vmax=-1):
        if z0 is None:
            z0 = self.zlimits[0]
        if z1 is None:
            z1 = self.zlimits[1]
assert (z1 - z0) > self.dz, "z1 must be at least z0 + self.dz"
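        # Map [z0, z1] onto kernel z-bin indices, clamped to the grid range.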
iz0 = np.searchsorted(self.grid_zedges, z0) - 1
iz0 = np.maximum(iz0, 0)
iz0 = np.minimum(iz0, self.Nz-1)
iz1 = np.searchsorted(self.grid_zedges, z1) - 1
iz1 = np.maximum(iz1, 0)
iz1 = np.minimum(iz1, self.Nz-1)
for k in self.dkgrid.kernels:
kern = self.dkgrid.kernels[k]
if detect_only:
N_input = kern.Ndetect[iz0:iz1+1].sum()
else:
N_input = kern.Ninput[iz0:iz1+1].sum()
N_select = kern.Nselect[iz0:iz1+1].sum()
comp = float(N_select) / float(np.maximum(N_input, 1))
self.grid[k[0], k[1]] = comp
self.title = "%s\nSelection Completeness z=[%.1f,%.1f] in %s" % (self.dkgrid.filename, z0, z1, self.field.upper())
self.vmin = 0.
if vmax < 0:
self.vmax = self.grid.max()
else:
self.vmax = vmax
if show:
self.show_grid(ax=ax)
## Define arguments for plotting P(z) grid
## The panel grid should have this arrangement:
## -------------------
## 1 | 2 |
## -------------------
## 3 | 4 |
## -------------------
dkgrid_dir = '/Users/khuang/Dropbox/Research/bivariate/bivariate_fit/dropsim_kernels'
dkgrid3 = pu.load_pickle(os.path.join(dkgrid_dir, 'udrops_gds_deep_kgrid_m2h_140815.p'))
dkgrid4 = pu.load_pickle(os.path.join(dkgrid_dir, 'bdrops_goods_kgrid_140317.p'))
dkgrid5 = pu.load_pickle(os.path.join(dkgrid_dir, 'vdrops_goods_kgrid_140402.p'))
dkgrid6 = pu.load_pickle(os.path.join(dkgrid_dir, 'idrops_gds_deep_kgrid_140228.p'))
dkgrid_list = [dkgrid3, dkgrid4, dkgrid5, dkgrid6]
pms = pmscolors.pmscolors()
colors = map(pms, ['Blue Purples', 'Periwinkle', 'Olive Green', 'Bright Red'])
m1500_lims = np.array([[-23.,-21.], [-21.,-19.], [-23.,-21.], [-21.,-19]])
logr_lims = np.array([[-0.5,0.5], [-0.5,0.5], [-1.5,0.5], [-1.5,0.5]])
def plot_dkgrid_multiple(dkgrids=dkgrid_list, colors=colors, m1500_lims=m1500_lims, logr_lims=logr_lims, nrows=2, zrange=[2.,7.], axes_pad=0.1):
"""
Plot multiple P(z) kernels on a grid of plots. All P(z) kernels are supposed
to be for different redshift ranges but in the same field.
dkgrids -- a list of P(z) kernel grids
colors -- colors for each curve (same across all panels)
## len(dkgrids) == len(colors) is the number of kernel grids shown in each panel.
m1500_lims -- the M_1500 limits in each panel
logr_lims -- the log(R) limits (in arcsec) in each panel
## len(m1500_lims) == len(logr_lims) is the number of panels.
nrows -- the number of rows for the grid of plots
zrange -- the redshift range shown in each panel
"""
fig = plt.figure(figsize=(11,9))
ngrids = len(m1500_lims)
ncols = ngrids / nrows
print ngrids % nrows
if ngrids % nrows > 0:
ncols += 1
# First initialize each kernel grids
plots = []
for i in range(len(dkgrids)):
plots += [PlotDKGrid(dkgrids[i], 'DEEP')]
grid = AxesGrid(fig, 111,
nrows_ncols=(nrows, ncols),
axes_pad=axes_pad,
share_all=True,
label_mode="L",
aspect=False)
for j in range(len(m1500_lims)):
ax_j = grid[j]
for i in range(len(dkgrids)):
plots[i].plot_Pz_single(m1500_lims[j], logr_lims[j], ax=ax_j,
lw=2, color=colors[i])
bintext = r"$%.1f \leq M_{1500} \leq %.1f$" % tuple(m1500_lims[j])
bintext = bintext + '\n'
bintext = bintext + r"$%.1f \leq \log R_e \leq %.1f$" % tuple(logr_lims[j])
ax_j.text(0.95, 0.95, bintext, ha='right', va='top', size='large',
transform=ax_j.transAxes,
bbox=dict(boxstyle='round', facecolor='none'))
ax_j.set_xlim(zrange)
ax_j.set_ylim(0., 1.1)
ax_j.set_xlabel('Redshift')
ax_j.set_xticks(np.linspace(3., 6., 4))
ax_j.set_ylabel('P(z)')
ax_j.set_yticks(np.linspace(0.25, 1.0, 4))
plt.draw()
return grid
| [
"[email protected]"
] | |
df60d9fd72aee467d847e5b7e7a8ec3c8ae8a680 | 7f20b1bddf9f48108a43a9922433b141fac66a6d | /csplugins/trunk/ucsd/rsaito/rs_Progs/rs_Python/rs_Python_Pack/tags/rs_Python_Pack090515/IVV_Packages/YO_IP/example2.py | 4744447119df04b6f11f0fa2c5a5507d576157f6 | [] | no_license | ahdahddl/cytoscape | bf783d44cddda313a5b3563ea746b07f38173022 | a3df8f63dba4ec49942027c91ecac6efa920c195 | refs/heads/master | 2020-06-26T16:48:19.791722 | 2013-08-28T04:08:31 | 2013-08-28T04:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/env python
from lpsolve55 import *
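# Build and solve a small LP with the lpsolve55 API:
#   minimize  x1 + 3*x2 + 6.24*x3 + 0.1*x4
# subject to the three named row constraints and the column bounds set below.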
lp = lpsolve('make_lp', 0, 4)
#lpsolve('set_verbose', lp, IMPORTANT)
lpsolve('set_obj_fn', lp, [1, 3, 6.24, 0.1])
lpsolve('add_constraint', lp, [0, 78.26, 0, 2.9], GE, 92.3)
lpsolve('add_constraint', lp, [0.24, 0, 11.31, 0], LE, 14.8)
lpsolve('add_constraint', lp, [12.68, 0, 0.08, 0.9], GE, 4)
lpsolve('set_lowbo', lp, [28.6, 0, 0, 18])
lpsolve('set_upbo', lp, [Infinite, Infinite, Infinite, 48.98])
lpsolve('set_col_name', lp, ['COLONE', 'COLTWO', 'COLTHREE', 'COLFOUR'])
lpsolve('set_row_name', lp, ['THISROW', 'THATROW', 'LASTROW'])
lpsolve('write_lp', lp, 'a.lp')
print lpsolve('get_mat', lp)[0]
lpsolve('solve', lp)
print lpsolve('get_objective', lp)
print lpsolve('get_variables', lp)[0]
print lpsolve('get_constraints', lp)[0]
lpsolve('delete_lp', lp)
| [
"rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5"
] | rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5 |
7e6d9fee92beb15c5d4c3f6610393baa76668ab6 | 05643b9b4d20db912c3dbfbc191cadea3143016c | /instrumentation/opentelemetry-instrumentation-grpc/tests/test_client_interceptor_filter.py | a15268464b46f90712c7b6a39cc406d65ca2390f | [
"Apache-2.0"
] | permissive | open-telemetry/opentelemetry-python-contrib | 35566cd088aa0b23ca977109fcd435ee480784b9 | 0871dd455c0adfa125a2f258a0b55c47a5da5227 | refs/heads/main | 2023-08-26T07:30:40.212226 | 2023-08-21T16:42:12 | 2023-08-21T16:42:12 | 220,524,743 | 476 | 401 | Apache-2.0 | 2023-09-14T21:36:33 | 2019-11-08T18:23:43 | Python | UTF-8 | Python | false | false | 23,591 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import grpc
from tests.protobuf import ( # pylint: disable=no-name-in-module
test_server_pb2_grpc,
)
import opentelemetry.instrumentation.grpc
from opentelemetry import context, trace
from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient, filters
from opentelemetry.instrumentation.grpc._client import (
OpenTelemetryClientInterceptor,
)
from opentelemetry.instrumentation.grpc.grpcext._interceptor import (
_UnaryClientInfo,
)
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
from opentelemetry.propagate import get_global_textmap, set_global_textmap
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.mock_textmap import MockTextMapPropagator
from opentelemetry.test.test_base import TestBase
from ._client import (
bidirectional_streaming_method,
client_streaming_method,
server_streaming_method,
simple_method,
simple_method_future,
)
from ._server import create_test_server
from .protobuf.test_server_pb2 import Request
# User defined interceptor. Is used in the tests along with the opentelemetry client interceptor.
class Interceptor(
grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor,
):
def __init__(self):
pass
def intercept_unary_unary(
self, continuation, client_call_details, request
):
return self._intercept_call(continuation, client_call_details, request)
def intercept_unary_stream(
self, continuation, client_call_details, request
):
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(
continuation, client_call_details, request_iterator
)
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(
continuation, client_call_details, request_iterator
)
@staticmethod
def _intercept_call(
continuation, client_call_details, request_or_iterator
):
return continuation(client_call_details, request_or_iterator)
class TestClientProtoFilterMethodName(TestBase):
def setUp(self):
super().setUp()
GrpcInstrumentorClient(
filter_=filters.method_name("SimpleMethod")
).instrument()
self.server = create_test_server(25565)
self.server.start()
# use a user defined interceptor along with the opentelemetry client interceptor
interceptors = [Interceptor()]
self.channel = grpc.insecure_channel("localhost:25565")
self.channel = grpc.intercept_channel(self.channel, *interceptors)
self._stub = test_server_pb2_grpc.GRPCTestServerStub(self.channel)
def tearDown(self):
super().tearDown()
GrpcInstrumentorClient().uninstrument()
self.server.stop(None)
self.channel.close()
def test_unary_unary_future(self):
simple_method_future(self._stub).result()
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
def test_unary_unary(self):
simple_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "SimpleMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_unary_stream(self):
server_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_stream_unary(self):
client_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_stream_stream(self):
bidirectional_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_simple(self):
with self.assertRaises(grpc.RpcError):
simple_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_error_stream_unary(self):
with self.assertRaises(grpc.RpcError):
client_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_unary_stream(self):
with self.assertRaises(grpc.RpcError):
server_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_stream_stream(self):
with self.assertRaises(grpc.RpcError):
bidirectional_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_client_interceptor_trace_context_propagation(
self,
): # pylint: disable=no-self-use
"""ensure that client interceptor correctly inject trace context into all outgoing requests."""
previous_propagator = get_global_textmap()
try:
set_global_textmap(MockTextMapPropagator())
interceptor = OpenTelemetryClientInterceptor(trace.NoOpTracer())
carrier = tuple()
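            # The invoker stands in for the real channel: it captures the
            # metadata the interceptor injects so the assertions below can
            # check the propagated trace context.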
def invoker(request, metadata):
nonlocal carrier
carrier = metadata
return {}
request = Request(client_id=1, request_data="data")
interceptor.intercept_unary(
request,
{},
_UnaryClientInfo(
full_method="/GRPCTestServer/SimpleMethod", timeout=None
),
invoker=invoker,
)
assert len(carrier) == 2
assert carrier[0][0] == "mock-traceid"
assert carrier[0][1] == "0"
assert carrier[1][0] == "mock-spanid"
assert carrier[1][1] == "0"
finally:
set_global_textmap(previous_propagator)
class TestClientProtoFilterMethodPrefix(TestBase):
def setUp(self):
super().setUp()
GrpcInstrumentorClient(
filter_=filters.method_prefix("Simple")
).instrument()
self.server = create_test_server(25565)
self.server.start()
# use a user defined interceptor along with the opentelemetry client interceptor
interceptors = [Interceptor()]
self.channel = grpc.insecure_channel("localhost:25565")
self.channel = grpc.intercept_channel(self.channel, *interceptors)
self._stub = test_server_pb2_grpc.GRPCTestServerStub(self.channel)
def tearDown(self):
super().tearDown()
GrpcInstrumentorClient().uninstrument()
self.server.stop(None)
self.channel.close()
def test_unary_unary_future(self):
simple_method_future(self._stub).result()
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
def test_unary_unary(self):
simple_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "SimpleMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_unary_stream(self):
server_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_stream_unary(self):
client_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_stream_stream(self):
bidirectional_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_simple(self):
with self.assertRaises(grpc.RpcError):
simple_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_error_stream_unary(self):
with self.assertRaises(grpc.RpcError):
client_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_unary_stream(self):
with self.assertRaises(grpc.RpcError):
server_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_error_stream_stream(self):
with self.assertRaises(grpc.RpcError):
bidirectional_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_client_interceptor_trace_context_propagation(
self,
): # pylint: disable=no-self-use
"""ensure that client interceptor correctly inject trace context into all outgoing requests."""
previous_propagator = get_global_textmap()
try:
set_global_textmap(MockTextMapPropagator())
interceptor = OpenTelemetryClientInterceptor(trace.NoOpTracer())
carrier = tuple()
def invoker(request, metadata):
nonlocal carrier
carrier = metadata
return {}
request = Request(client_id=1, request_data="data")
interceptor.intercept_unary(
request,
{},
_UnaryClientInfo(
full_method="/GRPCTestServer/SimpleMethod", timeout=None
),
invoker=invoker,
)
assert len(carrier) == 2
assert carrier[0][0] == "mock-traceid"
assert carrier[0][1] == "0"
assert carrier[1][0] == "mock-spanid"
assert carrier[1][1] == "0"
finally:
set_global_textmap(previous_propagator)
class TestClientProtoFilterByEnv(TestBase):
def setUp(self):
with mock.patch.dict(
os.environ,
{
"OTEL_PYTHON_GRPC_EXCLUDED_SERVICES": "GRPCMockServer,GRPCTestServer"
},
):
super().setUp()
GrpcInstrumentorClient().instrument()
self.server = create_test_server(25565)
self.server.start()
# use a user defined interceptor along with the opentelemetry client interceptor
interceptors = [Interceptor()]
self.channel = grpc.insecure_channel("localhost:25565")
self.channel = grpc.intercept_channel(self.channel, *interceptors)
self._stub = test_server_pb2_grpc.GRPCTestServerStub(self.channel)
def tearDown(self):
super().tearDown()
GrpcInstrumentorClient().uninstrument()
self.server.stop(None)
self.channel.close()
def test_unary_unary_future(self):
simple_method_future(self._stub).result()
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
def test_unary_unary(self):
simple_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
class TestClientProtoFilterByEnvAndOption(TestBase):
def setUp(self):
with mock.patch.dict(
os.environ,
{"OTEL_PYTHON_GRPC_EXCLUDED_SERVICES": "GRPCMockServer"},
):
super().setUp()
GrpcInstrumentorClient(
filter_=filters.service_prefix("GRPCTestServer")
).instrument()
self.server = create_test_server(25565)
self.server.start()
# use a user defined interceptor along with the opentelemetry client interceptor
interceptors = [Interceptor()]
self.channel = grpc.insecure_channel("localhost:25565")
self.channel = grpc.intercept_channel(self.channel, *interceptors)
self._stub = test_server_pb2_grpc.GRPCTestServerStub(self.channel)
def tearDown(self):
super().tearDown()
GrpcInstrumentorClient().uninstrument()
self.server.stop(None)
self.channel.close()
def test_unary_unary_future(self):
simple_method_future(self._stub).result()
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
def test_unary_unary(self):
simple_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/SimpleMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "SimpleMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_unary_stream(self):
server_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/ServerStreamingMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "ServerStreamingMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_stream_unary(self):
client_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "/GRPCTestServer/ClientStreamingMethod")
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "ClientStreamingMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_stream_stream(self):
bidirectional_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(
span.name, "/GRPCTestServer/BidirectionalStreamingMethod"
)
self.assertIs(span.kind, trace.SpanKind.CLIENT)
# Check version and name in span's instrumentation info
self.assertEqualSpanInstrumentationInfo(
span, opentelemetry.instrumentation.grpc
)
self.assertSpanHasAttributes(
span,
{
SpanAttributes.RPC_METHOD: "BidirectionalStreamingMethod",
SpanAttributes.RPC_SERVICE: "GRPCTestServer",
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[
0
],
},
)
def test_error_simple(self):
with self.assertRaises(grpc.RpcError):
simple_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_error_stream_unary(self):
with self.assertRaises(grpc.RpcError):
client_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_error_unary_stream(self):
with self.assertRaises(grpc.RpcError):
server_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_error_stream_stream(self):
with self.assertRaises(grpc.RpcError):
bidirectional_streaming_method(self._stub, error=True)
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIs(
span.status.status_code,
trace.StatusCode.ERROR,
)
def test_client_interceptor_trace_context_propagation(
self,
): # pylint: disable=no-self-use
"""ensure that client interceptor correctly inject trace context into all outgoing requests."""
previous_propagator = get_global_textmap()
try:
set_global_textmap(MockTextMapPropagator())
interceptor = OpenTelemetryClientInterceptor(trace.NoOpTracer())
carrier = tuple()
def invoker(request, metadata):
nonlocal carrier
carrier = metadata
return {}
request = Request(client_id=1, request_data="data")
interceptor.intercept_unary(
request,
{},
_UnaryClientInfo(
full_method="/GRPCTestServer/SimpleMethod", timeout=None
),
invoker=invoker,
)
assert len(carrier) == 2
assert carrier[0][0] == "mock-traceid"
assert carrier[0][1] == "0"
assert carrier[1][0] == "mock-spanid"
assert carrier[1][1] == "0"
finally:
set_global_textmap(previous_propagator)
def test_unary_unary_with_suppress_key(self):
token = context.attach(
context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
simple_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
finally:
context.detach(token)
self.assertEqual(len(spans), 0)
def test_unary_stream_with_suppress_key(self):
token = context.attach(
context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
server_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
finally:
context.detach(token)
self.assertEqual(len(spans), 0)
def test_stream_unary_with_suppress_key(self):
token = context.attach(
context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
client_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
finally:
context.detach(token)
self.assertEqual(len(spans), 0)
def test_stream_stream_with_suppress_key(self):
token = context.attach(
context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
bidirectional_streaming_method(self._stub)
spans = self.memory_exporter.get_finished_spans()
finally:
context.detach(token)
self.assertEqual(len(spans), 0)
| [
"[email protected]"
] | |
31d0b3108fb5597fc1566914fe1a6fd5bae45ff6 | 9db1103b05dc5053a984c2f46491e71216cbe13d | /everest/cascade/hierarchy.py | 52bb27c03bc4a861f478cc873914137eda4e144c | [
"MIT"
] | permissive | rsbyrne/everest | f396d11743fc0633992bb49bf40d6d5851c3fffa | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | refs/heads/master | 2023-07-27T12:55:06.426748 | 2021-06-18T00:31:21 | 2021-06-18T00:31:21 | 222,559,267 | 2 | 1 | MIT | 2021-06-18T00:31:22 | 2019-11-18T22:43:49 | Python | UTF-8 | Python | false | false | 4,292 | py | ###############################################################################
''''''
###############################################################################
from collections.abc import Mapping as _Mapping
from functools import lru_cache as _lru_cache
from . import _reseed
def flatten_hierarchy(hierarchy):
return dict(_flatten_hierarchy(hierarchy))
def _flatten_hierarchy(hierarchy):
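    # Depth-first generator: recurse into sub-hierarchies and yield
    # (key, value) pairs for the leaf Item entries.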
for k, v in hierarchy.items():
if isinstance(v, Hierarchy):
for sk, sv in _flatten_hierarchy(v):
yield sk, sv
else:
yield k, v.value
class Item:
__slots__ = ('key', '_value')
def __init__(self, key, val, /):
self.key = key
self._value = val
@property
def value(self):
return self._value
@value.setter
def value(self, newval):
self._value = newval
def __str__(self):
return repr(self.value)
def __repr__(self):
return f'{type(self).__name__}({self.key}: {str(self)})'
class Hierarchy(_Mapping):
__slots__ = ('content', 'parent', 'subs', 'randhash')
# def __init__(self, *args, parent=None, **kwargs):
# super().__init__(*args, **kwargs)
def __init__(self, *args, parent = None, **kwargs):
self.content = dict(*args, **kwargs)
self.parent = parent
self.subs = dict()
self.randhash = _reseed.rdigits()
def flatten(self) -> dict:
return flatten_hierarchy(self)
# def remove_ghosts(self):
# for key, val in list(self.items()):
# if key.startswith('_'):
# del self[key]
# elif isinstance(val, type(self)):
# val.remove_ghosts()
def sub(self, key) -> 'Hierarchy':
self.subs[key] = subhier = type(self)(parent=self)
self.content.__setitem__(key, subhier)
return subhier
def __iter__(self):
return iter(self.content)
def __len__(self):
return len(self.content)
def __getitem__(self, arg, /):
out = self.raw_getitem(arg)
if isinstance(out, Item):
return out.value
return out
@_lru_cache
def raw_getitem(self, arg) -> Item:
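        # Lookup order: tuple keys drill down one level per element; otherwise
        # try this level's own content, then search sub-hierarchies depth-first.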
if isinstance(arg, tuple):
out = self
for subarg in arg:
out = out.raw_getitem(subarg)
return out
try:
            return self.content[arg]
except KeyError as exc:
for sub in self.subs.values():
try:
return sub.raw_getitem(arg)
except KeyError:
pass
raise KeyError from exc
def __setitem__(self, key, val):
try:
targ = self.raw_getitem(key)
if isinstance(targ, Item):
targ.value = val
else:
raise ValueError("Cannot manually set hierarchy.")
except KeyError:
if isinstance(val, Hierarchy):
sub = self.sub(key)
sub.update(val)
else:
if isinstance(val, Item):
val = val.value
self.content.__setitem__(key, Item(key, val))
def __delitem__(self, key):
self.content.__delitem__(key)
def update(self, source):
for key in source:
self[key] = source[key]
def __hash__(self):
return self.randhash
def __repr__(self):
        return type(self).__name__ + repr(self.content)
def _repr_pretty_(self, p, cycle):
typnm = type(self).__name__
if cycle:
p.text(typnm + '{...}')
else:
with p.group(4, typnm + '({', '})'):
for idx, (key, val) in enumerate(self.items()):
if isinstance(val, Item):
val = val.value
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(val)
p.breakable()
def copy(self):
return type(self)(**self)
###############################################################################
###############################################################################
| [
"[email protected]"
] | |
763815d5ec1337053e3915354962c8fc3b3256e0 | 758c60aed145cf6b780d3cee4a79eb603452e3bd | /code/CNN/CNNemTest.py | 213b23b7898a7ead17d80e5e2dfb81391bcaca11 | [] | no_license | aiedward/CCIR | 8be5d9b49a1cd653564c4fc00c2ef60f1f1d292d | 215179ac38fbde3899e55b078622606b31595d2d | refs/heads/master | 2020-03-24T09:55:45.382723 | 2018-04-07T14:15:30 | 2018-04-07T14:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,485 | py | # -*-coding: utf-8 -*-
from keras.layers import Input, Dense, Dropout, Flatten, merge,concatenate
from keras.layers import Conv2D, MaxPooling2D, Embedding, Reshape
from keras.models import Model
import numpy as np
from keras import backend as K
from keras.optimizers import Adam
from keras.losses import hinge
from keras.models import load_model, model_from_json
from keras import regularizers
from keras import initializers
from keras.utils.np_utils import to_categorical
from get_words_for_CCIR import *
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
def compute_score(model, length_a, length_q, q, item):
train_file_dir = './'
train_file_name = 'train.1.json'
# [q, item] = process_train_file(train_file_dir, train_file_name)
train_file_name = 'CCIR_train_word_num.txt'
word, word_remove, count = word_table(train_file_dir, train_file_name)
Total_score_dcg3 = []
Total_score_dcg5 = []
for echo in range(100):
print 'the echo is', echo
        [test_question, test_answer] = get_test_data(length_a, length_q, echo*5+4000, q, item, word, word_remove)
test_label = get_test_label(echo*5+4000)
for i in range(len(test_question)):
temp = model.predict([test_question[i], test_answer[i]], batch_size=len(test_question[i]))
print len(test_question[i]), len(test_answer[i])
temp_label = test_label[i]
temp_score = []
for my_number2 in range(len(temp)):
temp_score.append(temp[my_number2][0])
temp_score = np.array(temp_score)
            # Here we would write the predicted scores and the final labels out to a file
#if not os.path.exists('./CNNModel2'):
# os.makedirs('./CNNModel2')
#file_object = open('./CNNModel2/%d' % (echo*5+i), 'w')
print temp_score
#for my_number in range(len(temp_score)):
# a = "%d %lf \n" % (temp_label[my_number], temp_score[my_number])
# file_object.write(a)
temp_sort = np.argsort(temp_score)
temp_sort = temp_sort[-1::-1]
Dcg3 = 0
Dcg5 = 0
print temp_label
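            # DCG@k = sum_{i=1..k} (2^rel_i - 1) / log2(i + 1), where rel_i is the
            # graded label of the i-th answer ranked by predicted score.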
for number in range(1, 4, 1):
a = temp_sort[number-1]
a = int(a)
Dcg3 = Dcg3 + (np.power(2, temp_label[a])-1) / np.log2(number+1)
for number in range(1, 6, 1):
a = temp_sort[number-1]
a = int(a)
Dcg5 = Dcg5 + (np.power(2, temp_label[a])-1) / np.log2(number+1)
Total_score_dcg3.append(Dcg3)
Total_score_dcg5.append(Dcg5)
print 'The score for Dcg3 is', np.mean(Total_score_dcg3)
print 'The score for Dcg5 is', np.mean(Total_score_dcg5)
del q
del item
M = np.mean(Total_score_dcg3)
print M
return M
def get_test_data(length_a, length_q, start, q, item, word, word_remove):
question, answer = get_word_vector(start, q, item, word, word_remove, start + 5)
answer2, question2 = my_padding(question, answer, length_a, length_q)
question_new = []
for i in range(len(question2)):
question_new.append([])
for j in range(len(answer2[i])):
question_new[i].append(question2[i])
del question
del question2
height_q = len(question_new[0][0])
height_a = len(answer2[0][0])
data_question = np.array(question_new)
data_answer = np.array(answer2)
question_data = []
for i in range(len(question_new)):
data_question[i] = np.array(data_question[i])
temp = data_question[i].reshape((len(question_new[i]), height_q, 1))
question_data.append(temp)
answer_data = []
for i in range(len(answer2)):
data_answer[i] = np.array(data_answer[i])
temp = data_answer[i].reshape((len(answer2[i]), height_a, 1))
answer_data.append(temp)
return question_data, answer_data
def word_table(train_file_dir,train_file_name):
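    # Build the vocabulary from the word-count file: words seen fewer than 100
    # times are assigned an index in `word`; more frequent words go to
    # `word_remove` and are dropped later.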
f = open(os.path.join(train_file_dir, train_file_name), 'r')
lines = f.readlines()
word = {}
word_remove = []
count = 0
for line in lines:
a = line.replace("\n","").split(" ")
if len(a) == 1:
continue
if int(a[1]) < 100:
count = count + 1
word[str(a[0])] = int(count)
continue
word_remove.append(a[0])
#for i in range(len(word)):
# print word[i]
return word, word_remove, count
def my_padding(data_q, data_a, length_a, length_q):
if length_q != 0 and length_a != 0:
for i in range(len(data_q)):
m = len(data_q[i])
a = int((length_q - m)/2)
b = length_q - m - a
for j in range(a):
temp = 0
data_q[i].append(temp)
for j in range(b):
temp = 0
data_q[i].insert(0, temp)
for i in range(len(data_a)):
for j in range(len(data_a[i])):
m = len(data_a[i][j])
                a = int((length_a - m) / 2)
b = length_a-m-a
for number in range(a):
temp = 0
data_a[i][j].append(temp)
for number in range(b):
temp = 0
data_a[i][j].insert(0, temp)
return data_a, data_q
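
# Editor's worked example (added; not part of the original pipeline):
# my_padding centres every sequence inside a fixed window by appending and
# prepending zero word-ids.  With length_q = 5, a question [7, 8, 9] becomes
# [0, 7, 8, 9, 0]; [7, 8] becomes [0, 0, 7, 8, 0] (the extra zero goes to the
# front when the gap is odd).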
def get_word_vector(start, q, item, word, word_remove, end):
question_matrix = []
answer_matrix = []
for i in range(start, end, 1):
question_matrix.append([])
for j in range(len(q[i])):
if q[i][j] in word_remove:
continue
if q[i][j] in word.keys():
question_matrix[i - start].append(word[str(q[i][j])])
for i in range(start, end, 1):
answer_matrix.append([])
for j in range(len(item[i])):
answer_matrix[i - start].append([])
for k in range(len(item[i][j])):
if item[i][j][k] in word_remove:
continue
if item[i][j][k] in word.keys():
answer_matrix[i - start][j].append(word[str(item[i][j][k])])
return question_matrix, answer_matrix
def get_train_data(start, lengtha, lengthq, q, item, word, word_remove):
question, answer = get_word_vector(start, q, item, word, word_remove, start+1)
answer2, question2 = my_padding(question, answer, lengtha, lengthq)
question_new = []
for i in range(len(question2)):
question_new.append([])
for j in range(len(answer2[i])):
question_new[i].append(question2[i])
del question
del answer
del question2
final_question = []
final_answer = []
height_q = len(question_new[0][0])
height_a = len(answer2[0][0])
for i in range(len(question_new)):
for j in range(len(question_new[i])):
temp1 = np.array(question_new[i][j])
temp2 = temp1.reshape(height_q, 1)
final_question.append(temp2)
for i in range(len(answer2)):
for j in range(len(answer2[i])):
temp1 = np.array(answer2[i][j])
temp2 = temp1.reshape(height_a, 1)
final_answer.append(temp2)
del answer2
del question_new
final_answer = np.array(final_answer)
final_question = np.array(final_question)
return final_question, final_answer, height_a, height_q
def cnn(height_a, height_q, count):
question_input = Input(shape=(height_q, 1), name='question_input')
embedding_q = Embedding(input_dim=count, output_dim=128, input_length=height_q)(question_input)
re_q = Reshape((height_q, 128, 1), input_shape=(height_q,))(embedding_q)
conv1_Q = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(re_q)
Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
F1_Q = Flatten()(Max1_Q)
Drop1_Q = Dropout(0.5)(F1_Q)
predictQ = Dense(64, activation='relu',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(Drop1_Q)
# kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01)
answer_input = Input(shape=(height_a, 1), name='answer_input')
embedding_a = Embedding(input_dim=count, output_dim=128, input_length=height_a)(answer_input)
re_a = Reshape((height_a, 128, 1), input_shape=(height_a,))(embedding_a)
conv1_A = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(re_a)
Max1_A = MaxPooling2D((399, 1), strides=(1, 1), padding='valid')(conv1_A)
F1_A = Flatten()(Max1_A)
Drop1_A = Dropout(0.5)(F1_A)
predictA = Dense(64, activation='relu',
kernel_regularizer=regularizers.l2(0.02),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(Drop1_A)
predictions = merge([predictA, predictQ], mode='dot')
model = Model(inputs=[question_input, answer_input],
outputs=predictions)
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
# model.compile(loss='mean_squared_error',
# optimizer='nadam')
return model
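
# Editor's note on cnn(): the pooling windows are hard-wired to the sequence
# lengths used in __main__ (height_q = 30, height_a = 400).  A 'valid' Conv2D
# with a (2, 128) kernel maps a (30, 128, 1) question to (29, 1, 512), which
# MaxPooling2D((29, 1)) collapses to (1, 1, 512); likewise 400 -> 399 -> 1 on
# the answer side.  Changing height_q/height_a therefore also requires
# changing the pooling sizes to (height_q - 1, 1) and (height_a - 1, 1).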
def get_label(start):
train_file_dir = './'
train_file_name = 'train.1.json'
f = open(os.path.join(train_file_dir, train_file_name), 'r')
label = []
count = 0
for line in f:
if count < start:
count = count + 1
continue
if count >= start + 1:
break
file_dict_temp = json.loads(line)
passages_list = file_dict_temp['passages']
label.append([])
for item in passages_list:
temp = item['label']
if temp == 0:
temp = 0
elif temp == 1:
temp = 1
elif temp == 2:
temp = 2
label[count-start].append(temp)
count = count + 1
final_label = []
for i in range(len(label)):
for j in range(len(label[i])):
final_label.append(label[i][j])
del label
final_label = np.array(final_label)
return final_label
def get_test_label(start):
train_file_dir = './'
train_file_name = 'train.1.json'
f = open(os.path.join(train_file_dir, train_file_name), 'r')
label = []
count = 0
for line in f:
if count < start:
count = count + 1
continue
if count >= start + 5:
break
file_dict_temp = json.loads(line)
passages_list = file_dict_temp['passages']
label.append([])
for item in passages_list:
temp = item['label']
label[count-start].append(temp)
count = count + 1
label = np.array(label)
return label
if __name__ == '__main__':
train_file_dir = './'
train_file_name = 'train.1.json'
[q, item] = process_train_file(train_file_dir, train_file_name)
train_file_name = 'CCIR_train_word_num.txt'
word, word_remove, count = word_table(train_file_dir, train_file_name)
# a = word.keys()
height_a = 400
height_q = 30
#model = cnn(height_a, height_q, count)
model = model_from_json(open('my_model_architecture3.json').read())
model.load_weights('my_model_weights3.h5')
for echo in range(4000):
        continue  # NOTE: training is deliberately skipped -- weights are loaded above, so we go straight to scoring
if echo == 2392:
continue
print 'the echo is', echo
final_question, final_answer, height_a, height_q = get_train_data(echo, height_a, height_q, q, item,
word, word_remove)
label = get_label(echo)
t = model.train_on_batch([final_question, final_answer], label)
print 'loss=', t
json_string = model.to_json()
open('my_model_architecture3.json', 'w').write(json_string)
model.save_weights('my_model_weights3.h5')
del final_question
del final_answer
del label
#model.save('my_model3.h5')
#model.save_weights('my_model_weights3.h5')
#print(model.summary())
M = compute_score(model, height_a, height_q, q, item) | [
"[email protected]"
] | |
86d43341acd4d75d7e9f6444a05667b88c3356c0 | 42b9bafc3c757543328d93fb60269ad4255aae17 | /env/lib/python3.7/site-packages/jet/tests/settings.py | 0c8cb379fe107502b4dc3633895113b84b4a7f85 | [
"MIT"
] | permissive | mejeng/kasir | 4fe66d1828e72b64d770426d71185cdd3c54127e | cc6f9158b61c0cb45078ddf798af9588c8771311 | refs/heads/master | 2020-09-25T03:36:10.144439 | 2019-11-30T07:59:23 | 2019-11-30T07:59:23 | 225,908,795 | 2 | 0 | MIT | 2019-12-04T16:21:15 | 2019-12-04T16:21:15 | null | UTF-8 | Python | false | false | 1,945 | py | import os
import django
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '!DJANGO_JET_TESTS!'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
ROOT_URLCONF = 'jet.tests.urls'
INSTALLED_APPS = (
'jet.dashboard',
'jet',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'jet.tests',
)
MIDDLEWARE = MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
if django.VERSION[:2] < (1, 9):
TEMPLATE_CONTEXT_PROCESSORS = tuple(global_settings.TEMPLATE_CONTEXT_PROCESSORS) + (
'django.core.context_processors.request',
)
else:
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
)
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-US'
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_URL = '/static/'
JET_INDEX_DASHBOARD = 'jet.tests.dashboard.TestIndexDashboard'
JET_APP_INDEX_DASHBOARD = 'jet.tests.dashboard.TestAppIndexDashboard'
| [
"[email protected]"
] | |
731d0f6696c9cae00ba15f73151571a42ef6dae3 | 877bd731bc97f220c363914d1e66970e2d9e599e | /python_stack/_django/full_stack/tv_shows/tv_shows_app/migrations/0003_auto_20200604_0321.py | 4dac5bd390a36b9dc89b42752fc9a70c942eba9d | [] | no_license | mpresto/dojo | eaccc08465298d35ae5a8e0d60e547a90bc24e05 | aec14ee041950eea7c35003fa03b0728b4606754 | refs/heads/master | 2021-05-26T00:15:16.551562 | 2020-10-04T00:09:48 | 2020-10-04T00:09:48 | 253,975,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # Generated by Django 2.2 on 2020-06-04 03:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tv_shows_app', '0002_auto_20200603_0658'),
]
operations = [
migrations.AlterField(
model_name='show',
name='title',
field=models.CharField(max_length=255),
),
]
| [
"[email protected]"
] | |
1bd128680cb0beaed2787f330cbaf980eefe2ce1 | 01552dc88e7c170de857f5ff0b52178326d5f003 | /guild/query_cmd.py | 00e09e3036d434e24efa3924aaef22b73e522633 | [
"Apache-2.0"
] | permissive | guildai/_guild-python-legacy | b8516f38b3dd4f27859850ec07fe9c4747f4fd8b | e552eff820d8edcfeb10b26bd5c8651548507b4a | refs/heads/master | 2021-01-01T15:52:35.875726 | 2017-09-27T18:58:59 | 2017-09-27T18:58:59 | 97,719,256 | 0 | 0 | Apache-2.0 | 2018-10-20T23:44:54 | 2017-07-19T13:28:21 | HTML | UTF-8 | Python | false | false | 880 | py | import os
import sys
import guild.cmd_support
import guild.db
import guild.op_util
def main(args):
run = guild.cmd_support.run_for_args(args)
if args.details == "series":
_print_series(run)
elif args.details == "files":
_print_files(run)
else:
_print_header(run)
def _print_header(run):
rundir = run.opdir
run_name = os.path.basename(rundir)
status = guild.op_util.extended_op_status(rundir)
sys.stdout.write("%s\t%s\n" % (run_name, status))
def _print_series(run):
db = guild.db.init_for_opdir(run.opdir)
for key in db.series_keys():
sys.stdout.write(key)
sys.stdout.write("\n")
def _print_files(run):
cwd = os.path.abspath(".")
for root, _dirs, files in os.walk(run.opdir):
for f in files:
path = os.path.join(root, f)
print(os.path.relpath(path, cwd))
| [
"[email protected]"
] | |
bffb24fdd89319ceb3cdb0061dc12f8695ef8b9d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03157/s695062063.py | 8c31e11022db6359dcc34abd1270b221bad01b66 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | from collections import deque
h,w=map(int,input().split())
p=['-'*(w+2)]
for i in range(h):
p.append('-'+input()+'-')
p.append('-'*(w+2))
isl=[]
v=[[0 for i in range(w+2)] for j in range(h+2)]
d=[[0,1],[1,0],[-1,0],[0,-1]]
def bfs(x,y):
if v[x][y]!=0:
return
q=deque()
q.append((x,y))
v[x][y]=1
br,wh=0,0
cnt=0
while len(q)>0:
ch,cw=q.popleft()
#v[ch][cw]=1
if p[ch][cw]=='#':
br+=1
for dh,dw in d:
if p[ch+dh][cw+dw]=='.' and v[ch+dh][cw+dw]==0:
q.append((ch+dh,cw+dw))
v[ch+dh][cw+dw]=1
elif p[ch][cw]=='.':
wh+=1
for dh,dw in d:
if p[ch+dh][cw+dw]=='#' and v[ch+dh][cw+dw]==0:
q.append((ch+dh,cw+dw))
v[ch+dh][cw+dw]=1
#print('xy=',x,y,'chw=',ch,cw,'bw=',br,wh,q)
isl.append((br,wh))
for i in range(1,h+1):
for j in range(1,w+1):
bfs(i,j)
ans=0
for br,wh in isl:
ans+=br*wh
print(ans) | [
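
# Editor's worked example (added; not part of the submission): the BFS only
# steps between adjacent cells of opposite colour, so each component collects
# a black count br and a white count wh, and the answer is sum(br * wh).
# For the 2x2 checkerboard input
#   2 2
#   #.
#   .#
# the whole grid is one component with br = 2 and wh = 2, so it prints 4.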
"[email protected]"
] | |
dad6dabb0c0bf27caff808349deda0c12cdca566 | 21bf726bf895569a41a8b8d2db6772dc51f46cfd | /MachineLearning/machine_learning_examples/unsupervised_class/hcluster.py | 4f40738d7ea106f838ee6efa0df6eb069eda4234 | [] | no_license | jeffsnguyen/Python-1 | dd924d25337cd6ac21e321d7b2c5ac17c065d94b | 463d32a61a760d076656c73c9f8c9fadf262438d | refs/heads/master | 2022-03-23T09:50:04.476094 | 2019-12-23T12:32:49 | 2019-12-23T12:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | # https://deeplearningcourses.com/c/cluster-analysis-unsupervised-machine-learning-python
# https://www.udemy.com/cluster-analysis-unsupervised-machine-learning-python
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
def main():
D = 2 # so we can visualize it more easily
s = 4 # separation so we can control how far apart the means are
mu1 = np.array([0, 0])
mu2 = np.array([s, s])
mu3 = np.array([0, s])
N = 900 # number of samples
X = np.zeros((N, D))
X[:300, :] = np.random.randn(300, D) + mu1
X[300:600, :] = np.random.randn(300, D) + mu2
X[600:, :] = np.random.randn(300, D) + mu3
Z = linkage(X, 'ward')
print "Z.shape:", Z.shape
# Z has the format [idx1, idx2, dist, sample_count]
# therefore, its size will be (N-1, 4)
plt.title("Ward")
dendrogram(Z)
plt.show()
Z = linkage(X, 'single')
plt.title("Single")
dendrogram(Z)
plt.show()
Z = linkage(X, 'complete')
plt.title("Complete")
dendrogram(Z)
plt.show()
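
# --- Added example (not in the original course code) -----------------------
# Once a linkage matrix exists, SciPy can also cut the dendrogram into flat
# clusters.  A minimal sketch, assuming data shaped like X in main():
def flat_clusters(X, k=3):
    from scipy.cluster.hierarchy import fcluster
    Z = linkage(X, 'ward')
    # criterion='maxclust' returns one label per row, using at most k clusters
    return fcluster(Z, t=k, criterion='maxclust')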
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0b6e69480371618c0daeb8640584b5d89d5114f4 | 9218fe2f12a3f8209a71a7775178a084da4212c0 | /crawler/dspider/spiders/stockFinancialDisclosureTimeSpider.py | be04e6013553e5a05b5bfff82e3a794c736eec91 | [] | no_license | betterManzZ/smart_deal_tool | 678e7f7ecf431df4fb6cef5faf9c5c1ddd397697 | a74cbab04393d60dc829c0110a98c625ba896f22 | refs/heads/master | 2020-06-12T03:31:29.404228 | 2019-06-23T08:05:34 | 2019-06-23T08:05:34 | 194,182,470 | 2 | 0 | null | 2019-06-28T00:57:13 | 2019-06-28T00:57:12 | null | UTF-8 | Python | false | false | 7,762 | py | # -*- coding: utf-8 -*-
import os
import re
import time
import datetime
import const as ct
import pandas as pd
from datetime import datetime
from scrapy import FormRequest
from scrapy.http import TextResponse, HtmlResponse
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError, TimeoutError
from pyquery import PyQuery as pq
from dspider.myspider import BasicSpider
from urllib.request import urlopen, Request
from base.cdate import report_date_list_with, one_report_date_list
from dspider.straight_flush import StraightFlushSession
from dspider.items import StockFinancialDisclosureTimeItem
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class StockFinancialDisclosureTimeSpider(BasicSpider):
name = 'stockFinancialDisclosureTimeSpider'
custom_settings = {
'ROBOTSTXT_OBEY': False,
'COOKIES_ENABLED': True,
'RETRY_ENABLED': False,
'REFERER_ENABLED': False,
'SPIDERMON_ENABLED': False,
'DOWNLOAD_DELAY': 5,
'DOWNLOAD_TIMEOUT': 20.0,
'RANDOMIZE_DOWNLOAD_DELAY': True,
'CONCURRENT_REQUESTS_PER_IP': 1,
'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
'DOWNLOADER_MIDDLEWARES': {
'dspider.proxy.RandomProxy':100,
'dspider.user_agent.RandomUserAgent': 200,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None
},
'USER_AGENTS': ct.USER_AGENTS,
'SIGNEWNYM_RATE': 60, # new ip rate, minimal value is 60 (seconds)
'PROXY_HOST': 'http://ip_proxy-container:5010',
'NEW_IP_HTTP_CODES': [500, 502, 503, 504, 522, 524, 408, 429, 403, 407, 404]
}
data_dict = dict()
#date_list = report_date_list_with()
date_list = one_report_date_list(datetime.now().strftime('%Y-%m-%d'))
sfsession = StraightFlushSession()
allowed_domains = ['data.10jqka.com.cn', 's.thsi.cn']
start_urls = ['https://s.thsi.cn/js/chameleon/time.{}.js', 'http://data.10jqka.com.cn/financial/yypl/date/{}/board/ALL/field/stockcode/order/DESC/page/{}/ajax/1/']
repatten = 'http://data.10jqka.com.cn/financial/yypl/date/(.+?)/board/ALL/field/stockcode/order/DESC/page/(.+?)/ajax/1/'
headers = {"Accept-Language": "en-US,en;q=0.5","Connection": "keep-alive"}
def start_requests(self):
if len(self.date_list) > 0:
while not self.update_cookie(): time.sleep(3)
mdate = self.date_list.pop()
self.data_dict[mdate] = list()
mcookie = {"v": self.sfsession.encode()}
page_url = self.start_urls[1].format(mdate, 1)
self.logger.info("start_request:%s", page_url)
yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item)
def parse_item(self, response):
try:
url = response.url
self.update_cookie()
mcookie = {"v": self.sfsession.encode()}
if type(response) is TextResponse:
time.sleep(60)
print("parse_item3", response.url)
yield FormRequest(url = url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin, dont_filter=True)
else:
reg = re.compile(self.repatten)
if reg.search(url) is not None:
doc = pq(response.text)
max_page = self.get_max_page(doc)
cur_date, cur_page = reg.search(url).groups()
cur_page = int(cur_page)
if not self.update_data(doc, cur_date): print("empty url", url)
if cur_page < max_page:
cur_page += 1
page_url = self.start_urls[1].format(cur_date, cur_page)
print("parse_item1", page_url)
yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin)
else:
self.store_items(cur_date)
if len(self.date_list) > 0:
mdate = self.date_list.pop()
self.data_dict[mdate] = list()
page_url = self.start_urls[1].format(mdate, 1)
print("parse_item2", page_url)
yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin)
else:
print("parse_item4", url)
yield FormRequest(url = url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback = self.errback_httpbin, dont_filter = True)
        except Exception as e:
            print("parse_item exception", e)
def errback_httpbin(self, failure):
print("errback", repr(failure))
        if failure.check(HttpError):
            response = failure.value.response
            print('HttpError on %s' % response.url)
        elif failure.check(DNSLookupError):
            request = failure.request
            print('DNSLookupError on %s' % request.url)
        elif failure.check(TimeoutError):
            request = failure.request
            print('TimeoutError on %s' % request.url)
        else:
            request = failure.request
            print('Other Error on %s' % request.url)
def store_items(self, cur_date):
df = pd.DataFrame(self.data_dict[cur_date], columns=["code", "first", "change", "actual"])
df = df.sort_values(['code'], ascending = 1)
filepath = os.path.join(ct.STOCK_FINANCIAL_REPORT_ANNOUNCEMENT_DATE_PATH, "%s.csv" % cur_date)
df.to_csv(filepath, index=False, mode="w", encoding='utf8')
self.data_dict[cur_date].clear()
def update_cookie(self):
self.sfsession = StraightFlushSession()
time_stamp = int(time.time())
time_url = self.start_urls[0].format(int(time_stamp/1200))
request = Request(time_url)
request.add_header("Connection", "close")
request.add_header("Accept-Language", "en-US,en;q=0.5")
request.add_header("User-Agent", 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36')
try:
response = urlopen(request, timeout=50)
if response.status == 200:
server_time = float(response.read().decode("utf-8").split('=')[1].split(';')[0])
self.sfsession.update_server_time(server_time)
return True
except Exception as e:
print("update_cookie", e)
return False
def get_max_page(self, doc):
span_text = doc("div.m-page.J-ajax-page span").text()
last_page = span_text.split("/")
max_page = int(last_page[1])
return max_page
def update_data(self, doc, cur_date):
tr_node = doc("table tbody tr")
if tr_node.length == 0: return False
for tr in tr_node.items():
            code = tr.children("td").eq(1).text().strip(' ')    # stock code
            first = tr.children("td").eq(3).text().strip(' ')   # first scheduled disclosure date
            changed = tr.children("td").eq(4).text().strip(' ') # revised disclosure date
            actual = tr.children("td").eq(5).text().strip(' ')  # actual disclosure date
first = first.replace("-", "").replace("00000000", "")
changed = changed.replace("-", "")
actual = actual.replace("-", "").replace("00000000", "")
self.data_dict[cur_date].append([code, first, changed, actual])
return True
| [
"[email protected]"
] | |
f4be8acda03bcb0ffbd7f9c766fc0c1947499472 | 7370127fe73970fdf0882f0696c1dbbf1e818745 | /pds-queries/2020-spring-census/list-unresponsive.py | 8fe4b8cd3cd420681dcd508b13497b717197e856 | [] | no_license | epiphany40223/epiphany | ab5ef0590ac67d2e353592c45177b8e5f7e22457 | 32956e735f0c5e3fc9231449796431d23b4817f0 | refs/heads/main | 2023-09-01T05:12:17.013064 | 2023-08-27T19:57:16 | 2023-08-27T19:57:16 | 41,978,574 | 5 | 11 | null | 2023-09-11T02:00:09 | 2015-09-05T23:06:07 | Python | UTF-8 | Python | false | false | 5,765 | py | #!/usr/bin/env python3
# Basic script to create a list of families which have not responded to the
# 2020 spring census. This version is based on a CSV file import, so you will
# need to retrieve the latest file.
import sys
sys.path.insert(0, '../../python')
import csv
import ECC
import PDSChurch
import helpers
from constants import jotform_member_fields
from pprint import pprint
from pprint import pformat
#############################################################################
def read_jotform_results(filename, log):
log.info("Reading results spreadsheet...")
fids = dict()
with open(filename, encoding='utf-8') as csvfile:
csvreader = csv.DictReader(csvfile)
for row in csvreader:
fids[int(row['fid'])] = True
l = len(fids.keys())
log.info(f"Found {l} unique Familes in the Jotform results")
return fids.keys()
#############################################################################
# Of the families in the PDS database, find the ones with:
# - a spouse with a valid email address, or
# - a head of household with a valid email address, or
# - a Family with a valid email address
def find_pds_census_families(log):
log.info("Loading PDS database...")
# Load the PDS database
(pds, families,
members) = PDSChurch.load_families_and_members(filename='pdschurch.sqlite3',
log=log)
# Search for Families that match the desired criteria
# Do it in FID order, just for repeatability
output_families = list()
family_only_emails = dict()
fids = sorted(families)
for fid in fids:
f = families[fid]
# We skipped some Families with too many Members
if len(f['members']) > len(jotform_member_fields):
log.debug(f"--- Skipping Familiy {f['Name']} because they have too many Members")
continue
have_email = False
for m in f['members']:
if helpers.member_is_hoh_or_spouse(m):
em = PDSChurch.find_any_email(m)
if em:
have_email = True
break
# If we have no email, check the Family record itself for an email
if not have_email:
em = PDSChurch.find_any_email(f)
            if em:
# Sadness. This looks like a bug in make-and-send-emails.py :-(
#have_email = True
log.info(f"Family-only email: {f['Name']} / fid {fid} / env {f['ParKey']}")
family_only_emails[fid] = f
        # We have no email for this Family at all -- skip it.
if not have_email:
log.debug(f"--- Have no HoH/Spouse/Family emails for Family {f['Name']} -- skipping")
continue
log.debug(f"+++ Family {f['Name']} has an email address")
output_families.append(f)
l = len(output_families)
log.info(f"Found {l} PDS Families with emails")
l = len(family_only_emails)
log.info(F"Found {l} PDS Familes with Family-only email")
return output_families, family_only_emails
#############################################################################
def check_families_only_email_results(families_only_email, fids_replied, log):
for fid, family in families_only_email.items():
if fid in fids_replied:
log.info(f"Happy day! Family-only email FID {fid} has Jotform results!")
#############################################################################
def cross_reference(families_with_email, fids_replied, log):
not_replied_envelope_ids = list()
not_replied_fids = list()
for family in families_with_email:
fid = family['FamRecNum']
if fid not in fids_replied:
log.debug(f"Family did NOT reply: {family['Name']} ({fid} / {family['FamRecNum']})")
not_replied_envelope_ids.append(family['ParKey'].strip())
not_replied_fids.append(fid)
else:
log.debug(f"Family did reply: {family['Name']} ({fid} / {family['FamRecNum']})")
# JMS DOUBLE CHECK
for fid in not_replied_fids:
if fid in fids_replied:
log.error(f"ERROR: Found double FID! {fid}")
return not_replied_envelope_ids
#############################################################################
def write_output_files(not_replied_envelope_ids, filename_base, num_per_file, log):
ids = not_replied_envelope_ids.copy()
file_number = 1
while len(ids) > 0:
ids_to_write = ids[:num_per_file]
if len(ids_to_write) <= 0:
break
filename = f'{filename_base}.{file_number}.txt'
with open(filename, 'w') as f:
f.write(','.join(ids_to_write) + '\n')
l = len(ids_to_write)
log.info(f"Wrote {l} envelope IDs to {filename}")
ids = ids[num_per_file:]
file_number += 1
#############################################################################
def main():
log = ECC.setup_logging(debug=False)
# Read in the Jotform results
filename = 'ECC census update - Sheet1.csv'
fids_replied = read_jotform_results(filename, log)
# Read in PDS Families with emails
families_with_email, families_only_email = find_pds_census_families(log)
# Check for Family-only emails in the results
check_families_only_email_results(families_only_email, fids_replied, log)
# Cross reference the two lists and see what PDS Families with emails
# did not respond to the census
not_replied_envelope_ids = cross_reference(families_with_email, fids_replied, log)
# Write output files
filename_base = 'unresponsives'
write_output_files(not_replied_envelope_ids, filename_base, 100, log)
main()
| [
"[email protected]"
] | |
7f5067f65e16a598942794e6419451da5869da52 | c65af972b843e4f11a9aa9005104ac54a283032d | /practice2/second.py | 2dc4335141cb0ae8f34bc1f2bca7306385900aba | [] | no_license | ljeleven/mypython | a63438c4246606082f000967a5d47256fa297aeb | b652338be3937543f0b35a9111dd0d346eb913b5 | refs/heads/master | 2023-05-24T19:30:37.001198 | 2020-04-09T15:40:40 | 2020-04-09T15:40:40 | 240,815,098 | 0 | 0 | null | 2023-05-22T22:41:00 | 2020-02-16T01:46:29 | Python | UTF-8 | Python | false | false | 728 | py | #__author:"longjin"
#date: 2019/6/9
def bonus(n):
bonus = 0
if n <= 100000:
bonus = n*0.1
elif 100000 < n <= 200000:
bonus = 100000*0.1 + (n-100000)*0.075
elif 200000 < n <= 400000:
bonus = 100000*0.1 + 100000*0.075 + (n-200000)*0.05
elif 400000 < n <= 600000:
bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + (n-400000)*0.03
elif 600000 < n <= 1000000:
bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + 200000 * 0.03 + (n-600000)*0.015
    elif 1000000 < n:
bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + 200000*0.03 + 400000*0.015 + (n-1000000)*0.01
return bonus
n = int(input('please input your profit: '))
print(bonus(n)) | [
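
# Editor's worked example: bonus(150000) = 100000*0.1 + 50000*0.075 = 13750.0
# (10% on the first 100,000 of profit, 7.5% on the remaining 50,000).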
"[email protected]"
] | |
864ef80c8f12deaf341abf42e44b085fa2dfb5ad | 5ade44090b99ba19edd5cc0b07e4ebf1f8cc416e | /introduction.to.programming.with.turtle/2-3.triangle.py | 348245d3846c5d3332b8ac3cb95229421d5f1f07 | [
"MIT"
] | permissive | Mito445/programming_with_python | af2f201fc3f13cab88fdaa708ecda6da05ad1b46 | 1114eaa7432ea8581d880bcebd1813a9fb873cce | refs/heads/master | 2020-05-27T13:00:53.085344 | 2019-05-21T07:59:18 | 2019-05-21T07:59:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from easygraphics.turtle import *
create_world()
for i in range(3):
fd(100)
lt(120)
pause()
close_world()
| [
"[email protected]"
] | |
abc96381576d91e73fd9c07972847fb15b2ae392 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_ignore_error05.py | 63452115f9c827a96269ffee2d9f92da2fd2a24a | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,015 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('ignore_error05.xlsx')
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string('A1', '123')
worksheet.write_formula('A2', '=1/0', None, '#DIV/0!')
worksheet.ignore_errors({'number_stored_as_text': 'A1', 'eval_error': 'A2'})
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
c25b620107cf61d98745b98607852eb71b1016f7 | bf9a77bd51ba2dd5bf9c6e7cbf0ec9ec403f0b4f | /tests/test_lib_wordpress.py | 71bdb09f5d3ff37d3d02219681a284be863e3876 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | s672/clld | bf3e53698ef19b71181ca8e837b863d8ea423afe | cce7abeb504e0e29b61e7d14e93a1dc1d2294a3b | refs/heads/master | 2023-03-30T06:22:06.732159 | 2021-04-07T06:57:20 | 2021-04-07T06:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | from clld.lib.wordpress import *
def _requests(mocker, c, status=200):
return mocker.Mock(get=lambda *a, **kw: mocker.Mock(text=c, status_code=status))
def test_sluggify():
assert sluggify('a and B') == 'a-and-b'
def test_Client(mocker):
client = Client('blog.example.org', 'user', 'password')
mocker.patch('clld.lib.wordpress.requests', _requests(mocker, '', status=404))
client.get_post_id_from_path('/post')
mocker.patch('clld.lib.wordpress.requests', _requests(mocker, '<div class="post" id="post-1">'))
client.get_post_id_from_path('/post')
mocker.patch(
'clld.lib.wordpress.requests',
_requests(mocker, '<input type="hidden" name="comment_post_ID" value="1" />'))
client.get_post_id_from_path('/post')
client.server = mocker.MagicMock()
client.set_categories([{'name': 'cat', 'description': 'desc'}])
client.set_categories([{'name': 'cat', 'description': 'desc'}], post_id=3)
client.create_post(
'title', 'content',
date=1,
tags=['tag'],
custom_fields={'a': 'x'},
categories=[{'name': 'cat', 'description': 'desc'}])
client.server = mocker.MagicMock(wp=mocker.Mock(getCategories=mocker.Mock(return_value=[{
'categoryName': 'n', 'categoryId': '1'}])))
client.get_categories()
client.get_categories(name='n')
client.set_categories([{'name': 'n', 'description': 'desc'}])
| [
"[email protected]"
] | |
ff1b51789b5b92e740b1589c7ae516bcf3bfc011 | a342b1d6c7451cf3982b835dfc81924efe0509b4 | /tests/fixpath.py | 6e87fe37d75741be878ccbd0cf94c64ffdc6390d | [] | no_license | phaustin/eoas_canvas | 23bbc27a99f5d0654dce13be3fc3cbcc022d9250 | 79544df2095c7e536f35c29bbd9f568a0ff3633c | refs/heads/master | 2021-04-03T05:12:50.232463 | 2019-03-01T01:21:31 | 2019-03-01T01:21:31 | 124,586,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | """
import this in your main function to add the parent
to the folder holding the script to the front of sys.path
"""
from pathlib import Path
import sys, os
import site
the_path = Path(sys.argv[0]).resolve().parents[1]
print(f'fixpath: inserting package directory in path: {the_path}')
sys.path.insert(0, str(the_path))
site.removeduppaths()
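
# Editor's usage sketch (hypothetical layout): for a script that lives at
# <repo>/tests/run_me.py, putting
#     import fixpath  # noqa: F401  -- imported for its sys.path side effect
# at the top makes the packages under <repo> importable without installing.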
| [
"[email protected]"
] | |
5e52e7f42b5a8b03de3b12096468a57d982485b9 | 1cfb54adac19bfd69cc58ab23918925a800494c4 | /youtube_project/youtube_app/urls.py | 5cce935a401be7be76fbf852834bf410a85a1441 | [] | no_license | SatishNitk/youtube_clone | f743bae04190d4a3c0881a2a1b3daf23d9d9e468 | 698c94d5ef9689428da6a35b01928fd071978772 | refs/heads/master | 2020-06-18T20:08:56.113547 | 2019-07-28T04:56:52 | 2019-07-28T04:56:52 | 196,431,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py |
from django.urls import path,include
from youtube_app.views import *
urlpatterns = [
path('', HomeView.as_view()),
path('login/',LoginView.as_view()),
path('register/',RegisterView.as_view()),
path('video/',NewVideo.as_view()),
path('logout/',LogoutView.as_view()),
path('comment/',CommentView.as_view()),
path('video/<int:id>', VideoView.as_view()),
path('get_video/<file_name>', VideoFileView.as_view()),
]
| [
"[email protected]"
] | |
a8567ccd2b5a4624126ca8ab8456180bbdc05fc2 | 64cee8c8f33ae6be8edf0daa7a3a83efee86c82c | /doc/source/conf.py | 654ff054fca0e5c2a439fd8b5ba07d4f6f22ccd0 | [
"MIT"
] | permissive | shengyongniu/cemba_data | 52881061dac63c5dca4bbedf9bc7f1f345b13575 | 6d076ed7f19ac76650d91fe9172393cc6c10e686 | refs/heads/master | 2021-10-09T14:31:43.849987 | 2018-12-29T23:19:53 | 2018-12-29T23:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,946 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../cemba_data'))
# -- Project information -----------------------------------------------------
project = 'cemba_data'
copyright = '2018, Hanqing Liu'
author = 'Hanqing Liu'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cemba_datadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cemba_data.tex', 'cemba\\_data Documentation',
'Hanqing Liu', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cemba_data', 'cemba_data Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cemba_data', 'cemba_data Documentation',
author, 'cemba_data', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for t_o_d_o extension ----------------------------------------------
# If true, `t_o_d_o` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | [
"[email protected]"
] | |
6d9f788e635eba1977d3eee0524bb2eada7ec450 | 5f1c3a2930b20c3847496a249692dc8d98f87eee | /Pandas/Excel_DataAnalysis/Question5.py | 272e1db4934d35b2c84d146a8f2e7f6ac7cee2c9 | [] | no_license | AmbyMbayi/CODE_py | c572e10673ba437d06ec0f2ae16022d7cbe21d1c | 5369abf21a8db1b54a5be6cbd49432c7d7775687 | refs/heads/master | 2020-04-24T05:01:46.277759 | 2019-02-22T08:26:04 | 2019-02-22T08:26:04 | 171,723,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """Write a pandas program to insert a column in the sixth position of the said excel sheet and fill it with NaN values
"""
import pandas as pd
import numpy as np
df = pd.read_excel('coalpublic2013.xls')
df.insert(5, "column1", np.nan)  # loc is zero-based, so 5 makes it the sixth column
print(df.head())
"[email protected]"
] | |
315888a36f3671ad0377b488fbeba896827df303 | 09fd456a6552f42c124c148978289fae1af2d5c3 | /Graph/210.py | b37dc35636fc78cbaea209ecf36234d07c89537d | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | # 210. Course Schedule II
# There are a total of n courses you have to take, labeled from 0 to n-1.
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
# Example 1:
# Input: 2, [[1,0]]
# Output: [0,1]
# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
# course 0. So the correct course order is [0,1] .
# Example 2:
# Input: 4, [[1,0],[2,0],[3,1],[3,2]]
# Output: [0,1,2,3] or [0,2,1,3]
# Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
# courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
# So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
# Note:
# The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
import collections
class Solution:
def findOrder(self, numCourses, prerequisites):
graph = collections.defaultdict(list)
res = []
for u, v in prerequisites:
graph[u].append(v)
visited = [0 for x in range(numCourses)]
for i in range(numCourses):
if not self.dfs(i, graph, visited, res):
return []
return res
def dfs(self, node, graph, visited, res):
if visited[node] == -1:
return False
if visited[node] == 1:
return True
visited[node] = -1
for i in graph[node]:
if not self.dfs(i, graph, visited, res):
return False
visited[node] = 1
res.append(node)
return True | [
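

# Editor's sanity check (added example; not part of the original file).
# Both [0, 1, 2, 3] and [0, 2, 1, 3] are accepted answers for the second case.
if __name__ == "__main__":
    print(Solution().findOrder(2, [[1, 0]]))                        # -> [0, 1]
    print(Solution().findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))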
"[email protected]"
] | |
ce55db8946daf7db06859e2c0e253fee52256393 | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /Typical DP Contest/TDPCa2.py | 0e482b3d312d102ca64129efa5e8468a4aad6f79 | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | n = int(input())
p = list(map(int, input().split()))
dp = [[None for i in range(sum(p)+1)] for _ in range(n+1)]
def canMake(i, v):
if (i == 0):
return v == 0
if (dp[i][v] != None):
return dp[i][v]
# print(i)
if (v - p[i-1] >= 0):
dp[i][v] = canMake(i-1, v) or canMake(i-1, v-p[i-1])
else:
dp[i][v] = canMake(i-1, v)
return dp[i][v]
ans = 0
for i in range(sum(p) + 1):
if (canMake(n, i)):
ans += 1
print(ans)
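
# Editor's worked example: for the classic sample input
#   3
#   2 3 5
# the reachable subset sums are {0, 2, 3, 5, 7, 8, 10}, so it prints 7.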
| [
"[email protected]"
] |