Dataset schema (column: dtype, observed range or class count):

hexsha: stringlengths, 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 248
max_stars_repo_name: stringlengths, 5 to 125
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: sequencelengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 248
max_issues_repo_name: stringlengths, 5 to 125
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: sequencelengths, 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 248
max_forks_repo_name: stringlengths, 5 to 125
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: sequencelengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1
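The rows below follow this schema. As a quick orientation, here is a minimal sketch of how rows like these could be filtered once loaded into pandas; the Parquet shard path is hypothetical, and only columns named in the schema above are used.

import pandas as pd

# Hypothetical shard path; any tabular export of the rows below would work the same way.
df = pd.read_parquet("data/train-00000.parquet")

# Keep well-documented Python files, then inspect a few metadata columns.
well_documented = df[(df["lang"] == "Python") & (df["score_documentation"] > 0.3)]
print(well_documented[["max_stars_repo_name", "size", "score_documentation"]].head())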

hexsha: 538b05195aa3c62cda3499af221928cc57bfb7bb
size: 1,423
ext: py
lang: Python
max_stars_repo_path: alipay/aop/api/domain/KbAdvertSettleBillResponse.py
max_stars_repo_name: snowxmas/alipay-sdk-python-all
max_stars_repo_head_hexsha: 96870ced60facd96c5bce18d19371720cbda3317
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 213
max_stars_repo_stars_event_min_datetime: 2018-08-27T16:49:32.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-29T04:34:12.000Z
max_issues_repo_path: alipay/aop/api/domain/KbAdvertSettleBillResponse.py
max_issues_repo_name: snowxmas/alipay-sdk-python-all
max_issues_repo_head_hexsha: 96870ced60facd96c5bce18d19371720cbda3317
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 29
max_issues_repo_issues_event_min_datetime: 2018-09-29T06:43:00.000Z
max_issues_repo_issues_event_max_datetime: 2021-09-02T03:27:32.000Z
max_forks_repo_path: alipay/aop/api/domain/KbAdvertSettleBillResponse.py
max_forks_repo_name: snowxmas/alipay-sdk-python-all
max_forks_repo_head_hexsha: 96870ced60facd96c5bce18d19371720cbda3317
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 59
max_forks_repo_forks_event_min_datetime: 2018-08-27T16:59:26.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-25T10:08:15.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class KbAdvertSettleBillResponse(object):

    def __init__(self):
        self._download_url = None
        self._paid_date = None

    @property
    def download_url(self):
        return self._download_url

    @download_url.setter
    def download_url(self, value):
        self._download_url = value

    @property
    def paid_date(self):
        return self._paid_date

    @paid_date.setter
    def paid_date(self, value):
        self._paid_date = value

    def to_alipay_dict(self):
        params = dict()
        if self.download_url:
            if hasattr(self.download_url, 'to_alipay_dict'):
                params['download_url'] = self.download_url.to_alipay_dict()
            else:
                params['download_url'] = self.download_url
        if self.paid_date:
            if hasattr(self.paid_date, 'to_alipay_dict'):
                params['paid_date'] = self.paid_date.to_alipay_dict()
            else:
                params['paid_date'] = self.paid_date
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = KbAdvertSettleBillResponse()
        if 'download_url' in d:
            o.download_url = d['download_url']
        if 'paid_date' in d:
            o.paid_date = d['paid_date']
        return o
avg_line_length: 25.410714
max_line_length: 75
alphanum_fraction: 0.599438
count_classes: 1,306
score_classes: 0.917779
count_generators: 0
score_generators: 0
count_decorators: 598
score_decorators: 0.420239
count_async_functions: 0
score_async_functions: 0
count_documentation: 176
score_documentation: 0.123682
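A minimal round-trip sketch for the KbAdvertSettleBillResponse class in the record above; it assumes the alipay-sdk-python-all package is importable, and the URL and date values are hypothetical.

from alipay.aop.api.domain.KbAdvertSettleBillResponse import KbAdvertSettleBillResponse

resp = KbAdvertSettleBillResponse()
resp.download_url = "https://example.com/bill.csv"  # hypothetical value
resp.paid_date = "2021-12-01"                       # hypothetical value
d = resp.to_alipay_dict()          # {'download_url': ..., 'paid_date': ...}
clone = KbAdvertSettleBillResponse.from_alipay_dict(d)
assert clone.download_url == resp.download_url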

hexsha: 538b8d9cb91e4b908b2574c10cefedcf90ea344f
size: 6,356
ext: py
lang: Python
max_stars_repo_path: day5.py
max_stars_repo_name: PLCoster/adventofcode2019
max_stars_repo_head_hexsha: 7aad1503dcf80b127b21191850ad9c93f91a602a
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-12-09T21:26:22.000Z
max_stars_repo_stars_event_max_datetime: 2019-12-09T21:26:22.000Z
max_issues_repo_path: day5.py
max_issues_repo_name: PLCoster/adventofcode2019
max_issues_repo_head_hexsha: 7aad1503dcf80b127b21191850ad9c93f91a602a
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: day5.py
max_forks_repo_name: PLCoster/adventofcode2019
max_forks_repo_head_hexsha: 7aad1503dcf80b127b21191850ad9c93f91a602a
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 11:06:59 2019

@author: Paul
"""

def read_data(filename):
    """ Reads csv file into a list, and converts to ints """
    data = []
    f = open(filename, 'r')
    for line in f:
        data += line.strip('\n').split(',')
    int_data = [int(i) for i in data]
    f.close()
    return int_data

def run_intcode(program, input_int):
    """ Takes data, list of ints to run int_code on.
    Returns list of ints after intcode program has been run.

    The Intcode program reads in the integers sequentially in sets of up to 4:
    data[i] == Parameter Mode + Opcode (last two digits)
    data[i+1] == Entry 1
    data[i+2] == Entry 2
    data[i+3] == Entry 3

    If Opcode == 1, the values at the index locations given by entries 1 and 2
    are summed and stored at the index location given by entry 3.
    If Opcode == 2, the values at the index locations given by entries 1 and 2
    are multiplied and stored at the index location given by entry 3.
    If Opcode == 3, the single integer (input) is saved to the position given
    by entry 1.
    If Opcode == 4, the program outputs the value of its only parameter.
    E.g. 4,50 would output the value at address 50.
    If Opcode == 5 and entry 1 is != 0, the intcode position moves to the
    index stored at entry 2. Otherwise it does nothing.
    If Opcode == 6 and entry 1 is 0, the intcode position moves to the index
    stored at entry 2. Otherwise it does nothing.
    If Opcode == 7 and entry 1 < entry 2, store 1 in position given by third
    param, otherwise store 0 at position given by third param.
    If Opcode == 8 and entry 1 == entry 2, store 1 in position given by third
    param, otherwise store 0 at position given by third param.
    If Opcode == 99, the program is completed and will stop running.

    Parameters are digits to the left of the opcode, read left to right:
    Parameter 0 -> Position mode - the entry is treated as an index location
    Parameter 1 -> Immediate mode - the entry is treated as a value
    """
    data = program[:]
    answer = -1
    params = [0, 0, 0]
    param_modes = ['', '', '']
    i = 0
    while i < len(program):
        #print("i = ", i)
        # Determine Opcode and parameter codes:
        opcode_str = "{:0>5d}".format(data[i])
        opcode = int(opcode_str[3:])
        param_modes[0] = opcode_str[2]
        param_modes[1] = opcode_str[1]
        param_modes[2] = opcode_str[0]
        #print(opcode_str)
        for j in range(2):
            if param_modes[j] == '0':
                try:
                    params[j] = data[data[i+j+1]]
                except IndexError:
                    continue
            else:
                try:
                    params[j] = data[i+j+1]
                except IndexError:
                    continue
        #print(params, param_modes)

        # If opcode is 1, add relevant entries:
        if opcode == 1:
            data[data[i+3]] = params[0] + params[1]
            i += 4
        # If opcode is 2, multiply the relevant entries:
        elif opcode == 2:
            data[data[i+3]] = params[0] * params[1]
            i += 4
        # If opcode is 3, store input value at required location.
        elif opcode == 3:
            data[data[i+1]] = input_int
            i += 2
        # If opcode is 4, print out the input stored at specified location.
        elif opcode == 4:
            answer = data[data[i+1]]
            print("Program output: ", data[data[i+1]])
            i += 2
        # If the opcode is 5 and the next parameter is != 0, jump forward
        elif opcode == 5:
            if params[0] != 0:
                i = params[1]
            else:
                i += 3
        # If the opcode is 6 and next parameter is 0, jump forward
        elif opcode == 6:
            if params[0] == 0:
                i = params[1]
            else:
                i += 3
        # If the opcode is 7, carry out less than comparison and store 1/0 at loc 3
        elif opcode == 7:
            if params[0] < params[1]:
                data[data[i+3]] = 1
            else:
                data[data[i+3]] = 0
            i += 4
        # If the opcode is 8, carry out equality comparison and store 1/0 at loc 3
        elif opcode == 8:
            if params[0] == params[1]:
                data[data[i+3]] = 1
            else:
                data[data[i+3]] = 0
            i += 4
        # If the opcode is 99, halt the intcode
        elif opcode == 99:
            print("Program ended by halt code")
            break
        # If opcode is anything else something has gone wrong!
        else:
            print("Problem with the Program")
            break

    return data, answer

program = read_data("day5input.txt")
#print(program)

result1, answer1 = run_intcode(program, 1)
#print(result1)
print("Part 1: Answer is: ", answer1)

result2, answer2 = run_intcode(program, 5)
#print(result2)
print("Part 2: Answer is: ", answer2)

#test_program = [1002,4,3,4,33]
#test_program2 = [3,0,4,0,99]
#test_program3 = [1101,100,-1,4,0]
#test_program4 = [3,9,8,9,10,9,4,9,99,-1,8] # 1 if input = 8, 0 otherwise
#test_program5 = [3,9,7,9,10,9,4,9,99,-1,8] # 1 if input < 8, 0 otherwise
#test_program6 = [3,3,1108,-1,8,3,4,3,99] # 1 if input = 8, 0 otherwise
#test_program7 = [3,3,1107,-1,8,3,4,3,99] # 1 if input < 8, 0 otherwise
#test_program8 = [3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9] # 0 if input = 0, 1 otherwise
#test_program9 = [3,3,1105,-1,9,1101,0,0,12,4,12,99,1] # 0 if input = 0, 1 otherwise
#test_program10 = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,
#36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,
#1105,1,46,98,99] # 999 if input < 8, 1000 if input = 8, 1001 if input > 8
avg_line_length: 34.73224
max_line_length: 92
alphanum_fraction: 0.522498
count_classes: 0
score_classes: 0
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 3,728
score_documentation: 0.586532
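The behaviour of run_intcode in the record above can be checked against one of the file's own commented test programs (test_program4 outputs 1 exactly when the input equals 8):

# With run_intcode from day5.py in scope:
_, answer = run_intcode([3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], 8)
assert answer == 1
_, answer = run_intcode([3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], 7)
assert answer == 0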

hexsha: 538bf59cdb6e50d49c8fe6d1f6a72767b79df904
size: 3,333
ext: py
lang: Python
max_stars_repo_path: textvis/textprizm/models.py
max_stars_repo_name: scclab/textvisdrg-prototype
max_stars_repo_head_hexsha: e912e4441b0e42e0f6c477edd03227b93b8ace73
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: textvis/textprizm/models.py
max_issues_repo_name: scclab/textvisdrg-prototype
max_issues_repo_head_hexsha: e912e4441b0e42e0f6c477edd03227b93b8ace73
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: textvis/textprizm/models.py
max_forks_repo_name: scclab/textvisdrg-prototype
max_forks_repo_head_hexsha: e912e4441b0e42e0f6c477edd03227b93b8ace73
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from django.db import models

# Create your models here.

class Schema(models.Model):
    name = models.CharField(max_length=200)
    description = models.TextField()

class Code(models.Model):
    name = models.CharField(max_length=200)
    description = models.TextField()
    active_instances = models.PositiveIntegerField(default=0)
    schema = models.ForeignKey(Schema, related_name="codes")
    code_type = models.IntegerField(default=0)

    def __unicode__(self):
        if self.description:
            return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
        else:
            return "%s/%s (%d)" % (self.schema_id, self.name, self.id)

class DataSet(models.Model):
    name = models.CharField(max_length=100)
    created = models.DateTimeField()

class Session(models.Model):
    set = models.ForeignKey(DataSet)
    started = models.DateTimeField()
    ended = models.DateTimeField()

    def __unicode__(self):
        return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))

class Participant(models.Model):
    name = models.CharField(max_length=100)
    description = models.TextField()

    def __unicode__(self):
        return self.name

class Message(models.Model):
    session = models.ForeignKey(Session)
    idx = models.IntegerField()
    time = models.DateTimeField()
    type = models.IntegerField()
    participant = models.ForeignKey(Participant, related_name='messages')
    message = models.TextField()

    codes = models.ManyToManyField(Code, through='CodeInstance')

    @classmethod
    def get_between(cls, start, end):
        """
        Get messages that are inclusively between the two messages, or two dates.
        Takes into account the exact ordering of messages, meaning that you
        won't get messages at the same time but after the last message,
        for example.
        """
        if isinstance(start, Message):
            after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
            after_first = models.Q(time__gte=start.time) & after_first
        else:
            after_first = models.Q(time__gte=start)

        if isinstance(end, Message):
            before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
            before_last = models.Q(time__lte=end.time) & before_last
        else:
            before_last = models.Q(time__lte=end)

        return cls.objects.filter(after_first, before_last)

    @property
    def text(self):
        return self.message

    @property
    def user_name(self):
        return self.participant.name

    @property
    def created_at(self):
        return self.time

class User(models.Model):
    name = models.CharField(max_length=100)
    full_name = models.CharField(max_length=250)
    email = models.CharField(max_length=250)

    def __unicode__(self):
        return self.name

class AbstractCodeInstance(models.Model):
    class Meta:
        abstract = True

    code = models.ForeignKey(Code)
    message = models.ForeignKey(Message)
    added = models.DateTimeField()

class CodeInstance(AbstractCodeInstance):
    user = models.ForeignKey(User)
    task_id = models.PositiveIntegerField()
    intensity = models.FloatField()
    flag = models.IntegerField()
avg_line_length: 28.245763
max_line_length: 101
alphanum_fraction: 0.659166
count_classes: 3,231
score_classes: 0.969397
count_generators: 0
score_generators: 0
count_decorators: 1,091
score_decorators: 0.327333
count_async_functions: 0
score_async_functions: 0
count_documentation: 358
score_documentation: 0.107411
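A hedged usage sketch for Message.get_between from the record above; it assumes a configured Django project with these models migrated and some data present.

import datetime

# Between two datetimes:
window = Message.get_between(datetime.datetime(2014, 1, 1),
                             datetime.datetime(2014, 2, 1))

# Or between two Message instances, honouring per-session (time, idx) ordering:
first = Message.objects.order_by('time').first()
last = Message.objects.order_by('time').last()
window = Message.get_between(first, last)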

hexsha: 538cf8a863a1cdd537656657d4741a5309d4d759
size: 8,079
ext: py
lang: Python
max_stars_repo_path: test/test_purchasing.py
max_stars_repo_name: jacob22/accounting
max_stars_repo_head_hexsha: e2fceea880e3f056703ba97b6cf52b73cd7af93b
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: test/test_purchasing.py
max_issues_repo_name: jacob22/accounting
max_issues_repo_head_hexsha: e2fceea880e3f056703ba97b6cf52b73cd7af93b
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/test_purchasing.py
max_forks_repo_name: jacob22/accounting
max_forks_repo_head_hexsha: e2fceea880e3f056703ba97b6cf52b73cd7af93b
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-

# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
if sys.version_info >= (3, 0):
    PYT3 = True
    import urllib.request
    import urllib.parse
else:
    PYT3 = False
    import urllib2
    import urlparse
import contextlib
import json
import os
import py
import subprocess
import time
import uuid
from . import support

here = os.path.dirname(__file__)


class Container(object):

    def __init__(self, **kw):
        self.__dict__.update(kw)


def do_purchase(products, emailaddress):
    params = {
        'data': [
            {'items': [{'product': product} for product in products],
             'buyerName': 'Kalle Anka',
             'buyerEmail': emailaddress}
        ]
    }
    if PYT3:
        req = urllib.request.Request(
            urllib.parse.urljoin(support.url, '/rest/purchase'),
            json.dumps(params).encode('ascii'),
            {'Content-Type': 'application/json'})
        data = json.load(urllib.request.urlopen(req))
    else:
        req = urllib2.Request(
            urlparse.urljoin(support.url, '/rest/purchase'),
            json.dumps(params),
            {'Content-Type': 'application/json'})
        data = json.load(urllib2.urlopen(req))
    return Container(id=data['purchase'], invoice=data['invoiceUrl'],
                     buyerEmail=emailaddress)


def check_mail(client, mailssh, purchase, mailtype):
    client.run('sendmail -qf')
    message, = mailssh.find_and_delete_mail(None, 'TO', purchase.buyerEmail)
    msg, headers = mailssh.parse(message)
    assert headers['X-OE-MailType'] == [mailtype]
    assert purchase.invoice in msg
    return msg, headers


@contextlib.contextmanager
def check_mails(client, mailssh, purchase):
    check_mail(client, mailssh, purchase, 'order-confirmation')
    yield
    check_mail(client, mailssh, purchase, 'full-payment-confirmation')


def gen_pg(client, org, id_args=[1, 1]):
    cmd = 'python /root/accounting/members/paymentgen.py %s %s %s' % (
        org.id, id_args[0], id_args[1])
    id_args[0] += 1
    id_args[1] += 1000
    stdin, stdout, stderr = client.exec_command(
        'PYTHONPATH=/root/accounting ' + cmd)
    return stdout.read()


def upload_pg(tmpdir, ssh, pgdata):
    pgfile = tmpdir.join('pgfile')
    pgfile.write(pgdata)
    dest = uuid.uuid4()
    with ssh(username='nordea') as client:
        sftp = client.open_sftp()
        sftp.put(str(pgfile), 'incoming/%s' % dest, confirm=False)


@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
                          'ssh', 'org', 'emailaddress')
def test_full_plusgiro_payment(mailssh, ssh, org, emailaddress, tmpdir):
    purchase = do_purchase([org.product], emailaddress)
    with ssh() as client:
        with check_mails(client, mailssh, purchase):
            pgdata = gen_pg(client, org)
            upload_pg(tmpdir, ssh, pgdata)


@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
                          'ssh', 'org', 'emailaddress')
def test_partial_plusgiro_payment(ssh, mailssh, org, emailaddress, tmpdir):
    purchase = do_purchase([org.product], emailaddress)
    with ssh() as client:
        with check_mails(client, mailssh, purchase):
            pgdata1 = gen_pg(client, org)
            pgdata2 = gen_pg(client, org)
            pgdata3 = gen_pg(client, org)

            # The sum is 66666 (öre). It is probably unique in the fake pgfile,
            # so we can simply replace it in order to make partial payments.
            if PYT3:
                partial_payment1 = pgdata1.replace(b'66666', b'22222')  # pay 222.22 SEK
                partial_payment2 = pgdata2.replace(b'66666', b'33333')  # pay 333.33 SEK
                final_payment = pgdata3.replace(b'66666', b'11111')  # final 111.11 SEK
            else:
                partial_payment1 = pgdata1.replace('66666', '22222')  # pay 222.22 SEK
                partial_payment2 = pgdata2.replace('66666', '33333')  # pay 333.33 SEK
                final_payment = pgdata3.replace('66666', '11111')  # final 111.11 SEK

            upload_pg(tmpdir, ssh, partial_payment1)
            msg, headers = check_mail(client, mailssh, purchase,
                                      'partial-payment-confirmation')
            assert '222,22' in msg  # amount paid
            assert '444,44' in msg  # amount remaining

            upload_pg(tmpdir, ssh, partial_payment2)
            msg, headers = check_mail(client, mailssh, purchase,
                                      'partial-payment-confirmation')
            assert '333,33' in msg  # amount paid
            assert '111,11' in msg  # amount remaining

            upload_pg(tmpdir, ssh, final_payment)


@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
                          'nodes', 'ssh', 'org', 'emailaddress')
def test_swish_payment(nodes, ssh, mailssh, org, emailaddress):
    #py.test.skip('Skip swish tests until certificates work')
    purchase = do_purchase([org.product], emailaddress)
    with ssh() as client:
        with check_mails(client, mailssh, purchase):
            print(purchase.invoice)
            if PYT3:
                parsed = urllib.parse.urlparse(purchase.invoice)
                _, _, purchase, _ = parsed.path.split('/')
                path = '/providers/swish/charge/%s/%s' % (
                    org.swish_provider, purchase)
                url = urllib.parse.urlunparse(
                    (parsed.scheme, parsed.netloc, path, '', '', ''))
                data = {'phone': '1231181189'}
                req = urllib.request.Request(
                    url, json.dumps(data).encode('ascii'),
                    {'Content-Type': 'application/json'})
                response = json.load(urllib.request.urlopen(req))
            else:
                parsed = urlparse.urlparse(purchase.invoice)
                _, _, purchase, _ = parsed.path.split('/')
                path = '/providers/swish/charge/%s/%s' % (
                    org.swish_provider, purchase)
                url = urlparse.urlunparse(
                    (parsed.scheme, parsed.netloc, path, '', '', ''))
                data = {'phone': '1231181189'}
                req = urllib2.Request(
                    url, json.dumps(data),
                    {'Content-Type': 'application/json'})
                response = json.load(urllib2.urlopen(req))
            print(response)
            assert response['status'] == 'CREATED'

            path = '/providers/swish/poll/%s/%s' % (
                org.swish_provider, response['id'])
            if PYT3:
                url = urllib.parse.urlunparse(
                    (parsed.scheme, parsed.netloc, path, '', '', ''))
            else:
                url = urlparse.urlunparse(
                    (parsed.scheme, parsed.netloc, path, '', '', ''))
            for _ in range(20):
                if PYT3:
                    req = urllib.request.Request(url)
                    response = json.load(urllib.request.urlopen(req))
                else:
                    req = urllib2.Request(url)
                    response = json.load(urllib2.urlopen(req))
                print(response)
                if response['status'] == 'PAID':
                    break
                time.sleep(1)
avg_line_length: 39.409756
max_line_length: 89
alphanum_fraction: 0.564179
count_classes: 87
score_classes: 0.010767
count_generators: 188
score_generators: 0.023267
count_decorators: 5,174
score_decorators: 0.640347
count_async_functions: 0
score_async_functions: 0
count_documentation: 1,964
score_documentation: 0.243069

hexsha: 538d31ed98e59299719777fcb1330ca052cef24d
size: 1,455
ext: py
lang: Python
max_stars_repo_path: iot/downstream/fog_processes.py
max_stars_repo_name: SENERGY-Platform/senergy-connector
max_stars_repo_head_hexsha: 7198f6b2ec08b3c09c53755f259a2711921fdcbe
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: iot/downstream/fog_processes.py
max_issues_repo_name: SENERGY-Platform/senergy-connector
max_issues_repo_head_hexsha: 7198f6b2ec08b3c09c53755f259a2711921fdcbe
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: iot/downstream/fog_processes.py
max_forks_repo_name: SENERGY-Platform/senergy-connector
max_forks_repo_head_hexsha: 7198f6b2ec08b3c09c53755f259a2711921fdcbe
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
""" Copyright 2020 InfAI (CC SES) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __all__ = ("Router", ) from ..util import conf, get_logger, mqtt import threading import cc_lib logger = get_logger(__name__.split(".", 1)[-1]) class Router(threading.Thread): def __init__(self, client: cc_lib.client.Client, mqtt_client: mqtt.Client): super().__init__(name="downstream-fog-processes-router", daemon=True) self.__cc = client self.__mqtt = mqtt_client def run(self) -> None: try: while True: envelope = self.__cc.receive_fog_processes() logger.debug(envelope) self.__mqtt.publish( "{}/{}".format(conf.MQTTClient.fog_processes_pub_topic, envelope.sub_topic), envelope.message, qos=conf.MQTTClient.qos ) except Exception as ex: logger.error(ex)
avg_line_length: 31.630435
max_line_length: 96
alphanum_fraction: 0.648797
count_classes: 710
score_classes: 0.487973
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 643
score_documentation: 0.441924

hexsha: 538d3918006c09254385e7ece91e4c11554aa399
size: 462
ext: py
lang: Python
max_stars_repo_path: django_project/user_profile/migrations/0003_order_payment_method.py
max_stars_repo_name: aliyaandabekova/DJANGO_PROJECT
max_stars_repo_head_hexsha: 7b94f80fa56acf936da014aa5d91da79457bf4eb
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: django_project/user_profile/migrations/0003_order_payment_method.py
max_issues_repo_name: aliyaandabekova/DJANGO_PROJECT
max_issues_repo_head_hexsha: 7b94f80fa56acf936da014aa5d91da79457bf4eb
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: django_project/user_profile/migrations/0003_order_payment_method.py
max_forks_repo_name: aliyaandabekova/DJANGO_PROJECT
max_forks_repo_head_hexsha: 7b94f80fa56acf936da014aa5d91da79457bf4eb
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Generated by Django 3.2.3 on 2021-05-27 13:34

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('user_profile', '0002_auto_20210526_1747'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='payment_method',
            field=models.CharField(choices=[('cash', 'cash'), ('wallet', 'wallet')], default='cash', max_length=10),
        ),
    ]
avg_line_length: 24.315789
max_line_length: 116
alphanum_fraction: 0.603896
count_classes: 369
score_classes: 0.798701
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 143
score_documentation: 0.309524

hexsha: 538daa45b22d9013e84ef526505b8753b513ae7f
size: 2,522
ext: py
lang: Python
max_stars_repo_path: day07/test.py
max_stars_repo_name: mpirnat/aoc2016
max_stars_repo_head_hexsha: 1aec59aca01541d0d1c30f85d4668959c82fa35c
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: day07/test.py
max_issues_repo_name: mpirnat/aoc2016
max_issues_repo_head_hexsha: 1aec59aca01541d0d1c30f85d4668959c82fa35c
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: day07/test.py
max_forks_repo_name: mpirnat/aoc2016
max_forks_repo_head_hexsha: 1aec59aca01541d0d1c30f85d4668959c82fa35c
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python

import unittest
from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings
from day07 import supports_tls, count_tls_addresses
from day07 import find_abas, supports_ssl, count_ssl_addresses


class TestFindingABBASequences(unittest.TestCase):

    cases = (
        ('abba', True),
        ('oxyyxo', True),
        ('aaaa', False),
        ('abcd', False),
    )

    def test_finds_abba_sequences(self):
        for text, expected in self.cases:
            self.assertEqual(has_abba(text), expected)


class TestGettingAllowedChunks(unittest.TestCase):

    cases = (
        ('abba[mnop]qrst[abcd]defg', ['abba', 'qrst', 'defg']),
    )

    def test_finds_allowed_substrings(self):
        for text, expected in self.cases:
            self.assertEqual(get_abba_allowed_strings(text), expected)


class TestGettingDisallowedChunks(unittest.TestCase):

    cases = (
        ('abba[mnop]qrst[abcd]defg', ['mnop', 'abcd']),
    )

    def test_finds_disallowed_substrings(self):
        for text, expected in self.cases:
            self.assertEqual(get_abba_disallowed_strings(text), expected)


class TestCheckingTLSAddresses(unittest.TestCase):

    cases = (
        ('abba[mnop]qrst', True),
        ('abcd[bddb]xyyx', False),
        ('aaaa[qwer]tyui', False),
        ('ioxxoj[asdfgh]zxcvbn', True),
    )

    def test_finds_tls_addresses(self):
        for text, expected in self.cases:
            self.assertEqual(supports_tls(text), expected)

    def test_counts_tls_addresses(self):
        data = [x[0] for x in self.cases]
        self.assertEqual(count_tls_addresses(data), 2)


class TestFindingABASequences(unittest.TestCase):

    cases = (
        ('aba', ['aba']),
        ('xyxxyx', ['xyx']),
        ('aaakekeke', ['eke', 'kek']),
        ('zazbzbzbcdb', ['bzb', 'zaz', 'zbz']),
    )

    def test_finds_aba_sequences(self):
        for text, expected in self.cases:
            self.assertEqual(find_abas(text), expected)


class TestCheckingSSLAddresses(unittest.TestCase):

    cases = (
        ('aba[bab]xyz', True),
        ('xyx[xyx]xyx', False),
        ('aaa[kek]eke', True),
        ('zazbz[bzb]cdb', True),
    )

    def test_finds_ssl_addresses(self):
        for text, expected in self.cases:
            self.assertEqual(supports_ssl(text), expected)

    def test_counts_ssl_addresses(self):
        data = [x[0] for x in self.cases]
        self.assertEqual(count_ssl_addresses(data), 3)


if __name__ == '__main__':
    unittest.main()
avg_line_length: 27.714286
max_line_length: 81
alphanum_fraction: 0.635607
count_classes: 2,219
score_classes: 0.879857
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 335
score_documentation: 0.132831

hexsha: 538e1ba9c8f2894b4bdf8950c5cd9a8fa42ed826
size: 4,787
ext: py
lang: Python
max_stars_repo_path: rlnets/PG.py
max_stars_repo_name: HTRPOCODES/HTRPO-v2
max_stars_repo_head_hexsha: 7e085e8077e6caa38d192bbd33b41c49b36ad6a6
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2020-02-24T15:05:20.000Z
max_stars_repo_stars_event_max_datetime: 2021-08-24T02:27:13.000Z
max_issues_repo_path: rlnets/PG.py
max_issues_repo_name: ZhangHanbo/Deep-Reinforcement-Learning-Package
max_issues_repo_head_hexsha: 10ab418fcb4807747ebe162920f3df1e80b80a2a
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: rlnets/PG.py
max_forks_repo_name: ZhangHanbo/Deep-Reinforcement-Learning-Package
max_forks_repo_head_hexsha: 10ab418fcb4807747ebe162920f3df1e80b80a2a
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-04-11T13:08:23.000Z
max_forks_repo_forks_event_max_datetime: 2020-04-11T13:08:23.000Z
content:
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from basenets.MLP import MLP
from basenets.Conv import Conv
from torch import nn


class FCPG_Gaussian(MLP):
    def __init__(self,
                 n_inputfeats,
                 n_actions,
                 sigma,
                 n_hiddens=[30],
                 nonlinear=F.tanh,
                 usebn=False,
                 outactive=None,
                 outscaler=None,
                 initializer="orthogonal",
                 initializer_param={"gain": np.sqrt(2), "last_gain": 0.1}
                 ):
        self.n_actions = n_actions
        super(FCPG_Gaussian, self).__init__(
            n_inputfeats,  # input dim
            n_actions,     # output dim
            n_hiddens,     # hidden unit number list
            nonlinear,
            usebn,
            outactive,
            outscaler,
            initializer,
            initializer_param=initializer_param,
        )
        self.logstd = nn.Parameter(torch.log(sigma * torch.ones(n_actions) + 1e-8))

    def forward(self, x, other_data=None):
        x = MLP.forward(self, x, other_data)
        # for exploration, we need to make sure that the std is not too low.
        logstd = torch.clamp(self.logstd, min=np.log(0.1))
        return x, logstd.expand_as(x), torch.exp(logstd).expand_as(x)

    def cuda(self, device=None):
        self.logstd.cuda()
        return self._apply(lambda t: t.cuda(device))


class FCPG_Softmax(MLP):
    def __init__(self,
                 n_inputfeats,    # input dim
                 n_actions,       # output dim
                 n_hiddens=[10],  # hidden unit number list
                 nonlinear=F.tanh,
                 usebn=False,
                 outactive=F.softmax,
                 outscaler=None,
                 initializer="orthogonal",
                 initializer_param={"gain": np.sqrt(2), "last_gain": 0.1}
                 ):
        self.n_actions = n_actions
        super(FCPG_Softmax, self).__init__(
            n_inputfeats,  # input dim
            n_actions,     # output dim
            n_hiddens,     # hidden unit number list
            nonlinear,
            usebn,
            outactive,
            outscaler,
            initializer,
            initializer_param=initializer_param,
        )

    def forward(self, x, other_data=None):
        x = MLP.forward(self, x, other_data)
        # for exploration, and similar to e-greedy
        x = x + 0.01 / self.n_actions
        x = x / torch.sum(x, dim=-1, keepdim=True).detach()
        return x


class ConvPG_Softmax(Conv):
    def __init__(self,
                 n_inputfeats,  # input dim
                 n_actions,     # output dim
                 k_sizes=[8, 4, 3],
                 channels=[8, 16, 16],
                 strides=[4, 2, 2],
                 fcs=[32, 32, 32],  # hidden unit number list
                 nonlinear=F.relu,
                 usebn=False,
                 outactive=F.softmax,
                 outscaler=None,
                 initializer="xavier",
                 initializer_param={}
                 ):
        self.n_actions = n_actions
        super(ConvPG_Softmax, self).__init__(
            n_inputfeats,
            n_actions,
            k_sizes,
            channels,
            strides,
            fcs,
            nonlinear,
            usebn,
            outactive,
            outscaler,
            initializer,
            initializer_param=initializer_param,
        )

    def forward(self, x, other_data=None):
        x = Conv.forward(self, x, other_data)
        # for exploration, and similar to e-greedy
        x = x + 0.01 / self.n_actions
        x = x / torch.sum(x, dim=-1, keepdim=True).detach()
        return x


# TODO: support multi-layer value function in which action is concat before the final layer
class FCVALUE(MLP):
    def __init__(self,
                 n_inputfeats,
                 n_hiddens=[30],
                 nonlinear=F.tanh,
                 usebn=False,
                 outactive=None,
                 outscaler=None,
                 initializer="orthogonal",
                 initializer_param={"gain": np.sqrt(2), "last_gain": 0.1}
                 ):
        super(FCVALUE, self).__init__(
            n_inputfeats,
            1,
            n_hiddens,
            nonlinear,
            usebn,
            outactive,
            outscaler,
            initializer,
            initializer_param=initializer_param,
        )
avg_line_length: 34.192857
max_line_length: 91
alphanum_fraction: 0.48569
count_classes: 4,505
score_classes: 0.94109
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 553
score_documentation: 0.115521
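A minimal forward-pass sketch for FCPG_Softmax from the record above; it assumes the repo's basenets.MLP builds a plain fully connected network accepting these constructor arguments, as the class definition implies.

import torch

policy = FCPG_Softmax(n_inputfeats=4, n_actions=2)  # 4-dim observations, 2 actions
obs = torch.randn(8, 4)                             # batch of 8 observations
probs = policy(obs)                                 # action probabilities, shape (8, 2)
# The forward pass renormalises after the exploration bonus, so rows sum to 1.
assert torch.allclose(probs.sum(dim=-1), torch.ones(8))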

hexsha: 538e7c69b579d9dbd9a344fd3df293fc4cfca562
size: 10,057
ext: py
lang: Python
max_stars_repo_path: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
max_stars_repo_name: m4rkl1u/tensorflow
max_stars_repo_head_hexsha: 90a8825c7ae9719e8969d45040b4155b0e7de130
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2018-12-05T10:58:40.000Z
max_stars_repo_stars_event_max_datetime: 2019-01-24T11:36:01.000Z
max_issues_repo_path: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
max_issues_repo_name: m4rkl1u/tensorflow
max_issues_repo_head_hexsha: 90a8825c7ae9719e8969d45040b4155b0e7de130
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
max_forks_repo_name: m4rkl1u/tensorflow
max_forks_repo_head_hexsha: 90a8825c7ae9719e8969d45040b4155b0e7de130
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2019-02-26T16:21:15.000Z
max_forks_repo_forks_event_max_datetime: 2020-12-04T17:48:17.000Z
content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test

# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
    sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access


class SparseTensorsMapTest(test.TestCase):

  def _SparseTensorPlaceholder(self, dtype=None):
    if dtype is None:
      dtype = dtypes.int32
    return sparse_tensor_lib.SparseTensor(
        array_ops.placeholder(dtypes.int64),
        array_ops.placeholder(dtype),
        array_ops.placeholder(dtypes.int64))

  def _SparseTensorValue_5x6(self, permutation):
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
                    [3, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)

    ind = ind[permutation]
    val = val[permutation]

    shape = np.array([5, 6]).astype(np.int64)
    return sparse_tensor_lib.SparseTensorValue(ind, val, shape)

  def _SparseTensorValue_3x4(self, permutation):
    ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
                    [2, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)

    ind = ind[permutation]
    val = val[permutation]

    shape = np.array([3, 4]).astype(np.int64)
    return sparse_tensor_lib.SparseTensorValue(ind, val, shape)

  def _SparseTensorValue_1x1x1(self):
    ind = np.array([[0, 0, 0]]).astype(np.int64)
    val = np.array([0]).astype(np.int32)
    shape = np.array([3, 4, 5]).astype(np.int64)
    return sparse_tensor_lib.SparseTensorValue(ind, val, shape)

  def testAddTakeMany(self):
    with self.session(graph=ops.Graph(), use_gpu=False) as sess:
      sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
      sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
      handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
      handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
      self.assertEqual(handle0.get_shape(), ())
      handles_concat = array_ops.stack([handle0, handle1])

      sp_out = take_many_sparse_from_tensors_map(
          sparse_map_op=handle0.op, sparse_handles=handles_concat)

      combined_indices, combined_values, combined_shape = self.evaluate(sp_out)

      self.assertAllEqual(combined_indices[:6, 0], [0] * 6)  # minibatch 0
      self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
      self.assertAllEqual(combined_indices[6:, 0], [1] * 6)  # minibatch 1
      self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
      self.assertAllEqual(combined_values[:6], sp_input0[1])
      self.assertAllEqual(combined_values[6:], sp_input1[1])
      self.assertAllEqual(combined_shape, [2, 5, 6])

  def testFeedAddTakeMany(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      input1_val = self._SparseTensorValue_3x4(np.arange(6))
      handle = add_sparse_to_tensors_map(sp_input)

      handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
      handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})

      sparse_handles = ops.convert_to_tensor(
          [handle0_value, handle1_value], dtype=dtypes.int64)

      sp_roundtrip = take_many_sparse_from_tensors_map(
          sparse_map_op=handle.op, sparse_handles=sparse_handles)

      combined_indices, combined_values, combined_shape = self.evaluate(
          sp_roundtrip)

      self.assertAllEqual(combined_indices[:6, 0], [0] * 6)  # minibatch 0
      self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
      self.assertAllEqual(combined_indices[6:, 0], [1] * 6)  # minibatch 1
      self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
      self.assertAllEqual(combined_values[:6], input0_val[1])
      self.assertAllEqual(combined_values[6:], input1_val[1])
      self.assertAllEqual(combined_shape, [2, 5, 6])

  def testAddManyTakeManyRoundTrip(self):
    with self.session(use_gpu=False) as sess:
      # N == 4 because shape_value == [4, 5]
      indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
      values_value = np.array([b"a", b"b", b"c"])
      shape_value = np.array([4, 5], dtype=np.int64)
      sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
      handles = add_many_sparse_to_tensors_map(sparse_tensor)
      roundtrip = take_many_sparse_from_tensors_map(
          sparse_map_op=handles.op, sparse_handles=handles)
      handles_value, roundtrip_value = sess.run(
          [handles, roundtrip],
          feed_dict={
              sparse_tensor.indices: indices_value,
              sparse_tensor.values: values_value,
              sparse_tensor.dense_shape: shape_value
          })
      self.assertEqual(handles_value.shape, (4,))
      self.assertAllEqual(roundtrip_value.indices, indices_value)
      self.assertAllEqual(roundtrip_value.values, values_value)
      self.assertAllEqual(roundtrip_value.dense_shape, shape_value)

  def testDeserializeFailsInconsistentRank(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input0_val = self._SparseTensorValue_5x6(np.arange(6))
      input1_val = self._SparseTensorValue_1x1x1()
      handle = add_sparse_to_tensors_map(sp_input)

      handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
      handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
      handle_concat = ops.convert_to_tensor(
          [handle0_value, handle1_value], dtype=dtypes.int64)

      sp_roundtrip = take_many_sparse_from_tensors_map(
          sparse_map_op=handle.op, sparse_handles=handle_concat)

      with self.assertRaisesOpError(
          r"Inconsistent rank across SparseTensors: rank prior to "
          r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
        self.evaluate(sp_roundtrip)

  def testTakeManyFailsWrongInputOp(self):
    with self.session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      handle = add_sparse_to_tensors_map(input_val)
      handle_value = self.evaluate(handle)
      bad_handle = handle_value + 10
      sp_roundtrip = take_many_sparse_from_tensors_map(
          sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])

      with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
        self.evaluate(sp_roundtrip)


class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):

  def benchmarkVeryLarge2DFloatSparseTensor(self):
    np.random.seed(127)
    num_elements = 10000
    batch_size = 64
    indices_batch = np.random.randint(
        batch_size, size=num_elements, dtype=np.int64)
    indices_value = np.arange(num_elements, dtype=np.int64)
    indices = np.asarray(
        sorted(zip(indices_batch, indices_value)), dtype=np.int64)
    values = ["feature_value_for_embedding_lookup"] * num_elements
    shape = np.asarray([batch_size, num_elements], dtype=np.int64)
    with session.Session(config=benchmark.benchmark_config()) as sess:
      with ops.device("/cpu:0"):
        indices = variables.Variable(indices)
        values = variables.Variable(values)
        shape = variables.Variable(shape)
        st = sparse_tensor_lib.SparseTensor(indices, values, shape)

        st_handles = add_many_sparse_to_tensors_map(st)
        st_roundtrip = take_many_sparse_from_tensors_map(
            sparse_map_op=st_handles.op, sparse_handles=st_handles)
        st_roundtrip_op = st_roundtrip.values.op

        st_serialized = sparse_ops.serialize_many_sparse(st)
        st_deserialized = sparse_ops.deserialize_many_sparse(
            st_serialized, dtype=values.dtype)
        st_deserialized_op = st_deserialized.values.op

        variables.global_variables_initializer().run()

        st_roundtrip_values = self.evaluate(st_roundtrip)
        st_deserialized_values = self.evaluate(st_deserialized)
        np.testing.assert_equal(st_roundtrip_values.values,
                                st_deserialized_values.values)
        np.testing.assert_equal(st_roundtrip_values.indices,
                                st_deserialized_values.indices)
        np.testing.assert_equal(st_roundtrip_values.dense_shape,
                                st_deserialized_values.dense_shape)

        self.run_op_benchmark(
            sess,
            st_roundtrip_op,
            min_iters=2000,
            name="benchmark_very_large_2d_float_st_tensor_maps")
        self.run_op_benchmark(
            sess,
            st_deserialized_op,
            min_iters=2000,
            name="benchmark_very_large_2d_float_st_serialization")


if __name__ == "__main__":
  test.main()
avg_line_length: 42.079498
max_line_length: 80
alphanum_fraction: 0.704484
count_classes: 8,415
score_classes: 0.836731
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 1,185
score_documentation: 0.117828

hexsha: 538ed9ab23e9e71ee700c89f6a7e07b38fae61a0
size: 50,485
ext: py
lang: Python
max_stars_repo_path: cloudroast/objectstorage/smoke/object_smoke.py
max_stars_repo_name: RULCSoft/cloudroast
max_stars_repo_head_hexsha: 30f0e64672676c3f90b4a582fe90fac6621475b3
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cloudroast/objectstorage/smoke/object_smoke.py
max_issues_repo_name: RULCSoft/cloudroast
max_issues_repo_head_hexsha: 30f0e64672676c3f90b4a582fe90fac6621475b3
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cloudroast/objectstorage/smoke/object_smoke.py
max_forks_repo_name: RULCSoft/cloudroast
max_forks_repo_head_hexsha: 30f0e64672676c3f90b4a582fe90fac6621475b3
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
""" Copyright 2015 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import calendar import time import zlib from hashlib import md5 import unittest from cafe.drivers.unittest.decorators import ( DataDrivenFixture, data_driven_test) from cloudcafe.objectstorage.objectstorage_api.common.constants import \ Constants from cloudroast.objectstorage.fixtures import ObjectStorageFixture from cloudroast.objectstorage.generators import ( ObjectDatasetList, CONTENT_TYPES) CONTAINER_DESCRIPTOR = 'object_smoke_test' STATUS_CODE_MSG = ('{method} expected status code {expected}' ' received status code {received}') @DataDrivenFixture class ObjectSmokeTest(ObjectStorageFixture): @classmethod def setUpClass(cls): super(ObjectSmokeTest, cls).setUpClass() cls.default_obj_name = Constants.VALID_OBJECT_NAME_WITH_UNICODE @staticmethod def generate_chunk_data(): for i in range(10): yield "Test chunk %s\r\n" % i @data_driven_test(ObjectDatasetList()) def ddtest_object_retrieval_with_valid_object_name( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) response = self.client.get_object(container_name, object_name) method = 'object creation with valid object name' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo'])) def ddtest_object_retrieval_with_if_match( self, object_type, generate_object): """ Bug filed for dlo/slo support of If-match Header: https://bugs.launchpad.net/swift/+bug/1279076 """ container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name obj_info = generate_object(container_name, object_name) headers = {'If-Match': obj_info.get('etag')} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object retrieval with if match header' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo'])) def ddtest_object_retrieval_with_if_none_match( self, object_type, generate_object): """ Bug filed for dlo/slo support of If-match Header: https://bugs.launchpad.net/swift/+bug/1279076 """ container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_info = generate_object(container_name, object_name) headers = {'If-None-Match': 'grok'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object retrieval with if none match header' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) headers = {'If-None-Match': 
object_info.get('etag')} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object should be flagged as not modified' expected = 304 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_retrieval_with_if_modified_since( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'If-Modified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object retrieval with if modified since header (past date)' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_not_modified_with_if_modified_since( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'If-Modified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object retrieval with if modified since header (future date)' expected = 304 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_retrieval_with_if_unmodified_since( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'object retrieval with if unmodified since header' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_retrieval_fails_with_if_unmodified_since( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = ('object retrieval precondition fail with if unmodified' ' since header') expected = 412 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_partial_object_retrieval_with_start_range( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'Range': 'bytes=5-'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'partial object retrieval with 
start range' expected = 206 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_partial_object_retrieval_with_end_range( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'Range': 'bytes=-4'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'partial object retrieval with end range' expected = 206 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_partial_object_retrieval_with_range( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'Range': 'bytes=5-8'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'partial object retrieval with start and end range' expected = 206 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_partial_object_retrieval_with_complete_range( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'Range': 'bytes=99-0'} response = self.client.get_object( container_name, self.default_obj_name, headers=headers) method = 'partial object retrieval with complete range' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_creation_with_valid_object_name( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_info = generate_object(container_name, object_name) response = object_info.get('response') method = 'object creation with valid object name' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object( container_name, self.default_obj_name) method = 'object retrieval' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response_md5 = md5(response.content).hexdigest() self.assertEqual( object_info.get('md5'), response_md5, msg='should return identical object') @data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo'])) def ddtest_object_update_with_valid_object_name( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) updated_object_data = 'Updated test file data' updated_content_length = str(len(updated_object_data)) headers = {'Content-Length': updated_content_length, 
'Content-Type': CONTENT_TYPES.get('text')} response = self.client.create_object( container_name, self.default_obj_name, headers=headers, data=updated_object_data) method = 'object update with valid object name' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_creation_with_etag( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_info = generate_object(container_name, object_name) response = object_info.get('response') method = 'object creation with etag header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) response = self.client.get_object( container_name, self.default_obj_name) self.assertIn( 'etag', response.headers, msg="Etag header was set") if object_type == 'standard': expected = object_info.get('etag') else: expected = '"{0}"'.format(object_info.get('etag')) received = response.headers.get('etag') self.assertEqual( expected, received, msg='object created with Etag header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo'])) def test_object_creation_with_uppercase_etag(self): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_data = "valid_data" data_md5 = md5(object_data).hexdigest() upper_etag = data_md5.upper() headers = {"ETag": upper_etag} create_response = self.client.create_object(container_name, object_name, data=object_data, headers=headers) method = 'object creation with uppercase etag header' expected = 201 received = create_response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) object_response = self.client.get_object( container_name, self.default_obj_name) self.assertIn( 'etag', object_response.headers, msg="Etag header was set") expected = data_md5 received = object_response.headers.get('etag') self.assertEqual( expected, received, msg='object created with Etag header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_allow_credentials( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'Access-Control-Allow-Credentials': 'true'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Allow-Credentials header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Allow-Credentials', response.headers, msg="Access-Control-Allow-Credentials header was set") expected = 'true' received = response.headers.get('Access-Control-Allow-Credentials') 
self.assertEqual( expected, received, msg='object created with Access-Control-Allow-Credentials header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_allow_methods( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = { 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Allow-Methods header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Allow-Methods', response.headers, msg="Access-Control-Allow-Methods header was set") expected = 'GET, POST, OPTIONS' received = response.headers.get('Access-Control-Allow-Methods') self.assertEqual( expected, received, msg='object created with Access-Control-Allow-Methods header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_allow_origin( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = { 'Access-Control-Allow-Origin': 'http://example.com'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Allow-Origin header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Allow-Origin', response.headers, msg="Access-Control-Allow-Origin header was set") expected = 'http://example.com' received = response.headers.get('Access-Control-Allow-Origin') self.assertEqual( expected, received, msg='object created with Access-Control-Allow-Origin header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_expose_headers( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'Access-Control-Expose-Headers': 'X-Foo-Header'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Expose-Headers header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Expose-Headers', response.headers, msg="Access-Control-Expose-Headers header was set") expected = 'X-Foo-Header' received = 
response.headers.get('Access-Control-Expose-Headers') self.assertEqual( expected, received, msg='object created with Access-Control-Expose-Headers header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_controle_max_age( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'Access-Control-Max-Age': '5'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Max-Age header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Max-Age', response.headers, msg="Access-Control-Max-Age header was set") expected = '5' received = response.headers.get('Access-Control-Max-Age') self.assertEqual( expected, received, msg='object created with Access-Control-Max-Age header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_request_headers( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'Access-Control-Request-Headers': 'x-requested-with'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Request-Headers header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Request-Headers', response.headers, msg="Access-Control-Request-Headers header was set") expected = 'x-requested-with' received = response.headers.get('Access-Control-Request-Headers') self.assertEqual( expected, received, msg='object created with Access-Control-Request-Headers header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_creation_with_access_control_request_method( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'Access-Control-Request-Method': 'GET'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with Access-Control-Request-Method header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Access-Control-Request-Method', response.headers, msg="Access-Control-Request-Method header was set") expected = 'GET' received = 
response.headers.get('Access-Control-Request-Method') self.assertEqual( expected, received, msg='object created with Access-Control-Request-Method header' ' value expected: {0} received: {1}'.format( expected, received)) @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object-cors') def ddtest_object_retrieval_with_origin( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name headers = {'access-control-allow-origin': 'http://example.com', 'access-control-expose-headers': 'X-Trans-Id'} generate_object(container_name, object_name, headers=headers) headers = {'Origin': 'http://example.com'} response = self.client.get_object_metadata( container_name, object_name, headers=headers) self.assertIn( 'access-control-expose-headers', response.headers, msg="access-control-expose-headers header should be set") self.assertIn( 'access-control-allow-origin', response.headers, msg="access-control-allow-origin header should be set") expected = 'http://example.com' received = response.headers.get('access-control-allow-origin') self.assertEqual( expected, received, msg='access-control-allow-origin header should reflect origin' ' expected: {0} received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo'])) def ddtest_object_creation_with_file_compression( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name def object_data_op(data, extra_data): data = zlib.compress(data) return (data, extra_data) object_headers = {'Content-Encoding': 'gzip'} object_info = generate_object(container_name, object_name, data_op=object_data_op, headers=object_headers) response = object_info.get('response') method = 'object creation with Content-Encoding header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Content-Encoding', response.headers, msg="Content-Encoding header was set") expected = 'gzip' received = response.headers.get('Content-Encoding') self.assertEqual( expected, received, msg='object created with Content-Encoding header value' ' expected: {0} received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def ddtest_object_creation_with_content_disposition( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = { 'Content-Disposition': 'attachment; filename=testdata.txt'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with content disposition header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'Content-Disposition', response.headers, msg="Content-Disposition header was set") expected = 'attachment; filename=testdata.txt' received = response.headers.get('Content-Disposition') self.assertEqual( expected, received, msg='object created with Content-Disposition header value' ' expected: {0} 
received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def ddtest_object_creation_with_x_delete_at( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name start_time = calendar.timegm(time.gmtime()) future_time = str(int(start_time + 60)) object_headers = {'X-Delete-At': future_time} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with X-Delete-At header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'X-Delete-At', response.headers, msg="X-Delete-At header was set") expected = future_time received = response.headers.get('X-Delete-At') self.assertEqual( expected, received, msg='object created with X-Delete-At header value' ' expected: {0} received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def ddtest_object_creation_with_delete_after( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name object_headers = {'X-Delete-After': '60'} object_info = generate_object(container_name, object_name, headers=object_headers) response = object_info.get('response') method = 'object creation with X-Delete-After header' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'X-Delete-At', response.headers, msg="X-Delete-At header was set") @data_driven_test(ObjectDatasetList()) @ObjectStorageFixture.required_features('object_versioning') def ddtest_versioned_container_creation_with_valid_data( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_history_container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) headers = {'X-Versions-Location': object_history_container_name} self.client.set_container_metadata(container_name, headers=headers) # list objects in non-current container response = self.client.list_objects( object_history_container_name) method = 'list on empty versioned container' expected = 204 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) # Create an object (version 1) object_name = self.default_obj_name ver1_info = generate_object(container_name, object_name) response = ver1_info.get('response') method = 'object version one creation' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) # Update an object (version 2) object_name = self.default_obj_name ver2_info = generate_object(container_name, object_name) response = ver2_info.get('response') method = 'update version one object' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = 
self.client.list_objects(object_history_container_name) method = 'list on versioned container' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @unittest.skip('Problem with this tests assertion, needs review') @data_driven_test(ObjectDatasetList()) def ddtest_put_copy_object(self, object_type, generate_object): src_container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) dest_container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) src_object_name = '{0}_source'.format(self.default_obj_name) generate_object(src_container_name, src_object_name) dest_obj_name = '{0}_destination'.format(self.default_obj_name) source = '/{0}/{1}'.format(src_container_name, src_object_name) hdrs = {'X-Copy-From': source, 'Content-Length': '0'} response = self.client.copy_object( dest_container_name, dest_obj_name, headers=hdrs) method = 'put copy object' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object( dest_container_name, dest_obj_name) method = 'copied object retrieval' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_copy_object(self, object_type, generate_object): src_container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) dest_container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) src_object_name = '{0}_source'.format(self.default_obj_name) generate_object(src_container_name, src_object_name) dest_object_name = '{0}_destination'.format(self.default_obj_name) dest = '/{0}/{1}'.format(dest_container_name, dest_object_name) headers = {'Destination': dest} response = self.client.copy_object( src_container_name, src_object_name, headers=headers) method = 'copy object' expected = 201 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object( dest_container_name, dest_object_name) method = 'copied object retrieval' expected = 200 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_object_deletion_with_valid_object( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) response = self.client.delete_object( container_name, object_name) method = 'delete object' expected = 204 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object( container_name, self.default_obj_name) method = 'object retrieval' expected = 404 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) @data_driven_test(ObjectDatasetList()) def ddtest_obj_metadata_update_with_object_possessing_metadata( self, object_type, generate_object): container_name = 
self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name, headers={'X-Object-Meta-Grok': 'Drok'}) response = self.client.get_object_metadata( container_name, object_name) self.assertIn( 'X-Object-Meta-Grok', response.headers, msg="object not created with X-Object-Meta-Grok header") expected = 'Drok' received = response.headers.get('X-Object-Meta-Grok') self.assertEqual( expected, received, msg='object created with X-Object-Meta-Grok header value' ' expected: {0} received: {1}'.format(expected, received)) headers = {'X-Object-Meta-Foo': 'Bar'} response = self.client.set_object_metadata( container_name, self.default_obj_name, headers=headers) method = 'set object metadata' expected = 202 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'X-Object-Meta-Foo', response.headers, msg="object updated with X-Object-Meta-Foo header") expected = 'Bar' received = response.headers.get('X-Object-Meta-Foo') self.assertEqual( expected, received, msg='object X-Object-Meta-Foo header value expected: {0}' ' received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def ddtest_obj_metadata_update(self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object_name = self.default_obj_name generate_object(container_name, object_name) headers = {'X-Object-Meta-Grok': 'Drok'} response = self.client.set_object_metadata( container_name, object_name, headers=headers) method = 'set object metadata X-Object-Meta-Grok: Drok' expected = 202 received = response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) response = self.client.get_object_metadata( container_name, self.default_obj_name) self.assertIn( 'X-Object-Meta-Grok', response.headers, msg="object updated with X-Object-Meta-Grok header") expected = 'Drok' received = response.headers.get('X-Object-Meta-Grok') self.assertEqual( expected, received, msg='object X-Object-Meta-Grok header value expected: {0}' ' received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def ddtest_content_type_not_detected_without_detect_content_type_header( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object1_name = 'object1.txt' object1_headers = {'Content-Type': 'application/x-www-form-urlencoded'} generate_object(container_name, object1_name, headers=object1_headers) object2_name = 'object2.txt' object2_headers = {'X-Detect-Content-Type': False, 'Content-Type': 'application/x-www-form-urlencoded'} generate_object(container_name, object2_name, headers=object2_headers) response = self.client.get_object( container_name, object1_name) expected = 'application/x-www-form-urlencoded' received = response.headers.get('content-type') self.assertEqual( expected, received, msg='object created should have content type: {0}' ' received: {1}'.format(expected, received)) response = self.client.get_object( container_name, object2_name) self.assertEqual( expected, received, msg='object created should have content type: {0}' ' received: {1}'.format(expected, received)) @data_driven_test(ObjectDatasetList()) def 
ddtest_content_type_detected_with_detect_content_type( self, object_type, generate_object): container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) object1_name = 'object1.txt' object1_headers = {'X-Detect-Content-Type': True, 'Content-Type': 'application/x-www-form-urlencoded'} generate_object(container_name, object1_name, headers=object1_headers) response = self.client.get_object( container_name, object1_name) expected = 'text/plain' received = response.headers.get('content-type') self.assertEqual( expected, received, msg='object created should have content type: {0}' ' received: {1}'.format(expected, received)) object2_name = 'object2.txt' object2_headers = {'X-Detect-Content-Type': True} generate_object(container_name, object2_name, headers=object2_headers) response = self.client.get_object( container_name, object2_name) expected = 'text/plain' received = response.headers.get('content-type') self.assertEqual( expected, received, msg='object created should have content type: {0}' ' received: {1}'.format(expected, received)) def test_object_creation_via_chunked_transfer(self): """ Scenario: Create an object using chunked transfer encoding. Expected Results: Return a 201 status code and a single object should be created. """ container_name = self.create_temp_container( descriptor=CONTAINER_DESCRIPTOR) headers = {"Transfer-Encoding": "chunked"} create_response = self.client.create_object( container_name, self.default_obj_name, headers=headers, data=self.generate_chunk_data()) method = 'Object creation via chunked transfer' expected = 201 received = create_response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received))) object_response = self.client.get_object(container_name, self.default_obj_name) method = 'Object retrieval' expected = 200 received = object_response.status_code self.assertEqual( expected, received, msg=STATUS_CODE_MSG.format( method=method, expected=expected, received=str(received)))
34.273591
79
0.605051
49,333
0.977181
96
0.001902
49,352
0.977558
0
0
8,187
0.162167
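The final test in the record above drives object creation with Transfer-Encoding: chunked. As a minimal, hedged sketch of the same mechanism outside the CAFE fixtures: with the requests library, passing a generator as data (so no Content-Length can be computed up front) makes the body go out chunked. The URL, token header, and chunk sizes below are illustrative placeholders, not values from the suite.

import requests  # assumed available; any HTTP client that accepts generator bodies works

def generate_chunk_data(chunks=5, size=1024):
    """Yield fixed-size byte chunks; each yield becomes one HTTP chunk on the wire."""
    for i in range(chunks):
        yield bytes([i % 256]) * size

# Hypothetical endpoint: requests switches to chunked transfer because the
# body is a generator, mirroring the test's headers={"Transfer-Encoding": "chunked"}.
# resp = requests.put(
#     "https://storage.example.com/v1/AUTH_test/my-container/my-object",
#     headers={"X-Auth-Token": "<token>"},
#     data=generate_chunk_data(),
# )
# assert resp.status_code == 201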
538f0d9adeec1b1a9f1d17d56827c035463ad1c5
1,412
py
Python
ceph/tests/conftest.py
remicalixte/integrations-core
b115e18c52820fe1a92495f538fdc14ddf83cfe1
[ "BSD-3-Clause" ]
1
2021-03-24T13:00:14.000Z
2021-03-24T13:00:14.000Z
ceph/tests/conftest.py
remicalixte/integrations-core
b115e18c52820fe1a92495f538fdc14ddf83cfe1
[ "BSD-3-Clause" ]
null
null
null
ceph/tests/conftest.py
remicalixte/integrations-core
b115e18c52820fe1a92495f538fdc14ddf83cfe1
[ "BSD-3-Clause" ]
null
null
null
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os import pytest from datadog_checks.dev import docker_run from datadog_checks.dev.conditions import CheckDockerLogs from datadog_checks.dev.subprocess import run_command from .common import BASIC_CONFIG, HERE E2E_METADATA = { 'start_commands': [ 'apt-get update', 'apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y docker.io', ], 'docker_volumes': ['/var/run/docker.sock:/var/run/docker.sock'], } @pytest.fixture(scope="session") def dd_environment(): compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml') # We need a custom condition to wait a bit longer with docker_run( compose_file=compose_file, conditions=[ CheckDockerLogs(compose_file, 'spawning ceph --cluster ceph -w', wait=5), CheckDockerLogs(compose_file, 'Running on http://0.0.0.0:5000/'), ], ): # Clean the disk space warning run_command( ['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*', 'injectargs', '--mon_data_avail_warn', '5'] ) # Wait a bit for the change to take effect condition = CheckDockerLogs(compose_file, 'Cluster is now healthy') condition() yield BASIC_CONFIG, E2E_METADATA
32.837209
115
0.659348
0
0
780
0.552408
813
0.575779
0
0
642
0.454674
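The fixture in the record above gates on CheckDockerLogs conditions before yielding the environment. As a loose, generic sketch of the same poll-until-ready idea (this is not the datadog_checks.dev API, just the pattern):

import time

def wait_for(condition, timeout=30.0, interval=1.0):
    """Poll a zero-argument callable until it returns True or time runs out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise TimeoutError("condition not met within {:.0f}s".format(timeout))

# e.g. wait_for(lambda: "Cluster is now healthy" in open("ceph.log").read(), timeout=60)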
538f4e290b42893ff7be5c3f3a19a555501eb1e6
3,025
py
Python
federation/hostmeta/fetchers.py
weex/federation
01357aacb04b076442ce5f803a0fc65df5a74d09
[ "BSD-3-Clause" ]
93
2016-11-26T10:52:13.000Z
2022-01-15T20:07:35.000Z
federation/hostmeta/fetchers.py
weex/federation
01357aacb04b076442ce5f803a0fc65df5a74d09
[ "BSD-3-Clause" ]
75
2016-10-18T10:15:44.000Z
2019-10-05T22:16:32.000Z
federation/hostmeta/fetchers.py
weex/federation
01357aacb04b076442ce5f803a0fc65df5a74d09
[ "BSD-3-Clause" ]
9
2017-04-08T08:03:45.000Z
2021-09-13T22:00:48.000Z
import json from typing import Dict, Optional import requests from federation.hostmeta.parsers import ( parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document) from federation.utils.network import fetch_document HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1 def fetch_mastodon_document(host): doc, status_code, error = fetch_document(host=host, path='/api/v1/instance') if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return return parse_mastodon_document(doc, host) def fetch_matrix_document(host: str) -> Optional[Dict]: doc, status_code, error = fetch_document(host=host, path='/_matrix/federation/v1/version') if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return return parse_matrix_document(doc, host) def fetch_misskey_document(host: str, mastodon_document: Dict=None) -> Optional[Dict]: try: response = requests.post(f'https://{host}/api/meta') # ¯\_(ツ)_/¯ except Exception: return try: doc = response.json() except json.JSONDecodeError: return if response.status_code == 200: return parse_misskey_document(doc, host, mastodon_document=mastodon_document) def fetch_nodeinfo_document(host): doc, status_code, error = fetch_document(host=host, path='/.well-known/nodeinfo') if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return url, highest_version = '', 0.0 if doc.get('0'): # Buggy NodeInfo from certain old Hubzilla versions url = doc.get('0', {}).get('href') elif isinstance(doc.get('links'), dict): # Another buggy NodeInfo from certain old Hubzilla versions url = doc.get('links').get('href') else: for link in doc.get('links'): version = float(link.get('rel').split('/')[-1]) if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION: url, highest_version = link.get('href'), version if not url: return doc, status_code, error = fetch_document(url=url) if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return return parse_nodeinfo_document(doc, host) def fetch_nodeinfo2_document(host): doc, status_code, error = fetch_document(host=host, path='/.well-known/x-nodeinfo2') if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return return parse_nodeinfo2_document(doc, host) def fetch_statisticsjson_document(host): doc, status_code, error = fetch_document(host=host, path='/statistics.json') if not doc: return try: doc = json.loads(doc) except json.JSONDecodeError: return return parse_statisticsjson_document(doc, host)
28.809524
110
0.668099
0
0
0
0
0
0
0
0
323
0.106636
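The trickiest branch in fetch_nodeinfo_document above is the version pick over the advertised links: take the highest schema version that does not exceed HIGHEST_SUPPORTED_NODEINFO_VERSION. A self-contained sketch with a made-up payload (hosts and hrefs are placeholders):

HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1

links = [  # example shape of the /.well-known/nodeinfo "links" list
    {"rel": "http://nodeinfo.diaspora.software/ns/schema/1.0", "href": "https://h/nodeinfo/1.0"},
    {"rel": "http://nodeinfo.diaspora.software/ns/schema/2.0", "href": "https://h/nodeinfo/2.0"},
    {"rel": "http://nodeinfo.diaspora.software/ns/schema/2.2", "href": "https://h/nodeinfo/2.2"},
]

url, highest_version = "", 0.0
for link in links:
    version = float(link["rel"].split("/")[-1])  # version is the last path segment
    if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION:
        url, highest_version = link["href"], version

print(url)  # https://h/nodeinfo/2.0 -- 2.2 is skipped as unsupported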
538fd4b4cff424f1346a608bba50033518ef9ea5
2,582
py
Python
features/analysis_features.py
iag0g0mes/t2_fis_driving_style
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
[ "Apache-2.0" ]
5
2021-04-20T16:03:37.000Z
2022-03-11T00:13:11.000Z
features/analysis_features.py
iag0g0mes/t2_fis_driving_style
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
[ "Apache-2.0" ]
1
2021-04-21T02:35:38.000Z
2021-04-21T12:54:14.000Z
features/analysis_features.py
iag0g0mes/t2_fis_driving_style
7f62ac3e67e65e7bd1273a2f845eb05820e95b70
[ "Apache-2.0" ]
null
null
null
import numpy as np from typing import Any, Dict, List, Tuple, NoReturn import argparse import os def parse_arguments() -> Any: """Parse command line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( "--data_dir", default="", type=str, help="Directory where the features (npy files) are saved", ) parser.add_argument("--mode", required=True, type=str, help="train/val/test/sample", choices=['train', 'test', 'val','sample']) parser.add_argument("--obs_len", default=2, type=int, help="Observed length of the trajectory in seconds", choices=[1,2,3,4,5]) parser.add_argument("--filter", default='ekf', type=str, help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol", choices=['ekf', 'none', 'ekf-savgol', 'savgol']) return parser.parse_args() def stats(traj:np.ndarray) -> NoReturn: #central tendency : mean #dispersion : std #bounds : min max #quantile : 0.25, 0.5, 0.75 labels = ['mean_v', 'mean_acc', 'mean_deac', 'std_jy'] for i, l in zip(range(0, traj.shape[1]), labels): t = traj[:, i] _mean = round(np.mean(t),2) _std = round(np.std(t),2) _min = round(np.min(t),2) _max = round(np.max(t),2) _q25 = round(np.quantile(t, 0.25),2) _q50 = round(np.quantile(t, 0.5),2) _q75 = round(np.quantile(t, 0.75),2) print (f'Feature: {l}') print ('\tmean:{} | std:{} | min:{} | max:{} | q25:{} | q50:{} | q75:{}'.format(_mean, _std, _min, _max, _q25, _q50, _q75)) if __name__== '__main__': #_filters = ['none', 'ekf', 'savgol', 'ekf-savgol'] #_modes = ['train', 'val', 'test', 'sample'] #_obs_len = [2,5] #seg = _obs_len[0] #mode = _modes[3] #filter_name = _filters[0] args = parse_arguments() if args.mode == 'test': args.obs_len = 2 assert os.path.exists(args.data_dir),\ f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})' data_file = 'features_{}_{}s_{}.npy'.format(args.mode, args.obs_len, args.filter) assert os.path.exists(os.path.join(args.data_dir, data_file)),\ f'[Analysis][main][ERROR] data_file not found!({data_file})' print ('[Analysis] loading dataset....') # (m, 4) # [mean_v, mean_acc, mean_deac, std_jy] data = np.load(os.path.join(args.data_dir,data_file)) print ('[Analysis] mode:{} | filter:{} | obs_len:{}'.format(args.mode, args.filter, args.obs_len)) print ('[Analysis] data shape:{}'.format(data.shape)) print ('[Analysis] stats:') stats(data)
23.907407
88
0.606119
0
0
0
0
0
0
0
0
1,055
0.408598
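A quick usage sketch for the stats() helper above: it expects an (m, 4) feature array of [mean_v, mean_acc, mean_deac, std_jy] rows. Random data stands in here for a real features_*.npy file.

import numpy as np

rng = np.random.default_rng(0)
traj = rng.random((100, 4))  # stand-in for np.load(".../features_train_2s_ekf.npy")

t = traj[:, 0]  # one feature column, as in the loop inside stats()
print(round(float(np.mean(t)), 2), round(float(np.std(t)), 2))
print(np.quantile(t, [0.25, 0.5, 0.75]).round(2))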
538fed081c6f7c33b40d25f1c7cac9cd82761148
2,916
py
Python
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import freezegun import mock import oslo_messaging as om from watcher.common import rpc from watcher import notifications from watcher.objects import service as w_service from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionPlanNotification(base.DbTestCase): def setUp(self): super(TestActionPlanNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier def test_service_failed(self): service = utils.get_test_service(mock.Mock(), created_at=datetime.datetime.utcnow()) state = w_service.ServiceStatus.FAILED notifications.service.send_service_update(mock.MagicMock(), service, state, host='node0') notification = self.m_notifier.warning.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual({ 'watcher_object.data': { 'last_seen_up': '2016-09-22T08:32:06Z', 'name': 'watcher-service', 'sevice_host': 'controller', 'status_update': { 'watcher_object.data': { 'old_state': 'ACTIVE', 'state': 'FAILED' }, 'watcher_object.name': 'ServiceStatusUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' } }, 'watcher_object.name': 'ServiceUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, payload )
37.384615
79
0.607339
1,991
0.682785
0
0
2,044
0.70096
0
0
1,062
0.364198
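The test class above pins the clock with @freezegun.freeze_time so notification timestamps are deterministic; the same decorator also works as a context manager. A tiny sketch, assuming freezegun is installed:

import datetime

import freezegun

with freezegun.freeze_time("2016-10-18T09:52:05.219414"):
    now = datetime.datetime.utcnow()  # frozen while inside the block

assert (now.year, now.month, now.day) == (2016, 10, 18)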
53901ad02fc361ceba4528f28baf2995acc82248
1,422
py
Python
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
rainzhop/cumulus-tank
09ebc7858ea53630e30606945adfea856a80faa3
[ "MIT" ]
null
null
null
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
rainzhop/cumulus-tank
09ebc7858ea53630e30606945adfea856a80faa3
[ "MIT" ]
null
null
null
leetcode/medium/best-time-to-buy-and-sell-stock-ii.py
rainzhop/cumulus-tank
09ebc7858ea53630e30606945adfea856a80faa3
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/ # # Say you have an array for which the ith element is the price of a given stock on day i. # # Design an algorithm to find the maximum profit. # You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times). # However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again). class Solution(object): def maxProfit(self, prices): """ :type prices: List[int] :rtype: int """ if prices == []: return 0 profit_list = [] min_val = prices[0] max_val = prices[0] tend = 0 # 0:down, 1:up for i in range(1, len(prices)): if prices[i] < prices[i - 1]: # go down if tend == 1: max_val = prices[i - 1] profit_list.append(max_val - min_val) tend = 0 pass if prices[i] > prices[i - 1]: # go up if tend == 0: min_val = prices[i - 1] tend = 1 pass if tend == 1: profit_list.append(prices[i] - min_val) return sum(profit_list) if __name__ == '__main__': prices = [8,9,2,5] s = Solution() print s.maxProfit(prices)
32.318182
123
0.524613
869
0.611111
0
0
0
0
0
0
550
0.386779
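For contrast with the trend-tracking solution above: the usual greedy formulation sums every positive day-over-day delta, and with unlimited transactions both approaches give the same maximum profit. A minimal Python 3 sketch:

def max_profit(prices):
    # Each rising step prices[i-1] -> prices[i] can be captured as its own trade.
    return sum(max(prices[i] - prices[i - 1], 0) for i in range(1, len(prices)))

assert max_profit([8, 9, 2, 5]) == 4  # buy 8 sell 9, buy 2 sell 5
assert max_profit([]) == 0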
5391eb5d4685629e3d8228f4e55d8a98857010ab
7,787
py
Python
django_loci/tests/base/test_admin.py
yashikajotwani12/django-loci
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
[ "BSD-3-Clause" ]
205
2017-11-17T10:35:02.000Z
2022-03-29T18:50:32.000Z
django_loci/tests/base/test_admin.py
yashikajotwani12/django-loci
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
[ "BSD-3-Clause" ]
98
2017-11-20T16:03:27.000Z
2022-01-19T21:12:47.000Z
django_loci/tests/base/test_admin.py
yashikajotwani12/django-loci
2c0bcb33f4a56d559f798e37fd17b2143b912ce4
[ "BSD-3-Clause" ]
46
2017-11-20T23:25:26.000Z
2022-02-10T05:06:16.000Z
import json import os import responses from django.urls import reverse from .. import TestAdminMixin, TestLociMixin class BaseTestAdmin(TestAdminMixin, TestLociMixin): geocode_url = 'https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/' def test_location_list(self): self._login_as_admin() self._create_location(name='test-admin-location-1') url = reverse('{0}_location_changelist'.format(self.url_prefix)) r = self.client.get(url) self.assertContains(r, 'test-admin-location-1') def test_floorplan_list(self): self._login_as_admin() self._create_floorplan() self._create_location() url = reverse('{0}_floorplan_changelist'.format(self.url_prefix)) r = self.client.get(url) self.assertContains(r, '1st floor') def test_location_json_view(self): self._login_as_admin() loc = self._create_location() r = self.client.get(reverse('admin:django_loci_location_json', args=[loc.pk])) expected = { 'name': loc.name, 'address': loc.address, 'type': loc.type, 'is_mobile': loc.is_mobile, 'geometry': json.loads(loc.geometry.json), } self.assertDictEqual(r.json(), expected) def test_location_floorplan_json_view(self): self._login_as_admin() fl = self._create_floorplan() r = self.client.get( reverse('admin:django_loci_location_floorplans_json', args=[fl.location.pk]) ) expected = { 'choices': [ { 'id': str(fl.pk), 'str': str(fl), 'floor': fl.floor, 'image': fl.image.url, 'image_width': fl.image.width, 'image_height': fl.image.height, } ] } self.assertDictEqual(r.json(), expected) def test_location_change_image_removed(self): self._login_as_admin() loc = self._create_location(name='test-admin-location-1', type='indoor') fl = self._create_floorplan(location=loc) # remove floorplan image os.remove(fl.image.path) url = reverse('{0}_location_change'.format(self.url_prefix), args=[loc.pk]) r = self.client.get(url) self.assertContains(r, 'test-admin-location-1') def test_floorplan_change_image_removed(self): self._login_as_admin() loc = self._create_location(name='test-admin-location-1', type='indoor') fl = self._create_floorplan(location=loc) # remove floorplan image os.remove(fl.image.path) url = reverse('{0}_floorplan_change'.format(self.url_prefix), args=[fl.pk]) r = self.client.get(url) self.assertContains(r, 'test-admin-location-1') def test_is_mobile_location_json_view(self): self._login_as_admin() loc = self._create_location(is_mobile=True, geometry=None) response = self.client.get( reverse('admin:django_loci_location_json', args=[loc.pk]) ) self.assertEqual(response.status_code, 200) content = json.loads(response.content) self.assertEqual(content['geometry'], None) loc1 = self._create_location( name='location2', address='loc2 add', type='outdoor' ) response1 = self.client.get( reverse('admin:django_loci_location_json', args=[loc1.pk]) ) self.assertEqual(response1.status_code, 200) content1 = json.loads(response1.content) expected = { 'name': 'location2', 'address': 'loc2 add', 'type': 'outdoor', 'is_mobile': False, 'geometry': {'type': 'Point', 'coordinates': [12.512124, 41.898903]}, } self.assertEqual(content1, expected) @responses.activate def test_geocode(self): self._login_as_admin() address = 'Red Square' url = '{0}?address={1}'.format( reverse('admin:django_loci_location_geocode_api'), address ) # Mock HTTP request to the URL to work offline responses.add( responses.GET, f'{self.geocode_url}findAddressCandidates?singleLine=Red+Square&f=json&maxLocations=1', body=self._load_content('base/static/test-geocode.json'), content_type='application/json', ) response = self.client.get(url) response_lat 
= round(response.json()['lat']) response_lng = round(response.json()['lng']) self.assertEqual(response.status_code, 200) self.assertEqual(response_lat, 56) self.assertEqual(response_lng, 38) def test_geocode_no_address(self): self._login_as_admin() url = reverse('admin:django_loci_location_geocode_api') response = self.client.get(url) expected = {'error': 'Address parameter not defined'} self.assertEqual(response.status_code, 400) self.assertEqual(response.json(), expected) @responses.activate def test_geocode_invalid_address(self): self._login_as_admin() invalid_address = 'thisaddressisnotvalid123abc' url = '{0}?address={1}'.format( reverse('admin:django_loci_location_geocode_api'), invalid_address ) responses.add( responses.GET, f'{self.geocode_url}findAddressCandidates?singleLine=thisaddressisnotvalid123abc' '&f=json&maxLocations=1', body=self._load_content('base/static/test-geocode-invalid-address.json'), content_type='application/json', ) response = self.client.get(url) expected = {'error': 'Not found location with given name'} self.assertEqual(response.status_code, 404) self.assertEqual(response.json(), expected) @responses.activate def test_reverse_geocode(self): self._login_as_admin() lat = 52 lng = 21 url = '{0}?lat={1}&lng={2}'.format( reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng ) # Mock HTTP request to the URL to work offline responses.add( responses.GET, f'{self.geocode_url}reverseGeocode?location=21.0%2C52.0&f=json&outSR=4326', body=self._load_content('base/static/test-reverse-geocode.json'), content_type='application/json', ) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains(response, 'POL') @responses.activate def test_reverse_location_with_no_address(self): self._login_as_admin() lat = -30 lng = -30 url = '{0}?lat={1}&lng={2}'.format( reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng ) responses.add( responses.GET, f'{self.geocode_url}reverseGeocode?location=-30.0%2C-30.0&f=json&outSR=4326', body=self._load_content( 'base/static/test-reverse-location-with-no-address.json' ), content_type='application/json', ) response = self.client.get(url) response_address = response.json()['address'] self.assertEqual(response.status_code, 404) self.assertEqual(response_address, '') def test_reverse_geocode_no_coords(self): self._login_as_admin() url = reverse('admin:django_loci_location_reverse_geocode_api') response = self.client.get(url) expected = {'error': 'lat or lng parameter not defined'} self.assertEqual(response.status_code, 400) self.assertEqual(response.json(), expected)
38.549505
99
0.617054
7,666
0.984461
0
0
3,160
0.405805
0
0
1,966
0.252472
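The geocode tests above stay offline by registering canned bodies with the responses library. A minimal standalone sketch of that pattern (the URL and payload are invented, not the ArcGIS ones used in the suite):

import requests
import responses

@responses.activate
def test_geocode_mocked():
    # Any requests call to the registered URL is served from this mock.
    responses.add(
        responses.GET,
        "https://geocode.example.com/findAddressCandidates",
        json={"candidates": [{"location": {"x": 37.62, "y": 55.75}}]},
        status=200,
    )
    r = requests.get("https://geocode.example.com/findAddressCandidates")
    assert r.json()["candidates"][0]["location"]["y"] == 55.75

test_geocode_mocked()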
539267e2204960bd72eacaf1dd33c30f2edce8d2
1,270
py
Python
dca_models/deform_offsets_module.py
vatsalag99/Deformable-Channel-Attention
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
[ "MIT" ]
1
2020-12-01T20:57:09.000Z
2020-12-01T20:57:09.000Z
dca_models/deform_offsets_module.py
vatsalag99/Deformable-Channel-Attention
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
[ "MIT" ]
null
null
null
dca_models/deform_offsets_module.py
vatsalag99/Deformable-Channel-Attention
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
[ "MIT" ]
null
null
null
import torch from torch import nn from torch.nn.parameter import Parameter from einops import rearrange, reduce, repeat class dca_offsets_layer(nn.Module): """Constructs a Offset Generation module. """ def __init__(self, channel, n_offsets): super(dca_offsets_layer, self).__init__() self.channel = channel self.n_offsets = n_offsets def covariance_features(self, x): """ Takes in a feature map and returns the unnormalized covariance matrix """ m_batchsize, C, height, width = x.size() x = x - x.mean(dim=1, keepdim=True) / (x.std(dim=1, keepdim=True) + 1e-5) proj_query = x.view(m_batchsize, C, -1) proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) return energy def forward(self, x): m_batchsize, C, height, width = x.size() cov_matrix = self.covariance_features(x).reshape(m_batchsize, C, 1, C) _, locations = torch.topk(cov_matrix, self.n_offsets, dim=1) delta = torch.stack(self.n_offsets*[torch.arange(0, self.channel)], dim=0) delta = torch.stack(m_batchsize * [delta], dim=0) offsets = locations.squeeze() - delta.cuda() return offsets
35.277778
82
0.640157
1,146
0.902362
0
0
0
0
0
0
142
0.111811
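A standalone sketch of the batched channel-covariance step in covariance_features above, assuming torch is installed. One caveat worth noting: in the original, x - x.mean(...) / (x.std(...) + 1e-5) binds as x minus (mean/std) by operator precedence, so the sketch skips that standardization and only shows the (B, C, C) energy product.

import torch

B, C, H, W = 2, 8, 4, 4
x = torch.randn(B, C, H, W)

proj_query = x.view(B, C, -1)             # (B, C, H*W)
proj_key = proj_query.permute(0, 2, 1)    # (B, H*W, C)
energy = torch.bmm(proj_query, proj_key)  # (B, C, C) channel-by-channel products

print(energy.shape)  # torch.Size([2, 8, 8])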
539324c139f4acda8b0dbb87e42e77a126f0fc1b
155
py
Python
tests/__init__.py
egor43/PyImageComparsion
5270f5646c40391cc5ac225305d7be9b0b7de140
[ "BSD-2-Clause" ]
null
null
null
tests/__init__.py
egor43/PyImageComparsion
5270f5646c40391cc5ac225305d7be9b0b7de140
[ "BSD-2-Clause" ]
null
null
null
tests/__init__.py
egor43/PyImageComparsion
5270f5646c40391cc5ac225305d7be9b0b7de140
[ "BSD-2-Clause" ]
null
null
null
from . import test_helpers from . import test_image_opener from . import test_image_metrick from . import test_compare_tools from . import test_compare_api
31
32
0.845161
0
0
0
0
0
0
0
0
0
0
5395cbb4a78f713d4a2814a8d200c21fd6a061c3
485
py
Python
core/urls.py
donnellan0007/blog
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
[ "MIT" ]
null
null
null
core/urls.py
donnellan0007/blog
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
[ "MIT" ]
null
null
null
core/urls.py
donnellan0007/blog
02c8850688422e3b685ffac10c32bf3e7a7c2e7a
[ "MIT" ]
null
null
null
from django.contrib import admin from django.urls import path from .views import index, email, post_detail, posts, hot_takes, take_detail from . import views app_name = "core" urlpatterns = [ path('',views.index,name="index"), path('email/',views.email,name="email"), path('post/<slug>/',views.post_detail,name='post'), path('posts/',views.posts,name='posts'), path('takes/',views.hot_takes,name='takes'), path('take/<slug>/',views.take_detail,name='take'), ]
32.333333
75
0.68866
0
0
0
0
0
0
0
0
100
0.206186
53971f3415b6410a3e353dbb14eb4ceab3a8c1a1
30
py
Python
griddy/__init__.py
pgolding/pandas-grid
0f80db1511097656496dee503d7bb281b97b8bdc
[ "BSD-2-Clause" ]
1
2018-01-03T11:34:08.000Z
2018-01-03T11:34:08.000Z
griddy/__init__.py
pgolding/pandas-grid
0f80db1511097656496dee503d7bb281b97b8bdc
[ "BSD-2-Clause" ]
null
null
null
griddy/__init__.py
pgolding/pandas-grid
0f80db1511097656496dee503d7bb281b97b8bdc
[ "BSD-2-Clause" ]
null
null
null
from .grid import render_table
30
30
0.866667
0
0
0
0
0
0
0
0
0
0
5398b81471428ab8f27e820b3cfc198272b782d9
1,573
py
Python
utils/dbconn.py
iamvishnuks/Xmigrate
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
[ "MIT" ]
4
2020-05-26T11:19:02.000Z
2020-08-06T11:12:34.000Z
utils/dbconn.py
iamvishnuks/Xmigrate
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
[ "MIT" ]
46
2022-02-19T09:11:11.000Z
2022-03-31T15:42:50.000Z
utils/dbconn.py
iamvishnuks/Xmigrate
f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7
[ "MIT" ]
2
2019-12-20T12:30:33.000Z
2020-01-02T22:01:25.000Z
from mongoengine import * from dotenv import load_dotenv from os import getenv from cassandra.cluster import Cluster from cassandra.auth import PlainTextAuthProvider from cassandra.cqlengine import connection from cassandra.cqlengine.management import sync_table from cassandra.query import ordered_dict_factory from model.discover import * from model.blueprint import * from model.disk import * from model.storage import * from model.project import * from model.network import * from model.user import * load_dotenv() cass_db = getenv("CASS_DB") cass_password = getenv("CASS_PASSWORD") cass_user = getenv("CASS_USER") def create_db_con(): auth_provider = PlainTextAuthProvider(username=cass_user, password=cass_password) cluster = Cluster([cass_db],auth_provider=auth_provider) session = cluster.connect() session.execute(""" CREATE KEYSPACE IF NOT EXISTS migration WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' } """) session.set_keyspace('migration') session.row_factory = ordered_dict_factory connection.setup([cass_db], "migration",protocol_version=3,auth_provider=auth_provider) sync_table(BluePrint) sync_table(Discover) sync_table(Project) sync_table(Network) sync_table(Subnet) sync_table(Storage) sync_table(Bucket) sync_table(GcpBucket) sync_table(User) sync_table(Disk) session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (network);") session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (subnet);") return session
33.468085
91
0.760966
0
0
0
0
0
0
0
0
309
0.19644
53990709c9653095e01a4f58d04ac79451da6d42
3,921
py
Python
src/syft/lib/__init__.py
godormad/PySyft
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
[ "Apache-2.0" ]
null
null
null
src/syft/lib/__init__.py
godormad/PySyft
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
[ "Apache-2.0" ]
null
null
null
src/syft/lib/__init__.py
godormad/PySyft
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
[ "Apache-2.0" ]
null
null
null
# stdlib import importlib import sys from typing import Any from typing import Any as TypeAny from typing import Dict as TypeDict from typing import Optional # third party from packaging import version # syft relative from ..ast.globals import Globals from ..lib.python import create_python_ast from ..lib.torch import create_torch_ast from ..lib.torchvision import create_torchvision_ast from ..logger import critical from ..logger import traceback_and_raise from .misc import create_union_ast class VendorLibraryImportException(Exception): pass def vendor_requirements_available(vendor_requirements: TypeDict[str, TypeAny]) -> bool: # see if python version is supported if "python" in vendor_requirements: python_reqs = vendor_requirements["python"] PYTHON_VERSION = sys.version_info min_version = python_reqs.get("min_version", None) if min_version is not None: if PYTHON_VERSION < min_version: traceback_and_raise( VendorLibraryImportException( f"Unable to load {vendor_requirements['lib']}." + f"Python: {PYTHON_VERSION} < {min_version}" ) ) # see if torch version is supported if "torch" in vendor_requirements: torch_reqs = vendor_requirements["torch"] # third party import torch TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) min_version = torch_reqs.get("min_version", None) if min_version is not None: if TORCH_VERSION < version.parse(min_version): traceback_and_raise( VendorLibraryImportException( f"Unable to load {vendor_requirements['lib']}." + f"Torch: {TORCH_VERSION} < {min_version}" ) ) return True def load_lib(lib: str, options: TypeDict[str, TypeAny] = {}) -> None: try: _ = importlib.import_module(lib) vendor_ast = importlib.import_module(f"syft.lib.{lib}") PACKAGE_SUPPORT = getattr(vendor_ast, "PACKAGE_SUPPORT", None) PACKAGE_SUPPORT.update(options) if PACKAGE_SUPPORT is not None and vendor_requirements_available( vendor_requirements=PACKAGE_SUPPORT ): update_ast = getattr(vendor_ast, "update_ast", None) if update_ast is not None: global lib_ast update_ast(ast_or_client=lib_ast) for _, client in lib_ast.registered_clients.items(): update_ast(ast_or_client=client) # cache the constructor for future created clients lib_ast.loaded_lib_constructors[lib] = update_ast except VendorLibraryImportException as e: critical(e) except Exception as e: critical(f"Unable to load package support for: {lib}. {e}") # now we need to load the relevant frameworks onto the node def create_lib_ast(client: Optional[Any] = None) -> Globals: python_ast = create_python_ast(client=client) torch_ast = create_torch_ast(client=client) torchvision_ast = create_torchvision_ast(client=client) # numpy_ast = create_numpy_ast() lib_ast = Globals(client=client) lib_ast.add_attr(attr_name="syft", attr=python_ast.attrs["syft"]) lib_ast.add_attr(attr_name="torch", attr=torch_ast.attrs["torch"]) lib_ast.add_attr(attr_name="torchvision", attr=torchvision_ast.attrs["torchvision"]) # let the misc creation be always the last, as it needs the full ast solved # to properly generated unions union_misc_ast = getattr(getattr(create_union_ast(lib_ast, client), "syft"), "lib") misc_root = getattr(getattr(lib_ast, "syft"), "lib") misc_root.add_attr(attr_name="misc", attr=union_misc_ast.attrs["misc"]) return lib_ast lib_ast = create_lib_ast(None)
35.972477
88
0.665902
55
0.014027
0
0
0
0
0
0
784
0.199949
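The torch gate in vendor_requirements_available above compares packaging.version objects after stripping any local build tag from torch.__version__. A tiny self-contained sketch of that check:

from packaging import version

TORCH_VERSION = version.parse("1.8.1+cu111".split("+")[0])  # -> Version('1.8.1')
min_version = "1.6.0"

assert TORCH_VERSION >= version.parse(min_version)
print(TORCH_VERSION)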
5399748c26ec62ec3b268e3e29283c1ccc28b398
8,742
py
Python
scripts/griffin_GC_counts.py
GavinHaLab/Griffin
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
[ "BSD-3-Clause-Clear" ]
1
2021-09-08T05:43:15.000Z
2021-09-08T05:43:15.000Z
scripts/griffin_GC_counts.py
GavinHaLab/Griffin
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
[ "BSD-3-Clause-Clear" ]
null
null
null
scripts/griffin_GC_counts.py
GavinHaLab/Griffin
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
[ "BSD-3-Clause-Clear" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # In[ ]: import pysam import os import pandas as pd import numpy as np import time import argparse import sys from multiprocessing import Pool # In[ ]: # ##arguments for testing # bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam' # bam_file_name = 'MBC_1041_1_ULP' # mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph' # ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa' # chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes' # out_dir = './tmp/' # map_q = 20 # size_range = [15,500] # CPU = 4 # In[ ]: parser = argparse.ArgumentParser() parser.add_argument('--bam_file', help='sample_bam_file', required=True) parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True) parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed foramt', required=True) parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True) parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True) parser.add_argument('--out_dir',help='folder for GC bias results',required=True) parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True) parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True) parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True) args = parser.parse_args() bam_file_path = args.bam_file bam_file_name = args.bam_file_name mapable_path=args.mapable_regions ref_seq_path = args.ref_seq chrom_sizes_path = args.chrom_sizes out_dir = args.out_dir map_q = args.map_q size_range = args.size_range CPU = args.CPU # In[ ]: print('arguments provided:') print('\tbam_file_path = "'+bam_file_path+'"') print('\tbam_file_name = "'+bam_file_name+'"') print('\tmapable_regions = "'+mapable_path+'"') print('\tref_seq_path = "'+ref_seq_path+'"') print('\tchrom_sizes_path = "'+chrom_sizes_path+'"') print('\tout_dir = "'+out_dir+'"') print('\tmap_q = '+str(map_q)) print('\tsize_range = '+str(size_range)) print('\tCPU = '+str(CPU)) # In[ ]: mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0] out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt' print('out_file',out_file) # In[ ]: #create a directory for the GC data if not os.path.exists(out_dir +'/'+mapable_name): os.mkdir(out_dir +'/'+mapable_name) if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'): os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/') # In[ ]: #import filter mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None) #remove non standard chromosomes and X and Y chroms = ['chr'+str(m) for m in range(1,23)] mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)] print('chroms:', chroms) print('number_of_intervals:',len(mapable_intervals)) sys.stdout.flush() # In[ ]: def collect_reads(sublist): #create a dict for holding the frequency of each read length and GC content GC_dict = {} for length in range(size_range[0],size_range[1]+1): GC_dict[length]={} for num_GC in range(0,length+1): GC_dict[length][num_GC]=0 #import the bam file #this needs to be done within the loop otherwise it gives a truncated file warning bam_file = 
pysam.AlignmentFile(bam_file_path, "rb") print('sublist intervals:',len(sublist)) #this might also need to be in the loop #import the ref_seq ref_seq=pysam.FastaFile(ref_seq_path) for i in range(len(sublist)): chrom = sublist.iloc[i][0] start = sublist.iloc[i][1] end = sublist.iloc[i][2] if i%5000==0: print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time)) sys.stdout.flush() #fetch any read that overlaps the inteterval (don't need to extend the interval because the fetch function does this automatically) fetched = bam_file.fetch(chrom,start,end) for read in fetched: #use both fw (positive template length) and rv (negative template length) reads if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]): #qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False: if read.is_reverse==False: read_start = read.reference_start read_end = read.reference_start+read.template_length elif read.is_reverse==True: read_end = read.reference_start + read.reference_length read_start = read_end + read.template_length fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end) #tally up the GC content fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N') # ################# # ##logic check#### # ################# # if read.is_reverse==False: # if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length: # print('fw match',read.reference_length) # else: # print(fragment_seq[0:read.reference_length],read.reference_length,'fw') # print(read.query_sequence,len(read.query_sequence),'fw') # print(len(fragment_seq),read.template_length) # print('\n') # elif read.is_reverse==True: # if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length: # print('rv match',read.reference_length) # else: # print(fragment_seq[-read.reference_length:],read.reference_length,'rv') # print(read.query_sequence,len(read.query_sequence),'rv') # print(len(fragment_seq),read.template_length) # print('\n') # ################# #split and convert to numpy array fragment_seq = np.array(list(fragment_seq)) #replace with values fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1 fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0 fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right fragment_seq = fragment_seq.astype(int) num_GC = int(fragment_seq.sum()) GC_dict[abs(read.template_length)][num_GC]+=1 print('done') return(GC_dict) # In[ ]: start_time = time.time() p = Pool(processes=CPU) #use the available CPU sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU GC_dict_list = p.map(collect_reads, sublists, 1) # In[ ]: all_GC_df = pd.DataFrame() for i,GC_dict in enumerate(GC_dict_list): GC_df = pd.DataFrame() for length in GC_dict.keys(): current = pd.Series(GC_dict[length]).reset_index() current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'}) current['length']=length current = current[['length','num_GC','number_of_fragments']] GC_df = GC_df.append(current, 
ignore_index=True) GC_df = GC_df.set_index(['length','num_GC']) all_GC_df[i] = GC_df['number_of_fragments'] del(GC_df,GC_dict) all_GC_df = all_GC_df.sum(axis=1) all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'}) all_GC_df = all_GC_df.reset_index() all_GC_df.to_csv(out_file,sep='\t',index=False) # In[ ]: print('done') # In[ ]: # In[ ]: # In[ ]:
33.366412
241
0.636811
0
0
0
0
0
0
0
0
3,902
0.446351
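The per-fragment GC tally in collect_reads above is easy to isolate: map G/C to 1, A/T to 0, then sum. A self-contained sketch with a toy sequence, mirroring the original's trick of assigning one random 0/1 to every N:

import numpy as np

fragment_seq = np.array(list("GCGTANNAT"))  # dtype '<U1', one base per element
fragment_seq[(fragment_seq == "G") | (fragment_seq == "C")] = 1
fragment_seq[(fragment_seq == "A") | (fragment_seq == "T")] = 0
fragment_seq[fragment_seq == "N"] = np.random.randint(2)  # one draw shared by all Ns
num_GC = int(fragment_seq.astype(int).sum())
print(num_GC)  # 3 when the Ns draw 0, 5 when they draw 1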
5399b6c7047b5726e42c8b72d0dc40c3dfb01acf
4,372
py
Python
task2/04-task2-upload-dim-tables.py
canovasjm/InterviewProject_JuanCanovas
6ff385c66664328cea0678454560e89e44851e24
[ "MIT" ]
null
null
null
task2/04-task2-upload-dim-tables.py
canovasjm/InterviewProject_JuanCanovas
6ff385c66664328cea0678454560e89e44851e24
[ "MIT" ]
null
null
null
task2/04-task2-upload-dim-tables.py
canovasjm/InterviewProject_JuanCanovas
6ff385c66664328cea0678454560e89e44851e24
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Mar 1 18:17:07 2021 @author: jm """ # %% required libraries import numpy as np import pandas as pd from sqlalchemy import create_engine # %% connect to DB # create connection using pymssql engine = create_engine('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga') connection = engine.connect() # %% read data sets from where I will build the dimension tables # read employee roster data employee_roster = pd.read_excel("datasources/Employee_Roster_Data.xlsx", sheet_name = 'Sheet1') # read skills data skills = pd.read_excel("datasources/skills.xlsx", sheet_name = "Sheet1") # read hours data hours = pd.read_excel("datasources/hours.xlsx", sheet_name = "Sheet1") # %% dimensions created from source employee_roster # %% create DIM_Currency # get unique values currencies = sorted(employee_roster['Currency'].unique()) # create a data frame DIM_Currency = pd.DataFrame({'id_currency': (np.arange(len(currencies)) + 1), 'currency': currencies}) # send data frame to DB DIM_Currency.to_sql('DIM_Currency', con = connection, if_exists = 'append', index = False) # %% create DIM_Department # get unique values departments = sorted(pd.concat([employee_roster['Department'], skills['Department']], axis = 0).unique()) # create a data frame DIM_Department = pd.DataFrame({'id_department': (np.arange(len(departments)) + 1), 'department': departments}) # send data frame to DB DIM_Department.to_sql('DIM_Department', con = connection, if_exists = 'append', index = False) # %% create DIM_Gender # get unique values genders = sorted(pd.concat([employee_roster['Gender'], skills['Gender']], axis = 0).unique()) # create a data frame DIM_Gender = pd.DataFrame({'id_gender': (np.arange(len(genders)) + 1), 'gender': genders}) # send data frame to DB DIM_Gender.to_sql('DIM_Gender', con = connection, if_exists = 'append', index = False) # %% create DIM_User # check if 'UserId' values in 'skills' are in 'User_ID' in 'employee_roster' # we get 20134 'True' values, meaning that all 'UserId' in 'skills' are already # in 'User_ID' in employee_roster users_check_1 = np.isin(skills['UserId'], employee_roster['User_ID']).sum() # check if 'UserId' values in 'hours' are in 'User_ID' in 'employee_roster' # we get 7659 'True' values, meaning that NOT all 'UserId' in 'hours' are already # in 'User_ID' in employee_roster users_check_2 = np.isin(hours['UserId'], employee_roster['User_ID']).sum() # get unique values users = sorted(pd.concat([employee_roster['User_ID'], skills['UserId'], hours['UserId']], axis = 0).unique()) # create a data frame to use pd.merge() df_users = pd.DataFrame({'User_ID': users}) # left join 'df_user' with 'employee_roster' on 'UserID' users_final = pd.merge(df_users, employee_roster, on = 'User_ID', how ='left') # select only columns I need users_final = users_final[['User_ID', 'Email_ID', 'Fullname']] # rename columns users_final.rename(columns = {'User_ID': 'id_user', 'Email_ID': 'id_email', 'Fullname': 'fullname'}, inplace = True) # send data frame to DB users_final.to_sql('DIM_User', con = connection, if_exists = 'append', index = False) # %% dimensions created from source skills # %% create DIM_AttributeGroup # get unique values att_group = sorted(skills['Attribute Group'].unique()) # create a data frame DIM_AttributeGroup = pd.DataFrame({'id_att_group': (np.arange(len(att_group)) + 1), 'attribute_group': att_group}) # send data frame to DB DIM_AttributeGroup.to_sql('DIM_AttributeGroup', con = connection, if_exists = 'append', index = 
False) # %% create DIM_AttributeSubGroup # get unique values att_sub_group = sorted(skills['Attribute Sub-Group'].unique()) # create a data frame DIM_AttributeSubGroup = pd.DataFrame({'id_att_sub_group': (np.arange(len(att_sub_group)) + 1), 'attribute_sub_group': att_sub_group}) # send data frame to DB DIM_AttributeSubGroup.to_sql('DIM_AttributeSubGroup', con = connection, if_exists = 'append', index = False) # %% create DIM_AttributeName # get unique values att_name = sorted(skills['Attribute Name'].unique()) # create a data frame DIM_AttributeName = pd.DataFrame({'id_att_name': (np.arange(len(att_name)) + 1), 'attribute_name': att_name}) # send data frame to DB DIM_AttributeName.to_sql('DIM_AttributeName', con = connection, if_exists = 'append', index = False)
34.698413
133
0.730101
0
0
0
0
0
0
0
0
2,305
0.527219
539a58166d003e0486119a3a4445a376e8149b19
6,897
py
Python
cogs/server.py
vikasbaghel1001/Kanna-Chan
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
[ "MIT" ]
5
2021-10-17T07:29:42.000Z
2022-03-23T11:01:58.000Z
cogs/server.py
vikasbaghel1001/Kanna-Chan
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
[ "MIT" ]
1
2021-10-17T08:14:09.000Z
2021-10-17T08:14:09.000Z
cogs/server.py
vikasbaghel1001/Kanna-Chan
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
[ "MIT" ]
4
2021-07-12T04:20:22.000Z
2021-10-01T03:29:50.000Z
import discord
from discord.ext import commands

arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"

class Server(commands.Cog):
    def __init__(self, client):
        self.client = client
        self.kana_id = 857835279259664403

    @commands.command()
    @commands.is_owner()
    async def sabout(self, ctx):
        kana = self.client.get_user(self.kana_id)
        about_file = discord.File("./images/about_server.png")
        await ctx.send(file=about_file)
        emb = discord.Embed(title=f"{kdance} ABOUT SERVER {kdance}", description=f"{arrow} **DRAGON LOLI'S HOME** is the official Server of the bot **Kanna Chan**. It's a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to happen here! So, stay tuned!", color=0xfc74c6)
        emb.add_field(
            name=f"{kwee} __ROLES__",
            value=f"{arrow} <@&876800883441156138> The highest role, supposed to be only for Kanna Chan.\n{arrow} <@&876817811396263946> Admins of the Server, who have the highest power and authority after the owner.\n{arrow} <@&876818242058997791> Moderators of the server, meant to moderate the chat and maintain a positive environment in the community.\n{arrow} <@&876801038420701196> Developer(s) of Kanna Chan have this role.\n{arrow} <@&876804164661944340> All other users who join this server get this role by default. They have image and embed perms by default.\n{arrow} **PS: APART FROM THESE, SELF-ROLES ARE ALSO AVAILABLE FOR MEMBERS.**",
            inline=False
        )
        emb.add_field(
            name=f"{ksmug} __CHANNELS__",
            value=f"{arrow} <#877030933847490691> Read the rules here.\n{arrow} <#877031867440832574> Channel for grabbing self-roles.\n{arrow} <#876798564704084011> The general chat for the server.\n{arrow} <#876798809819189249> Bot commands should be executed here.\n{arrow} <#876798696078065694> You can give suggestions for improving Kanna Chan here.\n{arrow} <#876798720254029864> You can report BUGS here if you find any in Kanna Chan.\n{arrow} <#876798750876651530> For any other support or query use this channel.\n{arrow} **P.S: YOU CAN PING ANY STAFF MEMBER OR DEVELOPER WHILE REPORTING A BUG OR IN CASE OF ANY QUERY.**",
            inline=False
        )
        emb.set_footer(
            text="Kanna Chan",
            icon_url=kana.avatar_url
        )
        await ctx.send(embed=emb)

    @commands.command()
    @commands.is_owner()
    async def rule(self, ctx):
        kana = self.client.get_user(self.kana_id)
        rule_file = discord.File("./images/rules.png")
        await ctx.send(file=rule_file)
        emb = discord.Embed(title=f"{kbored} RULES {kbored}", color=0xfc74c6)
        emb.add_field(
            name=f"{heart} **Be respectful**",
            value="You must respect all users, regardless of your liking towards them. Treat others the way you want to be treated.",
            inline=False
        )
        emb.add_field(
            name=f"{heart} **No Inappropriate Language**",
            value=f"{arrow} The use of profanity should be kept to a minimum. However, any derogatory language towards any user is prohibited.",
            inline=False
        )
        emb.add_field(
            name=f"{heart} **No spamming**",
            value=f"{arrow} Don't send a lot of small messages right after each other. 
Do not disrupt chat by spamming.", inline=False ) emb.add_field( name=f"{heart} **No pornographic/adult/other NSFW material**", value=f"{arrow} This is a community server and not meant to share this kind of material.", inline=False ) emb.add_field( name=f"{heart} **No advertisements**", value=f"{arrow} We do not tolerate any kind of advertisements, whether it be for other communities or streams. You can post your content in the media channel if it is relevant and provides actual value (Video/Art)", inline=False ) emb.add_field( name=f"{heart} **No offensive names and profile pictures**", value=f"{arrow} You will be asked to change your name or picture if the staff deems them inappropriate.", inline=False ) emb.add_field( name=f"{heart} **Server Raiding**", value=f"{arrow} Raiding or mentions of raiding are not allowed.", inline=False ) emb.add_field( name=f"{heart} **Direct & Indirect Threats**", value=f"{arrow} Threats to other users of DDoS, Death, DoX, abuse, and other malicious threats are absolutely prohibited and disallowed.", inline=False ) emb.add_field( name=f"{heart} **Follow the Discord Community Guidelines**", value=f"{arrow} You can find them here: https://discordapp.com/guidelines", inline=False ) emb.add_field( name=f"{heart} **VOICE CHANNELS**", value=f"{arrow} Do not join voice chat channels without permission of the people already in there.", inline=False ) emb.add_field( name=f"{heart} **DECISIONS AND ISSUES**", value = f"{arrow} ***The Admins and Mods will Mute/Kick/Ban per discretion. If you feel mistreated DM an Admin and we will resolve the issue.***", inline=False ) emb.add_field( name=f"{heart} **CHANGES**", value = f"{arrow} ***Your presence in this server implies accepting these rules, including all further changes. These changes might be done at any time without notice, it is your responsibility to check for them.***", inline=False ) emb.set_footer( text="Kanna Chan", icon_url=kana.avatar_url ) await ctx.send(embed=emb) @commands.Cog.listener() async def on_member_join(self, member): if member.guild.id == 876798564704084008: if member.bot: return else: member_role = member.guild.get_role(876804164661944340) await member.add_roles(member_role) desc = f"{member.name} Thanks for joining Kanna's Server. The server is currently under construction, Thanks for being an **early supporter**!! If you need any kind of help or support just ping any staff member or DM `aSHish#1198`. Have a nice stay in the server :)" await member.send(desc) else: return def setup(client): client.add_cog(Server(client)) print(">> Server Utility loaded")
54.307087
636
0.641438
6,492
0.941279
0
0
6,344
0.91982
6,217
0.901406
4,037
0.585327
539b64bd9ed2668ae9a573fa432b5a05793c8032
109
py
Python
test/run/t344.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
2,671
2015-01-03T08:23:25.000Z
2022-03-31T06:15:48.000Z
test/run/t344.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
972
2015-01-05T08:11:00.000Z
2022-03-29T13:47:15.000Z
test/run/t344.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
845
2015-01-03T19:53:36.000Z
2022-03-29T18:34:22.000Z
for ch in "Hello world!": d = ord(ch) h = hex(d) o = oct(d) b = bin(d) print ch, d, h, o, b
12.111111
25
0.449541
0
0
0
0
0
0
0
0
14
0.12844
539b84ee2616f61a9bf370a8a3b1b21465720328
10,016
py
Python
paho/mqtt/subscribe.py
RandomGamer342/TTM4115-plantsensor
e63c34160d284bb6fd26563eeba949d54026348b
[ "MIT" ]
8
2017-01-17T02:25:08.000Z
2019-07-24T13:39:55.000Z
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
nidiascampos/smartgreen
d574d90918702ac3bd383ed77d673f871576c5b0
[ "Apache-2.0" ]
5
2018-11-20T16:57:21.000Z
2019-03-17T19:59:52.000Z
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
nidiascampos/smartgreen
d574d90918702ac3bd383ed77d673f871576c5b0
[ "Apache-2.0" ]
9
2017-01-19T03:56:05.000Z
2020-03-10T04:03:20.000Z
# Copyright (c) 2016 Roger Light <[email protected]>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
#    http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
#    http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
#    Roger Light - initial API and implementation

"""
This module provides some helper functions to allow straightforward subscribing
to topics and retrieving messages. The two functions are simple(), which
returns one or more messages matching a set of topics, and callback() which
allows you to pass a callback for processing of messages.
"""

import paho.mqtt.client as paho
import paho.mqtt as mqtt
import ssl


def _on_connect(c, userdata, flags, rc):
    """Internal callback"""
    if rc != 0:
        raise mqtt.MQTTException(paho.connack_string(rc))

    if isinstance(userdata['topics'], list):
        for t in userdata['topics']:
            c.subscribe(t, userdata['qos'])
    else:
        c.subscribe(userdata['topics'], userdata['qos'])


def _on_message_callback(c, userdata, message):
    """Internal callback"""
    userdata['callback'](c, userdata['userdata'], message)


def _on_message_simple(c, userdata, message):
    """Internal callback"""

    if userdata['msg_count'] == 0:
        return

    # Don't process stale retained messages if 'retained' was false
    if not userdata['retained'] and message.retain:
        return

    userdata['msg_count'] = userdata['msg_count'] - 1

    if userdata['messages'] is None and userdata['msg_count'] == 0:
        userdata['messages'] = message
        c.disconnect()
        return

    userdata['messages'].append(message)

    if userdata['msg_count'] == 0:
        c.disconnect()


def callback(callback, topics, qos=0, userdata=None, hostname="localhost",
             port=1883, client_id="", keepalive=60, will=None, auth=None,
             tls=None, protocol=paho.MQTTv311, transport="tcp"):
    """Subscribe to a list of topics and process them in a callback function.

    This function creates an MQTT client, connects to a broker and subscribes
    to a list of topics. Incoming messages are processed by the user provided
    callback. This is a blocking function and will never return.

    callback : function of the form "on_message(client, userdata, message)" for
               processing the messages received.

    topics : either a string containing a single topic to subscribe to, or a
             list of topics to subscribe to.

    qos : the qos to use when subscribing. This is applied to all topics.

    userdata : passed to the callback

    hostname : a string containing the address of the broker to connect to.
               Defaults to localhost.

    port : the port to connect to the broker on. Defaults to 1883.

    client_id : the MQTT client id to use. If "" or None, the Paho library will
                generate a client id automatically.

    keepalive : the keepalive timeout value for the client. Defaults to 60
                seconds.

    will : a dict containing will parameters for the client:
           will = {'topic': "<topic>", 'payload':"<payload>", 'qos':<qos>,
           'retain':<retain>}.
           Topic is required, all other parameters are optional and will
           default to None, 0 and False respectively.
           Defaults to None, which indicates no will should be used.

    auth : a dict containing authentication parameters for the client:
           auth = {'username':"<username>", 'password':"<password>"}
           Username is required, password is optional and will default to None
           if not provided.
           Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client: dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>", 'keyfile':"<keyfile>", 'tls_version':"<tls_version>", 'ciphers':"<ciphers">} ca_certs is required, all other parameters are optional and will default to None if not provided, which results in the client using the default behaviour - see the paho.mqtt.client documentation. Defaults to None, which indicates that TLS should not be used. transport : set to "tcp" to use the default setting of transport which is raw TCP. Set to "websockets" to use WebSockets as the transport. """ if qos < 0 or qos > 2: raise ValueError('qos must be in the range 0-2') callback_userdata = { 'callback':callback, 'topics':topics, 'qos':qos, 'userdata':userdata} client = paho.Client(client_id=client_id, userdata=callback_userdata, protocol=protocol, transport=transport) client.on_message = _on_message_callback client.on_connect = _on_connect if auth is not None: username = auth['username'] try: password = auth['password'] except KeyError: password = None client.username_pw_set(username, password) if will is not None: will_topic = will['topic'] try: will_payload = will['payload'] except KeyError: will_payload = None try: will_qos = will['qos'] except KeyError: will_qos = 0 try: will_retain = will['retain'] except KeyError: will_retain = False client.will_set(will_topic, will_payload, will_qos, will_retain) if tls is not None: ca_certs = tls['ca_certs'] try: certfile = tls['certfile'] except KeyError: certfile = None try: keyfile = tls['keyfile'] except KeyError: keyfile = None try: tls_version = tls['tls_version'] except KeyError: tls_version = ssl.PROTOCOL_SSLv23; try: ciphers = tls['ciphers'] except KeyError: ciphers = None client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version, ciphers=ciphers) client.connect(hostname, port, keepalive) client.loop_forever() def simple(topics, qos=0, msg_count=1, retained=True, hostname="localhost", port=1883, client_id="", keepalive=60, will=None, auth=None, tls=None, protocol=paho.MQTTv311, transport="tcp"): """Subscribe to a list of topics and return msg_count messages. This function creates an MQTT client, connects to a broker and subscribes to a list of topics. Once "msg_count" messages have been received, it disconnects cleanly from the broker and returns the messages. topics : either a string containing a single topic to subscribe to, or a list of topics to subscribe to. qos : the qos to use when subscribing. This is applied to all topics. msg_count : the number of messages to retrieve from the broker. if msg_count == 1 then a single MQTTMessage will be returned. if msg_count > 1 then a list of MQTTMessages will be returned. retained : If set to True, retained messages will be processed the same as non-retained messages. If set to False, retained messages will be ignored. This means that with retained=False and msg_count=1, the function will return the first message received that does not have the retained flag set. hostname : a string containing the address of the broker to connect to. Defaults to localhost. port : the port to connect to the broker on. Defaults to 1883. client_id : the MQTT client id to use. If "" or None, the Paho library will generate a client id automatically. keepalive : the keepalive timeout value for the client. Defaults to 60 seconds. will : a dict containing will parameters for the client: will = {'topic': "<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}. 
Topic is required, all other parameters are optional and will default to None, 0 and False respectively. Defaults to None, which indicates no will should be used. auth : a dict containing authentication parameters for the client: auth = {'username':"<username>", 'password':"<password>"} Username is required, password is optional and will default to None if not provided. Defaults to None, which indicates no authentication is to be used. tls : a dict containing TLS configuration parameters for the client: dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>", 'keyfile':"<keyfile>", 'tls_version':"<tls_version>", 'ciphers':"<ciphers">} ca_certs is required, all other parameters are optional and will default to None if not provided, which results in the client using the default behaviour - see the paho.mqtt.client documentation. Defaults to None, which indicates that TLS should not be used. transport : set to "tcp" to use the default setting of transport which is raw TCP. Set to "websockets" to use WebSockets as the transport. """ if msg_count < 1: raise ValueError('msg_count must be > 0') # Set ourselves up to return a single message if msg_count == 1, or a list # if > 1. if msg_count == 1: messages = None else: messages = [] userdata = {'retained':retained, 'msg_count':msg_count, 'messages':messages} callback(_on_message_simple, topics, qos, userdata, hostname, port, client_id, keepalive, will, auth, tls, protocol, transport) return userdata['messages']
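A short usage sketch for the two public functions documented above; the broker address and topic names are placeholders, and a reachable MQTT broker is assumed:

import paho.mqtt.subscribe as subscribe

# Blocking call that returns a single MQTTMessage from one topic.
msg = subscribe.simple("sensors/temperature", hostname="broker.example.com")
print(msg.topic, msg.payload)

def on_message(client, userdata, message):
    # Invoked once per received message; callback() itself never returns.
    print(message.topic, message.payload)

subscribe.callback(on_message, ["sensors/#"], hostname="broker.example.com")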
38.523077
92
0.648862
0
0
0
0
0
0
0
0
6,764
0.675319
539b8675dc9b20bffab7e413aa5943d934069113
1,561
py
Python
py/2017/day24/aoc_day_24.py
cs-cordero/advent-of-code
614b8f78b43c54ef180a7dc411a0d1366a62944f
[ "MIT" ]
null
null
null
py/2017/day24/aoc_day_24.py
cs-cordero/advent-of-code
614b8f78b43c54ef180a7dc411a0d1366a62944f
[ "MIT" ]
null
null
null
py/2017/day24/aoc_day_24.py
cs-cordero/advent-of-code
614b8f78b43c54ef180a7dc411a0d1366a62944f
[ "MIT" ]
2
2019-12-01T15:33:27.000Z
2020-12-14T05:37:23.000Z
from collections import defaultdict


def solution():
    starting_components = d[0]
    best_scores = []
    for component in starting_components:
        n_a, n_b = get_ports(component)
        nxt_port = n_a if n_b == 0 else n_b
        best_scores.append(recurse(component, set(), nxt_port, 0))
    print("best (strength, length):", max(best_scores))


def recurse(component, seen, next_port, level):
    seen.add(component)
    c_a, c_b = get_ports(component)
    next_components = d[next_port] - seen

    my_score = sum(get_ports(component))
    scores = []
    for next_component in next_components:
        n_a, n_b = get_ports(next_component)
        nxt_port = n_a if n_b in (c_a, c_b) else n_b
        score, reclevel = recurse(next_component, seen.copy(), nxt_port, level + 1)
        scores.append((score, reclevel))
    scores = sorted(scores, key=lambda x: (x[1], x[0]), reverse=True)
    print(component, level, scores)
    return my_score + (scores[0][0] if scores else 0), scores[0][1] if scores else level


def get_ports(component):
    return map(int, component.split("/"))


if __name__ == "__main__":
    d = defaultdict(set)
    # with open('aoc_day_24_sample.txt') as f:
    with open("aoc_day_24_input.txt") as f:
        sample = f.readlines()
    # sample = [
    #     '0/1',
    #     '1/2',
    #     '1/3',
    #     '1/4',
    #     '5/0',
    #     '2/5',
    #     '3/6',
    #     '4/500'
    # ]
    for component in sample:
        a, b = map(int, component.split("/"))
        d[a].add(component)
        d[b].add(component)
    solution()
27.875
88
0.59385
0
0
0
0
0
0
0
0
198
0.126842
539ea2a319db010bc0f4b82dc9bd72f7d9cbdfe7
175
py
Python
scratchnet/scratchnet.py
Gr1m3y/scratchnet
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
[ "MIT" ]
null
null
null
scratchnet/scratchnet.py
Gr1m3y/scratchnet
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
[ "MIT" ]
null
null
null
scratchnet/scratchnet.py
Gr1m3y/scratchnet
5fce471b6e12dc05b3a92fd8581445f7d598d1c3
[ "MIT" ]
null
null
null
import numpy as np import network def main(): x = np.array([2, 3]) nw = network.NeuralNetwork() print(nw.feedforward(x)) if __name__ == "__main__": main()
13.461538
32
0.617143
0
0
0
0
0
0
0
0
10
0.057143
539eb7f2ba00a494348f5e2c2412e8b083606e64
1,048
py
Python
live-plotting.py
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
05a61dca25ef6dc6827e3389a753eb65a09c1813
[ "Apache-2.0" ]
null
null
null
live-plotting.py
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
05a61dca25ef6dc6827e3389a753eb65a09c1813
[ "Apache-2.0" ]
22
2017-11-10T21:37:20.000Z
2017-12-05T22:36:50.000Z
live-plotting.py
rmhsawyer/EC601-Final-Project
05a61dca25ef6dc6827e3389a753eb65a09c1813
[ "Apache-2.0" ]
3
2017-10-30T20:07:18.000Z
2017-12-03T00:47:18.000Z
#draw the predictions from real-time.py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style

style.use('fivethirtyeight')

fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)

def animate(i):
    # Re-read the whole log on every frame; each line holds one reading:
    # time,angry,fear,happy,sad,surprise,neutral
    with open('emotion.txt', 'r') as graph_data:
        lines = graph_data.read().split('\n')
    xs = []
    y_angry = []
    y_fear = []
    y_happy = []
    y_sad = []
    y_surprise = []
    y_neutral = []
    for line in lines:
        if len(line) > 1:
            time, angry, fear, happy, sad, surprise, neutral = line.split(',')
            xs.append(time)
            # The fields arrive as strings; cast to float so matplotlib
            # scales the y axis numerically instead of categorically.
            y_angry.append(float(angry))
            y_fear.append(float(fear))
            y_happy.append(float(happy))
            y_sad.append(float(sad))
            y_surprise.append(float(surprise))
            y_neutral.append(float(neutral))
    ax1.clear()
    ax1.plot(xs, y_angry)
    ax1.plot(xs, y_fear)
    ax1.plot(xs, y_happy)
    ax1.plot(xs, y_sad)
    ax1.plot(xs, y_surprise)
    ax1.plot(xs, y_neutral)

ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
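animate() expects emotion.txt to contain one comma-separated reading per line in the order time,angry,fear,happy,sad,surprise,neutral. A hedged sketch of a producer writing that format (the numbers are illustrative; the real writer is presumably real-time.py):

# Append one reading in the format the plotting loop parses.
with open('emotion.txt', 'a') as f:
    f.write("1,0.05,0.10,0.60,0.05,0.10,0.10\n")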
24.952381
78
0.605916
0
0
0
0
0
0
0
0
79
0.075382
539f08b39f8bed483a13e19cdf11f4b9e2b776e6
1,850
py
Python
code/run_policy.py
kirk86/ARS
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
[ "BSD-2-Clause" ]
null
null
null
code/run_policy.py
kirk86/ARS
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
[ "BSD-2-Clause" ]
null
null
null
code/run_policy.py
kirk86/ARS
a4ac03e06bce5f183f7b18ea74b81c6c45c4426b
[ "BSD-2-Clause" ]
1
2019-03-27T14:11:16.000Z
2019-03-27T14:11:16.000Z
""" Code to load a policy and generate rollout data. Adapted from https://github.com/berkeleydeeprlcourse. Example usage: python run_policy.py ../trained_policies/Humanoid-v1/policy_reward_11600/lin_policy_plus.npz Humanoid-v1 --render \ --num_rollouts 20 """ import numpy as np import gym def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument('expert_policy_file', type=str) parser.add_argument('envname', type=str) parser.add_argument('--render', action='store_true') parser.add_argument('--num_rollouts', type=int, default=20, help='Number of expert rollouts') args = parser.parse_args() print('loading and building expert policy') lin_policy = np.load(args.expert_policy_file) lin_policy = lin_policy[lin_policy.files[0]] M = lin_policy[0] # mean and std of state vectors estimated online by ARS. mean = lin_policy[1] std = lin_policy[2] env = gym.make(args.envname) returns = [] observations = [] actions = [] for i in range(args.num_rollouts): print('iter', i) obs = env.reset() done = False totalr = 0. steps = 0 while not done: action = np.dot(M, (obs - mean)/std) observations.append(obs) actions.append(action) obs, r, done, _ = env.step(action) totalr += r steps += 1 if args.render: env.render() if steps % 100 == 0: print("%i/%i"%(steps, env.spec.timestep_limit)) if steps >= env.spec.timestep_limit: break returns.append(totalr) print('returns', returns) print('mean return', np.mean(returns)) print('std of return', np.std(returns)) if __name__ == '__main__': main()
28.90625
119
0.605946
0
0
0
0
0
0
0
0
522
0.282162
539f836eb4814996e6e8dcea4c9325a8edccf36d
6,048
py
Python
src/poliastro/plotting/tisserand.py
TreshUp/poliastro
602eb3c39d315be6dc1edaa12d72ab0e361334f6
[ "MIT" ]
null
null
null
src/poliastro/plotting/tisserand.py
TreshUp/poliastro
602eb3c39d315be6dc1edaa12d72ab0e361334f6
[ "MIT" ]
null
null
null
src/poliastro/plotting/tisserand.py
TreshUp/poliastro
602eb3c39d315be6dc1edaa12d72ab0e361334f6
[ "MIT" ]
null
null
null
""" Generates Tisserand plots """ from enum import Enum import numpy as np from astropy import units as u from matplotlib import pyplot as plt from poliastro.plotting._base import BODY_COLORS from poliastro.twobody.mean_elements import get_mean_elements from poliastro.util import norm class TisserandKind(Enum): """All possible Tisserand kinds""" APSIS = "apsis" ENERGY = "energy" PERIOD = "period" class TisserandPlotter: """Generates Tisserand figures""" def __init__(self, kind=TisserandKind.APSIS, axes=None): """Object initializer Parameters ---------- kind : TisserandKind Nature for the Tisserand axes : ~matplotlib.pyplot.axes Axes for the figure """ # Asign Tisserand kind self.kind = kind # Check if axis available if not axes: _, self.ax = plt.subplots(1, 1) else: self.ax = axes # Force axes scale regarding Tisserand kind self.ax.set_xscale("log") if self.kind == TisserandKind.APSIS: self.ax.set_yscale("log") def _solve_tisserand( self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100 ): """Solves all possible Tisserand lines with a meshgrid workflow Parameters ---------- body : ~poliastro.bodies.Body Body to be plotted Tisserand vinf_array : ~astropy.units.Quantity Desired Vinf for the flyby num_contours : int Number of contour lines for flyby speed alpha_lim : tuple Minimum and maximum flyby angles. N : int Number of points for flyby angle. Notes ----- The algorithm for generating Tisserand plots is the one depicted in "Preliminary Trajectory Design of a Mission to Enceladus" by David Falcato Fialho Palma, section 3.6 """ # Generate mean orbital elements Earth body_rv = get_mean_elements(body).to_vectors() R_body, V_body = norm(body_rv.r), norm(body_rv.v) # Generate non-dimensional velocity and alpha span vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours) alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N) vinf_array /= V_body # Construct the mesh for any configuration V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array) # Solving for non-dimensional a_sc and ecc_sc A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA)) ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2) # Compute main Tisserand variables RR_P = A_SC * R_body * (1 - ECC_SC) RR_A = A_SC * R_body * (1 + ECC_SC) TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k) EE = -body.parent.k / (2 * A_SC * R_body) # Build color lines to internal canvas return RR_P, RR_A, EE, TT def _build_lines(self, RR_P, RR_A, EE, TT, color): """Collect lines and append them to internal data Parameters ---------- data : list Array containing [RR_P, RR_A, EE, TT, color] Returns ------- lines: list Plotting lines for the Tisserand """ # Plot desired kind lines if self.kind == TisserandKind.APSIS: # Generate apsis lines lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color) elif self.kind == TisserandKind.ENERGY: # Generate energy lines lines = self.ax.plot( RR_P.to(u.AU), EE.to(u.km ** 2 / u.s ** 2), color=color ) elif self.kind == TisserandKind.PERIOD: # Generate period lines lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color) return lines def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None): """Plots body Tisserand line within flyby angle Parameters ---------- body : ~poliastro.bodies.Body Body to be plotted Tisserand vinf : ~astropy.units.Quantity Vinf velocity line alpha_lim : tuple Minimum and maximum flyby angles color : str String representing for the color lines Returns ------- self.ax: ~matplotlib.axes.Axes Apsis tisserand is the 
default plotting option
        """

        # HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
        vinf_span = (vinf, vinf)

        # Solve Tisserand parameters
        RR_P, RR_A, EE, TT = self._solve_tisserand(
            body, vinf_span, num_contours=2, alpha_lim=alpha_lim
        )

        # Check if color defined
        if not color:
            color = BODY_COLORS[body.name]

        # Build canvas lines from Tisserand parameters
        self._build_lines(RR_P, RR_A, EE, TT, color)

        return self.ax

    def plot(self, body, vinf_span, num_contours=10, color=None):
        """Plots body Tisserand for given amount of solutions within Vinf span

        Parameters
        ----------
        body : ~poliastro.bodies.Body
            Body to be plotted Tisserand
        vinf_span : tuple
            Minimum and maximum Vinf velocities
        num_contours : int
            Number of points to iterate over previously defined velocities
        color : str
            String representing the color of the lines

        Returns
        -------
        self.ax: ~matplotlib.axes.Axes
            Apsis Tisserand is the default plotting option

        """

        # Solve Tisserand parameters
        RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)

        # Check if color defined
        if not color:
            color = BODY_COLORS[body.name]

        # Build canvas lines from Tisserand parameters
        self._build_lines(RR_P, RR_A, EE, TT, color)

        return self.ax
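A hedged usage sketch for the plotter above; Earth and the Vinf span are illustrative inputs, and an interactive matplotlib backend is assumed:

from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.bodies import Earth
from poliastro.plotting.tisserand import TisserandKind, TisserandPlotter

plotter = TisserandPlotter(kind=TisserandKind.APSIS)
plotter.plot(Earth, vinf_span=(1 * u.km / u.s, 10 * u.km / u.s), num_contours=10)
plt.show()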
30.24
81
0.586475
5,754
0.951389
0
0
0
0
0
0
3,130
0.517526
53a13df64d25ae2c757b6265afa2baab533adc4f
3,122
py
Python
libs/Rack.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
22
2015-01-16T01:36:32.000Z
2020-06-08T00:46:18.000Z
libs/Rack.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
8
2015-12-28T18:56:19.000Z
2019-04-01T17:33:48.000Z
libs/Rack.py
jlin/inventory
c098c98e570c3bf9fadfd811eb75e1213f6ea428
[ "BSD-3-Clause" ]
13
2015-01-13T20:56:22.000Z
2022-02-23T06:01:17.000Z
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json

factory = RequestFactory()

class Rack:
    rack_name = None
    tree = None
    kv = None
    ru = None
    width = None
    systems = []
    ethernet_patch_panel_24 = []
    ethernet_patch_panel_48 = []

    def __init__(self, rack_name):
        self.systems = []
        self.rack_name = rack_name
        self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
        self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
        self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
        self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
        h = KeyValueHandler()
        for s in self.system_list:
            request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
            tree = h.read(request)
            system_ru = self._get_system_ru(tree)
            system_image = self._get_system_image(tree)
            system_slot = self._get_system_slot(tree)
            self.systems.append({
                "system_name": s.system.hostname,
                "system_id": s.system.id,
                "system_ru": system_ru,
                "system_image": system_image,
                'system_slot': system_slot,
                'operating_system': str(s.system.operating_system),
                'server_model': str(s.system.server_model),
                'oob_ip': str(s.system.oob_ip),
            })
        self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
        try:
            self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
        except Exception:
            self.ru = 42
        try:
            self.width = self.kv.keyvalue_set.get(key='rack_width').value
        except Exception:
            self.width = 30

    def _get_ethernet_patch_panels(self, tree, panel_type, port_count):
        ret = []
        for i in tree.keyvalue_set.all():
            match_string = "%i_port_%s_patch_panel" % (port_count, panel_type)
            if str(i.key) == match_string:
                ret.append(i.value)
        return ret

    def _get_system_ru(self, tree):
        for i in tree.iterkeys():
            try:
                if 'system_ru' in i.split(':'):
                    return tree[i]
            except Exception:
                pass
        return 4

    def _get_system_image(self, tree):
        for i in tree.iterkeys():
            try:
                if 'system_image' in i.split(':'):
                    return tree[i]
            except Exception:
                pass
        return None

    def _get_system_slot(self, tree):
        for i in tree.iterkeys():
            try:
                if 'system_slot' in i.split(':'):
                    return tree[i]
            except Exception:
                pass
        return 1
34.688889
122
0.575593
2,837
0.908712
0
0
0
0
0
0
293
0.09385
53a26f62743c91c61bf312038531a22cbbef6701
151
py
Python
r2c_isg/functions/__init__.py
returntocorp/inputset-generator
c33952cc5683e9e70b24f76936c42ec8e354d121
[ "MIT" ]
3
2019-11-02T20:14:34.000Z
2020-01-23T21:47:20.000Z
r2c_isg/functions/__init__.py
returntocorp/inputset-generator
c33952cc5683e9e70b24f76936c42ec8e354d121
[ "MIT" ]
19
2019-09-18T01:48:07.000Z
2021-11-04T11:20:48.000Z
r2c_isg/functions/__init__.py
returntocorp/inputset-generator
c33952cc5683e9e70b24f76936c42ec8e354d121
[ "MIT" ]
3
2019-11-15T22:31:13.000Z
2020-03-10T10:19:39.000Z
from .trim import trim from .sample import sample from .sort import sort function_map = { 'trim': trim, 'sample': sample, 'sort': sort }
13.727273
26
0.649007
0
0
0
0
0
0
0
0
20
0.13245
53a287190d58a2db9d8427aaa2bd973ac3e2cd59
59
py
Python
__init__.py
csalyk/nirspec
58661371871d29103afe42bfccc0bff9ff773914
[ "MIT-0" ]
null
null
null
__init__.py
csalyk/nirspec
58661371871d29103afe42bfccc0bff9ff773914
[ "MIT-0" ]
null
null
null
__init__.py
csalyk/nirspec
58661371871d29103afe42bfccc0bff9ff773914
[ "MIT-0" ]
null
null
null
from .nirspec import divspec from .nirspec import gluespec
19.666667
29
0.830508
0
0
0
0
0
0
0
0
0
0
53a2e756b6afda167f3e4ff4e520ec037aac6965
9,526
py
Python
poem.py
xcollantes/poetry-generator
456c9702f0105b49b8c3edbb55043a10efbf359b
[ "MIT" ]
null
null
null
poem.py
xcollantes/poetry-generator
456c9702f0105b49b8c3edbb55043a10efbf359b
[ "MIT" ]
null
null
null
poem.py
xcollantes/poetry-generator
456c9702f0105b49b8c3edbb55043a10efbf359b
[ "MIT" ]
null
null
null
from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re

try:
    import en
except ImportError:
    print("DOWNLOAD NODEBOX LINGUISTICS")
    print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")

VERSION = "1.1"
THEME_PROB = 0

class bnfDictionary:

    def __init__(self, file):
        self.grammar = yaml.load(open(file, 'r'), Loader=yaml.SafeLoader)
        self.poemtype = "<poem>"

    def generate(self, key, num):
        gram = self.grammar[key]
        if len(gram) == 1:
            i = 0
        else:
            i = random.randint(0, len(gram) - 1)
        string = ""
        if "<" not in gram[i]:
            string = gram[i]
        else:
            for word in gram[i].split():
                if "<" not in word:
                    string = string + word + " "
                else:
                    if "verb" in word and word != '<adverb>':
                        if "pverb" in word or "mushy" in self.poemtype:
                            v = self.generate("<pverb>", 1).strip()
                        elif "nverb" in word:
                            v = self.generate("<nverb>", 1).strip()
                        # else:
                        #     v = self.generate("<verb>", 1).strip()
                        if random.randint(1, 100) < THEME_PROB:
                            v = self.generate("<theme-verb>", 1).strip()
                        if "verb-inf" in word:
                            string = string + \
                                en.verb.present_participle(v) + " "
                        elif "verb-pr" in word:
                            string = string + \
                                en.verb.present(
                                    v, person=3, negate=False) + " "
                        elif "verb-past" in word:
                            string = string + en.verb.past(v) + " "
                        else:
                            string = string + v + " "
                    elif "noun" in word:
                        if "pnoun" in word or "mushy" in self.poemtype:
                            v = self.generate("<pnoun>", 1).strip()
                        elif "nnoun" in word:
                            v = self.generate("<nnoun>", 1).strip()
                        else:
                            v = self.generate("<noun>", 1).strip()
                        if random.randint(1, 100) < THEME_PROB:
                            v = self.generate("<theme-noun>", 1).strip()
                        if "pl" in word:
                            v = en.noun.plural(v)
                        string = string + v + " "
                    elif "person" in word:
                        v = self.generate("<person>", 1).strip()
                        if "pl" in word:
                            v = en.noun.plural(v)
                        string = string + v + " "
                    elif "adj" in word:
                        if "mushy" in self.poemtype:
                            v = self.generate("<padj>", 1)
                        else:
                            if random.randint(1, 100) < THEME_PROB:
                                v = self.generate("<theme-adj>", 1).strip()
                            else:
                                v = self.generate(word, 1).strip()
                        string = string + v + " "
                    elif "fruit" in word:
                        v = self.generate("<fruit>", 1).strip()
                        if "pl" in word:
                            v = en.noun.plural(v)
                        string = string + self.generate(word, 1) + " "
                    elif "person" in word:
                        v = self.generate("<fruit>", 1).strip()
                        if "pl" in word:
                            v = en.noun.plural(v)
                        string = string + self.generate(word, 1) + " "
                    else:
                        if "-pl" in word:
                            v = en.noun.plural(self.generate(word.replace("-pl", ""), 1))
                        else:
                            v = self.generate(word, 1)
                        string = string + v + " "
        return string

    def generatePretty(self, key, seed_str):
        if seed_str is None:
            seed_str = str(uuid.uuid4()).split("-")[0]
        random.seed(uuid.uuid5(uuid.NAMESPACE_DNS, seed_str).int)
        #tool = language_check.LanguageTool('en-US')
        self.poemtype = key
        if key == "<mushypoem>":
            key = "<poem>"
        poem = self.generate(key, 1)
        poem = poem.replace(" ,", ",")
        puncuation = [".", ".", ".", ".", "!", "?"]
        dontbreaks = ["of", "behind", "the", "when", "what", "why", "who", ",", "your", "by", "like", "to", "you", "your", "a", "are", "become", "newline"]
        capitalize = False
        breaks = 0
        poem2 = []
        foundFirstBreak = False
        for word in poem.replace("\n", "newline").split():
            poem2.append(word.lower())
            if random.randint(1, 100) < 2 and "newline" not in word and foundFirstBreak:
                isgood = True
                for dontbreak in list(dontbreaks + puncuation):
                    if dontbreak == word.lower():
                        isgood = False
                if isgood:
                    poem2.append("newline")
            if "newline" in word:
                foundFirstBreak = True
        poem3 = []
        beforeFirstBreak = True
        for word in poem2:
            if "newline" in word:
                breaks += 1
                beforeFirstBreak = False
            else:
breaks = 0 if beforeFirstBreak or word == "i" or "i'" in word: word = word.capitalize() poem3.append(word) capitalize = False else: if breaks > 1: capitalize = True if capitalize == True and "newline" not in word: word = word.capitalize() capitalize = False for punc in list(set(puncuation)): if punc in word: capitalize = True poem3.append(word) if random.randint(1, 100) < 0 and "newline" not in word: isgood = True for dontbreak in list(dontbreaks + puncuation): if dontbreak == word.lower(): isgood = False if isgood: poem3.append(random.choice(puncuation)) capitalize = True # noPunc = True # for punc in list(set(puncuation)): # if punc in word: # noPunc = False # if noPunc: # poem3.append(random.choice(puncuation)) newPoem = " ".join(poem3) newPoem = newPoem.replace(" a a", " an a") newPoem = newPoem.replace("newline .", ". newline") newPoem = newPoem.replace("newline ?", "? newline") newPoem = newPoem.replace("newline !", "! newline") newPoem = newPoem.replace("newline ,", ", newline") newPoem = newPoem.replace("newline", "\n") newPoem = newPoem.replace(" \n \n", "\n\n") newPoem = newPoem.replace("\n \n ", "\n\n") newPoem = newPoem.replace(" '", "'") for punc in list(set(puncuation)): newPoem = newPoem.replace(" " + punc, punc) for punc in list(set(puncuation)): newPoem = newPoem.replace(" " + punc, punc) for punc in list(set(puncuation)): newPoem = newPoem.replace(" " + punc, punc) newPoem = newPoem.replace(" ,", ",") newPoem = newPoem.replace("?.", "?") newPoem = newPoem.replace(".?", ".") newPoem = newPoem.replace(",.", ",") newPoem = newPoem.replace("!.", "!") newPoem = newPoem.replace("..", ".") newPoem = newPoem.replace("..", ".") newPoem = newPoem.replace("..", ".") title = newPoem.split("\n")[0] newTitle = title.replace(".", "") newPoem = newPoem.replace(title, "<h1>" + newTitle + "</h1>") newPoem2 = "" firstLine = False secondLine = False for line in newPoem.split("\n"): if len(line) > 0: if firstLine and not secondLine: newPoem2 = newPoem2 + "<p>\n" secondLine = True if firstLine == False: firstLine = True newPoem2 = newPoem2 + line + " \n" if firstLine and secondLine: newPoem2 = newPoem2 + line + " <br />\n" else: newPoem2 = newPoem2 + " <br />\n" newPoem2 = newPoem2 + "</p>" return newPoem2,seed_str bnf = bnfDictionary('brain.yaml') def generate_poem(poemtype, hex_seed=None): p,seed_str = bnf.generatePretty('<' + poemtype + '>',hex_seed) return p,seed_str if __name__ == '__main__': poemtype = 'poem' if 'mushy' in sys.argv[1:]: poemtype = 'mushypoem' p,seed_str=generate_poem(poemtype) print(("*"*30 + "\n"*5)) filtered = [] for line in re.sub("<.*?>", " ", p).split("\n"): if len(line.strip()) > 0: filtered.append(line.strip()) else: filtered.append("pause") print(p)
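A hedged usage sketch of the module's entry point; it assumes the file is importable as poem and that brain.yaml plus the NodeBox 'en' module are available on the path:

from poem import generate_poem

poem_html, seed = generate_poem('poem')        # fresh random seed each call
same_again, _ = generate_poem('poem', seed)    # reuse the seed to reproduce a poem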
39.526971
97
0.43607
8,594
0.902163
0
0
0
0
0
0
1,276
0.133949
53a46773e97ade0a733cbe735e77d4be70d5d02d
3,927
py
Python
openstack/tests/unit/block_storage/v2/test_proxy.py
infonova/openstacksdk
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
[ "Apache-2.0" ]
null
null
null
openstack/tests/unit/block_storage/v2/test_proxy.py
infonova/openstacksdk
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
[ "Apache-2.0" ]
null
null
null
openstack/tests/unit/block_storage/v2/test_proxy.py
infonova/openstacksdk
3cf6730a71d8fb448f24af8a5b4e82f2af749cea
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.block_storage.v2 import _proxy from openstack.block_storage.v2 import snapshot from openstack.block_storage.v2 import stats from openstack.block_storage.v2 import type from openstack.block_storage.v2 import volume from openstack.tests.unit import test_proxy_base class TestVolumeProxy(test_proxy_base.TestProxyBase): def setUp(self): super(TestVolumeProxy, self).setUp() self.proxy = _proxy.Proxy(self.session) def test_snapshot_get(self): self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) def test_snapshots_detailed(self): self.verify_list(self.proxy.snapshots, snapshot.SnapshotDetail, paginated=True, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1}) def test_snapshots_not_detailed(self): self.verify_list(self.proxy.snapshots, snapshot.Snapshot, paginated=True, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}) def test_snapshot_create_attrs(self): self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) def test_snapshot_delete(self): self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, False) def test_snapshot_delete_ignore(self): self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, True) def test_type_get(self): self.verify_get(self.proxy.get_type, type.Type) def test_types(self): self.verify_list(self.proxy.types, type.Type, paginated=False) def test_type_create_attrs(self): self.verify_create(self.proxy.create_type, type.Type) def test_type_delete(self): self.verify_delete(self.proxy.delete_type, type.Type, False) def test_type_delete_ignore(self): self.verify_delete(self.proxy.delete_type, type.Type, True) def test_volume_get(self): self.verify_get(self.proxy.get_volume, volume.Volume) def test_volumes_detailed(self): self.verify_list(self.proxy.volumes, volume.VolumeDetail, paginated=True, method_kwargs={"details": True, "query": 1}, expected_kwargs={"query": 1}) def test_volumes_not_detailed(self): self.verify_list(self.proxy.volumes, volume.Volume, paginated=True, method_kwargs={"details": False, "query": 1}, expected_kwargs={"query": 1}) def test_volume_create_attrs(self): self.verify_create(self.proxy.create_volume, volume.Volume) def test_volume_delete(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, False) def test_volume_delete_ignore(self): self.verify_delete(self.proxy.delete_volume, volume.Volume, True) def test_volume_extend(self): self._verify("openstack.block_storage.v2.volume.Volume.extend", self.proxy.extend_volume, method_args=["value", "new-size"], expected_args=["new-size"]) def test_backend_pools(self): self.verify_list(self.proxy.backend_pools, stats.Pools, paginated=False)
39.27
75
0.663102
3,100
0.789407
0
0
0
0
0
0
702
0.178762
53a4815531cf8a3d91a379873dd45b934995baa1
20,346
py
Python
src/ncstyler/console.py
starofrainnight/ncstyler
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
[ "MIT" ]
null
null
null
src/ncstyler/console.py
starofrainnight/ncstyler
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
[ "MIT" ]
null
null
null
src/ncstyler/console.py
starofrainnight/ncstyler
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback

class CppDefine(dict):
    def __init__(self):
        self["name"] = None
        self["parameters"] = []
        self["line_number"] = -1

class CppDefineParameter(dict):
    def __init__(self):
        self["name"] = None
        self["line_number"] = -1

class CppNamespace(dict):
    def __init__(self):
        self["name"] = None
        self["line_number"] = -1

class CppFileName(dict):
    def __init__(self):
        self["name"] = None
        self["line_number"] = -1

class Application(object):
    def __init__(self):
        description = '''A styler targeting the naming conventions of source code'''
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument("-c", "--config", help="Configuration file path (In YAML format)", required=True)
        parser.add_argument("-o", "--output", help="Output file path")
        parser.add_argument("-d", "--debug", action='store_true', help="Print trace stack")
        parser.add_argument("file_path", help="Source file path")
        self.__args = parser.parse_args()

        # If the user does not specify an output path, we default it to the
        # input file path
        if self.__args.output is None:
            self.__args.output = self.__args.file_path

        self.__config = yaml.load(open(self.__args.config), Loader=yaml.SafeLoader)
        old_base = self.__config["_base_"]
        self.__config["_base_"] = {
            "re": "[a-zA-Z0-9_]+",
            "error": "",
        }
        self.__config["_base_"].update(old_base)

    def parse_define(self, adefine):
        matched = re.match(r"[^\w]*(\w+)(?:\(([^\)]*)\)|\s*).*", adefine)
        name = matched.group(1)
        parameters = []
        if matched.group(2) is not None:
            parameter_names = matched.group(2).split(',')
            for parameter_name in parameter_names:
                aparameter = CppDefineParameter()
                aparameter["name"] = parameter_name.strip()
                parameters.append(aparameter)

        result = CppDefine()
        result["name"] = name
        result["parameters"] = parameters
        return result

    def _is_special_method(self, amethod):
        if isinstance(amethod, six.string_types):
            amethod_name = amethod
        else:
            amethod_name = amethod["name"]

        founded = re.findall(r"(?:^|[^\w]+)operator[^\w]+", amethod_name)
        if len(founded) <= 0:
            if re.match(r"(?:^|.*\W)operator\W.*", amethod["debug"]) is not None:
                return True
            return False

        return True

    def _get_argument_name(self, an_argument):
        if isinstance(an_argument, six.string_types):
            return an_argument

        if len(an_argument["name"]) > 0:
            return an_argument["name"]

        # If it's a functor??
with "class name::function" style matched = re.match(r"^\w+\s*\(\w*::\*(\w+)\)\(.*$", an_argument["type"]) if matched is None: # with normal "function" style matched = re.match(r"[^\(]*\([^\)]*\W(\w+)\W.*\).*", an_argument["type"]) if matched is None: return "" else: return matched.group(1) def _get_config(self, name): override_table = { "class": "_base_", "function": "_base_", "variant": "_base_", "namespace": "_base_", "define": "_base_", "filename": "_base_", # Special config use to define filename rule "argument": "variant", "static_variant": "variant", "global_variant": "variant", "function_argument": "argument", "class_method_argument": "function_argument", "struct_method_argument": "class_method_argument", "define_function_argument": "function_argument", "define_function": "function", "class_method": "function", "struct_method": "class_method", "class_variant": "variant", "struct_variant": "class_variant", "typedef": "class", "struct": "class", "enum": "class", "enum_value": "define", "union": "struct", } my_config = dict() if name in override_table: base_name = override_table[name] my_config.update(self._get_config(base_name)) if name in self.__config: my_config.update(self.__config[name]) return my_config def _is_valid_variable(self, cpp_variable): if cpp_variable["type"] == "return": return False if len(cpp_variable["type"]) <= 0: return False return True def _get_cpp_method_re(self, name): prefix = "operator" if not name.startswith(prefix): return re.escape(name) # Operator methods chars = [] for achar in name[len(prefix):]: chars.append("\\s*") if achar.isalnum(): chars.append(achar) else: chars.append("\\") chars.append(achar) return "operator%s" % ''.join(chars) def _validate_codes_of_cpp_method(self, cpp_method): start_line_index = cpp_method["line_number"] - 1 # Extract cpp method codes rest_lines = self._source_lines[start_line_index:] content = '\n'.join(rest_lines) code_lines = [] name_re = self._get_cpp_method_re(cpp_method["name"]) name_start_pos = re.search(name_re, content).span()[0] parameters_start_pos = content.index('(', name_start_pos) parameters_stop_pos = content.index(')', parameters_start_pos) stack = [] try: i = content.index('{', parameters_stop_pos + 1) except ValueError: return; try: semicolonPos = content.index(';', parameters_stop_pos + 1) if semicolonPos <= i: return; except ValueError: # Not found a semicolon, just ignored. 
pass skipped_lines = cpp_method["line_number"] + content.count("\n", 0, i) - 2 stack.append(i) i += 1 first_i = i last_i = 0 is_finding_block_comment = False is_finding_single_comment = False while (len(stack) > 0) and (i < len(content)): c = content[i] if is_finding_block_comment: # If finding block comment, then skip all other searching if (c == "*") and (content[i + 1] == "/"): is_finding_block_comment = False elif (c == "/") and (content[i + 1] == "*"): is_finding_block_comment = True elif is_finding_single_comment: # If finding single comment, then skip all other searching if c == "\n": is_finding_single_comment = False elif (c == "/") and (content[i + 1] == "/"): is_finding_single_comment = True elif c == "{": stack.append(i) elif c == "}": last_i = i del stack[len(stack) - 1] i += 1 if len(stack) <= 0: content = content[first_i:last_i] founded = re.findall(r"\w+\W+(\w+)\s*=[^=]", content) for aname in founded: avariant = dict() avariant["name"] = aname avariant["line_number"] = cpp_method["line_number"] self._validate_name(avariant, "variant") def _validate_name(self, cpp_object, name_re): cpp_object_name = "" if isinstance(cpp_object, six.string_types): cpp_object_name = cpp_object cpp_object = dict() cpp_object["name"] = cpp_object_name cpp_object["line_number"] = -1 elif "name" in cpp_object: cpp_object_name = cpp_object["name"] if ('<' in cpp_object_name) and ("debug" in cpp_object): matched = re.match(r".*?(\w+)\W+$", cpp_object["debug"]) if matched is not None: cpp_object_name = matched.group(1) else: return # Parse union like names splitted = cpp_object_name.split() if len(splitted) > 1: cpp_object_name = splitted[-1] if '...' in cpp_object_name: # Does not have valid name, we must not check it . return if len(cpp_object_name) <= 0: # Does not have valid name, we must not check it . return matched = re.match(self._get_config(name_re)["re"], cpp_object_name) if matched is None: filename = os.path.basename(self.__args.file_path) error_message = self._get_config(name_re)["error"] if len(error_message) > 0: error_message = "%s %s" % ( ' '.join([rule_name.capitalize() for rule_name in name_re.split("_")]), error_message) if self.__args.debug: traceback.print_stack() raise SyntaxError("%s:%s:error: Name '%s' isn't matched with rule : %s! 
%s" % ( filename, cpp_object["line_number"], cpp_object_name, name_re, error_message)) def _get_class_realname(self, class_name): return re.match(r"(\w+).*", class_name).group(1) def _validate_cpp_object(self, cpp_object): cpp_object_type = type(cpp_object) if cpp_object_type == CppDefine: if len(cpp_object["parameters"]) <= 0: # Normal Define Name self._validate_name(cpp_object, "define") else: # Function Liked Define Name self._validate_name(cpp_object, "define_function") for aparameter in cpp_object["parameters"]: self._validate_name(aparameter, "define_function_argument") elif cpp_object_type == CppHeaderParser.CppClass: if "struct" in cpp_object["declaration_method"]: class_re = "struct" class_method_re = "struct_method" class_method_argument_re = "struct_method_argument" class_variant_re = "struct_variant" else: class_re = "class" class_method_re = "class_method" class_method_argument_re = "class_method_argument" class_variant_re = "class_variant" self._validate_name(cpp_object, class_re) for amethod in cpp_object.get_all_methods(): matched = re.match(r".*typedef\W[^\(]*\([^\)]*\W(\w+)\W.*\).*", amethod["debug"]) if matched is None: self._validate_codes_of_cpp_method(amethod) if not self._is_special_method(amethod): if ((amethod["name"] != self._get_class_realname(cpp_object["name"])) and (not amethod.get("constructor", False)) and (not amethod.get("destructor", False))): try: self._validate_name(amethod, class_method_re) except SyntaxError: is_need_reraise = True try: self._validate_name(amethod, "define_function") is_need_reraise = False except SyntaxError: pass if is_need_reraise: raise for aparameter in amethod["parameters"]: an_object = dict() an_object["line_number"] = aparameter["line_number"] if (aparameter["type"].endswith("::*") and (")" in aparameter["name"])): an_object["name"] = re.match(r"(\w+).*", aparameter["name"]).group(1) try: self._validate_name(an_object, class_method_re) except SyntaxError: is_need_reraise = True try: self._validate_name(amethod, "define_function") is_need_reraise = False except SyntaxError: pass if is_need_reraise: raise else: an_object["name"] = self._get_argument_name(aparameter) self._validate_name(an_object, class_method_argument_re) else: self._validate_name( {"name":matched.group(1), "line_number":amethod["line_number"]}, "typedef") for access_specifier in CppHeaderParser.supportedAccessSpecifier: for amember in cpp_object["properties"][access_specifier]: is_skip_validate = False if ("type" in amember) and (amember["type"] is not None): internal_predeclares = ["class", "struct", "union"] if amember["type"] in internal_predeclares: is_skip_validate = True if not is_skip_validate: if amember["static"]: self._validate_name(amember, "static_variant") else: self._validate_name(amember, class_variant_re) for amember in cpp_object["structs"][access_specifier]: self._validate_cpp_object(amember) for amember in cpp_object["enums"][access_specifier]: self._validate_cpp_object(amember) elif cpp_object_type == CppHeaderParser.CppStruct: self._validate_name(cpp_object, "struct") elif cpp_object_type == CppHeaderParser.CppEnum: self._validate_name(cpp_object, "enum") line_number = -1 if "line_number" in cpp_object: line_number = cpp_object["line_number"] for amember in cpp_object["values"]: # Use parent line number if enum value does not have it's line # number if "line_number" not in amember: amember["line_number"] = line_number self._validate_name(amember, "enum_value") elif cpp_object_type == CppHeaderParser.CppVariable: if cpp_object["type"] != 
"return": if cpp_object["static"]: self._validate_name(cpp_object, "static_variant") elif cpp_object["type"] not in ["class", "struct", "union"]: if not cpp_object["type"].endswith("::"): # Don't parse variable that implemented outside of # template class. It's already be parsed when parsing # the class. self._validate_name(cpp_object, "global_variant") elif cpp_object_type == CppHeaderParser.CppMethod: # Exclude "main" function while parsing global function while True: # FIXME: Parse special case : "struct RArraySize <T ( & ) [ N ]> {" if "debug" in cpp_object: if re.match(r".*\>\s*{$", cpp_object["debug"]) is not None: break self._validate_codes_of_cpp_method(cpp_object) if cpp_object["name"] == "main": break if self._is_special_method(cpp_object): break if (cpp_object["class"] is None) or (len(cpp_object["class"]) <= 0): if ">" in cpp_object["name"]: regex = r"^[^<:]*?(?:(\w+)::)?(\w+)\s*<" matched = re.search(regex, cpp_object["debug"]) if matched.group(1) is not None: cpp_object["class"] = matched.group(1) cpp_object["name"] = matched.group(2) self._validate_name(cpp_object, "class_method") elif len(cpp_object["returns"]) > 0: # If a function does not have return value(at least # "void"), it maybe macro invokes. # FIXME: We just ignored this situation: # Code Snippets: static RSignal<void(int)> sReceived; if "<" not in cpp_object["name"]: self._validate_name(cpp_object, "function") break if self._get_class_realname(cpp_object["class"]) == cpp_object["name"]: # Constructor / Destructor will the same with class name break self._validate_name(cpp_object, "class_method") break elif cpp_object_type == CppHeaderParser.CppUnion: self._validate_name(cpp_object, "union") elif cpp_object_type == CppNamespace: self._validate_name(cpp_object, "namespace") elif cpp_object_type == CppFileName: self._validate_name(cpp_object, "filename") def exec_(self): try: with open(self.__args.file_path, "r") as source_file: # For later parse by _validate_codes_of_cpp_method() self._source_lines = source_file.readlines() parsed_info = CppHeaderParser.CppHeader(self.__args.file_path) # Verify File Names filename = os.path.basename(self.__args.file_path) cpp_object = CppFileName() cpp_object["name"] = filename self._validate_cpp_object(cpp_object) # Verify Define Names for define_text in parsed_info.defines: self._validate_cpp_object(self.parse_define(define_text)) # Verify Function Names for cpp_object in parsed_info.functions: self._validate_cpp_object(cpp_object) # Verify Class Names for cpp_object in parsed_info.classes_order: self._validate_cpp_object(cpp_object) # Verify Struct Names for cpp_object in parsed_info.structs_order: self._validate_cpp_object(cpp_object) # Verify Enum Names for cpp_object in parsed_info.enums: self._validate_cpp_object(cpp_object) # Verify Variable Names for cpp_object in parsed_info.variables: # Avoid checking member variable inside function body. if '{' not in cpp_object['type']: self._validate_cpp_object(cpp_object) for namespace in parsed_info.namespaces: cpp_object = CppNamespace() cpp_object["name"] = namespace self._validate_cpp_object(cpp_object) # Verify Typdef Names for cpp_object in parsed_info.typedefs: self._validate_cpp_object(cpp_object) except SyntaxError as e: print(str(e)) return 1 except CppHeaderParser.CppHeaderParser.CppParseError as e: # CppHeaderParser can't parse this file, but we should pass it, this # is the CppHeaderParser's problem. 
print(str(e)) return 0 return 0 def main(): a = Application() sys.exit(a.exec_()) if __name__ == "__main__": # Execute only if run as a script main()
38.172608
97
0.524182
20,050
0.985452
0
0
0
0
0
0
4,041
0.198614
53a4ae1a747ba84b0abf192cd72d5b27b2b5e891
1,527
py
Python
theone/wsgi/server.py
laozijiaojiangnan/TheOne
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
[ "Apache-2.0" ]
null
null
null
theone/wsgi/server.py
laozijiaojiangnan/TheOne
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
[ "Apache-2.0" ]
null
null
null
theone/wsgi/server.py
laozijiaojiangnan/TheOne
73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824
[ "Apache-2.0" ]
null
null
null
import typing as t
from http.server import HTTPServer, BaseHTTPRequestHandler

from . import response as resp


class WsgiServer(HTTPServer):
    pass


class WsgiHandel(BaseHTTPRequestHandler):
    def handle(self) -> None:
        handle_response = SimpleHandler(self.wfile)
        handle_response.send()


class SimpleHandler:
    def __init__(self, wfile):
        self._response = resp.Response.create_empty()  # type: resp.Response
        self.sender = wfile

    def send(self):
        """Send the response packet to the browser.

        note: the output below is split into three writes because sending
        everything at once triggered a bug; the cause is unclear, so keep it
        this way for now.
        """
        # NOTE: the status code is written twice here, in the slot where the
        # reason phrase (e.g. "OK") would normally go.
        line = f"{self._response.line.version} {self._response.line.code} {self._response.line.code}\r\n"
        self.sender.write(bytes(line, 'utf-8'))

        self.add_header(key='Content-Length', value=len(self._response.body.content))
        headers = "".join(
            [f"{h.key}:{h.value}\r\n" for h in self._response.headers]
        )
        print(f'headers: {headers}')
        self.sender.write(bytes(headers, 'utf-8'))

        body = f"\r\n{self._response.body.content}"
        self.sender.write(bytes(body, 'utf-8'))

    def add_header(self, key: str, value: t.Any) -> t.List[resp.Headers]:
        """Add a header key/value pair to the response.

        Args:
            key: header name
            value: header value

        Return:
            all header key/value pairs currently set
        """
        if self._response is None:
            self._response = resp.Response.create_empty()

        h = resp.Headers(key=key, value=value)
        self._response.headers.append(h)

        return self._response.headers
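A minimal usage sketch, assuming this module is importable as theone.wsgi.server (per the repo path above) and that resp.Response.create_empty() produces a sendable empty response. WsgiServer inherits HTTPServer's constructor, so it is wired up like any stdlib server:

# Hedged sketch: serve the empty response on localhost:8000.
from theone.wsgi.server import WsgiServer, WsgiHandel

server = WsgiServer(("127.0.0.1", 8000), WsgiHandel)  # stdlib HTTPServer signature
server.serve_forever()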
28.277778
105
0.612967
1,522
0.927483
0
0
0
0
0
0
532
0.324193
53a59bcf9df24d2abf9133b0c94be6aa674beda0
4,462
py
Python
pytorch_translate/attention/multihead_attention.py
dzhulgakov/translate
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
[ "BSD-3-Clause" ]
1
2019-06-14T20:20:39.000Z
2019-06-14T20:20:39.000Z
pytorch_translate/attention/multihead_attention.py
dzhulgakov/translate
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
[ "BSD-3-Clause" ]
null
null
null
pytorch_translate/attention/multihead_attention.py
dzhulgakov/translate
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 from fairseq.modules import multihead_attention as fair_multihead from pytorch_translate.attention import ( BaseAttention, attention_utils, register_attention, ) @register_attention("multihead") class MultiheadAttention(BaseAttention): """ Multiheaded Scaled Dot Product Attention Implements equation: MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V) Similarly to the above, d_k = d_v = d_model / h In this implementation, keys and values are both set to encoder output Inputs init: decoder_hidden_state_dim : dimensionality of decoder hidden state context_dim : dimensionality of encoder output kwargs : nheads : integer # of attention heads unseen_mask: if True, only attend to previous sequence positions src_lengths_mask: if True, mask padding based on src_lengths forward: decoder_state : [batch size, d_model] source_hids : [sequence length, batch size, d_model] src_lengths : [batch size] forward: query : [sequence length, batch size, d_model] key: [sequence length, batch size, d_model] value: [sequence length, batch size, d_model] Output result : [batch_size, d_model] """ def __init__( self, decoder_hidden_state_dim, context_dim, *, nheads=1, unseen_mask=False, src_length_mask=True ): super().__init__(decoder_hidden_state_dim, context_dim) assert decoder_hidden_state_dim == context_dim d_model = decoder_hidden_state_dim # for brevity assert d_model % nheads == 0 if unseen_mask: raise NotImplementedError( "Unseen mask not supported with sequential decoding" ) self._fair_attn = fair_multihead.MultiheadAttention(d_model, nheads) self.use_src_length_mask = src_length_mask def forward(self, decoder_state, source_hids, src_lengths, squeeze=True): """ Computes MultiheadAttention with respect to either a vector or a tensor Inputs: decoder_state: (bsz x decoder_hidden_state_dim) or (bsz x T x decoder_hidden_state_dim) source_hids: srclen x bsz x context_dim src_lengths: bsz x 1, actual sequence lengths squeeze: Whether or not to squeeze on the time dimension. Even if decoder_state.dim() is 2 dimensional an explicit time step dimension will be unsqueezed. Outputs: [batch_size, max_src_len] if decoder_state.dim() == 2 & squeeze or [batch_size, 1, max_src_len] if decoder_state.dim() == 2 & !squeeze or [batch_size, T, max_src_len] if decoder_state.dim() == 3 & !squeeze or [batch_size, T, max_src_len] if decoder_state.dim() == 3 & squeeze & T != 1 or [batch_size, max_src_len] if decoder_state.dim() == 3 & squeeze & T == 1 """ batch_size = decoder_state.shape[0] if decoder_state.dim() == 3: query = decoder_state elif decoder_state.dim() == 2: query = decoder_state.unsqueeze(1) else: raise ValueError("decoder state must be either 2 or 3 dimensional") query = query.transpose(0, 1) value = key = source_hids src_len_mask = None if src_lengths is not None and self.use_src_length_mask: # [batch_size, 1, seq_len] src_len_mask_int = attention_utils.create_src_lengths_mask( batch_size=batch_size, src_lengths=src_lengths ) src_len_mask = src_len_mask_int != 1 attn, attn_weights = self._fair_attn.forward( query, key, value, key_padding_mask=src_len_mask, need_weights=True ) # attn.shape = T X bsz X embed_dim # attn_weights.shape = bsz X T X src_len attn_weights = attn_weights.transpose(0, 2) # attn_weights.shape = src_len X T X bsz if squeeze: attn = attn.squeeze(0) # attn.shape = squeeze(T) X bsz X embed_dim attn_weights = attn_weights.squeeze(1) # attn_weights.shape = src_len X squeeze(T) X bsz return attn, attn_weights return attn, attn_weights
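A stand-alone shape walk-through of the 2-D decoder_state path in forward() above, with plain single-head scaled dot-product attention standing in for fairseq's MultiheadAttention (so it runs with only torch installed; the sizes are made up):

import torch

bsz, srclen, d_model = 3, 7, 16
decoder_state = torch.randn(bsz, d_model)        # query source
source_hids = torch.randn(srclen, bsz, d_model)  # keys and values

query = decoder_state.unsqueeze(1).transpose(0, 1)  # 1 x bsz x d_model
key = value = source_hids                           # srclen x bsz x d_model

# Single-head stand-in for self._fair_attn:
scores = torch.einsum("tbd,sbd->bts", query, key) / d_model ** 0.5
attn_weights = scores.softmax(dim=-1)                     # bsz x 1 x srclen
attn = torch.einsum("bts,sbd->tbd", attn_weights, value)  # 1 x bsz x d_model
print(attn.squeeze(0).shape)  # torch.Size([3, 16]) -- bsz x d_model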
35.412698
85
0.62528
4,227
0.947333
0
0
4,260
0.954729
0
0
2,470
0.553563
53a74fabccfed340e02d074e5c163a36783d5463
1,102
py
Python
custom_components/purrsong/__init__.py
RobertD502/home-assistant-lavviebot
5c69f474786f043773cba42b7806fb77d4f89672
[ "MIT" ]
3
2021-04-15T21:23:26.000Z
2021-12-18T07:45:40.000Z
custom_components/purrsong/__init__.py
RobertD502/home-assistant-lavviebot
5c69f474786f043773cba42b7806fb77d4f89672
[ "MIT" ]
2
2021-10-21T12:08:32.000Z
2021-11-12T19:13:11.000Z
custom_components/purrsong/__init__.py
RobertD502/home-assistant-lavviebot
5c69f474786f043773cba42b7806fb77d4f89672
[ "MIT" ]
null
null
null
"""Support for Purrsong LavvieBot S""" import asyncio import logging import voluptuous as vol from lavviebot import LavvieBotApi import homeassistant.helpers.config_validation as cv from homeassistant import config_entries from homeassistant.const import EVENT_HOMEASSISTANT_STOP from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.const import ( CONF_PASSWORD, CONF_USERNAME ) from .const import DOMAIN _LOGGER = logging.getLogger(__name__) def setup(hass, config): """Setup of the component""" return True async def async_setup_entry(hass, config_entry): """Set up Lavviebot integration from a config entry.""" username = config_entry.data.get(CONF_USERNAME) password = config_entry.data.get(CONF_PASSWORD) _LOGGER.info("Initializing the Lavviebot API") lavviebot = await hass.async_add_executor_job(LavvieBotApi, username, password) _LOGGER.info("Connected to API") hass.data[DOMAIN] = lavviebot hass.async_add_job( hass.config_entries.async_forward_entry_setup(config_entry, "sensor") ) return True
26.238095
83
0.772232
0
0
0
0
0
0
546
0.495463
179
0.162432
53a80bedba1fa544dba66c5282310b99391dfaba
917
py
Python
MathPainting_OOP/shapes.py
matbocz/kurs-python-udemy
bbc53d0b2073b400aaad5ff908b3e1c09b815121
[ "MIT" ]
null
null
null
MathPainting_OOP/shapes.py
matbocz/kurs-python-udemy
bbc53d0b2073b400aaad5ff908b3e1c09b815121
[ "MIT" ]
null
null
null
MathPainting_OOP/shapes.py
matbocz/kurs-python-udemy
bbc53d0b2073b400aaad5ff908b3e1c09b815121
[ "MIT" ]
null
null
null
class Rectangle: """A rectangle shape that can be drawn on a Canvas object""" def __init__(self, x, y, width, height, color): self.x = x self.y = y self.width = width self.height = height self.color = color def draw(self, canvas): """Draws itself into the Canvas object""" # Changes a slice of the array with new values canvas.data[self.x: self.x + self.height, self.y: self.y + self.width] = self.color class Square: """A square shape that can be drawn on a Canvas object""" def __init__(self, x, y, side, color): self.x = x self.y = y self.side = side self.color = color def draw(self, canvas): """Draws itself into the Canvas object""" # Changes a slice of the array with new values canvas.data[self.x: self.x + self.side, self.y: self.y + self.side] = self.color
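A usage sketch with a stand-in Canvas; the real Canvas lives elsewhere in this project, and all draw() assumes about it is a numpy array in its .data attribute:

import numpy as np

class Canvas:  # assumed shape of the real Canvas: an array in .data
    def __init__(self, height, width):
        self.data = np.zeros((height, width, 3))  # RGB canvas

canvas = Canvas(100, 100)
Rectangle(x=10, y=20, width=40, height=15, color=(1.0, 0.0, 0.0)).draw(canvas)
Square(x=50, y=50, side=25, color=(0.0, 0.0, 1.0)).draw(canvas)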
30.566667
91
0.591058
913
0.995638
0
0
0
0
0
0
291
0.317339
53a892c5198d37c345b5950774654f861533af79
2,904
py
Python
problems/Kelvin_Helmholtz/problem.py
sddyates/mars
a56735bd344b7337151fb419b1c832b0c702ea69
[ "MIT" ]
1
2019-12-20T20:29:14.000Z
2019-12-20T20:29:14.000Z
problems/Kelvin_Helmholtz/problem.py
sddyates/mars
a56735bd344b7337151fb419b1c832b0c702ea69
[ "MIT" ]
3
2019-08-30T08:12:16.000Z
2020-05-15T16:19:53.000Z
problems/Kelvin_Helmholtz/problem.py
sddyates/mars
a56735bd344b7337151fb419b1c832b0c702ea69
[ "MIT" ]
1
2019-12-21T03:51:30.000Z
2019-12-21T03:51:30.000Z
from mars import main_loop import numpy as np from mars.settings import * class Problem: """ Synopsis -------- User class for the Kelvin-Helmholtz instability Args ---- None Methods ------- initialise Set all variables in each cell to initialise the simulation. internal_bc Specify the internal boundary for the simulation. TODO ---- None """ def __init__(self): self.parameter = { 'Name':'Kelvin Helmholtz instability.', 'Dimensions':'2D', 'x1 min':-0.5, 'x1 max':0.5, 'x2 min':-0.5, 'x2 max':0.5, 'x3 min':-0.5, 'x3 max':0.5, 'resolution x1':256, 'resolution x2':256, 'resolution x3':0, 'cfl':0.3, 'initial dt':1.0e-5, 'max dt increase':1.5, 'initial t': 0.0, 'max time': 5.0, 'save frequency': 2.5e-2, 'output type': ['numpy'], 'output primitives': True, 'print to file':False, 'profiling': True, 'restart file':None, 'gamma':1.4, 'density unit':1.0, 'length unit':1.0, 'velocity unit':1.0, 'optimisation': 'numba', 'riemann':'hllc', 'reconstruction':'linear', 'limiter':'minmod', 'time stepping':'RK2', 'method':'hydro', 'lower x1 boundary':'reciprocal', 'upper x1 boundary':'reciprocal', 'lower x2 boundary':'reciprocal', 'upper x2 boundary':'reciprocal', 'lower x3 boundary':'reciprocal', 'upper x3 boundary':'reciprocal', 'internal boundary':False } def initialise(self, V, g, l): if self.parameter['Dimensions'] == '2D': Y, X = np.meshgrid(g.x1, g.x2, indexing='ij') if self.parameter['Dimensions'] == '3D': Z, Y, X = np.meshgrid(g.x1, g.x2, g.x3, indexing='ij') yp = 0.25 dens_1 = 2.0 dens_2 = 1.0 pres = 2.0 vel_1 = 0.5 vel_2 = 0.0 amp = 0.001 vx1_per = (np.random.random(V.shape)*2.0 - 1)*amp vx2_per = (np.random.random(V.shape)*2.0 - 1)*amp region_1 = np.absolute(Y) < yp region_2 = np.absolute(Y) > yp V[rho, region_1] = dens_1 V[prs, region_1] = pres V[vx1, region_1] = vel_1 + vx1_per[vx1, region_1] V[vx2, region_1] = vel_2 + vx2_per[vx2, region_1] V[rho, region_2] = dens_2 V[prs, region_2] = pres V[vx1, region_2] = -vel_1 + vx1_per[vx1, region_2] V[vx2, region_2] = vel_2 + vx2_per[vx2, region_2] def internal_bc(self): return None if __name__ == "__main__": main_loop(Problem())
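A stand-alone numpy sketch of the masking pattern initialise() uses: build the grid, then fill the dense inner stream and light outer stream through boolean masks. Values mirror the ones above; note that with this resolution no grid point lands exactly on |Y| == yp:

import numpy as np

x1 = np.linspace(-0.5, 0.5, 8)
x2 = np.linspace(-0.5, 0.5, 8)
Y, X = np.meshgrid(x1, x2, indexing='ij')

yp = 0.25
region_1 = np.absolute(Y) < yp  # dense, right-moving stream
region_2 = np.absolute(Y) > yp  # light, left-moving stream

rho = np.zeros_like(Y)
rho[region_1] = 2.0
rho[region_2] = 1.0
print(rho)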
24.2
68
0.490358
2,773
0.95489
0
0
0
0
0
0
1,041
0.358471
53a8f467665d04dfb54d9331579d408e1a611989
1,461
py
Python
pythainlp/util/thai.py
korkeatw/pythainlp
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
[ "Apache-2.0" ]
null
null
null
pythainlp/util/thai.py
korkeatw/pythainlp
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
[ "Apache-2.0" ]
null
null
null
pythainlp/util/thai.py
korkeatw/pythainlp
6fc7c3434d5e58c8e8e2bf13470445cbab0866bd
[ "Apache-2.0" ]
1
2020-05-27T09:53:09.000Z
2020-05-27T09:53:09.000Z
# -*- coding: utf-8 -*-
"""
Check if it is Thai text
"""
import string

_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation


def isthaichar(ch: str) -> bool:
    """
    Check if a character is Thai
    (is it a Thai character?)

    :param str ch: input character
    :return: True or False
    """
    ch_val = ord(ch)
    if ch_val >= 3584 and ch_val <= 3711:
        return True
    return False


def isthai(word: str, ignore_chars: str = ".") -> bool:
    """
    Check if all characters are Thai
    (does the word contain only Thai characters?)

    :param str word: input text
    :param str ignore_chars: characters to be ignored (i.e. will be considered as Thai)
    :return: True or False
    """
    if not ignore_chars:
        ignore_chars = ""

    for ch in word:
        if ch not in ignore_chars and not isthaichar(ch):
            return False

    return True


def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
    """
    :param str text: input text
    :param str ignore_chars: characters to be excluded from the count
    :return: float, percentage of characters in the text that are Thai characters
    """
    if not text or not isinstance(text, str):
        return 0

    if not ignore_chars:
        ignore_chars = ""

    num_thai = 0
    num_ignore = 0

    for ch in text:
        if ch in ignore_chars:
            num_ignore += 1
        elif isthaichar(ch):
            num_thai += 1

    num_count = len(text) - num_ignore

    if num_count == 0:  # every character was ignored; avoid division by zero
        return 0

    return (num_thai / num_count) * 100
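Quick usage of the helpers above on sample Thai text; the expected values follow directly from the code:

print(isthaichar("ก"))           # True: ord("ก") == 3585, inside 3584..3711
print(isthai("สวัสดี"))           # True: every character is Thai
print(isthai("Python"))          # False
print(countthai("ภาษา Python"))  # 40.0: 4 Thai chars out of 10 counted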
22.476923
87
0.612594
0
0
0
0
0
0
0
0
640
0.41052
53a95c744ad18d63a19b3fc856fe6442690ea1c8
54
py
Python
Numpy/tempCodeRunnerFile.py
zharmedia386/Data-Science-Stuff
40183c329e3b30c582c545c260ca7916f29e2f09
[ "MIT" ]
null
null
null
Numpy/tempCodeRunnerFile.py
zharmedia386/Data-Science-Stuff
40183c329e3b30c582c545c260ca7916f29e2f09
[ "MIT" ]
null
null
null
Numpy/tempCodeRunnerFile.py
zharmedia386/Data-Science-Stuff
40183c329e3b30c582c545c260ca7916f29e2f09
[ "MIT" ]
null
null
null
print(b) print(c) print(d) print(e) print(f) print(g)
7.714286
8
0.666667
0
0
0
0
0
0
0
0
0
0
53a96c42fcec2518a3a26c0e6dece5934119cc53
1,941
py
Python
Python/Filter.py
KilroyWasHere-cs-j/savitzky-golay
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
[ "MIT" ]
null
null
null
Python/Filter.py
KilroyWasHere-cs-j/savitzky-golay
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
[ "MIT" ]
null
null
null
Python/Filter.py
KilroyWasHere-cs-j/savitzky-golay
2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f
[ "MIT" ]
null
null
null
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog

x = []
y = []


def generate():
    # Generate random data
    base = np.linspace(0, 5, 11)
    # base = np.random.randint(0, 10, 5)
    outliers = np.random.randint(10, 20, 2)
    data = np.concatenate((base, outliers))
    np.random.shuffle(data)
    return data


def fill_data():
    # Build random data
    return np.concatenate((np.array([0]), MadDog.find_outliers(generate()))), np.concatenate(
        (np.array([0]), MadDog.find_outliers(generate())))  # np.sin(x) + np.cos(x) + np.random.random(100)
    # np.linspace(0, 2*np.pi, 100)


def savitzky(x, y, poly_nom):
    # Pass the polynomial order through to savgol_filter (it was previously
    # hard-coded to 10 and the parameter ignored).
    return savgol_filter(x, len(x) - 1, poly_nom), savgol_filter(y, len(y) - 1, poly_nom)


def map(x_filtered, y_filtered, x, y, title="title"):
    # Generate some test data
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]

    plt.clf()
    plt.imshow(heatmap.T, extent=extent, origin='lower')
    plt.show()

    heatmap, xedges, yedges = np.histogram2d(x_filtered, y_filtered, bins=50)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]

    plt.clf()
    plt.imshow(heatmap.T, extent=extent, origin='lower')
    plt.show()


def show(x_filtered, y_filtered, x, y, title="Lorem ipsum"):
    # Plotting
    fig = plt.figure()
    ax = fig.subplots()
    plt.plot(x_filtered, y_filtered, 'red', marker="o")
    plt.plot(x, y, 'green', marker="o")
    plt.subplots_adjust(bottom=0.25)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title(title)
    plt.legend(["Filter", "Raw"])
    plt.show()


# Generating the noisy signal
x, y = fill_data()
print(len(y))

# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
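One constraint worth knowing when reusing savitzky() above: scipy's savgol_filter requires polyorder < window_length, and older SciPy releases additionally require an odd window_length. A defensive wrapper, as a sketch (not part of the original script):

import numpy as np
from scipy.signal import savgol_filter

def smooth(values, poly_order=2):
    window = len(values) - 1
    if window % 2 == 0:  # keep the window odd for older SciPy versions
        window -= 1
    assert window > poly_order, "input too short for this polynomial order"
    return savgol_filter(values, window, poly_order)

print(smooth(np.arange(13, dtype=float)))  # a straight line survives smoothing unchanged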
26.589041
107
0.640907
0
0
0
0
0
0
0
0
376
0.193715
53aa536c76b41bd1afbf13c8b634be33ef9462e1
8,087
py
Python
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
dietrichc/streamline-ppc-reports
256f79246aba3c2cf8f792d87a066391a2f471e0
[ "Apache-2.0" ]
null
null
null
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
dietrichc/streamline-ppc-reports
256f79246aba3c2cf8f792d87a066391a2f471e0
[ "Apache-2.0" ]
null
null
null
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
dietrichc/streamline-ppc-reports
256f79246aba3c2cf8f792d87a066391a2f471e0
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Adds an ad customizer feed. Associates the feed with customer and adds an ad that uses the feed to populate dynamic data. Tags: CustomerFeedService.mutate, FeedItemService.mutate Tags: FeedMappingService.mutate, FeedService.mutate Tags: AdGroupAdService.mutate """ __author__ = ('[email protected] (Mark Saniscalchi)', '[email protected] (Yufeng Guo)') # Import appropriate classes from the client library. from googleads import adwords # See the Placeholder reference page for a list of all the placeholder types # and fields: # https://developers.google.com/adwords/api/docs/appendix/placeholders PLACEHOLDER_AD_CUSTOMIZER = '10' PLACEHOLDER_FIELD_INTEGER = '1' PLACEHOLDER_FIELD_FLOAT = '2' PLACEHOLDER_FIELD_PRICE = '3' PLACEHOLDER_FIELD_DATE = '4' PLACEHOLDER_FIELD_STRING = '5' ADGROUPS = [ 'INSERT_ADGROUP_ID_HERE', 'INSERT_ADGROUP_ID_HERE' ] FEEDNAME = 'INSERT_FEED_NAME_HERE' def main(client, adgroups): # Initialize appropriate services. ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406') customer_feed_service = client.GetService( 'CustomerFeedService', version='v201406') feed_item_service = client.GetService('FeedItemService', version='v201406') feed_mapping_service = client.GetService( 'FeedMappingService', version='v201406') feed_service = client.GetService('FeedService', version='v201406') # First, create a customizer feed. One feed per account can be used for all # ads. customizer_feed = { 'name': FEEDNAME, 'attributes': [ {'type': 'STRING', 'name': 'Name'}, {'type': 'STRING', 'name': 'Price'}, {'type': 'DATE_TIME', 'name': 'Date'} ] } feed_service_operation = { 'operator': 'ADD', 'operand': customizer_feed } response = feed_service.mutate([feed_service_operation]) if response and 'value' in response: feed = response['value'][0] feed_data = { 'feedId': feed['id'], 'nameId': feed['attributes'][0]['id'], 'priceId': feed['attributes'][1]['id'], 'dateId': feed['attributes'][2]['id'] } print ('Feed with name \'%s\' and ID %s was added with:' '\tName attribute ID %s and price attribute ID %s and date attribute' 'ID %s') % (feed['name'], feed['id'], feed_data['nameId'], feed_data['priceId'], feed_data['dateId']) else: raise Exception('No feeds were added') # Creating feed mapping to map the fields with customizer IDs. 
feed_mapping = { 'placeholderType': PLACEHOLDER_AD_CUSTOMIZER, 'feedId': feed_data['feedId'], 'attributeFieldMappings': [ { 'feedAttributeId': feed_data['nameId'], 'fieldId': PLACEHOLDER_FIELD_STRING }, { 'feedAttributeId': feed_data['priceId'], 'fieldId': PLACEHOLDER_FIELD_PRICE }, { 'feedAttributeId': feed_data['dateId'], 'fieldId': PLACEHOLDER_FIELD_DATE } ] } feed_mapping_operation = { 'operator': 'ADD', 'operand': feed_mapping } response = feed_mapping_service.mutate([feed_mapping_operation]) if response and 'value' in response: feed_mapping = response['value'][0] print ('Feed mapping with ID %s and placeholder type %s was saved for feed' ' with ID %s.') % (feed_mapping['feedMappingId'], feed_mapping['placeholderType'], feed_mapping['feedId']) else: raise Exception('No feed mappings were added.') # Now adding feed items -- the values we'd like to place. items_data = [ { 'name': 'Mars', 'price': '$1234.56', 'date': '20140601 000000', 'adGroupId': adgroups[0] }, { 'name': 'Venus', 'price': '$1450.00', 'date': '20140615 120000', 'adGroupId': adgroups[1] } ] feed_items = [{'feedId': feed_data['feedId'], 'adGroupTargeting': { 'TargetingAdGroupId': item['adGroupId'] }, 'attributeValues': [ { 'feedAttributeId': feed_data['nameId'], 'stringValue': item['name'] }, { 'feedAttributeId': feed_data['priceId'], 'stringValue': item['price'] }, { 'feedAttributeId': feed_data['dateId'], 'stringValue': item['date'] } ]} for item in items_data] feed_item_operations = [{ 'operator': 'ADD', 'operand': feed_item } for feed_item in feed_items] response = feed_item_service.mutate(feed_item_operations) if response and 'value' in response: for feed_item in response['value']: print 'Feed item with ID %s was added.' % feed_item['feedItemId'] else: raise Exception('No feed items were added.') # Finally, creating a customer (account-level) feed with a matching function # that determines when to use this feed. For this case we use the "IDENTITY" # matching function that is always 'true' just to associate this feed with # the customer. The targeting is done within the feed items using the # :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes. matching_function = { 'operator': 'IDENTITY', 'lhsOperand': [ { 'xsi_type': 'ConstantOperand', 'type': 'BOOLEAN', 'booleanValue': 'true' } ] } customer_feed = { 'feedId': feed_data['feedId'], 'matchingFunction': matching_function, 'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER] } customer_feed_operation = { 'operator': 'ADD', 'operand': customer_feed } response = customer_feed_service.mutate([customer_feed_operation]) if response and 'value' in response: feed = response['value'][0] print 'Customer feed with ID %s was added.' % feed['feedId'] else: raise Exception('No customer feeds were added.') # All set! We can now create ads with customizations. text_ad = { 'xsi_type': 'TextAd', 'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME, 'description1': 'Only {=%s.Price}' % FEEDNAME, 'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME, 'url': 'http://www.example.com', 'displayUrl': 'www.example.com' } # We add the same ad to both ad groups. When they serve, they will show # different values, since they match different feed items. 
operations = [{ 'operator': 'ADD', 'operand': { 'adGroupId': adgroup, 'ad': text_ad } } for adgroup in adgroups] print operations response = ad_group_ad_service.mutate(operations) print '===ad group ad service===' print response if response and 'value' in response: for ad in response['value']: print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])) else: raise Exception('No ads were added.') if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, ADGROUPS)
32.09127
80
0.614072
0
0
0
0
0
0
0
0
4,074
0.503771
53aaad486aeb5cf94c98b45787e68241bed70175
2,001
py
Python
tests/test_minhash.py
azachar/pyminhash
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
[ "MIT" ]
null
null
null
tests/test_minhash.py
azachar/pyminhash
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
[ "MIT" ]
null
null
null
tests/test_minhash.py
azachar/pyminhash
8a595fb25fe7172ea31d604fe8a40b8c11f1b8af
[ "MIT" ]
null
null
null
import pytest from pyminhash import MinHash from pyminhash.datasets import load_data def test__sparse_vector(): df = load_data() myMinHasher = MinHash(10) res = myMinHasher._sparse_vectorize(df, 'name') assert res.columns.tolist() == ['name', 'sparse_vector'] assert res['sparse_vector'].dtype == 'object' def test__create_hashing_parameters(): n_hashes = 10 myMinHasher = MinHash(n_hash_tables=n_hashes) res = myMinHasher._create_hashing_parameters() assert len(res) == n_hashes assert res.dtype == 'int64' assert min(res) >= 0 assert min(res) <= myMinHasher.max_token_value def test__create_minhash(): n_hashes = 10 myMinHasher = MinHash(n_hash_tables=n_hashes) doc = [59, 65, 66, 67, 118, 150, 266] res = myMinHasher._create_minhash(doc) assert len(res) == n_hashes def test__create_minhash_signatures(): df = load_data() myMinHasher = MinHash(3) df = myMinHasher._sparse_vectorize(df, 'name') df = myMinHasher._create_minhash_signatures(df) for col in ['hash_0', 'hash_1', 'hash_2']: assert col in df.columns assert df[col].dtype == 'int64' def test_fit_predict(): df = load_data() myMinHasher = MinHash(10) res = myMinHasher.fit_predict(df, 'name') assert res.columns.tolist() == ['row_number_1', 'row_number_2', 'name_1', 'name_2', 'jaccard_sim'] assert res['jaccard_sim'].dtype == 'float' def test_fit_predict_accuracy(): def jaccard(x, y): x_tokens = set(x.split()) y_tokens = set(y.split()) return len(x_tokens.intersection(y_tokens)) / len(x_tokens.union(y_tokens)) df = load_data() myMinHasher = MinHash(1000) res = myMinHasher.fit_predict(df, 'name') assert len(res) == 1727 res['jaccard_real'] = res.apply(lambda row: jaccard(row['name_1'], row['name_2']), axis=1) res['diff'] = res['jaccard_real'] - res['jaccard_sim'] assert abs(res['diff'].mean()) < 0.02 assert res['diff'].std() < 0.1
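The exact token-level Jaccard similarity that test_fit_predict_accuracy compares against, worked on a tiny made-up pair:

x, y = "the quick brown fox", "the quick red fox"
x_tokens, y_tokens = set(x.split()), set(y.split())
print(len(x_tokens & y_tokens) / len(x_tokens | y_tokens))  # 3 shared / 5 distinct = 0.6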
30.318182
102
0.667166
0
0
0
0
0
0
0
0
258
0.128936
53ab5b39a644e03ecaaf97048f3ae768e29b5a48
503
py
Python
settings.py
danylo-dudok/youtube-rss
c4478605274cdeac33f909d7fcb7d265898e80bc
[ "MIT" ]
null
null
null
settings.py
danylo-dudok/youtube-rss
c4478605274cdeac33f909d7fcb7d265898e80bc
[ "MIT" ]
null
null
null
settings.py
danylo-dudok/youtube-rss
c4478605274cdeac33f909d7fcb7d265898e80bc
[ "MIT" ]
null
null
null
from datetime import datetime, timedelta
from typing import Final

from tools import localize_time

RSS_URL_PREFIX: Final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: Final = '--location='
CHANNEL_ARGUMENT_PREFIX: Final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: Final = '--last-check='
TWO_WEEKS_IN_DAYS: Final = 14
DEFAULT_LAST_CHECK: Final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: Final = ''
CHANNEL_POSTS_LIMIT: Final = 20
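How the feed template above is meant to be filled: the {0} placeholder takes a channel id (the id below is made up):

url = RSS_URL_PREFIX.format("UCxxxxxxxxxxxxxxxxxxxxxx")
# -> 'https://www.youtube.com/feeds/videos.xml?channel_id=UCxxxxxxxxxxxxxxxxxxxxxx'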
35.928571
93
0.787276
0
0
0
0
0
0
0
0
100
0.198807
53ac58babeeeae8a59ad21aa748c5f201e132f9d
1,325
py
Python
openpicle/caravel.py
DX-MON/OpenPICle
c036333f807b1b4959af22bde8c4cac553ef162f
[ "BSD-3-Clause" ]
null
null
null
openpicle/caravel.py
DX-MON/OpenPICle
c036333f807b1b4959af22bde8c4cac553ef162f
[ "BSD-3-Clause" ]
null
null
null
openpicle/caravel.py
DX-MON/OpenPICle
c036333f807b1b4959af22bde8c4cac553ef162f
[ "BSD-3-Clause" ]
null
null
null
# SPDX-License-Identifier: BSD-3-Clause from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter __all__ = ( 'PIC16Caravel', ) class PIC16Caravel(Elaboratable): def elaborate(self, platform): from .pic16 import PIC16 from .soc.busses.qspi import QSPIBus m = Module() reset = Signal() busy_n = Signal(reset = 1) m.submodules.qspiFlash = qspiFlash = QSPIBus(resourceName = ('spi_flash_4x', 0)) m.submodules.pic = pic = ResetInserter(reset)(EnableInserter(busy_n)(PIC16())) run = platform.request('run', 0) pBus = platform.request('p_bus', 0) addr = pBus.addr.o dataIn = pBus.data.i dataOut = pBus.data.o dataDir = pBus.data.oe read = pBus.read write = pBus.write with m.If(qspiFlash.complete | reset): m.d.sync += busy_n.eq(1) with m.Elif(pic.iBus.read): m.d.sync += busy_n.eq(0) m.d.comb += [ reset.eq(~qspiFlash.ready), run.o.eq(qspiFlash.ready & busy_n), qspiFlash.address[0].eq(0), qspiFlash.address[1:].eq(pic.iBus.address), pic.iBus.data.eq(qspiFlash.data), qspiFlash.read.eq(pic.iBus.read), addr.eq(pic.pBus.address), read.eq(pic.pBus.read), pic.pBus.readData.eq(dataIn), write.eq(pic.pBus.write), dataOut.eq(pic.pBus.writeData), dataDir.eq(pic.pBus.write), ] return m def get_ports(self): return []
25
82
0.682264
1,170
0.883019
0
0
0
0
0
0
79
0.059623
53ad1ae14a311f840335b9dec9f60aa2cc4425a1
2,615
py
Python
cogs/stats.py
est73/raid-shack
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
[ "MIT" ]
null
null
null
cogs/stats.py
est73/raid-shack
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
[ "MIT" ]
null
null
null
cogs/stats.py
est73/raid-shack
727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e
[ "MIT" ]
null
null
null
from discord.ext import commands import discord class Stats(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() @commands.has_permissions(manage_channels=True) async def stats(self, ctx): members = await ctx.guild.fetch_members(limit=None).flatten() member_count = 0 member_role_count = 0 instinct_count = 0 mystic_count = 0 valor_count = 0 ign_count = 0 tc_count = 0 level_count = 0 country_count = 0 profile_count = 0 for member in members: if not member.bot: member_count += 1 for role in member.roles: if role.name == "Member": member_role_count += 1 if role.name == "instinct": instinct_count += 1 if role.name == "mystic": mystic_count += 1 if role.name == "valor": valor_count += 1 if role.name == "ign": ign_count += 1 if role.name == "tc": tc_count += 1 if role.name == "level": level_count += 1 if role.name == "country": country_count += 1 if role.name == "profile": profile_count += 1 values = [f'Members: {member_count}', f'Members Role: {member_role_count}', f'Members on Team Instinct: {instinct_count}', f'Members on Team Mystic: {mystic_count}', f'Members on Team Valor: {valor_count}', f'Members with IGN set: {ign_count}', f'Members with TC set: {tc_count}', f'Members with level set: {level_count}', f'Members with country set: {country_count}', f'Members with completed Nexus Profiles: {profile_count}'] embed = discord.Embed(color=discord.Color.green()) embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url) embed.add_field(name='Server Stats:', value='\n'.join(values), inline=False) await ctx.send(embed=embed) @stats.error async def permission_error(self, ctx, error): if isinstance(error, commands.MissingPermissions): await ctx.send("Sorry, you can't run this command") else: raise error def setup(bot): bot.add_cog(Stats(bot))
35.337838
84
0.507457
2,518
0.962906
0
0
2,427
0.928107
2,334
0.892543
519
0.19847
53b0797fa1d2b73bd60c7d0448335bb8ff3970e6
2,995
py
Python
tests/bucket/test_bucket.py
WillChilds-Klein/mistress-mapreduce
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
[ "Apache-2.0" ]
2
2018-12-02T11:10:15.000Z
2019-02-21T22:24:00.000Z
tests/bucket/test_bucket.py
WillChilds-Klein/mistress-mapreduce
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
[ "Apache-2.0" ]
1
2019-02-21T22:23:36.000Z
2019-02-21T22:23:36.000Z
tests/bucket/test_bucket.py
WillChilds-Klein/mistress-mapreduce
c991a502545bd0d3ec4f914cdc63faf6a40e77ae
[ "Apache-2.0" ]
3
2018-04-26T16:02:10.000Z
2018-12-02T11:10:16.000Z
from mrs.bucket import WriteBucket from mrs import BinWriter, HexWriter def test_writebucket(): b = WriteBucket(0, 0) b.addpair((4, 'test')) b.collect([(3, 'a'), (1, 'This'), (2, 'is')]) values = ' '.join(value for key, value in b) assert values == 'test a This is' b.sort() values = ' '.join(value for key, value in b) assert values == 'This is a test' def test_write_only(): b = WriteBucket(0, 0) b.addpair((4, 'test'), write_only=True) b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True) values = ' '.join(value for key, value in b) assert values == '' readonly_copy = b.readonly_copy() assert readonly_copy.url is None def test_writing(tmpdir): b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter) prefix = b.prefix() assert prefix == 'source_2_split_4_' listdir = tmpdir.listdir() assert listdir == [] b.addpair((1, 2)) filename = prefix + '.mrsb' path = tmpdir.join(filename).strpath listdir = tmpdir.listdir() assert listdir == [path] readonly_copy = b.readonly_copy() assert readonly_copy.url == path def test_roundtrip(tmpdir): b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter) prefix = b.prefix() assert prefix == 'source_2_split_4_' listdir = tmpdir.listdir() assert listdir == [] b.addpair((4, 'test')) b.collect([(3, 'a'), (1, 'This'), (2, 'is')]) values = ' '.join(value for key, value in b) assert values == 'test a This is' b.close_writer(do_sync=False) filename = prefix + '.mrsb' path = tmpdir.join(filename).strpath listdir = tmpdir.listdir() assert listdir == [path] readonly_copy = b.readonly_copy() assert readonly_copy.url == path values = ' '.join(value for key, value in readonly_copy) assert values == 'test a This is' values = ' '.join(value for key, value in readonly_copy.stream()) assert values == 'test a This is' b.clean() listdir = tmpdir.listdir() assert listdir == [] def test_roundtrip_write_only(tmpdir): b = WriteBucket(7, 1, dir=tmpdir.strpath, format=HexWriter) prefix = b.prefix() assert prefix == 'source_7_split_1_' listdir = tmpdir.listdir() assert listdir == [] b.addpair((4, 'test'), write_only=True) b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True) values = ' '.join(value for key, value in b) assert values == '' b.close_writer(do_sync=False) filename = prefix + '.mrsx' path = tmpdir.join(filename).strpath listdir = tmpdir.listdir() assert listdir == [path] readonly_copy = b.readonly_copy() assert readonly_copy.url == path values = ' '.join(value for key, value in readonly_copy) assert values == '' values = ' '.join(value for key, value in readonly_copy.stream()) assert values == 'test a This is' b.clean() listdir = tmpdir.listdir() assert listdir == [] # vim: et sw=4 sts=4
26.741071
69
0.621035
0
0
0
0
0
0
0
0
303
0.101169
53b14303d9879fe4fc46ca016bb6d34bfedbf48e
35,783
py
Python
inquire/agents/dempref.py
HARPLab/inquire
fa74eb10e5391a0f226753668a31527c68fc6962
[ "BSD-3-Clause" ]
null
null
null
inquire/agents/dempref.py
HARPLab/inquire
fa74eb10e5391a0f226753668a31527c68fc6962
[ "BSD-3-Clause" ]
null
null
null
inquire/agents/dempref.py
HARPLab/inquire
fa74eb10e5391a0f226753668a31527c68fc6962
[ "BSD-3-Clause" ]
null
null
null
""" An agent which uses demonstrations and preferences. Code adapted from Learning Reward Functions by Integrating Human Demonstrations and Preferences. """ import itertools import os import time from pathlib import Path from typing import Dict, List import arviz as az from inquire.agents.agent import Agent from inquire.environments.environment import Environment from inquire.interactions.feedback import Query, Trajectory from inquire.interactions.modalities import Preference import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as pm import pymc3.distributions.transforms as tr import scipy.optimize as opt import theano.tensor as tt class DemPref(Agent): """A preference-querying agent seeded with demonstrations. Note: We instantiate the agent according to arguments corresponding to what the the original paper's codebase designates as their main experiment. """ def __init__( self, weight_sample_count: int, trajectory_sample_count: int, trajectory_length: int, interaction_types: list = [], w_dim: int = 4, which_param_csv: int = 0, visualize: bool = False, ): """Initialize the agent. Note we needn't maintain a domain's start state; that's handled in inquire/tests/evaluation.py and the respective domain. """ self._weight_sample_count = weight_sample_count self._trajectory_sample_count = trajectory_sample_count self._trajectory_length = trajectory_length self._interaction_types = interaction_types self._visualize = visualize """ Get the pre-defined agent parameters """ self._dempref_agent_parameters = self.read_param_csv(which_param_csv) """ Instance attributes from orginal codebase's 'runner.py' object. Note that some variable names are modified to be consist with the Inquire parlance. """ self.domain_name = self._dempref_agent_parameters["domain"][0] self.teacher_type = self._dempref_agent_parameters["teacher_type"][0] self.n_demos = self._dempref_agent_parameters["n_demos"][0] self.gen_demos = self._dempref_agent_parameters["gen_demos"][0] self.opt_iter_count = self._dempref_agent_parameters["opt_iter_count"][ 0 ] self.trim_start = self._dempref_agent_parameters["trim_start"][0] self.query_option_count = self._dempref_agent_parameters[ "query_option_count" ][0] self.update_func = self._dempref_agent_parameters["update_func"][0] self.trajectory_length = self._dempref_agent_parameters[ "trajectory_length" ][0] self.incl_prev_query = self._dempref_agent_parameters[ "incl_prev_query" ][0] self.gen_scenario = self._dempref_agent_parameters["gen_scenario"][0] self.n_pref_iters = self._dempref_agent_parameters["n_pref_iters"][0] self.epsilon = self._dempref_agent_parameters["epsilon"][0] """ Instantiate the DemPref-specific sampler and query generator: """ self._sampler = None self._w_samples = None self._query_generator = None self._first_q_session = True self._q_session_index = 0 self._query_index = 0 self._w_dim = w_dim assert ( self.update_func == "pick_best" or self.update_func == "approx" or self.update_func == "rank" ), ("Update" " function must be one of the provided options") if self.incl_prev_query and self.teacher_type == "term": assert ( self.n_demos > 0 ), "Cannot include previous query if no demonstration is provided" self.n_samples_summ = self._dempref_agent_parameters["n_samples_summ"][ 0 ] self.n_samples_exp = self._dempref_agent_parameters["n_samples_exp"][0] self.beta_demo = self._dempref_agent_parameters["beta_demo"][0] self.beta_pref = self._dempref_agent_parameters["beta_pref"][0] self.beta_teacher = 
self._dempref_agent_parameters["beta_teacher"][0] """If we want to save data as they did in DemPref:""" self.first_q_session = True self.q_session_index = 0 self.query_index = 0 self.config = [ self.teacher_type, self.n_demos, self.trim_start, self.query_option_count, self.update_func, self.trajectory_length, self.incl_prev_query, self.gen_scenario, self.n_pref_iters, self.epsilon, self.n_samples_summ, self.n_samples_exp, self.beta_demo, self.beta_pref, self.beta_teacher, ] self.df = pd.DataFrame(columns=["run #", "pref_iter", "type", "value"]) def initialize_weights(self, domain: Environment) -> np.ndarray: """Randomly initialize weights for gradient descent.""" self.reset() return self.w_samples def reset(self) -> None: """Prepare for new query session.""" if self._sampler is not None: self._sampler.clear_pref() self._sampler = self.DemPrefSampler( query_option_count=self.query_option_count, dim_features=self._w_dim, update_func=self.update_func, beta_demo=self.beta_demo, beta_pref=self.beta_pref, visualize=self._visualize, ) self.w_samples = self._sampler.sample(N=self.n_samples_summ) """If we want to save data as they did in DemPref:""" mean_w = np.mean(self.w_samples, axis=0) mean_w = mean_w / np.linalg.norm(mean_w) var_w = np.var(self.w_samples, axis=0) # Make sure to properly index data: if self.first_q_session: self.first_q_session = False else: self.q_session_index += 1 data = [ [self.q_session_index, 0, "mean", mean_w], [self.q_session_index, 0, "var", var_w], ] self.df = self.df.append( pd.DataFrame( data, columns=["run #", "pref_iter", "type", "value"] ), ignore_index=True, ) def generate_query( self, domain: Environment, query_state: int, curr_w: np.ndarray, verbose: bool = False, ) -> list: """Generate query using approximate gradients. Code adapted from DemPref's ApproxQueryGenerator. """ if self._query_generator is None: self._query_generator = self.DemPrefQueryGenerator( dom=domain, num_queries=self.query_option_count, trajectory_length=self.trajectory_length, num_expectation_samples=self.n_samples_exp, include_previous_query=self.incl_prev_query, generate_scenario=self.gen_scenario, update_func=self.update_func, beta_pref=self.beta_pref, ) if self.incl_prev_query: if len(self.demos) > 0: self.random_scenario_index = np.random.randint(len(self.demos)) else: self.random_scenario_index = 0 last_query_choice = self.all_query_choices[ self.random_scenario_index ] # Generate query_options while ensuring that features of query_options # are epsilon apart: query_diff = 0 print("Generating query_options") while query_diff <= self.epsilon: if self.incl_prev_query: if last_query_choice.null: query_options = self._query_generator.generate_query_options( self.w_samples, blank_traj=True ) else: query_options = self._query_generator.generate_query_options( self.w_samples, last_query_choice ) else: query_options = self._query_generator.generate_query_options( self.w_samples ) query_diffs = [] for m in range(len(query_options)): for n in range(m): query_diffs.append( np.linalg.norm( domain.features_from_trajectory( query_options[m].trajectory ) - domain.features_from_trajectory( query_options[n].trajectory ) ) ) query_diff = max(query_diffs) query = Query( query_type=Preference, task=None, start_state=query_state, trajectories=query_options, ) return query def update_weights( self, current_weights: np.ndarray, domain: Environment, feedback: list ) -> np.ndarray: """Update the model's learned weights. 
::inputs: ::current_weights: Irrelevant for DemPref; useful to other agents ::domain: The task's environment ::feedback: A list of the human feedback received to this point. DemPref utilizes only the most recent """ if feedback == []: # No feedback yet received return self.w_samples else: # Use the most recent Choice in feedback: query_options = feedback[-1].choice.options choice = feedback[-1].choice.selection choice_index = query_options.index(choice) if self.incl_prev_query: self.all_query_choices[self.random_scenario_index] = choice # Create dictionary map from rankings to query-option features; # load into sampler: features = [ domain.features_from_trajectory(x.trajectory) for x in query_options ] phi = {k: features[k] for k in range(len(query_options))} self._sampler.load_prefs(phi, choice_index) self.w_samples = self._sampler.sample(N=self.n_samples_summ) # Return the new weights from the samples: mean_w = np.mean(self.w_samples, axis=0) mean_w = mean_w / np.linalg.norm(mean_w) return np.array(mean_w, copy=True).reshape(1, -1) def read_param_csv(self, which_csv: int = 0) -> dict: """Read an agent-parameterization .csv. ::inputs: :creation_index: A time-descending .csv file index. e.g. if creation_index = 0, use the dempref dempref_agent.csv most recently created. """ data_path = Path.cwd() / Path("../inquire/agents/") # Sort the .csvs in descending order by time of creation: all_files = np.array(list(Path.iterdir(data_path))) all_csvs = all_files[ np.argwhere([f.suffix == ".csv" for f in all_files]) ] all_csvs = np.array([str(f[0]).strip() for f in all_csvs]) sorted_csvs = sorted(all_csvs, key=os.path.getmtime) sorted_csvs = [Path(c) for c in sorted_csvs] # Select the indicated .csv and convert it to a dictionary: chosen_csv = sorted_csvs[-which_csv] df = pd.read_csv(chosen_csv) params_dict = df.to_dict() return params_dict def process_demonstrations( self, trajectories: list, domain: Environment ) -> None: """Generate demonstrations to seed the querying process.""" self.demos = trajectories phi_demos = [ domain.features_from_trajectory(x.trajectory) for x in self.demos ] self._sampler.load_demo(np.array(phi_demos)) self.cleaned_demos = self.demos if self.incl_prev_query: self.all_query_choices = [d for d in self.cleaned_demos] class DemPrefSampler: """Sample trajectories for querying. Code adapted from original DemPref agent. """ def __init__( self, query_option_count: int, dim_features: int, update_func: str = "approx", beta_demo: float = 0.1, beta_pref: float = 1.0, visualize: bool = False, ): """ Initialize the sampler. :param query_option_count: Number of queries. :param dim_features: Dimension of feature vectors. :param update_func: options are "rank", "pick_best", and "approx". To use "approx", query_option_count must be 2; will throw an assertion error otherwise :param beta_demo: parameter measuring irrationality of teacher in providing demonstrations :param beta_pref: parameter measuring irrationality of teacher in selecting preferences """ self.query_option_count = query_option_count self.dim_features = dim_features self.update_func = update_func self.beta_demo = beta_demo self.beta_pref = beta_pref self._visualize = visualize if self.update_func == "approx": assert ( self.query_option_count == 2 ), "Cannot use approximation to update function if query_option_count > 2" elif not ( self.update_func == "rank" or self.update_func == "pick_best" ): raise Exception( update_func + " is not a valid update function." 
            )
        # feature vectors from demonstrated trajectories
        self.phi_demos = np.zeros((1, self.dim_features))
        # a list of np.arrays containing feature-difference vectors that
        # encode the rankings from the preference queries
        self.phi_prefs = []

    def load_demo(self, phi_demos: np.ndarray):
        """
        Load the demonstrations into the Sampler.

        :param phi_demos: a Numpy array containing feature vectors for each
            demonstration; has dimension n_dem -by- self.dim_features
        """
        self.phi_demos = phi_demos

    def load_prefs(self, phi: Dict, rank):
        """
        Load the results of a preference query into the Sampler.

        :param phi: a dictionary mapping rankings
            (0,...,query_option_count-1) to feature vectors
        """
        result = []
        if self.update_func == "rank":
            result = [None] * len(rank)
            for i in range(len(rank)):
                result[i] = phi[rank[i]]
        elif self.update_func == "approx":
            result = phi[rank] - phi[1 - rank]
        elif self.update_func == "pick_best":
            result, tmp = [phi[rank] - phi[rank]], []
            for key in sorted(phi.keys()):
                if key != rank:
                    tmp.append(phi[key] - phi[rank])
            result.extend(tmp)
        self.phi_prefs.append(np.array(result))

    def clear_pref(self):
        """Clear all preference information from the sampler."""
        self.phi_prefs = []

    def sample(self, N: int, T: int = 1, burn: int = 1000) -> np.ndarray:
        """Return N samples from the distribution.

        The distribution is defined by applying update_func on the
        demonstrations and preferences observed thus far.

        :param N: number of w_samples to draw.
        :param T: if greater than 1, all samples except each T^{th}
            sample are discarded
        :param burn: how many samples before the chain converges;
            these initial samples are discarded
        :return: list of w_samples drawn
        """

        """Define model for MCMC.

        NOTE the DemPref codebase creates a sampler via PyMC3 version 3.5;
        this codebase adapts their model to PyMC3 version 3.11.2. We use the
        NUTS sampling algorithm (an extension of Hamiltonian Monte Carlo
        MCMC): https://arxiv.org/abs/1111.4246.
""" # Define update function: if self.update_func == "approx": def update_function(distribution): result = tt.sum( [ -tt.nnet.relu( -self.beta_pref * tt.dot(self.phi_prefs[i], distribution) ) for i in range(len(self.phi_prefs)) ] ) + tt.sum( self.beta_demo * tt.dot(self.phi_demos, distribution) ) return result elif self.update_func == "pick_best": def update_function(distribution): result = tt.sum( [ -tt.log( tt.sum( tt.exp( self.beta_pref * tt.dot( self.phi_prefs[i], distribution ) ) ) ) for i in range(len(self.phi_prefs)) ] ) + tt.sum( self.beta_demo * tt.dot(self.phi_demos, distribution) ) return result elif self.update_func == "rank": def update_function(distribution): result = ( tt.sum( # sum across different queries [ tt.sum( # sum across different terms in PL-update -tt.log( [ tt.sum( # sum down different feature-differences in a single term in PL-update tt.exp( self.beta_pref * tt.dot( self.phi_prefs[i][ j:, : ] - self.phi_prefs[i][j], distribution, ) ) ) for j in range( self.query_option_count ) ] ) ) for i in range(len(self.phi_prefs)) ] ) + tt.sum( self.beta_demo * tt.dot(self.phi_demos, distribution) ), ) return result self.update_function = update_function while True: test_value = np.random.uniform( low=-1, high=1, size=self.dim_features ) test_value = test_value / np.linalg.norm(test_value) norm = (test_value ** 2).sum() if norm <= 1: break # Get a sampling trace (and avoid Bad Initial Energy): while True: trace = self.get_trace(test_value) if trace is not None: break if self._visualize: az.plot_trace(trace) plt.show() input("Press enter to continue") az.plot_energy(trace) plt.show() input("Press enter to continue") az.plot_posterior(trace) plt.show() input("Press enter to continue") all_samples = trace.sel( draw=slice(burn, None) ).posterior.rv_x.values all_samples = all_samples.reshape( all_samples.shape[0] * all_samples.shape[1], -1 ) w_samples = np.array([r / np.linalg.norm(r) for r in all_samples]) return w_samples def get_trace(self, test_val: np.ndarray) -> az.InferenceData: """Create an MCMC trace.""" # model accumulates the objects defined within the proceeding # context: model = pm.Model() with model: # Add random-variable x to model: rv_x = pm.Uniform( name="rv_x", shape=self.dim_features, lower=-1, upper=1, testval=test_val, ) # Define the prior as the unit ball centered at 0: def sphere(w): """Determine if w is part of the unit ball.""" w_sum = pm.math.sqr(w).sum() result = tt.switch( pm.math.gt(w_sum, 1.0), -100, # -np.inf, self.update_function(w), ) return result try: # Potential is a "potential term" defined as an "additional # tensor...to be added to the model logp"(PyMC3 developer # guide). In this instance, the potential is effectively # the model's log-likelihood. p = pm.Potential("sphere", sphere(rv_x)) trace = pm.sample( 10000, tune=5000, return_inferencedata=True, init="adapt_diag", ) # except: except ( pm.SamplingError, pm.parallel_sampling.ParallelSamplingError, ): return None return trace class DemPrefQueryGenerator: """Generate queries. Code adapted from original DemPref agent. """ def __init__( self, dom: Environment, num_queries: int, trajectory_length: int, num_expectation_samples: int, include_previous_query: bool, generate_scenario: bool, update_func: str, beta_pref: float, ) -> None: """ Initialize the approx query generation. Note: this class generates queries using approx gradients. 
::original inputs: :dom: the domain to generate queries on :num_queries: number of queries to generate at each time step :trajectory_length: the length of each query :num_expectation_samples: number of w_samples to use in approximating the objective function :include_previous_query: boolean for whether one of the queries is the previously selected query :generate_scenario: boolean for whether we want to generate the scenario -- i.e., other agents' behavior :update_func: the update_func used; the options are "pick_best", "approx", and "rank" :beta_pref: the rationality parameter for the teacher selecting her query ::Inquire-specific inputs: :start_state: The state from which a trajectory begins. """ assert ( num_queries >= 1 ), "QueryGenerator.__init__: num_queries must be at least 1" assert ( trajectory_length >= 1 ), "QueryGenerator.__init__: trajectory_length must be at least 1" assert ( num_expectation_samples >= 1 ), "QueryGenerator.__init__: num_expectation_samples must be \ at least 1" self.domain = dom self.num_queries = num_queries self.trajectory_length = trajectory_length self.num_expectation_samples = num_expectation_samples self.include_previous_query = include_previous_query self.generate_scenario = ( generate_scenario # Currently must be False ) assert ( self.generate_scenario is False ), "Cannot generate scenario when using approximate gradients" self.update_func = update_func self.beta_pref = beta_pref self.num_new_queries = ( self.num_queries - 1 if self.include_previous_query else self.num_queries ) def generate_query_options( self, w_samples: np.ndarray, last_query_choice: Trajectory = None, blank_traj: bool = False, ) -> List[Trajectory]: """ Generate self.num_queries number of queries. This function produces query options that (locally) maximize the maximum volume removal objective. :param w_samples: Samples of w :param last_query_choice: The previously selected query. Only required if self.incl_prev_query is True :param blank_traj: True is last_query_choice is blank. (Only True if not using Dempref but using incl_prev_) :return: a list of trajectories (queries) """ start = time.perf_counter() def func(controls: np.ndarray, *args) -> float: """Minimize via L_BFGS. :param controls: an array, concatenated to contain the control input for all queries :param args: the first argument is the domain, and the second is the samples that will be used to approximate the objective function :return: the value of the objective function for the given set of controls """ domain = args[0] w_samples = args[1] controls = np.array(controls) controls_set = [ controls[i * z : (i + 1) * z] for i in range(self.num_new_queries) ] features_each_q_option = np.zeros( (domain.w_dim, self.num_new_queries) ) for i, c in enumerate(controls_set): features_each_q_option[ :, i ] = domain.features_from_trajectory( c, controls_as_input=True ) if self.include_previous_query and not blank_traj: features_each_q_option = np.append( features_each_q_option, domain.features_from_trajectory(last_query_choice), axis=1, ) if self.update_func == "pick_best": return -objective(features_each_q_option, w_samples) elif self.update_func == "approx": return -approx_objective(features_each_q_option, w_samples) else: return -rank_objective(features_each_q_option, w_samples) def objective(features: List, w_samples: np.ndarray) -> float: """ Maximize the volume removal objective. 
:param features: a list containing the feature values of each query :param w_samples: samples of w, used to approximate the objective :return: the value of the objective function, evaluated on the given queries' features """ volumes_removed = [] for i in range(len(features)): feature_diff = np.array( [f - features[i] for f in features] ) # query_option_count x feature_size weighted_feature_diff = ( np.sum(np.dot(feature_diff, w_samples.T), axis=1) / w_samples.shape[0] ) # query_option_count x 1 -- summed across w_samples v_removed = 1.0 - 1.0 / np.sum( np.exp(self.beta_pref * weighted_feature_diff) ) volumes_removed.append(v_removed) return np.min(volumes_removed) def approx_objective( features: np.ndarray, w_samples: np.ndarray ) -> float: """ Approximate the maximum volume removal objective. :param features: the feature values of each query option :param w_samples: w_samples of w used to approximate the objective :return: the value of the objective function, evaluated on the given queries' features """ if features.shape[0] > features.shape[1]: features = features.T volumes_removed = [] for i in range(len(features)): feature_diff = ( features[i] - features[1 - i] ) # 1 x feature_size weighted_feature_diff = ( np.sum(np.dot(feature_diff, w_samples.T)) / w_samples.shape[0] ) # 1 x 1 -- summed across w_samples v_removed = 1.0 - np.minimum( 1.0, np.exp(self.beta_pref * weighted_feature_diff) ) volumes_removed.append(v_removed) return np.min(volumes_removed) def rank_objective(features, w_samples) -> float: """ The ranking maximum volume removal objective function. Note: This objective uses the Plackett-Luce model of teacher behavior. CANNOT BE USED WITH (incl_prev_QUERY AND NO DEMPREF). :param features: a list containing the feature values of each query :param w_samples: samples of w, used to approximate the objective :return: the value of the objective function, evaluated on the given queries' features """ # features: query_option_count x feature_size # w_samples: n_samples x feature_size exp_rewards = ( np.sum(np.dot(features, w_samples.T), axis=1) / w_samples.shape[0] ) # query_option_count x 1 -- summed across w_samples volumes_removed = [] rankings = itertools.permutations( list(range(self.num_queries)) ) # iterating over all possible rankings for rank in rankings: exp_rewards_sorted = [None] * len(rank) for i in range(len(rank)): exp_rewards_sorted[rank[i]] = exp_rewards[i] value, i = 1, 0 for i in range(len(rank) - 1): value *= 1.0 / np.sum( np.exp( self.beta_pref * ( np.array(exp_rewards_sorted[i:]) - exp_rewards_sorted[i] ) ) ) volumes_removed.append(1 - value) return np.min(volumes_removed) # The following optimization is w.r.t. volume removal; the domain's # optimization is w.r.t. the linear combination of weights and # features; this difference is a trait of the DemPref codebase. 
z = self.trajectory_length * self.domain.control_size lower_input_bound = [ x[0] for x in self.domain.control_bounds ] * self.trajectory_length upper_input_bound = [ x[1] for x in self.domain.control_bounds ] * self.trajectory_length opt_res = opt.fmin_l_bfgs_b( func, x0=np.random.uniform( low=self.num_new_queries * lower_input_bound, high=self.num_new_queries * upper_input_bound, size=(self.num_new_queries * z), ), args=(self.domain, w_samples), bounds=self.domain.control_bounds * self.num_new_queries * self.trajectory_length, approx_grad=True, ) query_options_controls = [ opt_res[0][i * z : (i + 1) * z] for i in range(self.num_new_queries) ] end = time.perf_counter() print(f"Finished computing queries in {end - start}s") # Note the domain was reset w/ appropriate seed before beginning # this query session; domain.run(c) will thus reset to appropriate # state: raw_trajectories = [ self.domain.run(c) for c in query_options_controls ] raw_phis = [ self.domain.features_from_trajectory(t) for t in raw_trajectories ] query_options_trajectories = [ Trajectory(raw_trajectories[i], raw_phis[i]) for i in range(len(raw_trajectories)) ] if self.include_previous_query and not blank_traj: return [last_query_choice] + query_options_trajectories else: return query_options_trajectories
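A numpy-only worked sketch of the volume-removal quantity that objective() above computes and generate_query_options maximizes (func() minimizes its negative), on made-up feature vectors and weight samples:

import numpy as np

beta_pref = 1.0
features = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]  # two query options
w_samples = np.random.randn(100, 2)                       # posterior w samples

volumes_removed = []
for i in range(len(features)):
    feature_diff = np.array([f - features[i] for f in features])
    weighted = np.sum(np.dot(feature_diff, w_samples.T), axis=1) / w_samples.shape[0]
    volumes_removed.append(1.0 - 1.0 / np.sum(np.exp(beta_pref * weighted)))

print(np.min(volumes_removed))  # the value the query optimizer tries to raise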
40.570295
123
0.506386
35,100
0.980913
0
0
0
0
0
0
11,153
0.311684
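The query-selection fragment above evaluates several volume-removal objectives over posterior samples of w. A minimal standalone sketch of the first (maximum volume removal) objective, rewritten as a free function; beta_pref and the toy array shapes are assumptions for illustration, since the original computes this inside a class via self.beta_pref:

import numpy as np

def volume_removal(features, w_samples, beta_pref=1.0):
    # features: query_option_count x feature_size
    # w_samples: n_samples x feature_size
    volumes_removed = []
    for i in range(len(features)):
        feature_diff = np.array([f - features[i] for f in features])
        weighted = np.sum(feature_diff @ w_samples.T, axis=1) / w_samples.shape[0]
        volumes_removed.append(1.0 - 1.0 / np.sum(np.exp(beta_pref * weighted)))
    return np.min(volumes_removed)

features = np.random.randn(3, 4)     # 3 query options, 4 features
w_samples = np.random.randn(100, 4)  # 100 posterior samples of w
print(volume_removal(features, w_samples))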
53b25c7fce6d985ae97109a316a32f1fdb359f32
1,049
py
Python
coba/learners/__init__.py
mrucker/banditbenchmark
0365291b3a0cf1d862d294e0386d0ccad3f360f1
[ "BSD-3-Clause" ]
1
2020-07-22T13:43:14.000Z
2020-07-22T13:43:14.000Z
coba/learners/__init__.py
mrucker/coba
4f679fb5c6e39e2d0bf3e609c77a2a6865168795
[ "BSD-3-Clause" ]
null
null
null
coba/learners/__init__.py
mrucker/coba
4f679fb5c6e39e2d0bf3e609c77a2a6865168795
[ "BSD-3-Clause" ]
null
null
null
"""This module contains all public learners and learner interfaces.""" from coba.learners.primitives import Learner, SafeLearner from coba.learners.bandit import EpsilonBanditLearner, UcbBanditLearner, FixedLearner, RandomLearner from coba.learners.corral import CorralLearner from coba.learners.vowpal import VowpalMediator from coba.learners.vowpal import VowpalArgsLearner, VowpalEpsilonLearner, VowpalSoftmaxLearner, VowpalBagLearner from coba.learners.vowpal import VowpalCoverLearner, VowpalRegcbLearner, VowpalSquarecbLearner, VowpalOffPolicyLearner from coba.learners.linucb import LinUCBLearner __all__ = [ 'Learner', 'SafeLearner', 'RandomLearner', 'FixedLearner', 'EpsilonBanditLearner', 'UcbBanditLearner', 'CorralLearner', 'LinUCBLearner', 'VowpalArgsLearner', 'VowpalEpsilonLearner', 'VowpalSoftmaxLearner', 'VowpalBagLearner', 'VowpalCoverLearner', 'VowpalRegcbLearner', 'VowpalSquarecbLearner', 'VowpalOffPolicyLearner', 'VowpalMediator' ]
36.172414
122
0.766444
0
0
0
0
0
0
0
0
375
0.357483
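Since the module above only re-exports names, a quick sanity check is to confirm that everything listed in __all__ actually resolves on the package. A sketch, assuming coba is installed:

import coba.learners as cl

# every name the module re-exports should be an attribute of the package
for name in cl.__all__:
    assert hasattr(cl, name), name
print(f"{len(cl.__all__)} public names OK")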
53b40880bc916c9f0a3ace8c04060a57ded76e7b
24,347
py
Python
virtual/lib/python3.8/site-packages/dns/zonefile.py
Lenus254/personal_blog
aac38e4b5372c86efa8e24db2e051fef8e5feef8
[ "Unlicense" ]
1
2022-01-27T05:54:14.000Z
2022-01-27T05:54:14.000Z
virtual/lib/python3.8/site-packages/dns/zonefile.py
Lenus254/personal_blog
aac38e4b5372c86efa8e24db2e051fef8e5feef8
[ "Unlicense" ]
null
null
null
virtual/lib/python3.8/site-packages/dns/zonefile.py
Lenus254/personal_blog
aac38e4b5372c86efa8e24db2e051fef8e5feef8
[ "Unlicense" ]
null
null
null
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS Zones.""" import re import sys import dns.exception import dns.name import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import dns.transaction import dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException): """Unknown origin""" class CNAMEAndOtherData(dns.exception.DNSException): """A node has a CNAME and other data""" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node is None: # empty nodes are neutral. return node_kind = node.classify() if node_kind == dns.node.NodeKind.CNAME and \ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not compatible with a ' 'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR and \ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not compatible with a ' 'regular data node') # Otherwise at least one of the node and the rdataset is neutral, so # adding the rdataset is ok class Reader: """Read a DNS zone file into a transaction.""" def __init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin, self.relativize, _) = \ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl = 0 self.last_ttl_known = False if force_ttl is not None: default_ttl = force_ttl if default_ttl is None: self.default_ttl = 0 self.default_ttl_known = False else: self.default_ttl = default_ttl self.default_ttl_known = True self.last_name = self.current_origin self.zone_rdclass = rdclass self.txn = txn self.saved_state = [] self.current_file = None self.allow_include = allow_include self.allow_directives = allow_directives self.force_name = force_name self.force_ttl = force_ttl self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self): token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError return token def _rr_line(self): """Process one line from a DNS zone file.""" token = None # Name if self.force_name is not None: name = self.force_name else: if self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): # treat 
leading WS followed by EOL/EOF as if they were EOL/EOF. return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) # TTL if self.force_ttl is not None: ttl = self.force_ttl self.last_ttl = ttl self.last_ttl_known = True else: token = self._get_identifier() ttl = None try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token = None except dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) # Class if self.force_rdclass is not None: rdclass = self.force_rdclass else: token = self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception: rdclass = self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError("RR class is not zone's class") # Type if self.force_rdtype is not None: rdtype = self.force_rdtype else: token = self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( "unknown rdatatype '%s'" % token.value) try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise. raise except Exception: # All exceptions that occur in the processing of rdata # are treated as syntax errors. This is not strictly # correct, but it is correct almost all of the time. # We convert them to syntax errors so that we can emit # helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( "caught exception {}: {}".format(str(ty), str(va))) if not self.default_ttl_known and rdtype == dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9 behavior inherits the zone default # TTL from the SOA minttl if no $TTL statement is present before the # SOA is parsed. self.default_ttl = rd.minimum self.default_ttl_known = True if ttl is None: # if we didn't have a TTL on the SOA, set it! ttl = rd.minimum # TTL check. We had to wait until now to do this as the SOA RR's # own TTL can be inferred from its minimum. if ttl is None: raise dns.exception.SyntaxError("Missing default TTL value") self.txn.add(name, ttl, rd) def _parse_modify(self, side): # Here we catch everything in '{' '}' in a group so we can replace it # with ''. is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$") is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$") is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$") # Sometimes there are modifiers in the hostname. These come after # the dollar sign. They are in the form: ${offset[,width[,base]]}. 
# Make names g1 = is_generate1.match(side) if g1: mod, sign, offset, width, base = g1.groups() if sign == '': sign = '+' g2 = is_generate2.match(side) if g2: mod, sign, offset = g2.groups() if sign == '': sign = '+' width = 0 base = 'd' g3 = is_generate3.match(side) if g3: mod, sign, offset, width = g3.groups() if sign == '': sign = '+' base = 'd' if not (g1 or g2 or g3): mod = '' sign = '+' offset = 0 width = 0 base = 'd' if base != 'd': raise NotImplementedError() return mod, sign, offset, width, base def _generate_line(self): # range lhs [ttl] [class] type rhs [ comment ] """Process one line containing the GENERATE statement from a DNS zone file.""" if self.current_origin is None: raise UnknownOrigin token = self.tok.get() # Range (required) try: start, stop, step = dns.grange.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # lhs (required) try: lhs = token.value token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError("Missing default TTL value") if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError("RR class is not zone's class") # Type try: rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value) # rhs (required) rhs = token.value # The code currently only supports base 'd', so the last value # in the tuple _parse_modify returns is ignored lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs) for i in range(start, stop + 1, step): # +1 because bind is inclusive and python is exclusive if lsign == '+': lindex = i + int(loffset) elif lsign == '-': lindex = i - int(loffset) if rsign == '-': rindex = i - int(roffset) elif rsign == '+': rindex = i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod), lzfindex) rdata = rhs.replace('$%s' % (rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise. raise except Exception: # All exceptions that occur in the processing of rdata # are treated as syntax errors. This is not strictly # correct, but it is correct almost all of the time. # We convert them to syntax errors so that we can emit # helpful filename:line info. 
(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va))) self.txn.add(name, ttl, rd) def read(self): """Read a DNS zone file and build a zone object. @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin """ try: while 1: token = self.tok.get(True, True) if token.is_eof(): if self.current_file is not None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$' and self.allow_directives: c = token.value.upper() if c == '$TTL': token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError("bad $TTL") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif c == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and self.allow_include: token = self.tok.get() filename = token.value token = self.tok.get() if token.is_identifier(): new_origin =\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( "bad origin in $INCLUDE") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif c == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( "Unknown zone file directive '" + c + "'") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail: (filename, line_number) = self.tok.where() if detail is None: detail = "syntax error" ex = dns.exception.SyntaxError( "%s:%d: %s" % (filename, line_number, detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only): assert not read_only super().__init__(manager, replacement, read_only) self.rdatasets = {} def _get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name): rdatasets = [] for (rdataset_name, _, _), rdataset in self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0: return None node = dns.node.Node() node.rdatasets = rdatasets return node def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self, name): # First remove any changes involving the name remove = [] for key in self.rdatasets: if key[0] == name: remove.append(key) if len(remove) > 0: for key in remove: del self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers): try: del self.rdatasets[(name, rdtype, covers)] except KeyError: pass def _name_exists(self, name): for (n, _, _) in self.rdatasets: if n == name: return True return False def _changed(self): return len(self.rdatasets) > 0 def _end_transaction(self, 
commit): if commit and self._changed(): rrsets = [] for (name, _, _), rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize = relativize self.rdclass = rdclass self.rrsets = [] def writer(self, replacement=False): assert replacement is True return RRsetsReaderTransaction(self, True, False) def get_class(self): return self.rdclass def origin_information(self): if self.relativize: effective = dns.name.empty else: effective = self.origin return (self.origin, self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets = rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): """Read one or more rrsets from the specified text, possibly subject to restrictions. *text*, a file object or a string, is the input to process. *name*, a string, ``dns.name.Name``, or ``None``, is the owner name of the rrset. If not ``None``, then the owner name is "forced", and the input must not specify an owner name. If ``None``, then any owner names are allowed and must be present in the input. *ttl*, an ``int``, string, or None. If not ``None``, the the TTL is forced to be the specified value and the input must not specify a TTL. If ``None``, then a TTL may be specified in the input. If it is not specified, then the *default_ttl* will be used. *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If not ``None``, then the class is forced to the specified value, and the input must not specify a class. If ``None``, then the input may specify a class that matches *default_rdclass*. Note that it is not possible to return rrsets with differing classes; specifying ``None`` for the class simply allows the user to optionally type a class as that may be convenient when cutting and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class of the returned rrsets. *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not ``None``, then the type is forced to the specified value, and the input must not specify a type. If ``None``, then a type must be present for each RR. *default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if the TTL is not forced and is not specified, then this value will be used. if ``None``, then if the TTL is not forced an error will occur if the TTL is not specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder is used. Note that codecs only apply to the owner name; dnspython does not do IDNA for names in rdata, as there is no IDNA zonefile format. *origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any relative names in the input, and also the origin to relativize to if *relativize* is ``True``. *relativize*, a bool. If ``True``, names are relativized to the *origin*; if ``False`` then any relative names in the input are made absolute by appending the *origin*. 
""" if isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str): name = dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype, default_ttl=default_ttl) reader.read() return manager.rrsets
38.9552
83
0.548897
18,633
0.76531
0
0
0
0
0
0
5,957
0.244671
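read_rrsets above is the convenience entry point. A minimal use, noting that the default rdclass forces class IN, so the input text must omit the class token (while TTLs and types must be present, since neither is forced nor defaulted here):

import dns.zonefile

text = ("example.org. 3600 A 192.0.2.1\n"
        "example.org. 3600 MX 10 mail.example.org.\n")
for rrset in dns.zonefile.read_rrsets(text):
    print(rrset)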
53b4099090d815c2fccdfff9285d6d8c4361e95f
11,719
py
Python
swift/common/daemon.py
fossabot/swift-1
63fc013b8b96484cede0e9901ad54676b8c93298
[ "Apache-2.0" ]
null
null
null
swift/common/daemon.py
fossabot/swift-1
63fc013b8b96484cede0e9901ad54676b8c93298
[ "Apache-2.0" ]
null
null
null
swift/common/daemon.py
fossabot/swift-1
63fc013b8b96484cede0e9901ad54676b8c93298
[ "Apache-2.0" ]
1
2020-03-09T19:58:52.000Z
2020-03-09T19:58:52.000Z
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import os import sys import time import signal from re import sub import eventlet.debug from eventlet.hubs import use_hub from swift.common import utils class Daemon(object): """ Daemon base class A daemon has a run method that accepts a ``once`` kwarg and will dispatch to :meth:`run_once` or :meth:`run_forever`. A subclass of Daemon must implement :meth:`run_once` and :meth:`run_forever`. A subclass of Daemon may override :meth:`get_worker_args` to dispatch arguments to individual child process workers and :meth:`is_healthy` to perform context specific periodic wellness checks which can reset worker arguments. Implementations of Daemon do not know *how* to daemonize, or execute multiple daemonized workers, they simply provide the behavior of the daemon and context specific knowledge about how workers should be started. """ def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): """Override this to run the script once""" raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): """Override this to run forever""" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): """ Override this to do something after running using multiple worker processes. This method is called in the parent process. This is probably only useful for run-once mode since there is no "after running" in run-forever mode. """ pass def get_worker_args(self, once=False, **kwargs): """ For each worker yield a (possibly empty) dict of kwargs to pass along to the daemon's :meth:`run` method after fork. The length of elements returned from this method will determine the number of processes created. If the returned iterable is empty, the Strategy will fallback to run-inline strategy. :param once: False if the worker(s) will be daemonized, True if the worker(s) will be run once :param kwargs: plumbed through via command line argparser :returns: an iterable of dicts, each element represents the kwargs to be passed to a single worker's :meth:`run` method after fork. """ return [] def is_healthy(self): """ This method is called very frequently on the instance of the daemon held by the parent process. If it returns False, all child workers are terminated, and new workers will be created. :returns: a boolean, True only if all workers should continue to run """ return True class DaemonStrategy(object): """ This is the execution strategy for using subclasses of Daemon. The default behavior is to invoke the daemon's :meth:`Daemon.run` method from within the parent process. When the :meth:`Daemon.run` method returns the parent process will exit. 
However, if the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked in child processes, with the arguments provided from the parent process's instance of the daemon. If a child process exits it will be restarted with the same options, unless it was executed in once mode. :param daemon: an instance of a :class:`Daemon` (has a `run` method) :param logger: a logger instance """ def __init__(self, daemon, logger): self.daemon = daemon self.logger = logger self.running = False # only used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False, **kwargs): """Run the daemon""" self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): """Daemonize and execute our strategy""" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False def _fork(self, once, **kwargs): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do not return from this stack, nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change options, aborting workers') self.cleanup() return True return False def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid, status = os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died', p) else: if pid == 0: # child still running continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not once: for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 else: if 
not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run() return 0 def cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): """ Loads settings from conf, then instantiates daemon ``klass`` and runs the daemon with the specified ``once`` kwarg. The section_name will be derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator => object-replicator). :param klass: Class to instantiate, subclass of :class:`Daemon` :param conf_file: Path to configuration file :param section_name: Section name from conf file to load config from :param once: Passed to daemon :meth:`Daemon.run` method """ # very often the config section_name is based on the class name # the None singleton will be passed through to readconf as is if section_name == '': section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: # The message will be printed to stderr # and results in an exit code of 1. sys.exit(e) use_hub(utils.get_hub()) # once on command line (i.e. daemonize=false) will over-ride config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to UTC. os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited %s', os.getpid())
36.621875
79
0.63572
8,300
0.708252
234
0.019968
0
0
0
0
5,158
0.44014
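A subclass of Daemon only has to provide run_once and run_forever; run_daemon then handles config loading, privilege dropping, and daemonization. A minimal sketch, where HelloDaemon and the conf path are placeholders, not part of swift:

import time
from swift.common.daemon import Daemon, run_daemon

class HelloDaemon(Daemon):
    def run_once(self, *args, **kwargs):
        self.logger.info('hello, exiting')

    def run_forever(self, *args, **kwargs):
        while True:
            self.logger.info('hello')
            time.sleep(30)

if __name__ == '__main__':
    # section name is derived as "hello-daemon" if not passed explicitly
    run_daemon(HelloDaemon, '/etc/swift/hello-daemon.conf', once=True)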
53b4d42745fdda68cc9c6626c17825d3356f7324
474
py
Python
backend/resource_files_sample.py
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
[ "Apache-2.0" ]
null
null
null
backend/resource_files_sample.py
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
[ "Apache-2.0" ]
null
null
null
backend/resource_files_sample.py
Bhaskers-Blu-Org1/multicloud-incident-response-navigator
e6ba6322fdcc533b6ed14abb4681470a6bb6bd85
[ "Apache-2.0" ]
1
2020-07-30T10:07:19.000Z
2020-07-30T10:07:19.000Z
import resource_files

resources = resource_files.ResourceFiles()

# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))

# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))

# sample use case of getting logs
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
36.461538
103
0.78481
0
0
0
0
0
0
0
0
304
0.64135
53b5ca21f061bcccc9e7720c97265d2e56f05552
1,305
py
Python
backend/api/v1/auth_module/auth_api.py
aroraenterprise/projecteos
e1fb0438af8cb59b77792523c6616c480b23a6f8
[ "MIT" ]
null
null
null
backend/api/v1/auth_module/auth_api.py
aroraenterprise/projecteos
e1fb0438af8cb59b77792523c6616c480b23a6f8
[ "MIT" ]
null
null
null
backend/api/v1/auth_module/auth_api.py
aroraenterprise/projecteos
e1fb0438af8cb59b77792523c6616c480b23a6f8
[ "MIT" ]
null
null
null
""" Project: flask-rest Author: Saj Arora Description: Handle auth endpoints such as auth/signup, auth/login """ from api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper from .auth_controller import AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model() args = helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user has been created user.put() # save to get a key for the user result, params = AuthController.create_unique_for_user(user.key) if not result: # not successful user.key.delete() raise params # this holds the error message else: return params # this holds accesskey and refresh token def sage_auth_authenticate_function(self, resource, **kwargs): result, params = AuthController.authenticate_client() if not result: # not successful raise params # this holds the error message else: return params # this holds the refresh token and the access token auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False) }
36.25
104
0.744828
0
0
0
0
0
0
0
0
372
0.285057
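Both controller functions above follow the same (result, params) convention returned by AuthController: params carries the payload on success and the exception on failure. A standalone illustration of that convention; unwrap and the toy values are not part of the module:

def unwrap(result, params):
    # mirrors the branch used in both controller functions above
    if not result:
        raise params  # params holds the error
    return params     # params holds e.g. the access key and refresh token

print(unwrap(True, {'access_key': 'abc', 'refresh_token': 'xyz'}))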
53b66284f62a337ba9819ca33a9acfe617722619
1,785
py
Python
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
AngsarM/QuanGuru
5db6105f843bbc78c2d5b1547e32d494fbe10b8d
[ "BSD-3-Clause" ]
9
2021-05-23T06:30:45.000Z
2021-12-27T13:33:54.000Z
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
cahitkargi/QuanGuru
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
[ "BSD-3-Clause" ]
26
2022-03-18T02:40:54.000Z
2022-03-25T07:00:25.000Z
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py
cahitkargi/QuanGuru
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
[ "BSD-3-Clause" ]
5
2021-05-23T06:30:24.000Z
2022-02-04T02:40:08.000Z
import random as rn import numpy as np # open system dynamics of a qubit and compare numerical results with the analytical calculations # NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and analytical # calculations. # currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling Liouville method without giving # any collapse operators. For now, only looks at excited state populations # TODO this is an unfinished test. below two tests are the same and it actually is not testing open system dynamics. decayRateSM = rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every # step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above. def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical'])
45.769231
117
0.773109
0
0
0
0
0
0
0
0
868
0.486275
53b6650eb89817fbb23a4d021878f43cb942eb48
538
py
Python
QuGraphy/state.py
Mohamed-ShehabEldin/QuGraphy
c43fe7128f91e7bd383393f5ff16ff613077e8d7
[ "Apache-2.0" ]
null
null
null
QuGraphy/state.py
Mohamed-ShehabEldin/QuGraphy
c43fe7128f91e7bd383393f5ff16ff613077e8d7
[ "Apache-2.0" ]
null
null
null
QuGraphy/state.py
Mohamed-ShehabEldin/QuGraphy
c43fe7128f91e7bd383393f5ff16ff613077e8d7
[ "Apache-2.0" ]
null
null
null
#this file will contain functions related to vector states
from .density import *  #we may use some functions from them and dependencies


def row2col(vec):
    if np.ndim(vec) == 1:
        col = []
        for element in vec:
            col.append([element])
        return col
    else:
        return vec


def check_state(state):
    state = row2col(state)  # keep the converted column vector (was discarded before)
    if np.shape(state)[1] > 1:
        raise Exception("invalid state, not a vector!")
    if not np.isclose(schmidt_inner(state, state), 1):  # avoid exact float comparison
        raise Exception("invalid state, not normalized!")
25.619048
79
0.633829
0
0
0
0
0
0
0
0
176
0.327138
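A self-contained check of the column-vector conversion above; row2col is restated here so the snippet runs on its own (check_state additionally needs schmidt_inner from the density module, which is not shown in this record):

import numpy as np

def row2col(vec):
    # same behavior as the helper above
    if np.ndim(vec) == 1:
        return [[element] for element in vec]
    return vec

state = [1 / np.sqrt(2), 1j / np.sqrt(2)]
print(row2col(state))            # [[0.707...], [0.707...j]]
print(np.shape(row2col(state)))  # (2, 1)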
53b6dc5235fed6c6481fdc6dfb8b105b1f554689
4,480
py
Python
uncoverml/metadata_profiler.py
GeoscienceAustralia/uncoverml
672914377afa4ad1c069fcd4845bc45f80132e36
[ "Apache-2.0" ]
34
2017-03-14T23:59:58.000Z
2022-03-03T18:04:25.000Z
uncoverml/metadata_profiler.py
GeoscienceAustralia/uncoverml
672914377afa4ad1c069fcd4845bc45f80132e36
[ "Apache-2.0" ]
106
2017-03-22T00:26:10.000Z
2022-03-12T00:19:08.000Z
uncoverml/metadata_profiler.py
GeoscienceAustralia/uncoverml
672914377afa4ad1c069fcd4845bc45f80132e36
[ "Apache-2.0" ]
21
2017-05-04T04:02:39.000Z
2022-02-04T00:55:18.000Z
#! /usr/bin/env python """ Description: Gather Metadata for the uncover-ml prediction output results: Reference: email 2019-05-24 Overview Creator: (person who generated the model) Model; Name: Type and date: Algorithm: Extent: Lat/long - location on Australia map? SB Notes: None of the above is required as this information will be captured in the yaml file. Model inputs: 1. Covariates - list (in full) 2. Targets: path to shapefile: csv file SB Notes: Only covaraite list file. Targets and path to shapefile is not required as this is available in the yaml file. May be the full path to the shapefile has some merit as one can specify partial path. Model performance JSON file (in full) SB Notes: Yes Model outputs 1. Prediction grid including path 2. Quantiles Q5; Q95 3. Variance: 4. Entropy: 5. Feature rank file 6. Raw covariates file (target value - covariate value) 7. Optimisation output 8. Others ?? SB Notes: Not required as these are model dependent, and the metadata will be contained in each of the output geotif file. Model parameters: 1. YAML file (in full) 2. .SH file (in full) SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml which can be dumped in the metadata. CreationDate: 31/05/19 Developer: [email protected] Revision History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional description """ # import section import os import sys import json import pickle import datetime import getpass import socket from ppretty import ppretty import uncoverml class MetadataSummary(): """ Summary Description of the ML prediction output """ def __init__(self, model, config): self.model = model self.description = "Metadata for the ML results" username = getpass.getuser() hostname = socket.gethostname() self.creator = username self.computename = hostname self.datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") self.version = uncoverml.__version__ model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) self.config = config self.name = self.config.name # 'demo_regression' self.algorithm = self.config.algorithm # 'svr' self.extent = ((-10, 100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics = None def write_metadata(self, out_filename): """ write the metadata for this prediction result, into a human-readable txt file. 
        in order to make the ML results traceable and reproducible (provenance)
        """

        with open(out_filename, 'w') as outf:
            outf.write("# Metadata Profile for the Prediction Results")

            outf.write("\n\n############ Software Environment ###########\n\n")

            outf.write("Creator = %s \n"%self.creator)
            outf.write("Computer = %s \n"%self.computename)
            outf.write("ML Algorithm = %s \n"%self.algorithm)
            outf.write("Version = %s\n"%self.version)
            outf.write("Datetime = %s \n"%self.datetime)

            outf.write("\n\n############ Performance Metrics ###########\n\n")

            if self.model_performance_metrics:
                for keys, values in self.model_performance_metrics.items():
                    outf.write("%s = %s\n"%(keys, values))

            outf.write("\n\n############ Configuration ###########\n\n")

            conf_str = ppretty(self.config, indent='  ', width=200, seq_length=200,
                               show_protected=True, show_static=True,
                               show_properties=True, show_address=False,
                               str_length=200)
            outf.write(conf_str)

            outf.write("\n\n############ Model ###########\n\n")

            model_str = ppretty(self.model, indent='  ', show_protected=True,
                                show_static=True, show_address=False,
                                str_length=50)
            outf.write(model_str)

            outf.write("\n\n############ The End of Metadata ###########\n\n")

        return out_filename
32.941176
206
0.620536
2,813
0.627902
0
0
0
0
0
0
2,256
0.503571
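The profiler above leans on ppretty to serialise arbitrary objects into the report. The same call pattern works standalone; the Config class here is a stand-in for illustration, not part of uncoverml:

from ppretty import ppretty

class Config:
    name = 'demo_regression'
    algorithm = 'svr'

print(ppretty(Config(), indent='  ', show_protected=True,
              show_static=True, show_address=False, str_length=50))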
53b7cf475edf549606a00bf10c8b39ab817c0d94
72
py
Python
testjpkg/jsonify/hij.py
thisisishara/test_pypi_cli
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
[ "MIT" ]
null
null
null
testjpkg/jsonify/hij.py
thisisishara/test_pypi_cli
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
[ "MIT" ]
null
null
null
testjpkg/jsonify/hij.py
thisisishara/test_pypi_cli
15b22ed8943a18a6d9de9ee4ba6a84249a633e2e
[ "MIT" ]
null
null
null
print("hiiiiiiiiiiiiiiiix") def sayhi(): print("2nd pkg said hi")
12
28
0.666667
0
0
0
0
0
0
0
0
37
0.513889
53b7d55368f6a08688dd3db11b258ac91759ec48
2,447
py
Python
asv_bench/benchmarks/algorithms.py
raspbian-packages/pandas
fb33806b5286deb327b2e0fa96aedf25a6ed563f
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause" ]
null
null
null
asv_bench/benchmarks/algorithms.py
raspbian-packages/pandas
fb33806b5286deb327b2e0fa96aedf25a6ed563f
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause" ]
null
null
null
asv_bench/benchmarks/algorithms.py
raspbian-packages/pandas
fb33806b5286deb327b2e0fa96aedf25a6ed563f
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause" ]
null
null
null
import numpy as np
import pandas as pd
from pandas.util import testing as tm


class algorithm(object):
    goal_time = 0.2

    def setup(self):
        N = 100000

        self.int_unique = pd.Int64Index(np.arange(N * 5))
        # cache is_unique
        self.int_unique.is_unique

        self.int = pd.Int64Index(np.arange(N).repeat(5))
        self.float = pd.Float64Index(np.random.randn(N).repeat(5))

        # Convenience naming.
        self.checked_add = pd.core.nanops._checked_add_with_arr

        self.arr = np.arange(1000000)
        self.arrpos = np.arange(1000000)
        self.arrneg = np.arange(-1000000, 0)
        self.arrmixed = np.array([1, -1]).repeat(500000)

    def time_int_factorize(self):
        self.int.factorize()

    def time_float_factorize(self):
        # fixed: previously factorized self.int, duplicating the benchmark above
        self.float.factorize()

    def time_int_unique_duplicated(self):
        self.int_unique.duplicated()

    def time_int_duplicated(self):
        self.int.duplicated()

    def time_float_duplicated(self):
        self.float.duplicated()

    def time_add_overflow_pos_scalar(self):
        self.checked_add(self.arr, 1)

    def time_add_overflow_neg_scalar(self):
        self.checked_add(self.arr, -1)

    def time_add_overflow_zero_scalar(self):
        self.checked_add(self.arr, 0)

    def time_add_overflow_pos_arr(self):
        self.checked_add(self.arr, self.arrpos)

    def time_add_overflow_neg_arr(self):
        self.checked_add(self.arr, self.arrneg)

    def time_add_overflow_mixed_arr(self):
        self.checked_add(self.arr, self.arrmixed)


class hashing(object):
    goal_time = 0.2

    def setup(self):
        N = 100000

        self.df = pd.DataFrame(
            {'A': pd.Series(tm.makeStringIndex(100).take(
                np.random.randint(0, 100, size=N))),
             'B': pd.Series(tm.makeStringIndex(10000).take(
                 np.random.randint(0, 10000, size=N))),
             'D': np.random.randn(N),
             'E': np.arange(N),
             'F': pd.date_range('20110101', freq='s', periods=N),
             'G': pd.timedelta_range('1 day', freq='s', periods=N),
             })
        self.df['C'] = self.df['B'].astype('category')
        self.df.iloc[10:20] = np.nan

    def time_frame(self):
        self.df.hash()

    def time_series_int(self):
        self.df.E.hash()

    def time_series_string(self):
        self.df.B.hash()

    def time_series_categorical(self):
        self.df.C.hash()
26.89011
67
0.612178
2,364
0.966081
0
0
0
0
0
0
95
0.038823
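asv discovers these classes by convention: setup() runs once, then each method whose name starts with time_ is timed against goal_time. A rough standalone harness mimicking that discovery, with a single perf_counter measurement rather than asv's repeat-and-average logic:

import time

def run_benchmarks(bench_cls):
    bench = bench_cls()
    bench.setup()
    for name in sorted(dir(bench)):
        if name.startswith('time_'):
            start = time.perf_counter()
            getattr(bench, name)()
            print(f"{name}: {time.perf_counter() - start:.4f}s")

# e.g. run_benchmarks(algorithm) with the class defined above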
53b8d7ac852024e1d3318cbf747bac9b0ef35d8a
28,857
py
Python
RMtools_1D/do_RMsynth_1D.py
lh-astro/RM-Tools
ac64cc41b2f696f21ee7dd001303cbad1ff71114
[ "MIT" ]
null
null
null
RMtools_1D/do_RMsynth_1D.py
lh-astro/RM-Tools
ac64cc41b2f696f21ee7dd001303cbad1ff71114
[ "MIT" ]
null
null
null
RMtools_1D/do_RMsynth_1D.py
lh-astro/RM-Tools
ac64cc41b2f696f21ee7dd001303cbad1ff71114
[ "MIT" ]
null
null
null
#!/usr/bin/env python #=============================================================================# # # # NAME: do_RMsynth_1D.py # # # # PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I, Q & U spectrum.# # # # MODIFIED: 16-Nov-2018 by J. West # # MODIFIED: 23-October-2019 by A. Thomson # # # #=============================================================================# # # # The MIT License (MIT) # # # # Copyright (c) 2015 - 2018 Cormac R. Purcell # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # #=============================================================================# import sys import os import time import traceback import json import math as m import numpy as np import matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools will no longer run with Python 2! Please use Python 3.') exit() C = 2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType="variance", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None): """Run RM synthesis on 1D data. Args: data (list): Contains frequency and polarization data as either: [freq_Hz, I, Q, U, dI, dQ, dU] freq_Hz (array_like): Frequency of each channel in Hz. I (array_like): Stokes I intensity in each channel. Q (array_like): Stokes Q intensity in each channel. U (array_like): Stokes U intensity in each channel. dI (array_like): Error in Stokes I intensity in each channel. dQ (array_like): Error in Stokes Q intensity in each channel. dU (array_like): Error in Stokes U intensity in each channel. 
or [freq_Hz, q, u, dq, du] freq_Hz (array_like): Frequency of each channel in Hz. q (array_like): Fractional Stokes Q intensity (Q/I) in each channel. u (array_like): Fractional Stokes U intensity (U/I) in each channel. dq (array_like): Error in fractional Stokes Q intensity in each channel. du (array_like): Error in fractional Stokes U intensity in each channel. Kwargs: polyOrd (int): Order of polynomial to fit to Stokes I spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel size (rad/m^2). nSamples (float): Number of samples across the RMSF. weightType (str): Can be "variance" or "uniform" "variance" -- Weight by uncertainty in Q and U. "uniform" -- Weight uniformly (i.e. with 1s) fitRMSF (bool): Fit a Gaussian to the RMSF? noStokesI (bool: Is Stokes I data provided? phiNoise_radm2 (float): ???? nBits (int): Precision of floating point numbers. showPlots (bool): Show plots? debug (bool): Turn on debugging messages & plots? verbose (bool): Verbosity. log (function): Which logging function to use. units (str): Units of data. Returns: mDict (dict): Summary of RM synthesis results. aDict (dict): Data output by RM synthesis. """ # Sanity checks if not os.path.exists(args.dataFile[0]): print("File does not exist: '%s'." % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2*nBits) # freq_Hz, I, Q, U, dI, dQ, dU try: if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data if verbose: log("... success.") except Exception: if verbose: log("...failed.") # freq_Hz, q, u, dq, du try: if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data if verbose: log("... 
success.") noStokesI = True except Exception: if verbose: log("...failed.") if debug: log(traceback.format_exc()) sys.exit() if verbose: log("Successfully read in the Stokes spectra.") # If no Stokes I present, create a dummy spectrum = unity if noStokesI: if verbose: log("Warn: no Stokes I data in use.") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr = (dQArr + dUArr)/2.0 # Fit the Stokes I spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr = QArr, UArr = UArr, dIArr = dIArr, dQArr = dQArr, dUArr = dUArr, polyOrd = polyOrd, verbose = True, debug = debug) # Plot the data and the Stokes I model fit if verbose: log("Plotting the input data and spectral index fit.") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr, qArr = qArr, uArr = uArr, dIArr = dIArr, dqArr = dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr, fig = specFig, units = units) # Use the custom navigation toolbar (does not work on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass # Display the figure # if not plt.isinteractive(): # specFig.show() # DEBUG (plot the Q, U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM # Faraday depth sampling. Zero always centred on middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." 
% (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting as 1/sigma^2 or all 1s (uniform) if weightType=="variance": weightArr = 1.0 / np.power(dQUArr, 2.0) else: weightType = "uniform" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log("Weight type is '%s'." % weightType) startTime = time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits = nBits, verbose = verbose, log = log) # Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits, verbose = verbose, log = log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime) if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime) # Determine the Stokes I value at lam0Sq_m2 from the Stokes I model # Multiply the dirty FDF by Ifreq0 to recover the PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux # Calculate the theoretical noise in the FDF !!Old formula only works for wariance weights! weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure the parameters of the dirty FDF # Use the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict["Ifreq0"] = toscalar(Ifreq0) mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]]) mDict["IfitStat"] = fitDict["fitStatus"] mDict["IfitChiSqRed"] = fitDict["chiSqRed"] mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2) mDict["freq0_Hz"] = toscalar(freq0_Hz) mDict["fwhmRMSF"] = toscalar(fwhmRMSF) mDict["dQU"] = toscalar(nanmedian(dQUArr)) mDict["dFDFth"] = toscalar(dFDFth) mDict["units"] = units if fitDict["fitStatus"] >= 128: log("WARNING: Stokes I model contains negative values!") elif fitDict["fitStatus"] >= 64: log("Caution: Stokes I model has low signal-to-noise.") #Add information on nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels])) mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels])) mDict["N_channels"]=good_channels.size mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the q and u spectra mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr = uArr, dqArr = dqArr, duArr = duArr, fracPol = mDict["fracPol"], psi0_deg = mDict["polAngle0Fit_deg"], RM_radm2 = mDict["phiPeakPIfit_rm2"]) mDict.update(mD) # Debugging plots for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD["xArrQ"], qArr=pD["yArrQ"], dqArr=pD["dyArrQ"], 
sigmaAddqArr=pD["sigmaAddArrQ"], chiSqRedqArr=pD["chiSqRedArrQ"], probqArr=pD["probArrQ"], uArr=pD["yArrU"], duArr=pD["dyArrU"], sigmaAdduArr=pD["sigmaAddArrU"], chiSqReduArr=pD["chiSqRedArrU"], probuArr=pD["probArrU"], mDict=mDict) if saveOutput: if verbose: print("Saving debug plots:") outFilePlot = prefixOut + ".debug-plots.pdf" if verbose: print("> " + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show() #add array dictionary aDict = dict() aDict["phiArr_radm2"] = phiArr_radm2 aDict["phi2Arr_radm2"] = phi2Arr_radm2 aDict["RMSFArr"] = RMSFArr aDict["freqArr_Hz"] = freqArr_Hz aDict["weightArr"]=weightArr aDict["dirtyFDF"]=dirtyFDF if verbose: # Print the results to the screen log() log('-'*80) log('RESULTS:\n') log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"])) log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"], mDict["dPolAngleFit_deg"])) log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"], mDict["dPolAngle0Fit_deg"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"], mDict["dPhiPeakPIfit_rm2"])) log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9)) log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units)) log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"], mDict["dAmpPeakPIfit"],units)) log('QU Noise = %.4g %s' % (mDict["dQU"],units)) log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units)) log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units)) log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units)) log('FDF SNR = %.4g ' % (mDict["snrPIfit"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"], mDict["dSigmaAddPlusQ"], mDict["dSigmaAddMinusQ"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"], mDict["dSigmaAddPlusU"], mDict["dSigmaAddMinusU"])) log() log('-'*80) # Plot the RM Spread Function and dirty FDF if showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict["phiPeakPIfit_rm2"], fig = fdfFig, units = units) # Use the custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass # Display the figure # fdfFig.show() # Pause if plotting enabled if showPlots: plt.show() elif saveOutput or debug: if verbose: print("Saving RMSF and dirty FDF plot:") outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf" if verbose: print("> " + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if verbose: print "Press <RETURN> to exit ...", # input() return mDict, aDict def readFile(dataFile, nBits, verbose=True, debug=False): """ Read the I, Q & U data from the ASCII file. Inputs: datafile (str): relative or absolute path to file. nBits (int): number of bits to store the data as. verbose (bool): Print verbose messages to terminal? debug (bool): Print full traceback in case of failure? Returns: data (list of arrays): List containing the columns found in the file. If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU], else [freq_Hz, q, u, dq, du]. """ # Default data types dtFloat = "float" + str(nBits) dtComplex = "complex" + str(2*nBits) # Output prefix is derived from the input file name # Read the data-file. Format=space-delimited, comments="#". 
if verbose: print("Reading the data file '%s':" % dataFile) # freq_Hz, I, Q, U, dI, dQ, dU try: if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print("... success.") data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr] except Exception: if verbose: print("...failed.") # freq_Hz, q, u, dq, du try: if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = \ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print("... success.") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI = True except Exception: if verbose: print("...failed.") if debug: print(traceback.format_exc()) sys.exit() if verbose: print("Successfully read in the Stokes spectra.") return data def saveOutput(outdict, arrdict, prefixOut, verbose): # Save the dirty FDF, RMSF and weight array to ASCII files if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.") outFile = prefixOut + "_FDFdirty.dat" if verbose: print("> %s" % outFile) np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag))) outFile = prefixOut + "_RMSF.dat" if verbose: print("> %s" % outFile) np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag))) outFile = prefixOut + "_weight.dat" if verbose: print("> %s" % outFile) np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"]))) # Save the measurements to a "key=value" text file outFile = prefixOut + "_RMsynth.dat" if verbose: print("Saving the measurements on the FDF in 'key=val' and JSON formats.") print("> %s" % outFile) FH = open(outFile, "w") for k, v in outdict.items(): FH.write("%s=%s\n" % (k, v)) FH.close() outFile = prefixOut + "_RMsynth.json" if verbose: print("> %s" % outFile) json.dump(dict(outdict), open(outFile, "w")) #-----------------------------------------------------------------------------# def main(): import argparse """ Start the function to perform RM-synthesis if called from the command line. """ # Help string to be shown using the -h option descStr = """ Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII file. The Stokes I spectrum is first fit with a polynomial and the resulting model used to create fractional q = Q/I and u = U/I spectra. The ASCII file should the following columns, in a space separated format: [freq_Hz, I, Q, U, I_err, Q_err, U_err] OR [freq_Hz, Q, U, Q_err, U_err] To get outputs, one or more of the following flags must be set: -S, -p, -v. 
""" epilog_text=""" Outputs with -S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U] _RMSF.dat: Computed RMSF [Phi, Q, U] _RMsynth.dat: list of derived parameters for RM spectrum (approximately equivalent to -v flag output) _RMsynth.json: dictionary of derived parameters for RM spectrum _weight.dat: Calculated channel weights [freq_Hz, weight] """ # Parse the command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1, help="ASCII file containing Stokes spectra & errors.") parser.add_argument("-t", dest="fitRMSF", action="store_true", help="fit a Gaussian to the RMSF [False]") parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None, help="absolute max Faraday depth sampled [Auto].") parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None, help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)") parser.add_argument("-s", dest="nSamples", type=float, default=10, help="number of samples across the RMSF lobe [10].") parser.add_argument("-w", dest="weightType", default="variance", help="weighting [inverse variance] or 'uniform' (all 1s).") parser.add_argument("-o", dest="polyOrd", type=int, default=2, help="polynomial order to fit to I spectrum [2].") parser.add_argument("-i", dest="noStokesI", action="store_true", help="ignore the Stokes I spectrum [False].") parser.add_argument("-b", dest="bit64", action="store_true", help="use 64-bit floating point precision [False (uses 32-bit)]") parser.add_argument("-p", dest="showPlots", action="store_true", help="show the plots [False].") parser.add_argument("-v", dest="verbose", action="store_true", help="verbose output [False].") parser.add_argument("-S", dest="saveOutput", action="store_true", help="save the arrays and plots [False].") parser.add_argument("-D", dest="debug", action="store_true", help="turn on debugging messages & plots [False].") parser.add_argument("-U", dest="units", type=str, default="Jy/beam", help="Intensity units of the data. [Jy/beam]") args = parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]): print("File does not exist: '%s'." % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) # Set the floating point precision nBits = 32 if args.bit64: nBits = 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis on the spectra mDict, aDict = run_rmsynth(data = data, polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits, showPlots = args.showPlots, debug = args.debug, verbose = verbose, units = args.units, prefixOut = prefixOut, args = args, ) if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if __name__ == "__main__": main()
45.159624
111
0.524517
0
0
0
0
0
0
0
0
12,885
0.446512
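A minimal sketch of driving run_rmsynth from the file above on a synthetic spectrum; the import path, injected Faraday depth, fractional polarization, and noise levels are illustrative assumptions, not values from the source:

import numpy as np
# from rmsynth1d import run_rmsynth   # assumed import path for the file above

C = 2.998e8                                   # speed of light [m/s]
freq_Hz = np.linspace(1.1e9, 1.4e9, 64)       # synthetic channel frequencies
lam2 = (C / freq_Hz) ** 2
RM_radm2, psi0 = 50.0, np.deg2rad(30.0)       # injected Faraday depth and angle
I = np.ones_like(freq_Hz)
Q = 0.1 * np.cos(2.0 * (psi0 + RM_radm2 * lam2))
U = 0.1 * np.sin(2.0 * (psi0 + RM_radm2 * lam2))
dI = dQ = dU = np.full_like(freq_Hz, 0.01)    # constant channel noise

# data follows the 7-column layout accepted by readFile() above
# mDict, aDict = run_rmsynth([freq_Hz, I, Q, U, dI, dQ, dU], polyOrd=2)
# print(mDict["phiPeakPIfit_rm2"])            # should recover ~50 rad/m^2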
53b911e92af8c5251a19a68b93418217d94e2790
310
py
Python
cogdl/modules/conv/__init__.py
awesome-archive/cogdl
0a354eaaaf851e7218197508e7e85a81d3fb5753
[ "MIT" ]
8
2020-06-03T00:55:09.000Z
2022-01-23T16:06:56.000Z
cogdl/modules/conv/__init__.py
awesome-archive/cogdl
0a354eaaaf851e7218197508e7e85a81d3fb5753
[ "MIT" ]
null
null
null
cogdl/modules/conv/__init__.py
awesome-archive/cogdl
0a354eaaaf851e7218197508e7e85a81d3fb5753
[ "MIT" ]
6
2020-06-03T00:55:11.000Z
2022-03-16T01:14:36.000Z
from .message_passing import MessagePassing from .gcn_conv import GCNConv from .gat_conv import GATConv from .se_layer import SELayer from .aggregator import Meanaggregator from .maggregator import meanaggr __all__ = [ 'MessagePassing', 'GCNConv', 'GATConv', 'SELayer', 'Meanaggregator' ]
20.666667
43
0.751613
0
0
0
0
0
0
0
0
59
0.190323
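One consequence of the __init__ above worth noting: meanaggr is imported but not listed in __all__, so a wildcard import exposes only the five listed names (standard Python semantics, not behaviour specific to cogdl):

# from cogdl.modules.conv import *          # binds MessagePassing, GCNConv,
#                                           # GATConv, SELayer, Meanaggregator
# from cogdl.modules.conv import meanaggr   # still works as an explicit import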
53b93c021c611ea7b35c2a4e8768e23aee0fabe0
1,449
py
Python
netket/utils/jax.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
1
2022-01-31T15:19:09.000Z
2022-01-31T15:19:09.000Z
netket/utils/jax.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
26
2021-08-06T15:27:57.000Z
2022-03-30T16:55:18.000Z
netket/utils/jax.py
gpescia/MyNetKet
958510966a5870d9d491de0628903cf1fc210921
[ "Apache-2.0" ]
1
2021-04-25T15:47:32.000Z
2021-04-25T15:47:32.000Z
# Copyright 2021 The NetKet Authors - All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable from . import struct def get_afun_if_module(mod_or_fun) -> Callable: """Returns the apply function if it's a module. Does nothing otherwise.""" if hasattr(mod_or_fun, "apply"): return mod_or_fun.apply else: return mod_or_fun @struct.dataclass class WrappedApplyFun: """Wraps a callable to be a module-like object with the method `apply`.""" apply: Callable """The wrapped callable.""" def __repr__(self): return f"{type(self).__name__}(apply={self.apply}, hash={hash(self)})" def wrap_afun(mod_or_fun): """Wraps a callable to be a module-like object with the method `apply`. Does nothing if it already has an apply method. """ if hasattr(mod_or_fun, "apply"): return mod_or_fun else: return WrappedApplyFun(mod_or_fun)
30.829787
78
0.712215
258
0.178054
0
0
276
0.190476
0
0
974
0.672188
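A minimal sketch of the wrapping behaviour defined above; the import path mirrors the file location netket/utils/jax.py and is an assumption, not confirmed by the source:

# from netket.utils.jax import wrap_afun, get_afun_if_module

def log_psi(params, x):          # any plain callable stands in for a model
    return x

# wrapped = wrap_afun(log_psi)             # -> WrappedApplyFun(apply=log_psi)
# wrap_afun(wrapped) is wrapped            # True: objects with .apply pass through
# get_afun_if_module(wrapped) is log_psi   # True: unwraps back to the callable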
53b95578f3b9aa9d904006c7f7edb3a1fb45bd48
10,933
py
Python
geetools/batch/featurecollection.py
Kungreye/gee_tools
d0712ac78410250c41503ca08075f536d58d2ef3
[ "MIT" ]
null
null
null
geetools/batch/featurecollection.py
Kungreye/gee_tools
d0712ac78410250c41503ca08075f536d58d2ef3
[ "MIT" ]
null
null
null
geetools/batch/featurecollection.py
Kungreye/gee_tools
d0712ac78410250c41503ca08075f536d58d2ef3
[ "MIT" ]
null
null
null
# coding=utf-8
import ee

from . import utils
import json
import csv

from .. import tools


def fromShapefile(filename, crs=None, start=None, end=None):
    """ Convert an ESRI file (.shp and .dbf must be present) to a
    ee.FeatureCollection

    At the moment this only works for shapes with fewer than 1000 records and
    doesn't handle complex shapes.

    :param filename: the name of the file. If the shape is not in the same
        path as the script, specify a path instead.
    :type filename: str
    :param start: the first record to read (defaults to 0)
    :return: the FeatureCollection
    :rtype: ee.FeatureCollection
    """
    import shapefile

    wgs84 = ee.Projection('EPSG:4326')
    # read the filename
    reader = shapefile.Reader(filename)
    fields = reader.fields[1:]
    field_names = [field[0] for field in fields]
    field_types = [field[1] for field in fields]
    types = dict(zip(field_names, field_types))
    features = []

    projection = utils.getProjection(filename) if not crs else crs
    # catch a string with format "EPSG:XXX"
    if isinstance(projection, str):
        if 'EPSG:' in projection:
            projection = projection.split(':')[1]
    projection = 'EPSG:{}'.format(projection)

    # filter records with start and end
    start = start if start else 0
    if not end:
        records = reader.shapeRecords()
        end = len(records)
    else:
        end = end + 1

    if (end-start) > 1000:
        msg = "Can't process more than 1000 records at a time. Found {}"
        raise ValueError(msg.format(end-start))

    for i in range(start, end):
        # atr = dict(zip(field_names, sr.record))
        sr = reader.shapeRecord(i)
        atr = {}
        for fld, rec in zip(field_names, sr.record):
            fld_type = types[fld]
            if fld_type == 'D':
                value = ee.Date(rec.isoformat()).millis().getInfo()
            elif fld_type in ['C', 'N', 'F']:
                value = rec
            else:
                continue
            atr[fld] = value
        geom = sr.shape.__geo_interface__
        if projection is not None:
            geometry = ee.Geometry(geom, projection) \
                .transform(wgs84, 1)
        else:
            geometry = ee.Geometry(geom)
        feat = ee.Feature(geometry, atr)
        features.append(feat)

    return ee.FeatureCollection(features)


def fromGeoJSON(filename=None, data=None, crs=None):
    """ Create a list of Features from a GeoJSON file. Returns a python tuple
    with ee.Feature inside. This is due to failing when attempting to create a
    FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
    it yourself by casting the result of this function to a ee.List or using it
    directly as a FeatureCollection argument.

    :param filename: the name of the file to load
    :type filename: str
    :param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if not there it will raise
        an error
    :type: crs: str
    :return: a tuple of features.
""" if filename: with open(filename, 'r') as geoj: content = geoj.read() geodict = json.loads(content) else: geodict = data features = [] # Get crs from GeoJSON if not crs: filecrs = geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part for part in splitcrs if part] try: if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates') if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None): """ Create a list of Features from a KML file. Return a python tuple with ee.Feature inside. This is due to failing when attempting to create a FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating it yourself casting the result of this function to a ee.List or using it directly as a FeatureCollection argument. :param filename: the name of the file to load :type filename: str :param crs: a coordinate reference system in EPSG format. If not specified it will try to get it from the geoJSON, and if not there it will rise an error :type: crs: str :return: a tuple of features. """ geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for feat in features: # remove styleUrl prop = feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') # remove Z value if needed geom = feat['geometry'] ty = geom['type'] if ty == 'GeometryCollection': geometries = geom['geometries'] for g in geometries: c = g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): """ Get the FeatureCollection as a dict object """ size = collection.size() condition = size.gte(4999) def greater(): size = collection.size() seq = tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1), seq) def over_limits(n): n = ee.List(n) ini = ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features'] for i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features'] feats = feats + feats_c content['features'] = feats return content def toGeoJSON(collection, name, path=None, split_at=4000): """ Export a FeatureCollection to a GeoJSON file :param collection: The collection to export :type collection: ee.FeatureCollection :param name: name of the resulting file :type name: str :param path: The path where to save the file. 
        If None, will be saved in the current folder
    :type path: str
    :param split_at: limit to avoid an EE Exception
    :type split_at: int
    :return: A GeoJSON (.geojson) file.
    :rtype: file
    """
    import json
    import os

    if not path:
        path = os.getcwd()

    # name (always define fname; the old slice check `name[-8:-1]` never
    # matched '.geojson', so names already carrying the extension got it twice)
    if not name.endswith('.geojson'):
        fname = name + '.geojson'
    else:
        fname = name

    content = toDict(collection, split_at)

    with open(os.path.join(path, fname), 'w') as thefile:
        thefile.write(json.dumps(content))

    return thefile


def toCSV(collection, filename, split_at=4000):
    """ Alternative to download a FeatureCollection as a CSV """
    d = toDict(collection, split_at)

    fields = list(d['columns'].keys())
    fields.append('geometry')

    features = d['features']

    ext = filename[-4:]
    if ext != '.csv':
        filename += '.csv'

    with open(filename, 'w') as thecsv:
        writer = csv.DictWriter(thecsv, fields)

        writer.writeheader()
        # write rows
        for feature in features:
            properties = feature['properties']
            fid = feature['id']
            geom = feature['geometry']['type']

            # match fields
            properties['system:index'] = fid
            properties['geometry'] = geom

            # write row
            writer.writerow(properties)

        return thecsv


def toLocal(collection, filename, filetype=None, selectors=None, path=None):
    """ Download a FeatureCollection to a local CSV or geoJSON file.
    This uses a different method than `toGeoJSON` and `toCSV`

    :param filetype: The filetype of download, either CSV or JSON. Defaults to CSV.
    :param selectors: The selectors that should be used to determine which
        attributes will be downloaded.
    :param filename: The name of the file to be downloaded
    """
    if not filetype:
        filetype = 'CSV'

    url = collection.getDownloadURL(filetype, selectors, filename)
    thefile = utils.downloadFile(url, filename, filetype, path)
    return thefile


def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
    """ This function can create folders and ImageCollections on the fly.
    The rest is the same as Export.table.toAsset. You can pass the same
    params as the original function

    :param table: the feature collection to upload
    :type table: ee.FeatureCollection
    :param assetPath: path to upload the image (only PATH, without
        filename)
    :type assetPath: str
    :param name: filename for the image (AssetID will be assetPath + name)
    :type name: str
    :return: the tasks
    :rtype: ee.batch.Task
    """
    # Check if the user is specified in the asset path
    is_user = (assetPath.split('/')[0] == 'users')
    if not is_user:
        user = ee.batch.data.getAssetRoots()[0]['id']
        assetPath = "{}/{}".format(user, assetPath)

    if create:
        # Recursively create the path
        path2create = assetPath  # '/'.join(assetPath.split('/')[:-1])
        utils.createAssets([path2create], 'Folder', True)

    # Asset ID (Path + name)
    assetId = '/'.join([assetPath, name])
    # Description
    description = utils.matchDescription(name)

    # Init task
    task = ee.batch.Export.table.toAsset(table, assetId=assetId,
                                         description=description, **kwargs)
    task.start()

    if verbose:
        print('Exporting {} to {}'.format(name, assetPath))

    return task
32.346154
83
0.611269
0
0
0
0
0
0
0
0
4,316
0.394768
53bae4caf0f5e1b3ae61fd16a27c99803d8b7c2e
1,357
py
Python
index.py
extwiii/Rock-paper-scissors-lizard-Spock
7a8eda9f168636a9878c91620e625997ba0994a8
[ "Apache-2.0" ]
1
2018-08-02T00:52:33.000Z
2018-08-02T00:52:33.000Z
index.py
extwiii/Rock-paper-scissors-lizard-Spock
7a8eda9f168636a9878c91620e625997ba0994a8
[ "Apache-2.0" ]
null
null
null
index.py
extwiii/Rock-paper-scissors-lizard-Spock
7a8eda9f168636a9878c91620e625997ba0994a8
[ "Apache-2.0" ]
null
null
null
# Rock-paper-scissors-lizard-Spock template # The key idea of this program is to equate the strings # "rock", "paper", "scissors", "lizard", "Spock" to numbers # as follows: # # 0 - rock # 1 - Spock # 2 - paper # 3 - lizard # 4 - scissors import random def name_to_number(name): if name == "rock": return 0 elif name == 'Spock': return 1 elif name == 'paper': return 2 elif name == 'lizard': return 3 elif name == 'scissors': return 4 else : return None def number_to_name(number): if number == 0: return "rock" elif number == 1: return 'Spock' elif number == 2: return 'paper' elif number == 3: return 'lizard' elif number == 4: return 'scissors' else : return None def rpsls(player_choice): print "" print "Player chooses",player_choice player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_choice = number_to_name(comp_number) print "Computer chooses",comp_choice diff = (player_number - comp_number)%5 if (diff == 1) or (diff == 2): print "Player wins!" elif (diff == 3) or (diff == 4): print "Computer wins!" else : print "Tie!" rpsls("rock") rpsls("Spock") rpsls("paper") rpsls("lizard") rpsls("scissors")
21.539683
59
0.590273
0
0
0
0
0
0
0
0
415
0.305822
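A worked check of the (player_number - comp_number) % 5 rule the comments above describe: each choice beats the two numbers cyclically below it (Python 3 syntax here, whereas the file itself uses Python 2 print statements):

assert (0 - 4) % 5 == 1   # rock(0) vs scissors(4): diff 1 -> "Player wins!"
assert (1 - 2) % 5 == 4   # Spock(1) vs paper(2):   diff 4 -> "Computer wins!"
assert (3 - 3) % 5 == 0   # lizard(3) vs lizard(3): diff 0 -> "Tie!"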
53bd7ca2bf66bb072074f8694f4fa68fad92a150
9,067
py
Python
libs/clustering/ensembles/utils.py
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
3
2021-08-17T21:59:19.000Z
2022-03-08T15:46:24.000Z
libs/clustering/ensembles/utils.py
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
4
2021-08-04T13:57:24.000Z
2021-10-11T14:57:15.000Z
libs/clustering/ensembles/utils.py
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
null
null
null
""" Contains functions to generate and combine a clustering ensemble. """ import numpy as np import pandas as pd from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score as nmi from tqdm import tqdm from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): """ It generates an ensemble from the data given a set of clusterers (a clusterer is an instance of a clustering algorithm with a fixed set of parameters). Args: data: A numpy array, pandas dataframe, or any other structure supported by the clusterers as data input. clusterers: A dictionary with clusterers specified in this format: { 'k-means #1': KMeans(n_clusters=2), ... } attributes: A list of attributes to save in the final dataframe; for example, including "n_clusters" will extract this attribute from the estimator and include it in the final dataframe returned. affinity_matrix: If the clustering algorithm is AgglomerativeClustering (from sklearn) and the linkage method is different than ward (which only support euclidean distance), the affinity_matrix is given as data input to the estimator instead of data. Returns: A pandas DataFrame with all the partitions generated by the clusterers. Columns include the clusterer name/id, the partition, the estimator parameters (obtained with the get_params() method) and any other attribute specified. """ ensemble = [] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition # # for agglomerative clustering both data and affinity_matrix should be # given; for ward linkage, data is used, and for the other linkage # methods the affinity_matrix is used if (type(clus_obj).__name__ == "AgglomerativeClustering") and ( clus_obj.linkage != "ward" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove from partition noisy points (for example, if using DBSCAN) partition[partition < 0] = np.nan # get number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1 if n_clusters <= 1: reset_estimator(clus_obj) continue res = pd.Series( { "clusterer_id": clus_name, "clusterer_params": str(clus_obj.get_params()), "partition": partition, } ) for attr in attributes: if attr == "n_clusters" and not hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for some estimators such as DBSCAN this is needed, because otherwise # the estimator saves references of huge data structures not needed in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index("clusterer_id") def get_ensemble_distance_matrix(ensemble, n_jobs=1): """ Given an ensemble, it computes the coassociation matrix (a distance matrix for all objects using the ensemble information). For each object pair, the coassociation matrix contains the percentage of times the pair of objects was clustered together in the ensemble. Args: ensemble: A numpy array representing a set of clustering solutions on the same data. Each row is a clustering solution (partition) and columns are objects. n_jobs: The number of jobs used by the pairwise_distance matrix from sklearn. 
    Returns:
        A numpy array representing a square distance matrix for all objects
        (coassociation matrix).
    """

    def _compare(x, y):
        xy = np.array([x, y]).T
        xy = xy[~np.isnan(xy).any(axis=1)]
        return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]

    return pairwise_distances(
        ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
    )


def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
    """
    It combines a clustering ensemble using a set of methods that the user can
    specify. Each of these methods combines the ensemble and returns a single
    partition. This function returns the combined partition that maximizes the
    selection criterion.

    Args:
        ensemble: a clustering ensemble (rows are partitions, columns are objects).
        k: the final number of clusters for the combined partition.
        methods: a list of methods to apply on the ensemble; each returns a combined
            partition.
        selection_criterion: a function that represents the selection criterion; this
            function has to accept an ensemble as the first argument, and a partition
            as the second one.
        n_jobs: number of jobs.
        use_tqdm: enables/disables the use of tqdm to show a progress bar.

    Returns:
        Returns a tuple: (partition, best method name, best criterion value)
    """
    from concurrent.futures import ProcessPoolExecutor, as_completed

    methods_results = {}

    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}

        for future in tqdm(
            as_completed(tasks),
            total=len(tasks),
            disable=(not use_tqdm),
            ncols=100,
        ):
            method_name = tasks[future]
            part = future.result()

            criterion_value = selection_criterion(ensemble, part)

            methods_results[method_name] = {
                "partition": part,
                "criterion_value": criterion_value,
            }

    # select the best performing method according to the selection criterion
    best_method = max(
        methods_results, key=lambda x: methods_results[x]["criterion_value"]
    )
    best_method_results = methods_results[best_method]

    return (
        best_method_results["partition"],
        best_method,
        best_method_results["criterion_value"],
    )


def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
    """
    Runs a consensus clustering method on the ensemble data, obtains the
    consolidated partition with the desired number of clusters, and computes
    a series of performance measures.

    Args:
        method_func: A consensus function (first argument is either the ensemble or
            the coassociation matrix derived from the ensemble).
        ensemble_data: A numpy array with the ensemble data that will be given to the
            specified method. For evidence accumulation methods, this is the
            coassociation matrix (a square matrix with the distance between object
            pairs derived from the ensemble).
        ensemble: A numpy array representing the ensemble (partitions in rows, objects
            in columns).
        k: The number of clusters to obtain from the ensemble data using the specified
            method.
        kwargs: Other parameters passed to `method_func`.

    Returns:
        It returns a tuple with the data partition derived from the ensemble data using
        the specified method, and some performance measures of this partition.
""" part = method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in ensemble ] ) ami_values = np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in ensemble ] ) ari_values = np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in ensemble ] ) performance_values = { "ari_mean": np.mean(ari_values), "ari_median": np.median(ari_values), "ari_std": np.std(ari_values), "ami_mean": np.mean(ami_values), "ami_median": np.median(ami_values), "ami_std": np.std(ami_values), "nmi_mean": np.mean(nmi_values), "nmi_median": np.median(nmi_values), "nmi_std": np.std(nmi_values), } return part, performance_values
35.837945
88
0.644645
0
0
0
0
0
0
0
0
5,095
0.561928
53bdcb0790280882aedd07e5cb2cef0159140f96
7,236
py
Python
backend/chart/application/service/employees.py
toshi-click/chart_app
10577d7835554a93688ae0c58ecb25fbe2925bec
[ "BSD-3-Clause" ]
null
null
null
backend/chart/application/service/employees.py
toshi-click/chart_app
10577d7835554a93688ae0c58ecb25fbe2925bec
[ "BSD-3-Clause" ]
7
2020-10-25T05:34:54.000Z
2020-12-02T11:31:44.000Z
backend/chart/application/service/employees.py
toshi-click/chart_app
10577d7835554a93688ae0c58ecb25fbe2925bec
[ "BSD-3-Clause" ]
1
2021-04-30T16:51:43.000Z
2021-04-30T16:51:43.000Z
import logging

from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime

from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments

"""
A class that operates on the employees table.
"""


class EmployeesService(AppLogicBaseService):
    def __init__(self):
        super().__init__()

    @staticmethod
    @transaction.atomic()
    def create_employees():
        """
        Create Employees records
        """
        service = EmployeesService()

        for emp_no in range(1, 11):
            if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:
                if emp_no <= 5:
                    department_no = DepartmentType.SALES.value
                else:
                    department_no = DepartmentType.MARKETING.value
                select_model = Departments.objects.filter(department_no=department_no).values("id").first()
                # Insert the record
                service._regist_employees(select_model['id'], emp_no)

    @staticmethod
    @transaction.atomic()
    def create_departments():
        """
        Create Departments records
        """
        service = EmployeesService()

        # Delete all existing records
        # A ForeignKey references this table, so use the delete() command
        Departments.objects.all().delete()

        for department_type in DepartmentType:
            department_no = department_type.value
            if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:
                # Insert the record
                service._regist_departments(department_no, department_type.en_name)

    @staticmethod
    @transaction.atomic()
    def update_employees():
        """
        Update Employees records
        """
        service = EmployeesService()

        # Narrow the queryset with filter()
        # gt: greater than (>), lt: less than (<)
        for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):
            employees_id = employees_item.id
            select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(
                "id").first()
            department_id = select_model['id']
            department_date_from = 20190903
            # Update the record
            service._update_employees_department(employees_id, department_id, department_date_from)

        # Narrow the queryset with filter()
        # gte: greater than or equal to (>=), lte: less than or equal to (<=)
        for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):
            employees_id = employees_item.id
            select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values("id").first()
            department_id = select_model['id']
            department_date_from = 20190905
            # Update the record
            service._update_employees_department(employees_id, department_id, department_date_from)

    @staticmethod
    def select_employees():
        """
        Query Employees records
        """
        # Specifying table_name__field_name results in an INNER JOIN
        # A query is issued every time the referenced table is accessed
        for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,
                                                       delete_flag=0):
            logging.debug("reference:emp_no={}".format(employees_item.emp_no))
            logging.debug("reference:department_no={}".format(employees_item.department.department_no))
            logging.debug("reference:department_name={}".format(employees_item.department.department_name))
            logging.debug("reference:first_name={}".format(employees_item.first_name))
            logging.debug("reference:last_name={}".format(employees_item.last_name))

        # select_related fetches the referenced rows up front and caches them
        # Only one query is issued
        for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related("department"):
            logging.debug("select_related:emp_no={}".format(employees_item.emp_no))
            logging.debug("select_related:first_name={}".format(employees_item.first_name))
            logging.debug("select_related:last_name={}".format(employees_item.last_name))
            logging.debug("select_related:department_no={}".format(employees_item.department.department_no))
            logging.debug("select_related:department_name={}".format(employees_item.department.department_name))

        # prefetch_related fetches the referenced rows and caches them
        # Two queries are issued and joined on the ForeignKey
        for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(
                "department__employees_set"):
            logging.debug("prefetch_related:emp_no={}".format(employees_item.emp_no))
            logging.debug("prefetch_related:first_name={}".format(employees_item.first_name))
            logging.debug("prefetch_related:last_name={}".format(employees_item.last_name))
            logging.debug("prefetch_related:department_no={}".format(employees_item.department.department_no))
            logging.debug("prefetch_related:department_name={}".format(employees_item.department.department_name))

    @staticmethod
    @transaction.atomic()
    def truncate_employees():
        """
        Truncate the table
        """
        cursor = connection.cursor()
        cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table))

    def _regist_employees(self, department_id, emp_no):
        """
        Insert an employees record
        """
        self.regist_model = Employees()
        self.regist_model.emp_no = emp_no
        self.regist_model.department_id = department_id
        self.regist_model.first_name = "first_name_" + str(emp_no).zfill(3)
        self.regist_model.last_name = "last_name_" + str(emp_no).zfill(3)
        self.regist_model.gender = GenderType.MAN.value
        self.regist_model.department_date_from = "20190902"
        self.regist_model.delete_flag = 0
        self.regist_model.regist_dt = localtime(timezone.now())
        self.regist_model.update_dt = localtime(timezone.now())
        self.regist_model.save()

        return self.regist_model.id

    def _regist_departments(self, department_no, department_name):
        """
        Insert a departments record
        """
        self.regist_model = Departments()
        self.regist_model.department_no = department_no
        self.regist_model.department_name = department_name
        self.regist_model.delete_flag = 0
        self.regist_model.regist_dt = localtime(timezone.now())
        self.regist_model.update_dt = localtime(timezone.now())
        self.regist_model.save()

    def _update_employees_department(self, employees_id, department_id, department_date_from):
        """
        Update the department assignment
        """
        self.update_model = Employees()
        self.update_model.pk = employees_id
        self.update_model.department_id = department_id
        self.update_model.department_date_from = department_date_from
        self.update_model.update_dt = localtime(timezone.now())
        self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
43.590361
116
0.674268
7,321
0.941729
0
0
5,420
0.697196
0
0
1,855
0.238616
53bf55da72ae86acb1c699435bc12016f38e84ea
146
py
Python
DataQualityTester/views/pages.py
pwyf/data-quality-tester
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
[ "MIT" ]
null
null
null
DataQualityTester/views/pages.py
pwyf/data-quality-tester
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
[ "MIT" ]
53
2017-04-07T09:41:38.000Z
2022-02-11T14:26:46.000Z
DataQualityTester/views/pages.py
pwyf/iati-simple-tester
ef7f06ebbd4dd45e6ca76d93a3f624abc33d961c
[ "MIT" ]
3
2017-07-19T13:43:14.000Z
2019-10-29T15:25:49.000Z
from flask import render_template def home(): return render_template('upload.html') def about(): return render_template('about.html')
14.6
41
0.726027
0
0
0
0
0
0
0
0
25
0.171233
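A hedged sketch of how these view functions could be wired up; the app factory and URL rules here are assumptions, not taken from the repository:

# from flask import Flask
# from DataQualityTester.views import pages
#
# app = Flask(__name__)
# app.add_url_rule('/', 'home', pages.home)
# app.add_url_rule('/about', 'about', pages.about)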
53bfb5244dff3d80fd05051eac4247280b733cea
5,761
py
Python
hastakayit_gui.py
roselight/Image-Recognition-with-OpenCv
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
[ "MIT" ]
2
2020-04-10T21:53:52.000Z
2020-04-11T12:24:35.000Z
hastakayit_gui.py
roselight/Image-Recognition-with-OpenCv
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
[ "MIT" ]
null
null
null
hastakayit_gui.py
roselight/Image-Recognition-with-OpenCv
4d0607f37bc80ee0b00790cdcbb0a22c76852ac4
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime

# Set up the SQL connection to the database.
db = mysql.connector.connect(
  host="localhost",
  user="root",
  passwd="12345",
  database="cilth_vt"
)
cursor = db.cursor()

class Ui_MainWindow2(QMainWindow):
    def setupUi2(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(600, 205)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("../heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        MainWindow.setWindowIcon(icon)
        MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.btn_kayit = QtWidgets.QPushButton(self.centralwidget)
        self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("../avatar.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.btn_kayit.setIcon(icon1)
        self.btn_kayit.setObjectName("btn_kayit")
        self.btn_kayit.clicked.connect(self.kayitekle)
        self.btn_cikis = QtWidgets.QPushButton(self.centralwidget)
        self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31))
        self.btn_cikis.setObjectName("btn_cikis")
        self.btn_cikis.clicked.connect(self.close)
        self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.lbl_htc.setObjectName("lbl_htc")
        self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1)
        self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.lbl_hadsoyad.setObjectName("lbl_hadsoyad")
        self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1)
        self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.lbl_hcinsiyet.setObjectName("lbl_hcinsiyet")
        self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1)
        self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1)
        self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1)
        self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.lbl_hdt.setObjectName("lbl_hdt")
        self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1)
        self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
        self.dt_hdt.setObjectName("dt_hdt")
        self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
        self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def kayitekle(self):
        # Sends the values read from the line edits to the INSERT query.
        h_tc=self.lineEdit.text()
        h_ads=self.lineEdit_2.text()
        h_csyt=self.lineEdit_3.text()
        h_dt=self.dt_hdt.text()
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        QMessageBox.setWindowIcon(self, icon)
        try:
            hasta_ekle = ("INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)")
            cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt))
            db.commit()
            veri = cursor.rowcount
        except Exception:
            veri=2
        if (veri == 1):
            QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarılı.")
        else:
            QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarısız")

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı"))
        self.btn_kayit.setText(_translate("MainWindow", "ONAYLA"))
        self.btn_cikis.setText(_translate("MainWindow", "İPTAL"))
        self.lbl_htc.setText(_translate("MainWindow", "TC Kimlik No:"))
        self.lbl_hadsoyad.setText(_translate("MainWindow", "Hasta Adı Soyadı:"))
        self.lbl_hcinsiyet.setText(_translate("MainWindow", "Cinsiyet: "))
        self.lbl_hdt.setText(_translate("MainWindow", "Doğum Tarihi:"))
        self.dt_hdt.setDisplayFormat(_translate("MainWindow", "yyyy.MM.dd"))

if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow2()
    ui.setupUi2(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
44.658915
117
0.69849
4,984
0.859903
0
0
0
0
0
0
994
0.171498
53c0dd2b4f081d4c8d070b26922f68bf139eaa76
4,138
py
Python
.travis/manage_daily_builds.py
loonwerks/AGREE
58640ab89aaa3c72ccca0b8c80cf96d1815981da
[ "BSD-3-Clause" ]
5
2020-12-28T15:41:04.000Z
2021-07-31T09:07:28.000Z
.travis/manage_daily_builds.py
loonwerks/AGREE
58640ab89aaa3c72ccca0b8c80cf96d1815981da
[ "BSD-3-Clause" ]
89
2020-01-27T17:16:00.000Z
2022-03-31T09:57:25.000Z
.travis/manage_daily_builds.py
loonwerks/AGREE
58640ab89aaa3c72ccca0b8c80cf96d1815981da
[ "BSD-3-Clause" ]
5
2020-02-25T00:33:21.000Z
2021-01-02T07:23:11.000Z
#!/usr/bin/env python3
'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).

Permission is hereby granted, free of charge, to any person obtaining a copy of
this data, including any software or models in source or binary form, as well as
any drawings, specifications, and documentation (collectively "the Data"), to deal
in the Data without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Data, and to permit persons to whom the Data is furnished to do so, subject to the
following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Data.

THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, SPONSORS,
DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
'''

import os
import re
import sys

from github3 import GitHub

from pprint import pformat

GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'

PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')

def manage_daily_builds(sname):
    print('Managing builds matching %s' % (sname))
    # obtain git handle
    gh = GitHub(GITHUB_API, token=AUTH_TOKEN)
    repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
    # get list of releases
    releases = repository.releases()
    # extract keys and sort by build date
    release_keys = {x.id : x.created_at for x in releases if sname in x.name}
    sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
    print('%s' % (pformat(sorted_keys)))
    # filter to obtain the keys to delete
    delete_keys = [v[0] for v in sorted_keys[2:]]
    print('Deleting releases: %s' % (pformat(delete_keys)))
    # iterate, deleting the releases and corresponding tags
    for rel in releases:
        print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))
        if rel.id in delete_keys and rel.tag_name is not None:
            print('  deleting release id %d and tag %s.'
                  % (rel.id, rel.tag_name))
            rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))
            rel.delete()
            if rel_tag_ref is not None:
                print('  deleting tag %s' % (rel_tag_ref.ref))
                rel_tag_ref.delete()
        else:
            # Look for stale files in the release
            assets = rel.assets()
            print('In release %s found assets:' % (rel.name))
            for asset in assets:
                match = PRODUCT_ASSET_PATTERN.search(asset.name)
                print('  asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
            build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])
            latest_build_time = build_times[-1] if build_times else None
            print('Latest build time is %s' % (latest_build_time))
            for asset in assets:
                match = PRODUCT_ASSET_PATTERN.search(asset.name)
                # print('  asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
                if match is not None:
                    asset_build_time = match.group(1)
                    if asset_build_time != latest_build_time:
                        print('deleting stale asset %s' % (asset.name))
                        asset.delete()

if __name__ == '__main__':
    manage_daily_builds(sys.argv[1])
48.682353
137
0.678347
0
0
0
0
0
0
0
0
2,038
0.492508
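The retention policy above, reduced to plain data as a self-contained check: keep the two newest matching releases, delete the rest (real created_at values are datetimes; ISO strings sort the same way here).

release_keys = {101: '2021-03-01', 102: '2021-03-02', 103: '2021-03-03'}
sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
delete_keys = [v[0] for v in sorted_keys[2:]]
assert delete_keys == [101]   # 103 and 102 (the two newest) survive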
53c1b1b92893f74554831ae30476aefdb5464370
5,743
py
Python
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
KaihuiLiang/ParlAI
fb5c92741243756516fa50073d34e94ba0b6981e
[ "MIT" ]
null
null
null
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
KaihuiLiang/ParlAI
fb5c92741243756516fa50073d34e94ba0b6981e
[ "MIT" ]
1
2020-11-12T02:20:02.000Z
2020-11-12T02:20:02.000Z
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
MoPei/ParlAI
321bc857f2765cd76d5134531a802442ac4c9f5c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Test components of specific crowdsourcing tasks. """ import json import os import unittest import pandas as pd import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): """ Test the analysis code for the static turn annotations task. """ def test_compile_results(self): """ Test compiling results on a dummy set of data. """ with testing_utils.tempdir() as tmpdir: # Define expected stdout # Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' ) # Save a file of gold annotations gold_annotations = { "1_0_5": { "bucket_0": False, "bucket_1": False, "bucket_2": False, "bucket_3": False, "bucket_4": False, "none_all_good": True, }, "1_1_5": { "bucket_0": False, "bucket_1": False, "bucket_2": False, "bucket_3": False, "bucket_4": True, "none_all_good": False, }, "2_0_5": { "bucket_0": False, "bucket_1": True, "bucket_2": False, "bucket_3": False, "bucket_4": False, "none_all_good": False, }, "2_1_5": { "bucket_0": False, "bucket_1": False, "bucket_2": False, "bucket_3": False, "bucket_4": True, "none_all_good": False, }, } with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f) # Run compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } ) args = parser.parse_args([]) with testing_utils.capture_output() as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout = output.getvalue() # Check the output against what it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that the saved results file is what it should be sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop the 'folder' column, which contains a system-dependent path string actual_results_rel_path = [ obj for obj in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if not actual_results.equals(expected_results): raise ValueError( f'\n\n\tExpected results:\n{expected_results.to_csv()}' f'\n\n\tActual results:\n{actual_results.to_csv()}' ) except ImportError: pass if __name__ == "__main__": 
unittest.main()
37.292208
93
0.482675
5,079
0.884381
0
0
0
0
0
0
1,389
0.24186
53c2457c0d1c8b05d10bdccfca2b07b59c9a6dd9
57
py
Python
scripts/selectors.py
bartongroup/slivka-bio
049aee943503963ce5c9b14267fe001edd8e0125
[ "Apache-2.0" ]
null
null
null
scripts/selectors.py
bartongroup/slivka-bio
049aee943503963ce5c9b14267fe001edd8e0125
[ "Apache-2.0" ]
3
2021-09-01T16:47:02.000Z
2022-02-09T09:01:31.000Z
scripts/selectors.py
bartongroup/slivka-bio
049aee943503963ce5c9b14267fe001edd8e0125
[ "Apache-2.0" ]
null
null
null
def example_selector(*args, **kwargs): return "default"
19
55
0.736842
0
0
0
0
0
0
0
0
9
0.157895
53c38f978d506f03ad72b1b6b50a34e76cbf6a7b
3,937
py
Python
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
mith1979/ansible_automation
013dfa67c6d91720b787fadb21de574b6e023a26
[ "Apache-2.0" ]
1
2020-10-14T00:06:54.000Z
2020-10-14T00:06:54.000Z
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
mith1979/ansible_automation
013dfa67c6d91720b787fadb21de574b6e023a26
[ "Apache-2.0" ]
null
null
null
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
mith1979/ansible_automation
013dfa67c6d91720b787fadb21de574b6e023a26
[ "Apache-2.0" ]
2
2015-08-06T07:45:48.000Z
2017-01-04T17:47:16.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Chatham Financial <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rabbitmq_plugin short_description: Adds or removes plugins to RabbitMQ description: - Enables or disables RabbitMQ plugins version_added: "1.1" author: Chris Hoffman options: names: description: - Comma-separated list of plugin names required: true default: null aliases: [name] new_only: description: - Only enable missing plugins - Does not disable plugins that are not in the names list required: false default: "no" choices: [ "yes", "no" ] state: description: - Specify if plugins are to be enabled or disabled required: false default: enabled choices: [enabled, disabled] prefix: description: - Specify a custom install prefix to a Rabbit required: false version_added: "1.3" default: null ''' EXAMPLES = ''' # Enables the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def __init__(self, module): self.module = module if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list() def get_all(self): return self._exec(['list', '-E', '-m'], True) def enable(self, name): self._exec(['enable', name]) def disable(self, name): self._exec(['disable', name]) def main(): arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',') new_only = module.params['new_only'] state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled = [] disabled = [] if state == 'enabled': if not new_only: for plugin in enabled_plugins: if plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names: if name not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in enabled_plugins: if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) > 0 or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module snippets from ansible.module_utils.basic import * main()
30.053435
88
0.654559
846
0.214884
0
0
0
0
0
0
1,854
0.470917
53c4401601b96a14bafd9a44d9c96d488de53fcf
7,279
py
Python
vitrage/datasources/static/driver.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
vitrage/datasources/static/driver.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
vitrage/datasources/static/driver.py
HoonMinJeongUm/Hunmin-vitrage
37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 - Nokia, ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from itertools import chain from six.moves import reduce from oslo_log import log from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils import file as file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase): # base fields are required for all entities, others are treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache = [] @staticmethod def _is_valid_config(config): """check for validity of configuration""" # TODO(yujunz) check with yaml schema or reuse template validation return StaticFields.DEFINITIONS in config @staticmethod def get_event_types(): return [] def enrich_event(self, event, event_type): pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for path in files], [])) def _get_and_cache_changed_entities(self): changed_entities = [] new_entities = self._get_all_entities() for new_entity in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity: # Add modified entities if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add new entities changed_entities.append(new_entity.copy()) # Add deleted entities for old_entity in self.entities_cache: if not self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities @classmethod def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning("Skipped invalid config (possible obsoleted): {}" .format(path)) return [] definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def _pack(cls, entities, relationships): entities_dict = {} for entity in entities: cls._pack_entity(entities_dict, entity) for rel in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def 
_pack_entity(cls, entities_dict, entity):
        static_id = entity[StaticFields.STATIC_ID]
        if static_id not in entities_dict:
            metadata = {key: value for key, value in entity.items()
                        if key not in cls.BASE_FIELDS}
            entities_dict[static_id] = entity
            entity[StaticFields.RELATIONSHIPS] = []
            entity[StaticFields.METADATA] = metadata
        else:
            LOG.warning("Skipped duplicated entity: {}".format(entity))

    @classmethod
    def _pack_rel(cls, entities_dict, rel):
        source_id = rel[StaticFields.SOURCE]
        target_id = rel[StaticFields.TARGET]
        if source_id == target_id:  # self pointing relationship
            entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)
        else:
            source, target = entities_dict[source_id], entities_dict[target_id]
            source[StaticFields.RELATIONSHIPS].append(
                cls._expand_neighbor(rel, target))

    @staticmethod
    def _expand_neighbor(rel, neighbor):
        """Expand config id to neighbor entity

        rel={'source': 's1', 'target': 'h1', 'relationship_type': 'attached'}
        neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
        result={'relationship_type': 'attached', 'source': 's1',
                'target': {'static_id': 'h1', 'vitrage_type': 'host.nova',
                           'id': 1}}
        """
        rel = rel.copy()
        if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:
            rel[StaticFields.SOURCE] = neighbor
        elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:
            rel[StaticFields.TARGET] = neighbor
        else:
            # TODO(yujunz) raise exception and ignore invalid relationship
            LOG.error("Invalid neighbor {} for relationship {}"
                      .format(neighbor, rel))
            return None
        return rel

    @staticmethod
    def _find_entity(search_entity, entities):
        # naive implementation since we don't expect many static entities
        for entity in entities:
            if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \
                    and entity[StaticFields.ID] == \
                    search_entity[StaticFields.ID]:
                return entity

    @staticmethod
    def _equal_entities(old_entity, new_entity):
        # TODO(iafek): compare also the relationships
        return old_entity.get(StaticFields.TYPE) == \
            new_entity.get(StaticFields.TYPE) and \
            old_entity.get(StaticFields.ID) == \
            new_entity.get(StaticFields.ID) and \
            old_entity.get(StaticFields.NAME) == \
            new_entity.get(StaticFields.NAME) and \
            old_entity.get(StaticFields.STATE) == \
            new_entity.get(StaticFields.STATE)
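# A minimal, standalone illustration (plain dicts, not part of the original
# driver) of the neighbor expansion documented in _expand_neighbor above;
# the ids mirror that docstring's example.
if __name__ == '__main__':
    rel = {'source': 's1', 'target': 'h1', 'relationship_type': 'attached'}
    neighbor = {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
    expanded = dict(rel)
    if expanded['target'] == neighbor['static_id']:
        # the bare config id is replaced by the full neighbor entity
        expanded['target'] = neighbor
    print(expanded['target']['vitrage_type'])  # host.nova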
37.911458
79
0.637588
6,255
0.859321
0
0
3,980
0.546778
0
0
1,546
0.212392
53c47f75ab180de02752f1ea49f9b87157a860e1
2,406
py
Python
napari/layers/shapes/mesh.py
marshuang80/napari
10f1d0f39fe9ccd42456c95458e2f23b59450f02
[ "BSD-3-Clause" ]
null
null
null
napari/layers/shapes/mesh.py
marshuang80/napari
10f1d0f39fe9ccd42456c95458e2f23b59450f02
[ "BSD-3-Clause" ]
null
null
null
napari/layers/shapes/mesh.py
marshuang80/napari
10f1d0f39fe9ccd42456c95458e2f23b59450f02
[ "BSD-3-Clause" ]
null
null
null
import numpy as np


class Mesh:
    """Contains meshes of shapes that will ultimately get rendered.

    Attributes
    ----------
    vertices : np.ndarray
        Qx2 array of vertices of all triangles for shapes including
        edges and faces
    vertices_centers : np.ndarray
        Qx2 array of centers of vertices of triangles for shapes. For vertices
        corresponding to faces these are the same as the actual vertices. For
        vertices corresponding to edges these values should be added to a
        scaled `vertices_offsets` to get the actual vertex positions.
        The scaling corresponds to the width of the edge
    vertices_offsets : np.ndarray
        Qx2 array of offsets of vertices of triangles for shapes. For vertices
        corresponding to faces these are 0. For vertices corresponding to
        edges these values should be scaled and added to the
        `vertices_centers` to get the actual vertex positions. The scaling
        corresponds to the width of the edge
    vertices_index : np.ndarray
        Qx2 array of the index (0, ..., N-1) of each shape that each vertex
        corresponds and the mesh type (0, 1) for face or edge.
    triangles : np.ndarray
        Px3 array of vertex indices that form the mesh triangles
    triangles_index : np.ndarray
        Px2 array of the index (0, ..., N-1) of each shape that each triangle
        corresponds and the mesh type (0, 1) for face or edge.
    triangles_colors : np.ndarray
        Px4 array of the rgba color of each triangle
    triangles_z_order : np.ndarray
        Length P array of the z order of each triangle. Must be a permutation
        of (0, ..., P-1)

    Extended Summary
    ----------
    _types : list
        Length two list of the different mesh types corresponding to faces and
        edges
    """

    _types = ['face', 'edge']

    def __init__(self):
        self.clear()

    def clear(self):
        """Resets mesh data
        """
        self.vertices = np.empty((0, 2))
        self.vertices_centers = np.empty((0, 2))
        self.vertices_offsets = np.empty((0, 2))
        self.vertices_index = np.empty((0, 2), dtype=int)
        self.triangles = np.empty((0, 3), dtype=np.uint32)
        self.triangles_index = np.empty((0, 2), dtype=int)
        self.triangles_colors = np.empty((0, 4))
        self.triangles_z_order = np.empty((0), dtype=int)
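# A minimal usage sketch (not part of the original napari file): it shows how
# the class docstring says edge-vertex positions are recovered, i.e. centers
# plus width-scaled offsets. The width value here is illustrative only.
if __name__ == '__main__':
    mesh = Mesh()
    mesh.vertices_centers = np.array([[0.0, 0.0], [1.0, 0.0]])
    mesh.vertices_offsets = np.array([[0.0, 0.5], [0.0, -0.5]])
    width = 2.0  # assumed edge width
    # actual positions = centers + width * offsets, per the docstring
    positions = mesh.vertices_centers + width * mesh.vertices_offsets
    print(positions)  # [[ 0.  1.] [ 1. -1.]]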
38.806452
79
0.646301
2,384
0.990856
0
0
0
0
0
0
1,849
0.768495
53c5781ea07cd092d5d5320da909512506460ef4
184
py
Python
python/helpers.py
cdacos/astrophysics_with_a_pc
b0017856005a4771fbd89c8137fb320b72b1b633
[ "FSFAP" ]
null
null
null
python/helpers.py
cdacos/astrophysics_with_a_pc
b0017856005a4771fbd89c8137fb320b72b1b633
[ "FSFAP" ]
null
null
null
python/helpers.py
cdacos/astrophysics_with_a_pc
b0017856005a4771fbd89c8137fb320b72b1b633
[ "FSFAP" ]
1
2021-03-14T23:13:28.000Z
2021-03-14T23:13:28.000Z
import sys

def start_parameter(text, i):
    if len(sys.argv) > i:
        print('{0}{1}'.format(text, sys.argv[i]))
        return float(sys.argv[i])
    else:
        # `raw_input` exists only in Python 2; `input` is the Python 3 equivalent.
        return float(input(text))
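# A minimal usage sketch (hypothetical script, not part of the original repo):
# reads the first positional argument when given, otherwise prompts for it.
if __name__ == '__main__':
    radius = start_parameter('Radius: ', 1)  # e.g. `python helpers.py 2.5`
    print(radius * 2)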
20.444444
45
0.63587
0
0
0
0
0
0
0
0
8
0.043478
53c5eb302f7f03de564020dfecea1ce909aa994c
12,916
py
Python
configs/docker-ubuntu-img/para.py
MarioCarrilloA/stx-packaging
56cf32c4d65ba20f9317102d922ce946a800527d
[ "Apache-2.0" ]
1
2019-06-02T00:28:03.000Z
2019-06-02T00:28:03.000Z
configs/docker-ubuntu-img/para.py
MarioCarrilloA/stx-packaging
56cf32c4d65ba20f9317102d922ce946a800527d
[ "Apache-2.0" ]
11
2019-04-05T16:04:54.000Z
2019-08-23T19:24:49.000Z
configs/docker-ubuntu-img/para.py
MarioCarrilloA/stx-packaging
56cf32c4d65ba20f9317102d922ce946a800527d
[ "Apache-2.0" ]
5
2019-02-18T23:11:30.000Z
2019-04-29T07:42:31.000Z
#!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright © 2014 Osamu Aoki

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read

###########################################################################
# undefined environment variable -> ''
def env(var):
    try:
        return os.environ[var]
    except KeyError:
        return ''

#######################################################################
# Initialize parameters
#######################################################################
def para(para):
    debmail = env('DEBEMAIL')
    if not debmail:
        #debmail = os.getlogin() + '@localhost'
        debmail = pwd.getpwuid(os.getuid())[0] + '@localhost'
    debfullname = env('DEBFULLNAME')
    if not debfullname:
        # os.getlogin may not work well: #769392
        #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0]
        debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0]
    #######################################################################
    # command line setting
    #######################################################################
    p = argparse.ArgumentParser(
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description = '''\
{0}: make Debian source package    Version: {1}
{2}

{0} helps to build the Debian package from the upstream source.
Normally, this is done as follows:
 * The upstream tarball is downloaded as the package-version.tar.gz file.
 * It is untared to create many files under the package-version/ directory.
 * {0} is invoked in the package-version/ directory possibly without any arguments.
 * Files in the package-version/debian/ directory are manually adjusted.
 * dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is invoked in the package-version/ directory to make debian packages.

Argument may need to be quoted to protect from the shell.
'''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action = 'count', default = 0, help = 'scan source for copyright+license text and exit') ck.add_argument( '-k', '--kludge', action = 'count', default = 0, help = 'compare debian/copyright with the source and exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action = 'store_true', default = False, help = 'make a native source package without .orig.tar.gz') sp.add_argument( '-a', '--archive', type = str, action = 'store', default = '', help = 'use the upstream source tarball directly (-p, -u, -z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action = 'store_true', default = False, help = 'run "make dist" equivalent first to generate upstream tarball and use it') sp.add_argument( '-t', '--tar', action = 'store_true', default = False, help = 'run "tar" to generate upstream tarball and use it') p.add_argument( '-p', '--package', action = 'store', default = '', help = 'set the Debian package name', metavar = 'package') p.add_argument( '-u', '--upstreamversion', action = 'store', default = '', help = 'set the upstream package version', metavar = 'version') p.add_argument( '-r', '--revision', action = 'store', default = '', help = 'set the Debian package revision', metavar = 'revision') p.add_argument( '-z', '--targz', action = 'store', default = '', help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b', '--binaryspec', action = 'store', default = '', help = 'set binary package specs as comma separated list of "binarypackage":"type" pairs, e.g., in full form "foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev" or in short form ",-doc,libfoo1,libfoo1-dbg, libfoo-dev". Here, "binarypackage" is the binary package name; and optional "type" is chosen from "bin", "data", "dbg", "dev", "doc", "lib", "perl", "python", "python3", "ruby", and "script". If "type" is not specified but obvious, it is set by "binarypackage". 
Otherwise it is set to "bin" for the compiled ELF binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email', action = 'store', default = debmail, help = 'set e-mail address', metavar = '[email protected]') p.add_argument( '-f', '--fullname', action = 'store', default = debfullname, help = 'set the fullname', metavar = '"firstname lastname"') # p.add_argument( # '-g', # '--gui', # action = 'store_true', # default = False, # help = 'run GUI configuration') # # -h : used by argparse for --help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default = '', action = 'store', help = 'invoke package build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action = 'store_true', default = False, help = 'run "dpkg-depcheck" to judge build dependencies and identify file paths') p.add_argument( '-l', '--license', default = '', action = 'store', help = 'add formatted license to debian/copyright', metavar = '"license_file"') p.add_argument( '-m', '--monoarch', action = 'store_true', default = False, help = 'force packages to be non-multiarch') p.add_argument( '-o', '--option', default = '', action = 'store', help = 'read optional parameters from "file"', metavar = '"file"') p.add_argument( '-q', '--quitearly', action = 'store_true', default = False, help='quit early before creating files in the debian directory') p.add_argument( '-s', '--spec', action = 'store_true', default = False, help = 'use upstream spec') p.add_argument( '-v', '--version', action = 'store_true', default = False, help = 'show version information') p.add_argument( '-w', '--with', action = 'store', default = '', dest = 'withargs', help = 'set additional "dh --with" option arguments', metavar = 'args') p.add_argument( '-x', '--extra', default = '', action = 'store', help = 'generate extra configuration files as templates', metavar = '[01234]') p.add_argument( '-y', '--yes', action = 'count', default = 0, help = '"force yes" for all prompts') p.add_argument( '-L', '--local', action = 'store_true', default = False, help='generate configuration files for the local package') p.add_argument( '-P', '--pedantic', action = 'store_true', default = False, help='pedantically check auto-generated files') p.add_argument( '-T', '--tutorial', action = 'store_true', default = False, help='output tutorial comment lines in template files') args = p.parse_args() ####################################################################### # Set parameter values ####################################################################### ############################################# -a if args.archive: para['archive'] = True para['tarball'] = args.archive else: para['archive'] = False para['tarball'] = '' ############################################# para['binaryspec'] = args.binaryspec # -b para['copyright'] = min(args.copyright, 6) # -c if para['copyright'] >=4: para['copyright'] = 3 - para['copyright'] # 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive para['dist'] = args.dist # -d para['email'] = args.email # -e para['fullname'] = args.fullname # -f # para['gui'] = args.gui # -g para['invoke'] = args.invoke # -i para['judge'] = args.judge # -j if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge # -k ############################################# -l # --license: args.license -> para['license'] as set if args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] = 
set(args.license.split(','))
    #############################################
    para['monoarch'] = args.monoarch        # -m
    para['native'] = args.native            # -n
    para['package'] = args.package.lower()  # -p
    #############################################
    para['quitearly'] = args.quitearly      # -q
    para['revision'] = args.revision        # -r
    para['spec'] = args.spec                # -s
    para['tar'] = args.tar                  # -t
    para['version'] = args.upstreamversion  # -u
    para['print_version'] = args.version    # -v
    ############################################# -w
    # --with: args.withargs -> para['dh_with'] as set
    if args.withargs == '':
        para['dh_with'] = set()  # default is empty set
    else:
        para['dh_with'] = set(args.withargs.split(','))
    #############################################
    para['extra'] = args.extra              # -x
    para['yes'] = min(args.yes, 2)          # -y
            # 0: ask, 1: yes, 2: no
    para['targz'] = args.targz              # -z
    para['local'] = args.local              # -L
    para['pedantic'] = args.pedantic        # -P
    para['tutorial'] = args.tutorial        # -T
    ############################################# -o
    if args.option:
        exec(debmake.read.read(args.option))
    #######################################################################
    # return command line parameters
    #######################################################################
    return para

#######################################################################
# Test code
#######################################################################
if __name__ == '__main__':
    # para() requires a seed dict; these are the keys it reads above
    # (the values here are placeholders for the test run).
    seed = {
        'program_name': 'debmake',
        'program_version': '0.0',
        'program_copyright': 'Copyright 2014 Osamu Aoki',
        'override': set(),
    }
    for p, v in para(seed).items():
        print("para['{}'] = \"{}\"".format(p, v))
38.440476
554
0.477083
0
0
0
0
0
0
0
0
6,851
0.530386
53c6b101ead41851286a75be3bcca965a4128b2f
6,164
py
Python
build/lib/jet_django/views/model.py
lukejamison/jet-dasboard
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
[ "MIT" ]
193
2018-08-27T06:10:48.000Z
2022-03-08T13:04:55.000Z
build/lib/jet_django/views/model.py
lukejamison/jet-dasboard
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
[ "MIT" ]
23
2018-10-21T15:05:41.000Z
2020-12-20T15:18:58.000Z
build/lib/jet_django/views/model.py
lukejamison/jet-dasboard
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
[ "MIT" ]
38
2018-10-31T16:19:25.000Z
2022-02-10T05:08:24.000Z
from django.core.exceptions import NON_FIELD_ERRORS from rest_framework import status, viewsets, serializers from rest_framework.decorators import list_route from rest_framework.response import Response from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import GroupFilter from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model queryset = build_queryset pagination_class = CustomPageNumberPagination filter_class = build_filter_class authentication_classes = () permission_classes = (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action == 'aggregate': return AggregateSerializer elif self.action == 'group': return GroupSerializer elif self.action == 'retrieve': return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer, 
y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self, *args, **kwargs): """ Return the serializer instance that should be used for validating and deserializing input, and for serializing output. """ serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i = 1 for instance in build_queryset: setattr(instance, ordering_field, i) instance.save() i += 1 return Response({}) for action in build_actions: def route(self, request): form = action(data=request.data) if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except Exception as e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result}) decorator = list_route(methods=['post']) route = decorator(route) setattr(Viewset, action._meta.name, route) return Viewset
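# A hypothetical request sketch (not part of the original module): the
# aggregate and group endpoints read their parameters from the query string.
# The route prefix and the model/field names below are assumptions for
# illustration only.
#
#   GET /models/order/aggregate/?_y_func=sum&_y_column=price
#   GET /models/order/group/?_x_column=status&_y_func=count
#
# Both return JSON built by AggregateSerializer / GroupSerializer above.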
36.91018
161
0.638384
4,602
0.746593
0
0
2,782
0.45133
0
0
510
0.082738
53c79195c421ab20eafd11d18287a51c1a99fb79
779
py
Python
python_minecraft_tut_2021/weatherCraft.py
LeGamermc/ursina_tutorials
f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0
[ "MIT" ]
13
2021-09-01T01:38:13.000Z
2022-03-29T01:43:50.000Z
python_minecraft_tut_2021/weatherCraft.py
LeGamermc/ursina_tutorials
f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0
[ "MIT" ]
14
2021-08-01T05:00:22.000Z
2022-02-03T21:53:23.000Z
python_minecraft_tut_2021/weatherCraft.py
LeGamermc/ursina_tutorials
f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0
[ "MIT" ]
31
2021-08-09T04:08:11.000Z
2022-03-23T11:06:15.000Z
""" Weather functions. """ from ursina import color, window, time from nMap import nMap class Weather: def __init__(this, rate=1): this.red = 0 this.green = 200 this.blue = 211 this.darkling = 0 this.rate = rate this.towardsNight = 1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this): this.darkling -= ( this.rate * this.towardsNight * time.dt) if this.darkling < 0: this.towardsNight *= -1 this.darkling = 0 this.setSky()
22.911765
50
0.519897
689
0.884467
0
0
0
0
0
0
26
0.033376
53c796e3204469330950f66fd76505dd80903be6
8,086
py
Python
davenetgame/dispatch/dispatcher.py
davefancella/davenetgame
f16c36539a3898ab4a021e63feef7fe497e5bc69
[ "Apache-2.0" ]
null
null
null
davenetgame/dispatch/dispatcher.py
davefancella/davenetgame
f16c36539a3898ab4a021e63feef7fe497e5bc69
[ "Apache-2.0" ]
null
null
null
davenetgame/dispatch/dispatcher.py
davefancella/davenetgame
f16c36539a3898ab4a021e63feef7fe497e5bc69
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 ''' Copyright 2016 Dave Fancella Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import threading, time from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import connection ## @file dispatcher # # This file contains the standard, generic EventDispatcher class. It's the one you use if # the library doesn't support your preferred game engine, or if you'd rather manage the library # independently of your game engine. ## This is the standard EventDispatcher. class EventDispatcher(DispatcherBase): pass ## This is a special server-oriented EventDispatcher that provides for an interactive console # on the server when run in a terminal. This is probably most useful for testing the library, # though it's not unheard of for a server to run in a terminal and have a console. class EventDispatcherServer(DispatcherBase): __console = None __consolecommands = None def __init__(self, **args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands = [] # Register the standard commands available to every game server. self.RegisterCommand('show', self.consoleShow, "show (connections)", "Show whatever you want to see.") self.RegisterCommand('help', self.consoleHelp, "help [command]", "print this helpful text. Alternately, type in a command to see its helpful text.") self.RegisterCommand('quit', self.consoleQuit, "quit", "Quit the server.") def Start(self): self.__console.Start() super().Start() def Update(self, timestep): try: while self.__console.HasPending(): msg = self.__console.pop() args = msg.split(" ") command = args.pop(0) command = command.lower() # Ignore simple presses of enter if command == '': continue foundcommand = False for a in self.__consolecommands: if a.command() == command: a.callback(*args) foundcommand = True if not foundcommand: print("Command not recognized: " + command) except: pass super().Update(timestep) ## @name Console API # # These methods give access to the built-in server console and the various commands that # can be created. 
#@{

    ## Console command: show
    def consoleShow(self, *args):
        if len(args) != 1:
            print("Usage: show (connections)")
        else:
            if args[0] == "connections":
                if len(self.GetConnections() ) == 0:
                    print("There are no connections at this time.")
                else:
                    for a in self.GetConnections():
                        print("{0:3}: {1:40} {2:10} {3:4}".format(a.id(),
                                    str(a),
                                    connection.statuslist[a.Status()][1],
                                    int(a.GetConnectionPing() * 1000) ) )
            else:
                print("Unknown thing to show: " + args[0])

    ## Console command: help
    def consoleHelp(self, *args):
        if len(args) > 0:
            for a in self.__consolecommands:
                if a.command() == args[0]:
                    print("%10s : %s" % (args[0], a.helplong() ))
                    print("%13s %s" % (" ", a.helpshort() ))
                    print()
                else:
                    print("Command not found.")
        else:
            for a in self.__consolecommands:
                print("%10s : %s" % (a.command(), a.helplong() ))
                print("%13s %s" % (" ", a.helpshort() ))
            print()

    ## Console command: quit
    def consoleQuit(self, *args):
        print("Quit signaled from console.")
        self.Stop()
        self.__console.Stop()

    ## Call to register console commands with the server.  The library implements a number of standard
    #  commands, but games may need their own commands.  In that case, you will need your own callbacks.
    def RegisterCommand(self, command, callback, helpshort, helplong):
        self.__consolecommands.append(ConsoleCommand(
                command = command,
                callback = callback,
                helpshort = helpshort,
                helplong = helplong
        ) )
    #@}

## This class implements console commands.  To create a new console command, simply make an instance of
#  this class, giving all the keyword arguments in the constructor.
#  @param 'command' : the name of the command, what the user types to use it.
#  @param 'callback' : a function that will process the command when the user types it.
#  @param 'helpshort' : short help text, usually one line of text, preferably not more than 50 characters.
#                       In output, it will be prepended with "Usage: "
#  @param 'helplong' : long help text, can be as long as needed, as many lines as needed.  Do not put
#                      line endings, however.  Those will be added as needed.  You may put line endings to
#                      signify paragraph breaks, if need be.
class ConsoleCommand(object):
    __command = None
    __callback = None
    __helpshort = None
    __helplong = None

    def __init__(self, **args):
        # Ensure the command is always lowercase
        self.__command = args['command'].strip().lower()
        self.__callback = args['callback']
        self.__helpshort = args['helpshort']
        self.__helplong = args['helplong']

    def callback(self, *args):
        self.__callback(*args)

    def command(self):
        return self.__command

    def helpshort(self):
        return self.__helpshort

    def helplong(self):
        return self.__helplong

## This class makes the console input non-blocking.
class ConsoleInput(threading.Thread):
    ## This is the lock that must be called to avoid thread collisions
    __lock = None

    ## This is a queue of commands, unparsed.
    __pcommands = None

    def __init__(self, **args):
        threading.Thread.__init__(self, **args)

        self.__lock = threading.RLock()
        self.__pcommands = []

    ## Call to start the console input thread.
    def Start(self):
        self.__continue = True
        self.start()

    ## Stops the console input thread.  It may still take a few seconds or so.  If blocking is "True",
    #  then the call will block until the thread has shut down.
    def Stop(self, blocking=False):
        self.__continue = False

        if blocking:
            self.join()

    ## Returns true if there are pending lines from stdin to work with
    def HasPending(self):
        if len(self.__pcommands) > 0:
            return True

        return False

    ## Starts the console input.  Don't call this directly, instead call Start().
def run(self): while self.__continue: msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops the first item off the commands list and returns it. def pop(self): theCommand = None if len(self.__pcommands) > 0: self.__lock.acquire() theCommand = self.__pcommands.pop(0) self.__lock.release() return theCommand
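# A minimal, hypothetical usage sketch (not part of the original module):
# registering a game-specific console command. It assumes DispatcherBase's
# constructor needs no required arguments; the 'uptime' command is invented.
if __name__ == '__main__':
    def uptime_command(*args):
        print("up and running")

    dispatcher = EventDispatcherServer()
    dispatcher.RegisterCommand('uptime', uptime_command,
                               "uptime",
                               "Print a liveness message.")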
34.703863
157
0.589661
5,917
0.731759
0
0
0
0
0
0
3,520
0.43532
53c80402ffddb5cb55023d530bbbc0ac778cca90
416
py
Python
account/migrations/0003_customuser_phone_number.py
zenofewords/thebrushstash
7d53bd5f22a2daa1011bb502bce56e735504dc84
[ "MIT" ]
null
null
null
account/migrations/0003_customuser_phone_number.py
zenofewords/thebrushstash
7d53bd5f22a2daa1011bb502bce56e735504dc84
[ "MIT" ]
18
2019-12-05T07:27:52.000Z
2022-02-12T20:50:22.000Z
account/migrations/0003_customuser_phone_number.py
zenofewords/thebrushstash
7d53bd5f22a2daa1011bb502bce56e735504dc84
[ "MIT" ]
null
null
null
# Generated by Django 2.2.7 on 2019-11-17 17:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name'), ] operations = [ migrations.AddField( model_name='customuser', name='phone_number', field=models.CharField(blank=True, max_length=500), ), ]
21.894737
63
0.620192
323
0.776442
0
0
0
0
0
0
116
0.278846
53c8f59b4f5c675f0331d7886d8de3f13a17f272
322
py
Python
03_Estrutura_de_Repeticao/13_potenciacao.py
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
a92d477468bde5eac8987a26ea79af2ffeb6ad81
[ "MIT" ]
null
null
null
03_Estrutura_de_Repeticao/13_potenciacao.py
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
a92d477468bde5eac8987a26ea79af2ffeb6ad81
[ "MIT" ]
10
2020-08-19T04:31:52.000Z
2020-09-21T22:48:29.000Z
03_Estrutura_de_Repeticao/13_potenciacao.py
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
a92d477468bde5eac8987a26ea79af2ffeb6ad81
[ "MIT" ]
null
null
null
base = int(input('Enter the base value: '))
expoente = 0
while expoente <= 0:
    expoente = int(input('Enter the exponent value: '))
    if expoente <= 0:
        print('The exponent must be positive')

potencia = 1
for c in range(1, expoente + 1):
    potencia *= base

print(f'{base}^{expoente} = {potencia}')
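# Worked example of the loop above (values chosen for illustration):
# base=2, expoente=4 -> potencia goes 1, 2, 4, 8, 16, so 2^4 = 16.
# The built-in operator gives the same result and serves as a cross-check:
assert 2 ** 4 == 16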
21.466667
57
0.624224
0
0
0
0
0
0
0
0
123
0.381988
53cb133ef9cebb74671b9c48466b895d83fd6371
1,313
py
Python
accounting/accounting/doctype/journal_entry/journal_entry.py
noahjacob/Accounting
6be90c4f82867156532ca71b1faa9d017e3269af
[ "MIT" ]
1
2021-04-05T06:22:16.000Z
2021-04-05T06:22:16.000Z
accounting/accounting/doctype/journal_entry/journal_entry.py
mohsinalimat/Accounting
6be90c4f82867156532ca71b1faa9d017e3269af
[ "MIT" ]
null
null
null
accounting/accounting/doctype/journal_entry/journal_entry.py
mohsinalimat/Accounting
6be90c4f82867156532ca71b1faa9d017e3269af
[ "MIT" ]
2
2021-04-05T06:22:17.000Z
2021-04-10T06:05:36.000Z
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Noah Jacob and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt
from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry

class JournalEntry(Document):
    def validate(self):
        calc_total_debit_credit(self)
        if self.difference:
            frappe.throw("The total debit and credit must be equal. The current difference is {}".format(self.difference))

        if self.total_credit == 0 or self.total_debit == 0:
            frappe.throw('Total Cannot be Zero')

        if not self.accounts:
            frappe.throw('Account Entries are required')
        else:
            self.title = self.accounts[0].account

    def on_submit(self):
        for entry in self.accounts:
            make_gl_entry(self, entry.account, entry.debit, entry.credit)

    def on_cancel(self):
        # cancel gl entry
        make_reverse_gl_entry(self, self.doctype, self.name)

def calc_total_debit_credit(self):
    self.total_debit, self.total_credit, self.difference = 0, 0, 0
    for entry in self.accounts:
        self.total_debit = flt(self.total_debit) + flt(entry.debit)
        self.total_credit = flt(self.total_credit) + flt(entry.credit)
    self.difference = flt(self.total_debit) - flt(self.total_credit)
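# A worked example of the balancing rule enforced in validate() (plain
# arithmetic, values illustrative):
#   debits  = 100.0 + 50.0 = 150.0
#   credits = 150.0 + 0.0  = 150.0
#   difference = 150.0 - 150.0 = 0.0  -> the entry passes validation;
#   any nonzero difference raises via frappe.throw().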
29.840909
113
0.760853
654
0.498096
0
0
0
0
0
0
262
0.199543
53cba4400da1d1c4d684c06ae9715e48948281c2
568
py
Python
polls/models.py
mmeooo/test_django
0364f43549d4082df7100d11c67dd42dc2a82b32
[ "Apache-2.0" ]
null
null
null
polls/models.py
mmeooo/test_django
0364f43549d4082df7100d11c67dd42dc2a82b32
[ "Apache-2.0" ]
null
null
null
polls/models.py
mmeooo/test_django
0364f43549d4082df7100d11c67dd42dc2a82b32
[ "Apache-2.0" ]
null
null
null
from django.db import models

# Create your models here.
# What classes provide here: inheritance
class Question(models.Model): # Table
    question_text = models.CharField(max_length=100) # column, datatype
    public_date = models.CharField(max_length=100)
    votes = models.DecimalField(max_digits=20, decimal_places=10)
    # Building the class from the two field types above is enough.
    # link, string -> CharField, data -> DecimalField
    # max_length=100 is the usual choice

class Economics(models.Model):
    title = models.CharField(max_length=100)
    href = models.CharField(max_length=100)
    create_date = models.CharField(max_length=100)
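# A minimal ORM usage sketch (assumes Django is configured and this app's
# migrations have been applied; the values are illustrative):
# >>> from polls.models import Question
# >>> q = Question.objects.create(question_text='What is new?',
# ...                             public_date='2021-01-01', votes=0)
# >>> Question.objects.count()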
33.411765
71
0.746479
397
0.640323
0
0
0
0
0
0
210
0.33871
53ccd38a42372cb4c8b6646892db6cc4fe7a6bd1
722
py
Python
ipcam/test_snap.py
jack139/HF
4810f4ee2faf9ab51c867e105addc139da2adfd1
[ "BSD-3-Clause" ]
10
2019-04-07T20:13:23.000Z
2021-12-07T06:23:52.000Z
ipcam/test_snap.py
jack139/HF
4810f4ee2faf9ab51c867e105addc139da2adfd1
[ "BSD-3-Clause" ]
1
2020-05-29T16:11:22.000Z
2020-05-29T16:11:22.000Z
ipcam/test_snap.py
jack139/HF
4810f4ee2faf9ab51c867e105addc139da2adfd1
[ "BSD-3-Clause" ]
6
2017-10-20T10:53:33.000Z
2020-04-24T06:34:18.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys,os,time if len(sys.argv)<2: print "usage: test_snap.py <check|show>" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in HF system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i in a: sub='%s/%s' % (path, i) b=os.listdir(sub) if 'capture' in b: b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print "%s - %d, %s - %d, (%d)" % (i, len(b), b[-1], len(c), last_sub-int(b[-1])) else: print "usage: test_snap.py <check|show>" sys.exit(2)
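# A small standalone sketch (not part of the original script) of the
# staleness check above: snapshot subdirectories are named by 10-minute
# epoch buckets, so a gap of more than 3 buckets means over 30 minutes stale.
import time
last_sub = int(time.time() / 600)   # current 10-minute bucket
newest = last_sub - 4               # pretend the newest dir is 40 minutes old
print(last_sub - newest > 3)        # True -> this camera would be reported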
21.878788
83
0.613573
0
0
0
0
0
0
0
0
276
0.382271
53cd4bfd1a117d3dcaa2d01161d38a59434bcf2f
5,608
py
Python
sources/datasets/client_dataset_definitions/client_dataset.py
M4rukku/impact_of_non_iid_data_in_federated_learning
c818db03699c82e42217d56f8ddd4cc2081c8bb1
[ "MIT" ]
null
null
null
sources/datasets/client_dataset_definitions/client_dataset.py
M4rukku/impact_of_non_iid_data_in_federated_learning
c818db03699c82e42217d56f8ddd4cc2081c8bb1
[ "MIT" ]
null
null
null
sources/datasets/client_dataset_definitions/client_dataset.py
M4rukku/impact_of_non_iid_data_in_federated_learning
c818db03699c82e42217d56f8ddd4cc2081c8bb1
[ "MIT" ]
null
null
null
import functools import gc from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if not self.within_context: raise OutsideOfContextError( """Error: Tried to access client Dataset outside of context manager. This might lead to data leaks and bad use of memory. Please wrap the usage of ClientDataset.dataset_x inside a "with statement". """) else: value = func(self, *args, **kwargs) return value return wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data = None self._test_data = None self._validation_data = None self.within_context = False def process_x(self, raw_x_batch): """Pre-processes each batch of features before being fed to the model.""" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): """Pre-processes each batch of labels before being fed to the model.""" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data is None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data["x"]), self.process_y(data["y"]) else: return data @property @throw_error_outside_context def training_data(self): """Returns the Training Data as pair of arrays containing the samples x, and classification y""" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def training_data_x(self): """Returns the Training Data as an array of samples""" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def training_data_y(self): """Returns the Classifications for the Training Data as array""" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context def test_data(self): """Returns the Training Data as pair of arrays containing the samples x, and classification y""" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def test_data_x(self): """Returns the Test Data as an array of samples""" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def test_data_y(self): """Returns the Classifications for the Test Data as array""" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def validation_data(self): """Returns the Validation Data as pair of arrays containing the samples x, and classification y""" self._validation_data = self._lazy_initialise_data( 
self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def validation_data_x(self): """Returns the Validation Data as an array of samples""" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): """Returns the Classifications for the Validation Data as array""" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context = True def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context = False self._train_data = None self._test_data = None self._validation_data = None gc.collect()
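# A minimal usage sketch (hypothetical loader/processor objects; a concrete
# ClientDataset subclass and its collaborators live elsewhere in this repo):
#
# dataset = MyClientDataset(client_identifier="client_0",
#                           client_dataset_loader=loader,
#                           client_dataset_processor=processor)
# with dataset:                     # required, else OutsideOfContextError
#     x = dataset.training_data_x   # lazily loaded and processed
#     y = dataset.training_data_y
# # outside the with-block the cached splits are dropped and memory reclaimed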
38.675862
139
0.652461
4,635
0.826498
0
0
3,494
0.623039
0
0
1,086
0.193652
53ce7501b9e972d2df63aa7b92834c10ac73f623
2,377
py
Python
src/rmt/kinematics.py
mfrigerio17/robot-model-tools
97e25d5c4d1386c503d37a70b57400022c5b7ca0
[ "BSD-3-Clause" ]
2
2020-06-16T09:23:46.000Z
2021-01-20T09:11:43.000Z
src/rmt/kinematics.py
mfrigerio17/robot-model-tools
97e25d5c4d1386c503d37a70b57400022c5b7ca0
[ "BSD-3-Clause" ]
null
null
null
src/rmt/kinematics.py
mfrigerio17/robot-model-tools
97e25d5c4d1386c503d37a70b57400022c5b7ca0
[ "BSD-3-Clause" ]
null
null
null
import logging

import numpy

import kgprim.motions as motions
import kgprim.ct.frommotions as frommotions
import kgprim.ct.repr.mxrepr as mxrepr
import motiondsl.motiondsl as motdsl


logger = logging.getLogger(__name__)


class RobotKinematics:
    '''The composition of the constant poses and the joint poses of a robot.

    This class is a simple aggregation of the geometry model and the
    joint-poses model. By merging the two, this class has access to the full
    robot kinematics. Thanks to gr.motions.ConnectedFramesInspector, an
    arbitrary relative pose between two frames on the robot can be obtained.
    '''

    def __init__(self, geometry, jointPoses):
        self.robotGeometry = geometry
        self.jointPoses = jointPoses
        self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ]

        allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel )
        self.framesConnectivity = motions.ConnectedFramesInspector(allPoses)


def base_H_ee(kinematics, framename):
    if framename not in kinematics.robotGeometry.framesModel.framesByName:
        logger.error("Could not find frame '{0}' in model '{1}'".format(framename, kinematics.robotGeometry.robotName))
        return None
    ee = kinematics.robotGeometry.framesModel.framesByName[ framename ]
    if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame):
        logger.error("Frame '{0}' and the base frame do not seem to be connected".format(framename))
        return None

    poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame)
    cotr = frommotions.toCoordinateTransform(poseSpec)
    H = mxrepr.hCoordinatesSymbolic(cotr)
    q = numpy.zeros( len(H.variables) )
    H = H.setVariablesValue( valueslist=q )
    return H


def serializeToMotionDSLModel(robotKinematics, ostream):
    header = '''
Model {modelname}
Convention = currentFrame
'''.format(modelname=robotKinematics.robotGeometry.robotName)
    ostream.write(header)

    for jp in robotKinematics.jointPoses.poseSpecByJoint.values():
        text = motdsl.poseSpecToMotionDSLSnippet( jp )
        ostream.write(text)
        ostream.write('\n')
    for cp in robotKinematics.robotGeometry.byPose.values():
        text = motdsl.poseSpecToMotionDSLSnippet( cp )
        ostream.write(text)
        ostream.write('\n')
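# A minimal usage sketch (not part of the original module): `geometry` and
# `jointPoses` stand for models loaded elsewhere by robot-model-tools, and
# the frame name is illustrative.
#
# kin = RobotKinematics(geometry, jointPoses)
# H = base_H_ee(kin, 'ee_link')    # 4x4 homogeneous transform at q = 0
# if H is not None:
#     print(H)
#
# import sys
# serializeToMotionDSLModel(kin, sys.stdout)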
34.955882
119
0.738746
781
0.328565
0
0
0
0
0
0
546
0.229701
53cfe05a29410444b4904c98e9ea7e4826833ee4
4,702
py
Python
awx/main/management/commands/run_dispatcher.py
atr0s/awx
388ef077c384f4c5296d4870d3b0cf0e6718db80
[ "Apache-2.0" ]
null
null
null
awx/main/management/commands/run_dispatcher.py
atr0s/awx
388ef077c384f4c5296d4870d3b0cf0e6718db80
[ "Apache-2.0" ]
null
null
null
awx/main/management/commands/run_dispatcher.py
atr0s/awx
388ef077c384f4c5296d4870d3b0cf0e6718db80
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. import os import logging from multiprocessing import Process from django.conf import settings from django.core.cache import cache as django_cache from django.core.management.base import BaseCommand from django.db import connection as django_connection from kombu import Connection, Exchange, Queue from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch the task dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked managed by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher to recycle all of its worker processes;' 'running jobs will run to completion first')) def beat(self): from celery import Celery from celery.beat import PersistentScheduler from celery.apps import beat class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if os.getppid() != self.ppid: # if the parent PID changes, this process has been orphaned # via e.g., segfault or sigkill, we should exit too raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self, entry, producer=None, advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class TaskResult(object): id = result['uuid'] return TaskResult() app = Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options): if options.get('status'): print Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's important to close these because we're _about_ to fork, and we # don't want the forked processes to inherit the open sockets # for the DB and memcached connections (that way lies race conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon = True beat.start() reaper.reap() consumer = None with Connection(settings.BROKER_URL) as conn: try: bcast = 'tower_broadcast_all' queues = [ Queue(q, Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) ) consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task Dispatcher') if consumer: consumer.stop()
37.616
96
0.576989
3,960
0.842195
0
0
0
0
0
0
906
0.192684
53d0271d7e3d9c0d0f41f088e5b38f2630dec774
5,318
py
Python
pcdet/utils/box_coder_utils.py
Nuri-benbarka/PCDet
8da66ead3bb1120db2fa919187948c8c134e85ae
[ "Apache-2.0" ]
7
2020-11-28T03:38:51.000Z
2021-12-31T07:44:19.000Z
pcdet/utils/box_coder_utils.py
Nuri-benbarka/PCDet
8da66ead3bb1120db2fa919187948c8c134e85ae
[ "Apache-2.0" ]
null
null
null
pcdet/utils/box_coder_utils.py
Nuri-benbarka/PCDet
8da66ead3bb1120db2fa919187948c8c134e85ae
[ "Apache-2.0" ]
1
2021-04-01T15:54:21.000Z
2021-04-01T15:54:21.000Z
import numpy as np import torch from . import common_utils class ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size = code_size @staticmethod def encode_np(boxes, anchors): """ :param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param anchors: (N, 7 + ?) :return: """ box_ndim = anchors.shape[-1] xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1) # need to convert boxes to z-center format zg = zg + hg / 2 za = za + ha / 2 diagonal = np.sqrt(la ** 2 + wa ** 2) # 4.3 xt = (xg - xa) / diagonal yt = (yg - ya) / diagonal zt = (zg - za) / ha # 1.6 lt = np.log(lg / la) wt = np.log(wg / wa) ht = np.log(hg / ha) rt = rg - ra cts = [g - a for g, a in zip(cgs, cas)] return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors): """ :param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param anchors: (N, 7 + ?) :return: """ box_ndim = anchors.shape[-1] xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1) # need to convert box_encodings to z-bottom format za = za + ha / 2 diagonal = np.sqrt(la ** 2 + wa ** 2) xg = xt * diagonal + xa yg = yt * diagonal + ya zg = zt * ha + za lg = np.exp(lt) * la wg = np.exp(wt) * wa hg = np.exp(ht) * ha rg = rt + ra zg = zg - hg / 2 cgs = [t + a for t, a in zip(cts, cas)] return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors): """ :param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param anchors: (N, 7 + ?) :return: """ xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1) za = za + ha / 2 zg = zg + hg / 2 diagonal = torch.sqrt(la ** 2 + wa ** 2) xt = (xg - xa) / diagonal yt = (yg - ya) / diagonal zt = (zg - za) / ha lt = torch.log(lg / la) wt = torch.log(wg / wa) ht = torch.log(hg / ha) rt = rg - ra cts = [g - a for g, a in zip(cgs, cas)] return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors): """ :param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param anchors: (N, 7 + ?) 
:return: """ xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) za = za + ha / 2 diagonal = torch.sqrt(la ** 2 + wa ** 2) xg = xt * diagonal + xa yg = yt * diagonal + ya zg = zt * ha + za lg = torch.exp(lt) * la wg = torch.exp(wt) * wa hg = torch.exp(ht) * ha rg = rt + ra zg = zg - hg / 2 cgs = [t + a for t, a in zip(cts, cas)] return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): """ :param box_preds: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param anchors: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis :param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return: """ batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period = (2 * np.pi / num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if __name__ == '__main__': pass
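# A minimal round-trip sketch (illustrative values, not part of the original
# module): decode_np(encode_np(b, a), a) recovers b, because every encoding
# step above is invertible.
def _roundtrip_demo():
    boxes = np.array([[1.0, 2.0, -1.0, 1.6, 3.9, 1.5, 0.3]])
    anchors = np.array([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
    encoded = ResidualCoder.encode_np(boxes, anchors)
    decoded = ResidualCoder.decode_np(encoded, anchors)
    print(np.allclose(decoded, boxes))  # True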
35.691275
118
0.507334
5,218
0.981196
0
0
3,460
0.650621
0
0
1,132
0.212862
53d12a0522be9c1f94c8076c489fd23a012f880f
15,175
py
Python
utils/utils.py
jainajinkya/deep_bingham
2ea85b3ea2af579eab36567091b88a1bbf4a627b
[ "MIT" ]
null
null
null
utils/utils.py
jainajinkya/deep_bingham
2ea85b3ea2af579eab36567091b88a1bbf4a627b
[ "MIT" ]
null
null
null
utils/utils.py
jainajinkya/deep_bingham
2ea85b3ea2af579eab36567091b88a1bbf4a627b
[ "MIT" ]
null
null
null
""" Utilities for learning pipeline.""" from __future__ import print_function import copy import dill import hashlib import itertools import third_party.deep_bingham.bingham_distribution as ms import math import numpy as np import os import scipy import scipy.integrate as integrate import scipy.special import sys import torch from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): """Converts roll, yaw, pitch to a quaternion. """ # roll (z), yaw (y), pitch (x) cy = math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw) * 0.5) sp = math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch) * 0.5) w = cy * cp * cr + sy * sp * sr x = cy * cp * sr - sy * sp * cr y = sy * cp * sr + cy * sp * cr z = sy * cp * cr - cy * sp * sr quat = np.array([w, x, y, z]) quat = quat / np.linalg.norm(quat) return quat def radians(degree_tensor): """ Method to convert a torch tensor of angles in degree format to radians. Arguments: degree_tensor (torch.Tensor): Tensor consisting of angles in degree format. Returns: radian_tensor (torch.Tensor): Tensor consisting of angles in radian format. """ radian_tensor = degree_tensor/180 * math.pi return radian_tensor def generate_coordinates(coords): """ A function that returns all possible triples of coords Parameters: coords: a numpy array of coordinates Returns: x: the first coordinate of possible triples y: the second coordinate of possible triples z the third coordinate of possible triples """ x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z def ensure_dir_exists(path): """ Checks if a directory exists and creates it otherwise. """ if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): """ Loads lookup table from dill serialized file. Returns a table specific tuple. For the Bingham case, the tuple containins: table_type (str): options (dict): The options used to generate the lookup table. res_tensor (numpy.ndarray): The actual lookup table data. coords (numpy.ndarray): Coordinates at which lookup table was evaluated. For the von Mises case, it contains: options (dict): The options used to generate the lookup table. res_tensor (numpy.ndarray): The actual lookup table data. """ assert os.path.exists(path), "Lookup table file not found." with open(path, "rb") as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): """ Expected Absolute Angular Deviation of Bingham Random Vector Arguments: kappas: Von Mises kappa parameters for roll, pitch, yaw. integral_options: Options to pass on to the scipy integrator for computing the eaad and the bingham normalization constant. 
""" def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 * acos_val return diff_ang if integral_options is None: integral_options = {"epsrel": 1e-2, "epsabs": 1e-2} param_mu = np.array([0., 0., 0.]) # radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const = 8.0 * (np.pi ** 3) \ * scipy.special.iv(0, param_kappa[0]) \ * scipy.special.iv(0, param_kappa[1]) \ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \ * np.exp(param_kappa[1] * np.cos(phi2)) \ * np.exp(param_kappa[2] * np.cos(phi3)) \ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi, # phi3 lambda x: 0.0, lambda x: 2. * np.pi, # phi2 lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1 **integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): """ Expected Absolute Angular Deviation of Bingham Random Vector Arguments: bingham_z: Bingham dispersion parameter in the format expected by the manstats BinghamDistribution class. integral_options: Options to pass on to the scipy integrator for computing the eaad and the bingham normalization constant. """ def aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2 * np.min([acos_val, np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 * acos_val return diff_ang if integral_options is None: integral_options = {"epsrel": 1e-4, "epsabs": 1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z, {"norm_const_mode": "numerical", "norm_const_options": integral_options} ) def integrand_transformed(x): # To avoid unnecessary divisions, this term does not contain the # normalization constant. At the end, the result of the integration is # divided by it. return aad(x, bd.mode) \ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3): sp1 = np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3), sp1 * sp2 * np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ])) * (sp1 ** 2.) * sp2 eaad_int = integrate.tplquad( integrand, 0.0, 2.0 * np.pi, # phi3 lambda x: 0.0, lambda x: np.pi, # phi2 lambda x, y: 0.0, lambda x, y: np.pi, # phi1 **integral_options ) return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options, path=None): """ Builds a lookup table for interpolating the bingham normalization constant. If a lookup table with the given options already exists, it is loaded and returned instead of building a new one. Arguments: table_type: Type of lookup table used. May be 'uniform' or 'nonuniform' options: Dict cotaining type specific options. If type is "uniform" this dict must contain: "bounds" = Tuple (lower_bound, upper_bound) representing bounds. "num_points" = Number of points per dimension. If type is "nonuniform" this dict must contain a key "coords" which is a numpy arrays representing the coordinates at which the interpolation is evaluated. path: absolute path for the lookup table (optional). The default is to create a hash based on the options and to use this for constructing a file name and placing the file in the precomputed folder. 
""" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path = os.path.dirname(__file__) \ + "/../precomputed/lookup_{}.dill".format(config_hash) # Load existing table or create new one. if os.path.exists(path): with open(path, "rb") as dillfile: (serialized_type, serialized_options, res_table, coords) \ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \ "Serialized lookup table does not match given type & options." elif table_type == "uniform": # Number of points per axis. (lbound, rbound) = options["bounds"] num_points = options["num_points"] assert num_points > 1, \ "Grid must have more than one point per dimension." nc_options = {"epsrel": 1e-3, "epsabs": 1e-7} coords = np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, "wb") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) elif table_type == "nonuniform": nc_options = {"epsrel": 1e-3, "epsabs": 1e-7} coords = options["coords"] res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, "wb") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) else: sys.exit("Unknown lookup table type") return res_table def build_vm_lookup_table(options, path=None): """ Builds a lookup table for interpolating the bingham normalization constant. If a lookup table with the given options already exists, it is loaded and returned instead of building a new one. Arguments: options: Dict cotaining table options. It must contain a key "coords" which is a numpy arrays representing the coordinates at which the interpolation is evaluated. path: absolute path for the lookup table (optional). The default is to create a hash based on the options and to use this for constructing a file name and placing the file in the precomputed folder. """ hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path = os.path.dirname(__file__) \ + "/../precomputed/lookup_{}.dill".format(config_hash) # Load existing table or create new one. if os.path.exists(path): with open(path, "rb") as dillfile: (serialized_options, res_table) \ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \ "Serialized lookup table does not match given type & options." else: coords = options["coords"] res_table = _compute_vm_lookup_table(coords) with open(path, "wb") as dillfile: dill.dump((options, res_table), dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact # on the result as the Bingham normalization constant is agnostic to it. # However, the numpy integration that is used to compute it, combines # numerical 2d and 1d integration which is why the order matters for the # actual computation time. # # TODO: Make pymanstats choose best order automatically. 
norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), "numerical", nc_options) print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor class AverageMeter(object): """Computes and stores the averages over a numbers or dicts of numbers. For the dict, this class assumes that no new keys are added during the computation. """ def __init__(self): self.last_val = 0 self.avg = 0 self.count = 0 def update(self, val, n=1): self.last_val = val n = float(n) if type(val) == dict: if self.count == 0: self.avg = copy.deepcopy(val) else: for key in val: self.avg[key] *= self.count / (self.count + n) self.avg[key] += val[key] * n / (self.count + n) else: self.avg *= self.count / (self.count + n) self.avg += val * n / (self.count + n) self.count += n self.last_val = val def _compute_vm_lookup_table(coords): num_points = len(coords) pool = Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print("Computing NC for kappas=[{}, {}, {}]: {}".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1] <= z[0][1:]): print(z) return z
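# --- Illustrative usage sketch (assumes only the definitions above; the
# input tensor and dict values are arbitrary placeholders) ---
if __name__ == "__main__":
    # vec_to_bingham_z maps an unconstrained 3-vector to a sorted,
    # nonpositive Bingham dispersion diagonal of shape (1, 3).
    _z = vec_to_bingham_z(torch.zeros(3))
    print(_z)  # tensor([[-3., -2., -1.]])

    # AverageMeter keeps running means of plain numbers or dicts.
    _meter = AverageMeter()
    _meter.update({"loss": 1.0})
    _meter.update({"loss": 0.0})
    print(_meter.avg)  # {'loss': 0.5}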
34.805046
83
0.623394
849
0.055947
0
0
0
0
0
0
5,149
0.339308
53d21a61b1f0af656cef94761b86e69e5114d1b2
8,108
py
Python
cli_ui.py
obatsis/Distributed-NTUA
0bf39163b64aaefb2576be01337e0ec6e026ce6d
[ "MIT" ]
null
null
null
cli_ui.py
obatsis/Distributed-NTUA
0bf39163b64aaefb2576be01337e0ec6e026ce6d
[ "MIT" ]
null
null
null
cli_ui.py
obatsis/Distributed-NTUA
0bf39163b64aaefb2576be01337e0ec6e026ce6d
[ "MIT" ]
null
null
null
import requests
import os
from PyInquirer import style_from_dict, Token, prompt
import sys
import utils.config as config
import utils.ends as ends
from utils.colorfy import *
from auto.testing import test_trans
import time
import json

style = style_from_dict({
    Token.QuestionMark: '#E91E63 bold',
    Token.Selected: '#673AB7 bold',
    Token.Instruction: '#0bf416',
    Token.Answer: '#2196f3 bold',
    Token.Question: '#0bf416 bold',
})

def client(ip, port):
    os.system('clear')
    cyan('What a beautiful day to enter the cult...')
    baseURL = 'http://' + ip + ':' + port
    while True:
        print('----------------------------------------------------------------------')
        method_q = {
            'type': 'list',
            'name': 'method',
            'message': 'Select action:',
            'choices': ['Network Overlay',
                        'Insert a Song',
                        'Search for a Song',
                        'Delete a Song',
                        'Depart from Chord',
                        'Run automated test',
                        'Help',
                        'Exit']
        }
        method_a = prompt(method_q, style=style)['method']
        os.system('clear')

        if method_a == 'Depart from Chord':
            print(cyan("Preparing Node to depart from Chord..."))
            try:
                response = requests.get(baseURL + ends.c_depart)
                if response.status_code == 200:
                    if response.text == "Left the Chord":
                        print(response.text)
                        print(green("Node is out of Toychord network"))
                    else:
                        print(red(response.text))
                else:
                    print(red("Got a bad response status code " + str(response.status_code)))
            except:
                print(red("Could not establish connection with Node. Node didn't depart..."))
                print(red("Unfortunately exiting..."))
            break

        elif method_a == 'Insert a Song':
            print('Insert a Title-Value pair for the song you wish to insert')
            fetch_q = [
                {
                    'type': 'input',
                    'name': 'key',
                    'message': 'Song Title:',
                    'filter': lambda val: str(val)
                },
                {
                    'type': 'input',
                    'name': 'value',
                    'message': 'Value:',
                    'filter': lambda val: str(val)
                }
            ]
            fetch_a = prompt(fetch_q, style=style)
            print(cyan("Inserting Song: ") + fetch_a['key'] + cyan("..."))
            try:
                response = requests.post(baseURL + ends.c_insert,
                                         data={'key': fetch_a['key'], 'value': fetch_a['value']})
                if response.status_code == 200:
                    print(cyan("Inserted by node with id: ") + green(response.text.split(" ")[0]))
                else:
                    print(red("Got a bad response status code " + str(response.status_code)))
            except:
                print(red("Could not establish connection with Node. Song wasn't inserted..."))
                print(red("Unfortunately exiting..."))
                exit(0)
            continue

        elif method_a == 'Delete a Song':
            print('Insert the Song Title you wish to delete')
            fetch_q = [
                {
                    'type': 'input',
                    'name': 'key',
                    'message': 'Song Title:',
                    'filter': lambda val: str(val)
                }]
            fetch_a = prompt(fetch_q, style=style)
            print(cyan("Deleting Song: ") + fetch_a['key'] + cyan("..."))
            try:
                response = requests.post(baseURL + ends.c_delete, data={'key': fetch_a['key']})
                if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
                    print(cyan("Deleted by node with id: ") + green(response.text.split(" ")[0]))
                else:
                    print(yellow("Song doesn't exist in the Chord"))
                    print(yellow("Couldn't delete it"))
            except:
                print(red("Could not establish connection with Node. Song wasn't deleted..."))
                print(red("Unfortunately exiting..."))
                exit(0)
            continue

        elif method_a == 'Search for a Song':
            print('Insert the Song Title you wish to Search or * to get all songs of the Chord')
            fetch_q = [
                {
                    'type': 'input',
                    'name': 'key',
                    'message': 'Song Title:',
                    'filter': lambda val: str(val)
                }]
            fetch_a = prompt(fetch_q, style=style)
            if fetch_a['key'] == "*":
                print(cyan("Fetching all the songs of the Chord..."))
                try:
                    response = requests.get(baseURL + ends.c_query_star)
                    if response.status_code == 200:
                        nodes_list = json.loads(response.text)
                        for node in nodes_list["res"]:
                            print(header("\n" + node["uid"]) + " " + underline(node["ip"] + ":" + node["port"]))
                            for song in node["song"]:
                                print("  -" + green(song["key"]) + "  " + song["value"])
                    else:
                        print(yellow("Something went wrong...") + str(response.status_code))
                except:
                    print(red("Could not establish connection with Node. Couldn't search for song..."))
                    print(red("Unfortunately exiting..."))
                    exit(0)
            else:
                print(cyan("Searching Song: ") + fetch_a['key'] + cyan("..."))
                try:
                    response = requests.post(baseURL + ends.c_query, data={'key': fetch_a['key']})
                    if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
                        print("Song found in node with id: ", green(response.text.split(" ")[0]))
                        print("Song value: " + green(response.text.split(" ")[1]))
                    else:
                        print(yellow("Song doesn't exist in the Chord"))
                except:
                    print(red("Could not establish connection with Node. Couldn't search for song..."))
                    print(red("Unfortunately exiting..."))
                    exit(0)
            continue

        elif method_a == 'Network Overlay':
            print(cyan("Initiating Network Overlay..."))
            try:
                response = requests.get(baseURL + ends.c_overlay)
                if response.status_code == 200:
                    nodes_list = json.loads(response.text)
                    print('\n')
                    for node in nodes_list["res"]:
                        print(green(node["ip"] + ":" + node["port"]), end='')
                        if node != nodes_list["res"][-1]:
                            print(" -> ", end='')
                    print('\n')
                else:
                    print(red("Got a bad response status code " + str(response.status_code)))
            except:
                print(red("Could not establish connection with Node..."))
                print(red("Unfortunately exiting..."))
                exit(0)
            continue

        elif method_a == 'Help':
            print('-------------------------------- Help --------------------------------\n')
            overlayHelp = header("Overlay: ") + cyan("This function recreates and prints the current Network Topology (e.g. Node1 -> Node2 -> ...)\n")
            insertHelp = header("Insert Song: ") + cyan("This function expects a Song Title and a Song Value and inserts them in the Chord\n")
            queryHelp = header("Search Song: ") + cyan("This function expects a Song Title and returns the Node in which the song is stored and the value of the song\n")
            deleteHelp = header("Delete Song: ") + cyan("This function expects a Song Title and returns the Node that deleted the song\n")
            departHelp = header("Depart: ") + cyan("This function makes the node connected to this cli leave the Chord\n")
            autoTests = header("Run automated tests: ") + cyan("This function expects a test number (1=insert, 2=query, 3=requests), runs the test and returns the chord throughput")
            print(
                " -", overlayHelp, "\n"
                " -", insertHelp, "\n",
                "-", queryHelp, "\n",
                "-", deleteHelp, "\n",
                "-", departHelp, "\n",
                "-", autoTests, "\n",
            )
            continue

        elif method_a == 'Run automated test':
            print('Select which test you wish to run (1 = insert, 2 = query, 3 = requests)')
            fetch_q = [
                {
                    'type': 'input',
                    'name': 'test_n',
                    'message': 'Test:',
                    'filter': lambda val: str(val)
                }
            ]
            fetch_a = prompt(fetch_q, style=style)
            test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'
            if test_number not in ('1', '2', '3'):
                print(yellow("Wrong test number (give 1, 2 or 3)"))
                continue
            print(cyan("Running automated test: ") + ("insert" if test_number == '1' else ("query" if test_number == '2' else "requests")) + cyan("..."))
            print(blue(test_trans(test_number)))
            print(cyan("Done!"))
            continue

        elif method_a == 'Exit':
            os.system('clear')
            break

        else:
            os.system('clear')
            continue

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print("!! you must tell me the port. Ex. -p 5000 !!")
        exit(0)
    if sys.argv[1] in ("-p", "-P"):
        my_port = sys.argv[2]
        my_ip = os.popen('ip addr show ' + config.NETIFACE + ' | grep "\\<inet\\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'').read().strip()
        client(my_ip, my_port)
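# --- Illustrative sketch of the endpoint module this CLI assumes ---
# cli_ui.py relies on utils/ends.py for the URL paths used above. The
# attribute names are taken from the code; the path strings below are
# hypothetical placeholders, not the repo's actual values.
#
#   # utils/ends.py
#   c_insert = '/insert'
#   c_delete = '/delete'
#   c_query = '/query'
#   c_query_star = '/query_star'
#   c_overlay = '/overlay'
#   c_depart = '/depart'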
34.21097
170
0.604465
0
0
0
0
0
0
0
0
3,469
0.427849
53d38a232396aeecc14c7708fa90954da15a7129
21,306
py
Python
Contents/scripts/siweighteditor/weight.py
jdrese/SIWeightEditor
0529c1a366b955f4373acd2e2f08f63b7909ff82
[ "MIT" ]
1
2018-12-12T15:39:13.000Z
2018-12-12T15:39:13.000Z
Contents/scripts/siweighteditor/weight.py
jdrese/SIWeightEditor
0529c1a366b955f4373acd2e2f08f63b7909ff82
[ "MIT" ]
null
null
null
Contents/scripts/siweighteditor/weight.py
jdrese/SIWeightEditor
0529c1a366b955f4373acd2e2f08f63b7909ff82
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re


class WeightCopyPaste():

    def main(self, skinMeshes, mode='copy', saveName='default', method='index',
             weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
        '''
        Save/load function for skin weight data.
        mode -> 'copy' or 'paste'
        saveName -> folder name for the saved weight data; specify it to separate data per tool or model
        method -> how to paste: 'index', 'nearest', 'barycentric', 'over'
            'index' maps the weights onto the object using vertex indices. This is the most
            convenient method when the target object and the exported data share the same topology.
            'nearest' finds the nearest vertex in the imported data and sets the weight to that
            value. It is best when mapping a high-resolution mesh onto a low-resolution mesh.
            'barycentric' is supported only for polygon meshes. It finds the nearest triangle in
            the target geometry and rescales the weights according to the distance between the
            source point and the vertices. It is usually used for a coarse mesh that is mapped
            onto a high-resolution mesh.
            'over' is similar to 'index', but the target mesh weights are not cleared before
            mapping, so weights at unmatched indices are kept as they are.
            nearest and barycentric are currently unusable due to a bug (the process never
            finishes), as of 2016/11/03.
            -> barycentric and bylinear are available from Maya 2016 Extension 2.
        weightFile -> specify a path when you want to point at a file manually instead of searching
            by mesh name; intended to be used together with the nearest/barycentric methods.
            -> Note that specifying a file name when copying with Maya prevents saving multiple files.
        threshold -> position search range for nearest/barycentric
        '''
        if viewmsg:
            cmds.inViewMessage(amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
        self.skinMeshes = skinMeshes
        self.saveName = saveName
        self.method = method
        self.weightFile = weightFile
        self.threshold = threshold
        self.engine = engine
        self.memShapes = {}
        self.target = tgt
        self.pasteMode = {'index': 1, 'nearest': 3}
        # Convert to a list if the input is not a list
        if not isinstance(self.skinMeshes, list):
            temp = self.skinMeshes
            self.skinMeshes = []
            self.skinMeshes.append(temp)
        # Build the file path in advance
        if path == 'default':
            self.filePath = os.getenv('MAYA_APP_DIR') + '\\Scripting_Files\\weight\\' + self.saveName
        elif path == 'project':
            self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])
            self.protect_path = os.path.join(self.scene_path, 'weight_protector')
            try:
                if not os.path.exists(self.protect_path):
                    os.makedirs(self.protect_path)
            except Exception as e:
                print e.message
                return
            self.filePath = self.protect_path + '\\' + self.saveName
        self.fileName = os.path.join(self.filePath, self.saveName + '.json')
        self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
        # Dispatch to copy or paste
        if mode == 'copy':
            self.weightCopy()
        if mode == 'paste':
            self.weightPaste()

    def weightPaste(self):
        dummy = cmds.spaceLocator()
        for skinMesh in self.skinMeshes:
            # Name of the save file to read; use the mesh name if 'auto'
            if self.weightFile == 'auto':
                weightFile = skinMesh
            else:
                weightFile = self.weightFile
            dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            # If there is no skinCluster, bind using the previously saved information
            if not dstSkinCluster:
                meshName = str(weightFile).replace('|', '__pipe__')
                if os.path.exists(self.fileName):
                    try:
                        with open(self.fileName, 'r') as f:  # open the file ('r' read mode, 'w' write mode)
                            saveData = json.load(f)  # load
                        # self.visibility = saveData['visibility']  # read saved data
                        skinningMethod = saveData[';skinningMethod']
                        dropoffRate = saveData[';dropoffRate']
                        maintainMaxInfluences = saveData[';maintainMaxInfluences']
                        maxInfluences = saveData[';maxInfluences']
                        bindMethod = saveData[';bindMethod']
                        normalizeWeights = saveData[';normalizeWeights']
                        influences = saveData[';influences']
                        # If the child node is a transform, parent it to the dummy to keep it out of the way
                        common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
                        influences = cmds.ls(influences, l=True, tr=True)
                        # Bind
                        dstSkinCluster = cmds.skinCluster(
                            skinMesh,
                            influences,
                            omi=maintainMaxInfluences,
                            mi=maxInfluences,
                            dr=dropoffRate,
                            sm=skinningMethod,
                            nw=normalizeWeights,
                            tsb=True,
                        )
                        dstSkinCluster = dstSkinCluster[0]
                        # Restore the parent-child relationship
                        common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
                        tempSkinNode = skinMesh  # keep the node that has the skinCluster so we can get its parent later
                    except Exception as e:
                        print e.message
                        print 'Error !! Skin bind failed : ' + skinMesh
                        continue
            else:
                dstSkinCluster = dstSkinCluster[0]
                tempSkinNode = skinMesh  # keep the node that has the skinCluster so we can get its parent later
            if self.engine == 'maya':
                files = os.listdir(self.filePath)
                print files
                if len(files) == 2:
                    for file in files:
                        name, ext = os.path.splitext(file)
                        if ext == '.xml':
                            xml_name = file
                else:
                    # '|' (pipe) is not allowed in file names, so convert it
                    meshName = str(weightFile).replace('|', '__pipe__')
                    # ':' (colon) is not allowed in file names, so convert it
                    meshName = str(meshName).replace(':', '__colon__')
                    xml_name = meshName + '.xml'
                if os.path.isfile(self.filePath + '\\' + xml_name):
                    if self.method == 'index' or self.method == 'over':
                        cmds.deformerWeights(xml_name,
                                             im=True,
                                             method=self.method,
                                             deformer=dstSkinCluster,
                                             path=self.filePath + '\\')
                    else:
                        cmds.deformerWeights(xml_name,
                                             im=True,
                                             deformer=dstSkinCluster,
                                             method=self.method,
                                             worldSpace=True,
                                             positionTolerance=self.threshold,
                                             path=self.filePath + '\\')
                    cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
                    print 'Weight paste to : ' + str(skinMesh)
                else:
                    print 'No saved weight XML file exists : ' + skinMesh
        # Delete the dummy parent
        cmds.delete(dummy)
        cmds.select(self.skinMeshes, r=True)

    # Function that saves the weight information
    def weightCopy(self):
        saveData = {}
        # Create the save directory if it does not exist
        if not os.path.exists(self.filePath):
            os.makedirs(os.path.dirname(self.filePath + '\\'))  # note: the trailing \\ is required
        else:  # if it exists, delete its contents
            files = os.listdir(self.filePath)
            if files is not None:
                for file in files:
                    os.remove(self.filePath + '\\' + file)
        skinFlag = False
        all_influences = []
        for skinMesh in self.skinMeshes:
            try:
                cmds.bakePartialHistory(skinMesh, ppt=True)
            except:
                pass
            # Get the skinCluster from the node's history (fixed: the node directly above inMesh is not always a skinCluster)
            srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            if not srcSkinCluster:
                continue  # skip to the next mesh if there is no skinCluster
            tempSkinNode = skinMesh  # keep the node that has the skinCluster so we can get its parent later
            # Collect the various skinCluster parameters
            srcSkinCluster = srcSkinCluster[0]
            skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
            dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
            maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
            maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
            bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
            normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
            influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
            saveData[';skinningMethod'] = skinningMethod
            saveData[';dropoffRate'] = dropoffRate
            saveData[';maintainMaxInfluences'] = maintainMaxInfluences
            saveData[';maxInfluences'] = maxInfluences
            saveData[';bindMethod'] = bindMethod
            saveData[';normalizeWeights'] = normalizeWeights
            all_influences += influences
            # saveData[';influences'] = influences
            skinFlag = True
        all_influences = list(set(all_influences))
        saveData[';influences'] = all_influences
        # Add influences in advance so the copy survives changes in the influence count
        for skinMesh in self.skinMeshes:
            srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            if not srcSkinCluster:
                continue  # skip to the next loop iteration if there is no skinCluster
            srcSkinCluster = srcSkinCluster[0]
            influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
            sub_influences = list(set(all_influences) - set(influences))
            if sub_influences:
                cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
            if self.engine == 'maya':
                # Name of the save file to read; use the mesh name if 'auto'
                if self.weightFile == 'auto':
                    weightFile = skinMesh
                else:
                    weightFile = self.weightFile
                # '|' (pipe) is not allowed in file names, so convert it
                meshName = str(weightFile).replace('|', '__pipe__')
                # ':' (colon) is not allowed in file names, so convert it
                meshName = str(meshName).replace(':', '__colon__')
                cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\')
        with open(self.fileName, 'w') as f:  # open the file ('r' read mode, 'w' write mode)
            json.dump(saveData, f)


def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
    '''
    Skin weight transfer function.
    If the destination object is not bound, it is bound automatically using the
    source's bind information.
    Arguments:
    skinMesh -> source mesh (one mesh; a list is also accepted)
    transferedMesh -> destination meshes (list, multiple allowed; a single node also works)
    transferWeight -> whether to transfer the weights; optional, default is True
    logTransfer -> whether to print a log
    returnInfluences -> whether to return the bound influence information; optional, default is False
    '''
    massege01 = lang.Lang(
        en=': It does not perform the transfer of weight because it is not a skin mesh.',
        ja=u': スキンメッシュではないのでウェイトの転送を行いません'
    ).output()
    massege02 = lang.Lang(
        en='Transfer the weight:',
        ja=u'ウェイトを転送:'
    ).output()
    massege03 = lang.Lang(
        en='Transfer bind influences:',
        ja=u'バインド状態を転送:'
    ).output()
    if isinstance(skinMesh, list):  # if the source is a list, use only the first mesh
        skinMesh = skinMesh[0]  # safeguard for when a list is passed
    # Get the skinCluster from the node's history (fixed: the node directly above inMesh is not always a skinCluster)
    srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
    # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
    if not srcSkinCluster:
        if logTransfer:
            print skinMesh + massege01
        return False  # leave the function if there is no skinCluster
    # Collect the various skinCluster parameters
    srcSkinCluster = srcSkinCluster[0]
    skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
    dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
    maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
    maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
    bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
    normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
    influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # the q flag is query mode, e is edit mode
    # Convert to a list if the input is not a list
    if not isinstance(transferedMesh, list):
        temp = transferedMesh
        transferedMesh = []
        transferedMesh.append(temp)
    for dst in transferedMesh:
        # Prepare a dummy parent to shelter the child nodes
        dummy = common.TemporaryReparent().main(mode='create')
        common.TemporaryReparent().main(dst, dummyParent=dummy, mode='cut')
        shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
        if not shapes:  # if there is no mesh
            continue  # abort and move on to the next object
        # Check whether a skinCluster exists
        dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
        # If there is no skinCluster, bind using the previously collected information
        if not dstSkinCluster:
            # Bind
            dstSkinCluster = cmds.skinCluster(
                dst,
                influences,
                omi=maintainMaxInfluences,
                mi=maxInfluences,
                dr=dropoffRate,
                sm=skinningMethod,
                nw=normalizeWeights,
                tsb=True,
            )
            if logTransfer:
                print massege03 + '[' + skinMesh + '] >>> [' + dst + ']'
        dstSkinCluster = dstSkinCluster[0]
        if transferWeight:
            cmds.copySkinWeights(
                ss=srcSkinCluster,
                ds=dstSkinCluster,
                surfaceAssociation='closestPoint',
                influenceAssociation=['name', 'closestJoint', 'oneToOne'],
                normalize=True,
                noMirror=True
            )
            if logTransfer:
                print massege02 + '[' + skinMesh + '] >>> [' + dst + ']'
        # Restore the parent-child relationship
        common.TemporaryReparent().main(dst, dummyParent=dummy, mode='parent')
        # Delete the dummy parent
        common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
    if returnInfluences:
        return influences
    else:
        return True


def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
    '''
    Weight symmetry function.
    srcNode -> mirror source
    dstNode -> mirror destination
    symWeight -> whether to mirror the weights
    '''
    # Get the skinCluster
    if srcNode is None:
        return
    srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
    if srcShapes:
        srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
        # If a skinCluster exists, set the joint labels and mirror the weights
        if srcSkinCluster:
            # Call the function that transfers the bind state
            skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # get the joints
            for skinJoint in skinJointAll:
                # Call the joint label setup function
                joint_label(skinJoint, visibility=False)
            if symWeight is False or dstNode is None:
                return
            transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
            dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
            dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
            cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
                                 mirrorMode='YZ', surfaceAssociation='closestComponent',
                                 influenceAssociation='label', normalize=True)


def load_joint_label_rules():
    # Default values used when loading fails
    start_l_list = ['L_', 'l_', 'Left_', 'left_']
    start_r_list = ['R_', 'r_', 'Right_', 'right_']
    mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
    mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
    end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
    end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
    def_left_list_list = [start_l_list, mid_l_list, end_l_list]
    def_right_list_list = [start_r_list, mid_r_list, end_r_list]
    # Load the rules from the left/right symmetry settings files
    dir_path = os.path.join(
        os.getenv('MAYA_APP_DIR'),
        'Scripting_Files')
    start_file = dir_path+'/joint_rule_start.json'
    middle_file = dir_path+'/joint_rule_middle.json'
    end_file = dir_path+'/joint_rule_end.json'
    save_files = [start_file, middle_file, end_file]
    left_list_list = []
    right_list_list = []
    for i, save_file in enumerate(save_files):
        if os.path.exists(save_file):  # if the save file exists
            try:
                with open(save_file, 'r') as f:
                    save_data = json.load(f)
                    l_list = save_data.keys()
                    r_list = save_data.values()
                    left_list_list.append(l_list)
                    right_list_list.append(r_list)
            except Exception as e:
                print e.message
                left_list_list.append(def_left_list_list[i])
                right_list_list.append(def_right_list_list[i])
        else:
            left_list_list.append(def_left_list_list[i])
            right_list_list.append(def_right_list_list[i])
    return left_list_list, right_list_list


def joint_label(object, visibility=False):
    '''
    Joint label setup function.
    object -> object(s) to label; a list is also accepted
    visibility -> label visibility, optional. Default is False.
    '''
    # Load the labeling rules first
    left_list_list, right_list_list = load_joint_label_rules()
    # Convert to a list if the input is not a list
    if not isinstance(object, list):
        temp = object
        object = []
        object.append(temp)
    for skinJoint in object:
        objTypeName = cmds.objectType(skinJoint)
        if objTypeName == 'joint':
            split_name = skinJoint.split('|')[-1]
            # Determine whether the skeleton name contains an L/R marker
            side = 0
            side_name = ''
            for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
                for j, lr_list in enumerate([l_list, r_list]):
                    for k, lr in enumerate(lr_list):
                        if i == 0:
                            if re.match(lr, split_name):
                                side = j + 1
                        if i == 1:
                            if re.search(lr, split_name):
                                side = j + 1
                        if i == 2:
                            if re.match(lr[::-1], split_name[::-1]):
                                side = j + 1
                        if side:  # break out of all loops once a match is found
                            side_name = lr
                            break
                    if side:
                        break
                if side:
                    break
            # print 'joint setting :', split_name, side, side_name
            # Set the left/right label; center when it is neither
            cmds.setAttr(skinJoint + '.side', side)
            # Set the label type to 'Other'
            cmds.setAttr(skinJoint + '.type', 18)
            new_joint_name = split_name.replace(side_name.replace('.', ''), '')
            # Set the skeleton name
            cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
            # Set visibility
            cmds.setAttr(skinJoint + '.drawLabel', visibility)
        else:
            print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')


# Toggle skin weight mute
def toggle_mute_skinning():
    msg01 = lang.Lang(
        en='No mesh selected.\nWould you like to process all meshes in this scene?',
        ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
    msg02 = lang.Lang(en='Yes', ja=u'はい').output()
    msg03 = lang.Lang(en='No', ja=u'いいえ').output()
    msg04 = lang.Lang(
        en='Skinning is disabled',
        ja=u'スキニングは無効になりました').output()
    msg05 = lang.Lang(
        en='Skinning is enabled',
        ja=u'スキニングが有効になりました').output()
    cmds.selectMode(o=True)
    objects = cmds.ls(sl=True, l=True)
    ad_node = []
    for node in objects:
        children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type='transform')
        ad_node += [node] + children
    # print len(ad_node)
    objects = set(ad_node)
    # print len(objects)
    if not objects:
        all_mesh = cmds.confirmDialog(m=msg01, t='', b=[msg02, msg03], db=msg02,
                                      cb=msg03, icn='question', ds=msg03)
        if all_mesh == msg02:
            objects = cmds.ls(type='transform')
    if not objects:
        return
    mute_flag = 1
    skin_list = []
    for node in objects:
        skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
        if not skin:
            continue
        skin_list.append(skin)
        if cmds.getAttr(skin[0]+'.envelope') > 0:
            mute_flag = 0
    for skin in skin_list:
        cmds.setAttr(skin[0]+'.envelope', mute_flag)
    if mute_flag == 0:
        cmds.confirmDialog(m=msg04)
    if mute_flag == 1:
        cmds.confirmDialog(m=msg05)
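# --- Illustrative usage sketch (hypothetical node names; assumes this
# module is importable from the Maya script editor as shown) ---
#
#   from siweighteditor import weight
#   # Save the weights of the selected meshes under the default folder
#   weight.WeightCopyPaste().main(cmds.ls(sl=True, l=True), mode='copy')
#   # Transfer weights, auto-binding the destination if it is unbound
#   weight.transfer_weight('srcMeshA', ['dstMeshB'])
#   # Label every joint so copySkinWeights can match left/right by label
#   weight.joint_label(cmds.ls(type='joint'), visibility=False)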
43.129555
120
0.555102
12,948
0.510125
0
0
0
0
0
0
8,924
0.351588
53d3daf836c3d211bfbd295aeb46edb04453a89a
1,350
py
Python
pyConTextNLP/__init__.py
Blulab-Utah/pyConTextPipeline
d4060f89d54f4db56914832033f8ce589ee3c181
[ "Apache-2.0" ]
1
2021-04-30T11:18:32.000Z
2021-04-30T11:18:32.000Z
pyConTextNLP/__init__.py
Blulab-Utah/pyConTextPipeline
d4060f89d54f4db56914832033f8ce589ee3c181
[ "Apache-2.0" ]
null
null
null
pyConTextNLP/__init__.py
Blulab-Utah/pyConTextPipeline
d4060f89d54f4db56914832033f8ce589ee3c181
[ "Apache-2.0" ]
1
2020-06-28T01:51:56.000Z
2020-06-28T01:51:56.000Z
#Copyright 2010 Brian E. Chapman
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes
of the graphs are the targets and modifiers identified in the text; edges of
the graphs are relationships between the targets. This provides for much
simpler code than what exists in the other version of pyConText, where each
object has a dictionary of __modifies and __modifiedby that must be kept in
sync with each other.

Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items."""
import os

version = {}
with open(os.path.join(os.path.dirname(__file__), "version.py")) as f0:
    exec(f0.read(), version)

__version__ = version['__version__']
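# An illustrative sketch of the sibling version.py that the exec() above
# expects; the version string is a placeholder, not the released value:
#
#   __version__ = "x.y.z"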
43.548387
79
0.786667
0
0
0
0
0
0
0
0
1,199
0.888148
53d42695123c2326facf4f279256b1c384089fd3
78,742
py
Python
pypeit/metadata.py
rcooke-ast/PYPIT
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
[ "BSD-3-Clause" ]
null
null
null
pypeit/metadata.py
rcooke-ast/PYPIT
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
[ "BSD-3-Clause" ]
null
null
null
pypeit/metadata.py
rcooke-ast/PYPIT
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
[ "BSD-3-Clause" ]
null
null
null
""" Provides a class that handles the fits metadata required by PypeIt. .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst """ import os import io import string from copy import deepcopy import datetime from IPython import embed import numpy as np import yaml from astropy import table, coordinates, time, units from pypeit import msgs from pypeit import utils from pypeit.core import framematch from pypeit.core import flux_calib from pypeit.core import parse from pypeit.core import meta from pypeit.io import dict_to_lines from pypeit.par import PypeItPar from pypeit.par.util import make_pypeit_file from pypeit.bitmask import BitMask # TODO: Turn this into a DataContainer # Initially tried to subclass this from astropy.table.Table, but that # proved too difficult. class PypeItMetaData: """ Provides a table and interface to the relevant fits file metadata used during the reduction. The content of the fits table is dictated by the header keywords specified for the provided spectrograph. It is expected that this table can be used to set the frame type of each file. The metadata is validated using checks specified by the provided spectrograph class. For the data table, one should typically provide either the file list from which to grab the data from the fits headers or the data directly. If neither are provided the table is instantiated without any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data save to each file. The class is used to provide the header keyword data to include in the table and specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior. files (:obj:`str`, :obj:`list`, optional): The list of files to include in the table. data (table-like, optional): The data to include in the table. The type can be anything allowed by the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user provided set of data used to supplement or overwrite metadata read from the file headers. The table must have a `filename` column that is used to match to the metadata table generated within PypeIt. **Note**: This is ignored if `data` is also provided. This functionality is only used when building the metadata from the fits files. strict (:obj:`bool`, optional): Function will fault if there is a problem with the reading the header for any of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead report a warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data save to each file. The class is used to provide the header keyword data to include in the table and specify any validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior. If not provided, the default parameters specific to the provided spectrograph are used. configs (:obj:`dict`): A dictionary of the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the frame type of each fits file. calib_bitmask (:class:`BitMask`): The bitmask used to keep track of the calibration group bits. table (:class:`astropy.table.Table`): The table with the relevant metadata for each fits file to use in the data reduction. 
""" def __init__(self, spectrograph, par, files=None, data=None, usrdata=None, strict=True): if data is None and files is None: # Warn that table will be empty msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.' ' The table will be empty!') # Initialize internals self.spectrograph = spectrograph self.par = par if not isinstance(self.par, PypeItPar): raise TypeError('Input parameter set must be of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build table self.table = table.Table(data if files is None else self._build(files, strict=strict, usrdata=usrdata)) # Merge with user data, if present if usrdata is not None: self.merge(usrdata) # Impose types on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str]) # Initialize internal attributes self.configs = None self.calib_bitmask = None # Initialize columns that the user might add self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): """ Impose a set of types on certain columns. .. note:: :attr:`table` is edited in place. Args: columns (:obj:`list`): List of column names types (:obj:`list`): List of types """ for c,t in zip(columns, types): if c in self.keys(): self.table[c] = self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None): """ Generate the fitstbl that will be at the heart of PypeItMetaData. Args: files (:obj:`str`, :obj:`list`): One or more files to use to build the table. strict (:obj:`bool`, optional): Function will fault if :func:`fits.getheader` fails to read any of the headers. Set to False to report a warning and continue. usrdata (astropy.table.Table, optional): Parsed for frametype for a few instruments (e.g. VLT) where meta data may not be required Returns: dict: Dictionary with the data to assign to :attr:`table`. """ # Allow for single files _files = files if hasattr(files, '__len__') else [files] # Build lists to fill data = {k:[] for k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files) # Build the table for idx, ifile in enumerate(_files): # User data (for frame type) if usrdata is None: usr_row = None else: # TODO: This check should be done elsewhere # Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list does not match user-provided metadata table. See ' 'usrdata argument of instantiation of PypeItMetaData.') usr_row = usrdata[idx] # Add the directory and file name to the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] = '.' # Read the fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#' in value: value = value.replace('#', '') msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the below to not crash if some files have None in # their MJD. This is the desired behavior since if there are # empty or corrupt files we still want this to run. 
# Validate, print out a warning if there is problem try: time.Time(data['mjd'], format='mjd') except ValueError: mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files = filenames[mjd == None] # Print status message msg = 'Time invalid for {0} files.\n'.format(len(bad_files)) msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n' for file in bad_files: msg += ' {0}\n'.format(file) msgs.warn(msg) # Return return data # TODO: In this implementation, slicing the PypeItMetaData object # will return an astropy.table.Table, not a PypeItMetaData object. def __getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self, item, value): return self.table.__setitem__(item, value) def __len__(self): return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\n', ' spectrograph={0}\n'.format( self.spectrograph.name), ' length={0}\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return [ 'directory', 'filename', 'instrume' ] def keys(self): return self.table.keys() def sort(self, col): return self.table.sort(col) def merge(self, usrdata, match_type=True): """ Use the provided table to supplement or overwrite the metadata. If the internal table already contains the column in `usrdata`, the function will try to match the data type of the `usrdata` column to the existing data type. If it can't it will just add the column anyway, with the type in `usrdata`. You can avoid this step by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A user provided set of data used to supplement or overwrite metadata read from the file headers. The table must have a `filename` column that is used to match to the metadata table generated within PypeIt. match_type (:obj:`bool`, optional): Attempt to match the data type in `usrdata` to the type in the internal table. See above. Raises: TypeError: Raised if `usrdata` is not an `astropy.io.table.Table` KeyError: Raised if `filename` is not a key in the provided table. """ meta_data_model = meta.get_meta_data_model() # Check the input if not isinstance(usrdata, table.Table): raise TypeError('Must provide an astropy.io.table.Table instance.') if 'filename' not in usrdata.keys(): raise KeyError('The user-provided table must have \'filename\' column!') # Make sure the data are correctly ordered srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']] # Convert types if possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False if len(existing_keys) > 0 and match_type: for key in existing_keys: if len(self.table[key].shape) > 1: # NOT ALLOWED!! # TODO: This should be converted to an assert statement... raise ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372 of metadata') elif key in meta_data_model.keys(): # Is this meta data?? 
dtype = meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype # Deal with None's properly nones = usrdata[key] == 'None' usrdata[key][nones] = None # Rest # Allow for str RA, DEC (backwards compatability) if key in ['ra', 'dec'] and not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the user data in the table for key in usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype, setup): """ Finalize the build of the table based on user-provided data, typically pulled from the PypeIt file. This function: - sets the frame types based on the provided object - sets all the configurations to the provided `setup` - assigns all frames to a single calibration group, if the 'calib' column does not exist - if the 'comb_id' column does not exist, this sets the combination groups to be either undefined or to be unique for each science or standard frame, see :func:`set_combination_groups`. .. note:: This should only be run if all files are from a single instrument configuration. :attr:`table` is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't frametype just in the user-provided data? It may be (see get_frame_types) and I'm just not using it... Args: frametype (:obj:`dict`): A dictionary with the types designated by the user. The file name and type are expected to be the key and value of the dictionary, respectively. The number of keys therefore *must* match the number of files in :attr:`table`. For frames that have multiple types, the types should be provided as a string with comma-separated types. setup (:obj:`str`): If the 'setup' columns does not exist, fill the configuration setup columns with this single identifier. """ self.get_frame_types(user=frametype) # TODO: Add in a call to clean_configurations? I didn't add it # here, because this method is only called for a preconstructed # pypeit file, which should nominally follow an execution of # pypeit_setup. If the user edits back in a frame that has an # invalid key, at least for now the DEIMOS image reader will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): """ Return the configuration dictionary for a given frame. This is not the same as the backwards compatible "setup" dictionary. Args: indx (:obj:`int`): The index of the table row to use to construct the configuration. cfg_keys (:obj:`list`, optional): The list of metadata keys to use to construct the configuration. If None, the `configuration_keys` of :attr:`spectrograph` is used. Returns: dict: A dictionary with the metadata values from the selected row. """ _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys return {k:self.table[k][indx] for k in _cfg_keys} def master_key(self, row, det=1): """ Construct the master key for the file in the provided row. The master key is the combination of the configuration, the calibration group, and the detector. 
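        For example, a frame in setup ``A`` and calibration group ``1`` read
        from detector 2 would produce a key of the form ``A_1_DET02``, where
        the trailing piece is whatever
        :func:`~pypeit.spectrographs.spectrograph.Spectrograph.get_det_name`
        returns (the exact detector string is spectrograph-dependent;
        ``DET02`` here is illustrative).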
The configuration ID is the same as included in the configuration column (A, B, C, etc), the calibration group is the same as the calibration bit number, and the detector number is provided as an argument and converted to a zero-filled string with two digits (the maximum number of detectors is 99). Using the calibration bit in the keyword allows MasterFrames to be used with multiple calibration groups. Args: row (:obj:`int`): The 0-indexed row used to construct the key. det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector number(s). If a tuple, it must include detectors designated as a viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with configuration, calibration group(s), and detector. Raises: PypeItError: Raised if the 'setup' or 'calibbit' columns haven't been defined. """ if 'setup' not in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot provide master key string without setup and calibbit; ' 'run set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}" def construct_obstime(self, row): """ Construct the MJD of when the frame was observed. .. todo:: - Consolidate with :func:`convert_time` ? Args: row (:obj:`int`): The 0-indexed row of the frame. Returns: astropy.time.Time: The MJD of the observation. """ return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None): """ Construct the root name primarily for PypeIt file output. Args: row (:obj:`int`): The 0-indexed row of the frame. obstime (:class:`astropy.time.Time`, optional): The MJD of the observation. If None, constructed using :func:`construct_obstime`. Returns: str: The root name for file output. """ _obstime = self.construct_obstime(row) if obstime is None else obstime tiso = time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(" ", ""), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split("T")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False): """ Construct the setup dictionary. .. todo:: - This is for backwards compatibility, but we should consider reformatting it. And it may be something to put in the relevant spectrograph class. Args: row (:obj:`int`): The 0-indexed row used to construct the setup. det (:obj:`int`, optional): The 1-indexed detector to include. If None, all detectors are included. config_only (:obj:`bool`, optional): Just return the dictionary with the configuration, don't include the top-level designation of the configuration itself. Returns: dict: The pypeit setup dictionary with the default format. Raises: PypeItError: Raised if the 'setup' isn't been defined. 
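        Example:
            An illustrative return value for ``config_only=True``; the
            nesting mirrors what the body below constructs, while the values
            themselves are hypothetical::

                {'--': {'disperser': {'dispname': '600/4000', 'dispangle': 'none'},
                        'dichroic': 'none',
                        'slit': {'decker': 'long_1.0', 'slitwid': 'none', 'slitlen': 'none'},
                        'binning': '1,1'}}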
""" if 'setup' not in self.keys(): msgs.error('Cannot provide instrument setup without \'setup\' column; ' 'run set_configurations.') dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row] dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row] dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row] decker = 'none' if 'decker' not in self.keys() else self['decker'][row] slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row] slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row] binning = '1,1' if 'binning' not in self.keys() else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) # Key names *must* match configuration_keys() for spectrographs setup = {skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt orientation binning of a science image } } } #_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det] #for d in _det: # setup[skey][str(d).zfill(2)] \ # = {'binning': binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else setup def get_configuration_names(self, ignore=None, return_index=False, configs=None): """ Get the list of the unique configuration names. This provides just the list of setup identifiers ('A', 'B', etc.) and the row index where it first occurs. This is different from :func:`unique_configurations` because the latter determines and provides the configurations themselves. This is mostly a convenience function for the writing routines. Args: ignore (:obj:`list`, optional): Ignore configurations in the provided list. return_index (:obj:`bool`, optional): Return row indices with the first occurence of these configurations. configs (:obj:`str`, :obj:`list`, optional): One or more strings used to select the configurations to include in the returned objects. If ``'all'``, pass back all configurations. Otherwise, only return the configurations matched to this provided string or list of strings (e.g., ['A','C']). Returns: numpy.array: The list of unique setup names. A second returned object provides the indices of the first occurrence of these setups, if requested. Raises: PypeItError: Raised if the 'setup' isn't been defined. """ if 'setup' not in self.keys(): msgs.error('Cannot get setup names; run set_configurations.') # Unique configurations setups, indx = np.unique(self['setup'], return_index=True) if ignore is not None: # Remove the selected configurations to ignore rm = np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx = indx[rm] # Restrict _configs = None if configs is None else np.atleast_1d(configs) # TODO: Why do we need to specify 'all' here? Can't `configs is # None` mean that you want all the configurations? Or can we # make the default 'all'? if configs is not None and 'all' not in _configs: use = np.isin(setups, _configs) setups = setups[use] indx = indx[use] return setups, indx if return_index else setups def _get_cfgs(self, copy=False, rm_none=False): """ Convenience method to return :attr:`configs` with possible alterations. This method *should not* be called by any method outside of this class; use :func:`unique_configurations` instead. Args: copy (:obj:`bool`, optional): Return a deep copy of :attr:`configs` instead of the object itself. rm_none (:obj:`bool`, optional): Remove any configurations set to 'None'. 
If copy is True, this is done *after* :attr:`configs` is copied to a new dictionary. Returns: :obj:`dict`: A nested dictionary, one dictionary per configuration with the associated metadata for each. """ _cfg = deepcopy(self.configs) if copy else self.configs if rm_none and 'None' in _cfg.keys(): del _cfg['None'] return _cfg def unique_configurations(self, force=False, copy=False, rm_none=False): """ Return the unique instrument configurations. If run before the ``'setup'`` column is initialized, this function determines the unique instrument configurations by finding unique combinations of the items in the metadata table listed by the spectrograph ``configuration_keys`` method. If run after the ``'setup'`` column has been set, this simply constructs the configuration dictionary using the unique configurations in that column. This is used to set the internal :attr:`configs`. If this attribute is not None, this function simply returns :attr:`config` (cf. ``force``). .. warning:: Any frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be ignored in the construction of the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and the frame types have not yet been defined (see :func:`get_frame_types`), this method will fault! Args: force (:obj:`bool`, optional): Force the configurations to be redetermined. Otherwise the configurations are only determined if :attr:`configs` has not yet been defined. copy (:obj:`bool`, optional): Return a deep copy of :attr:`configs` instead of the object itself. rm_none (:obj:`bool`, optional): Remove any configurations set to 'None'. If copy is True, this is done *after* :attr:`configs` is copied to a new dictionary. Returns: :obj:`dict`: A nested dictionary, one dictionary per configuration with the associated metadata for each. Raises: PypeItError: Raised if there are list of frame types to ignore but the frame types have not been defined yet. """ if self.configs is not None and not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup column already set. 
Finding unique configurations.') uniq, indx = np.unique(self['setup'], return_index=True) ignore = uniq == 'None' if np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames with configuration set to None.'.format( np.sum(ignore))) self.configs = {} for i in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine unique configurations.') # If the frame types have been set, ignore anything listed in # the ignore_frames indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is not None: if 'frametype' not in self.keys(): msgs.error('To ignore frames, types must have been defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx) == 0: msgs.error('No frames to use to define configurations!') # Get the list of keys to use cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers are iterations through the # upper-case letters: A, B, C, etc. double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx = 0 # TODO: Placeholder: Allow an empty set of configuration keys # meaning that the instrument setup has only one configuration. if len(cfg_keys) == 0: self.configs = {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed to be from a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first file to set the first unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 # Check if any of the other files show a different # configuration. for i in indx[1:]: j = 0 for c in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break j += 1 unique = j == len(self.configs) if unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None): """ Assign each frame to a configuration (setup) and include it in the metadata table. The internal table is edited *in place*. If the 'setup' column already exists, the configurations are **not** reset unless you call the function with ``force=True``. Args: configs (:obj:`dict`, optional): A nested dictionary, one dictionary per configuration with the associated values of the metadata associated with each configuration. The metadata keywords in the dictionary should be the same as in the table, and the keywords used to set the configuration should be the same as returned by the spectrograph `configuration_keys` method. The latter is not checked. If None, this is set by :func:`unique_configurations`. force (:obj:`bool`, optional): Force the configurations to be reset. 
fill (:obj:`str`, optional): If the 'setup' column does not exist, fill the configuration setup columns with this single identifier. Ignores other inputs. Raises: PypeItError: Raised if none of the keywords in the provided configuration match with the metadata keywords. Also raised when some frames cannot be assigned to a configuration, the spectrograph defines frames that should be ignored in the determination of the unique configurations, but the frame types have not been set yet. """ # Configurations have already been set if 'setup' in self.keys() and not force: return if 'setup' not in self.keys() and fill is not None: self['setup'] = fill return _configs = self.unique_configurations() if configs is None else configs for k, cfg in _configs.items(): if len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration {0} defined using unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows = len(self) for i in range(nrows): for d, cfg in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d # Check if any of the configurations are not set not_setup = self.table['setup'] == 'None' if not np.any(not_setup): # All are set, so we're done return # Some frame types may have been ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is None: # Nope, we're still done return # At this point, we need the frame type to continue if 'frametype' not in self.keys(): msgs.error('To account for ignored frames, types must have been defined; run ' 'get_frame_types.') # For each configuration, determine if any of the frames with # the ignored frame types should be assigned to it: for cfg_key in _configs.keys(): in_cfg = self.table['setup'] == cfg_key for ftype, metakey in ignore_frames.items(): # TODO: For now, use this assert to check that the # metakey is either not set or a string assert metakey is None or isinstance(metakey, str), \ 'CODING ERROR: metadata keywords set by config_independent_frames are not ' \ 'correctly defined for {0}; values must be None or a string.'.format( self.spectrograph.__class__.__name__) # Get the list of frames of this type without a # configuration indx = (self.table['setup'] == 'None') & self.find_frames(ftype) if not np.any(indx): continue if metakey is None: # No matching meta data defined, so just set all # the frames to this (first) configuration self.table['setup'][indx] = cfg_key continue # Find the unique values of meta for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the user that the matching meta values are not # unique for this configuration. if uniq_meta.size != 1: msgs.warn('When setting the instrument configuration for {0} '.format(ftype) + 'frames, configuration {0} does not have unique '.format(cfg_key) + '{0} values.'.format(metakey)) # Find the frames of this type that match any of the # meta data values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self): """ Ensure that configuration-defining keywords all have values that will yield good PypeIt reductions. Any frames that do not are removed from :attr:`table`, meaning this method may modify that attribute directly. The valid values for configuration keys are set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. 
""" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is None: # No values specified, so we're done return good = np.ones(len(self), dtype=bool) for key in cfg_limits.keys(): # NOTE: For now, check that the configuration values were # correctly assigned in the spectrograph class definition. # This should probably go somewhere else or just removed. assert isinstance(cfg_limits[key], list), \ 'CODING ERROR: valid_configuration_values is not correctly defined ' \ 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata are valid for this column. indx = np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key)) good &= indx if np.all(good): # All values good, so we're done return # Alert the user that some of the frames are going to be # removed msg = 'The following frames have configurations that cannot be reduced by PypeIt' \ ' and will be removed from the metadata table (pypeit file):\n' indx = np.where(np.logical_not(good))[0] for i in indx: msg += ' {0}\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em self.table = self.table[good] def _set_calib_group_bits(self): """ Set the calibration group bit based on the string values of the 'calib' column. """ # Find the number groups by searching for the maximum number # provided, regardless of whether or not a science frame is # assigned to that group. ngroups = 0 for i in range(len(self)): if self['calib'][i] in ['all', 'None']: # No information, keep going continue # Convert to a list of numbers l = np.amax([ 0 if len(n) == 0 else int(n) for n in self['calib'][i].replace(':',',').split(',')]) # Check against current maximum ngroups = max(l+1, ngroups) # Define the bitmask and initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set the calibration bits for i in range(len(self)): # Convert the string to the group list grp = parse.str2list(self['calib'][i], ngroups) if grp is None: # No group selected continue # Assign the group; ensure the integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): """ Check that the calibration groups are valid. This currently only checks that the science frames are associated with one calibration group. TODO: Is this appropriate for NIR data? """ is_science = self.find_frames('science') for i in range(len(self)): if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can only be assigned to a single calibration group.') @property def n_calib_groups(self): """Return the number of calibration groups.""" return None if self.calib_bitmask is None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False): """ Group calibration frames into sets. Requires the 'setup' column to have been defined. For now this is a simple grouping of frames with the same configuration. .. todo:: - Maintain a detailed description of the logic. The 'calib' column has a string type to make sure that it matches with what can be read from the pypeit file. The 'calibbit' column is actually what is used to determine the calibration group of each frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional): A list of strings with the frame types to use in all calibration groups (e.g., ['bias', 'dark']). 
default (:obj:`bool`, optional): If the 'calib' column is not present, set a single calibration group *for all rows*. force (:obj:`bool`, optional): Force the calibration groups to be reconstructed if the 'calib' column already exists. Raises: PypeItError: Raised if 'setup' column is not defined, or if `global_frames` is provided but the frame types have not been defined yet. """ # Set the default if requested and 'calib' doesn't exist yet if 'calib' not in self.keys() and default: self['calib'] = '0' # Make sure the calibbit column does not exist if 'calibbit' in self.keys(): del self['calibbit'] # Groups have already been set if 'calib' in self.keys() and 'calibbit' in self.keys() and not force: return # Groups have been set but the bits have not (likely because the # data was read from a pypeit file) if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups() return # TODO: The rest of this just nominally sets the calibration # group based on the configuration. This will change! # The configuration must be present to determine the calibration # group if 'setup' not in self.keys(): msgs.error('Must have defined \'setup\' column first; try running set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None') # Ignore frames with undefined configurations n_cfg = len(configs) # TODO: Science frames can only have one calibration group # Assign everything from the same configuration to the same # calibration group; this needs to have dtype=object, otherwise # any changes to the strings will be truncated at 4 characters. self.table['calib'] = np.full(len(self), 'None', dtype=object) for i in range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i) # Allow some frame types to be used in all calibration groups # (like biases and darks) if global_frames is not None: if 'frametype' not in self.keys(): msgs.error('To set global frames, types must have been defined; ' 'run get_frame_types.') calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx = np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i] = calibs # Set the bits based on the string representation of the groups self._set_calib_group_bits() # Check that the groups are valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False): """ Find the rows with the associated frame type. If the index is provided, the frames must also be matched to the relevant science frame. Args: ftype (str): The frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None', this returns all frames without a known type. calib_ID (:obj:`int`, optional): Index of the calibration group that it must match. If None, any row of the specified frame type is included. index (:obj:`bool`, optional): Return an array of 0-indexed indices instead of a boolean array. Returns: numpy.ndarray: A boolean array, or an integer array if index=True, with the rows that contain the frames of the requested type. Raises: PypeItError: Raised if the `framebit` column is not set in the table. """ if 'framebit' not in self.keys(): msgs.error('Frame types are not set. 
First run get_frame_types.') if ftype == 'None': return self['framebit'] == 0 # Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not None: # Select frames in the same calibration group indx &= self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if index else indx def find_frame_files(self, ftype, calib_ID=None): """ Return the list of files with a given frame type. The frames must also match the science frame index, if it is provided. Args: ftype (str): The frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of the calibration group that it must match. If None, any row of the specified frame type is included. Returns: list: List of file paths that match the frame type and science frame ID, if the latter is provided. """ return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): """ Return the full paths to one or more frames. Args: indx (:obj:`int`, array-like): One or more 0-indexed rows in the table with the frames to return. Can be an array of indices or a boolean array of the correct length. Returns: list: List of the full paths of one or more frames. """ if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): """ Set and return a Table with the frame types and bits. Args: type_bits (numpy.ndarray): Integer bitmask with the frame types. The length must match the existing number of table rows. merge (:obj:`bool`, optional): Merge the types and bits into the existing table. This will *overwrite* any existing columns. Returns: `astropy.table.Table`: Table with two columns, the frame type name and bits. """ # Making Columns to pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME # # TODO: It would be good to get around this. Is it related to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can force type_names() in bitmask to always return the # correct type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] = t['frametype'] self['framebit'] = t['framebit'] return t def edit_frame_type(self, indx, frame_type, append=False): """ Edit the frame type by hand. Args: indx (:obj:`int`): The 0-indexed row in the table to edit frame_type (:obj:`str`, :obj:`list`): One or more frame types to append/overwrite. append (:obj:`bool`, optional): Append the frame type. If False, all existing frame types are overwritten by the provided type. """ if not append: self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True): """ Generate a table of frame types from the input metadata object. .. todo:: - Here's where we could add a SPIT option. 
Args: flag_unknown (:obj:`bool`, optional): Instead of crashing out if there are unidentified files, leave without a type and continue. user (:obj:`dict`, optional): A dictionary with the types designated by the user. The file name and type are expected to be the key and value of the dictionary, respectively. The number of keys therefore *must* match the number of files in :attr:`table`. For frames that have multiple types, the types should be provided as a string with comma-separated types. merge (:obj:`bool`, optional): Merge the frame typing into the existing table. Returns: :obj:`astropy.table.Table`: A Table with two columns, the type names and the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types. """ # Checks if 'frametype' in self.keys() or 'framebit' in self.keys(): msgs.warn('Removing existing frametype and framebit columns.') if 'frametype' in self.keys(): del self.table['frametype'] if 'framebit' in self.keys(): del self.table['framebit'] # # TODO: This needs to be moved into each Spectrograph # if useIDname and 'idname' not in self.keys(): # raise ValueError('idname is not set in table; cannot use it for file typing.') # Start msgs.info("Typing files") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame types from the input dictionary if user is not None: if len(user.keys()) != len(self): raise ValueError('The user-provided dictionary does not match table length.') msgs.info('Using user-provided frame types.') for ifile,ftypes in user.items(): indx = self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop over the frame types for i, ftype in enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames with the correct ID name or start by # # flagging all as true # indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \ # else np.ones(len(self), dtype=bool) # Include a combination of instrument-specific checks using # combinations of the full set of metadata exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or | ? Using idname above gets overwritten by # this if the frames fail to meet the other checks in this call. # indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest standard star to each science frame # TODO: Should this be 'standard' or 'science' or both? if 'ra' not in self.keys() or 'dec' not in self.keys(): msgs.warn('Cannot associate standard with science frames without sky coordinates.') else: # TODO: Do we want to do this here? 
indx = self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra == 'None' or dec == 'None': msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f) msgs.warn('The above file could be a twilight flat frame that was' + msgs.newline() + 'missed by the automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue # If an object exists within 20 arcmins of a listed standard, # then it is probably a standard star foundstd = flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard') # Find the files without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info("Couldn't identify the following files:") for f in self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error("Check these files before continuing") # Finish up (note that this is called above if user is not None!) msgs.info("Typing completed!") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): """ Generate the list of columns to be included in the fitstbl (nearly the complete list). Args: write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib, comb_id and bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction Returns: `numpy.ndarray`_: Array of columns to be used in the fits table. """ # Columns for output columns = self.spectrograph.pypeit_file_keys() extras = [] # comb, bkg columns if write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id'] # manual if write_manual: extras += ['manual'] for key in extras: if key not in columns: columns += [key] # Take only those present output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): """ Set combination groups. .. note:: :attr:`table` is edited in place. This function can be used to initialize the combination group and background group columns, and/or to initialize the combination group of each object (science or standard frame) to a unique integer. If the 'comb_id' or 'bkg_id' columns do not exist, they're set to -1. Args: assign_objects (:obj:`bool`, optional): If all of the 'comb_id' values are less than 0 (meaning they're unassigned), the combination groups are set to be unique for each standard and science frame. """ if 'comb_id' not in self.keys(): self['comb_id'] = -1 if 'bkg_id' not in self.keys(): self['bkg_id'] = -1 if assign_objects and np.all(self['comb_id'] < 0): # find_frames will throw an exception if framebit is not # set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self): """ Set columns that the user *might* add. .. note:: :attr:`table` is edited in place. This function can be used to initialize columns that the user might add. """ if 'manual' not in self.keys(): self['manual'] = '' def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): """ Write the sorted file. The sorted file lists all the unique instrument configurations (setups) and the frames associated with each configuration. The output data table is identical to the pypeit file output.
.. todo:: - This is for backwards compatibility, but we should consider reformatting/removing it. Args: ofile (:obj:`str`): Name for the output sorted file. overwrite (:obj:`bool`, optional): Overwrite any existing file with the same name. ignore (:obj:`list`, optional): Ignore configurations in the provided list. write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib, comb_id and bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction Raises: PypeItError: Raised if the 'setup' column hasn't been defined. """ if 'setup' not in self.keys(): msgs.error('Cannot write sorted instrument configuration table without \'setup\' ' 'column; run set_configurations.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile)) # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not None) if ignore is not None: for key in list(cfgs.keys()): if key in ignore: del cfgs[key] # Construct file ff = open(ofile, 'w') for setup in cfgs.keys(): # Get the subtable of frames taken in this configuration indx = self['setup'] == setup if not np.any(indx): continue subtbl = self.table[output_cols][indx] # Write the file ff.write('##########################################################\n') ff.write('Setup {:s}\n'.format(setup)) ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n') ff.write('#---------------------------------------------------------\n') mjd = subtbl['mjd'].copy() # Deal with possibly None mjds if there were corrupt header cards mjd[mjd == None] = -99999.0 isort = np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\n') ff.close() # TODO: Do we need a calib file? def write_calib(self, ofile, overwrite=True, ignore=None): """ Write the calib file. The calib file provides the unique instrument configurations (setups) and the association of each frame from that configuration with a given calibration group. .. todo:: - This is for backwards compatibility, but we should consider reformatting/removing it. - This is complicated by allowing some frame types to have no association with an instrument configuration - This is primarily used for QA now; but could probably use the pypeit file instead Args: ofile (:obj:`str`): Name for the output calib file. overwrite (:obj:`bool`, optional): Overwrite any existing file with the same name. ignore (:obj:`list`, optional): Ignore calibration groups in the provided list. Raises: PypeItError: Raised if the 'setup' or 'calibbit' columns haven't been defined. """ if 'setup' not in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' ' 'columns; run set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile)) # Construct the setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True) # TODO: We should edit the relevant follow-on code so that we # don't have to do these gymnastics. Or better yet, just stop # producing/using the *.calib file. 
_cfg = {} for setup in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg # Iterate through the calibration bit names as these are the root of the # MasterFrames and QA for icbit in np.unique(self['calibbit'].data): cbit = int(icbit) # for yaml # Skip this group if ignore is not None and cbit in ignore: continue # Find the frames in this group #in_group = self.find_calib_group(i) in_cbit = self['calibbit'] == cbit # Find the unique configurations in this group, ignoring any # undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None') # Make sure that each calibration group contains frames # from only a single configuration if len(setup) != 1: msgs.error('Each calibration group must be from one and only one instrument ' 'configuration with a valid letter identifier; i.e., the ' 'configuration cannot be None.') # Find the frames of each type in this group cfg[setup[0]][cbit] = {} for key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): """ Write a pypeit file in data-table format. The pypeit file is the main configuration file for PypeIt, configuring the control-flow and algorithmic parameters and listing the data files to read. This function writes the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to each instrument. Args: output_path (:obj:`str`, optional): Root path for the output pypeit files. If None, set to current directory. If the output directory does not exist, it is created. cfg_lines (:obj:`list`, optional): The list of configuration lines to include in the file. If None are provided, the vanilla configuration is included. write_bkg_pairs (:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two columns called `comb_id` and `bkg_id` that identify object and background frame pairs. write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction configs (:obj:`str`, :obj:`list`, optional): One or more strings used to select the configurations to include in the returned objects. If ``'all'``, pass back all configurations. Otherwise, only return the configurations matched to this provided string or list of strings (e.g., ['A','C']). See :attr:`configs`. Raises: PypeItError: Raised if the 'setup' column hasn't been defined. Returns: :obj:`list`: List of ``PypeIt`` files generated. """ # Set output path if output_path is None: output_path = os.getcwd() # Find unique configurations, always ignoring any 'None' # configurations... 
cfg = self.unique_configurations(copy=True, rm_none=True) # Get the setups to write if configs is None or configs == 'all' or configs == ['all']: cfg_keys = list(cfg.keys()) else: _configs = configs if isinstance(configs, list) else [configs] cfg_keys = [key for key in cfg.keys() if key in _configs] if len(cfg_keys) == 0: msgs.error('No setups to write!') # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit files ofiles = [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): # Create the output directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir) # Create the output file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the paths in_cfg = self['setup'] == setup if not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get the data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\n')[:-1] # Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return ofiles def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None): """ Write the metadata either to a file or to the screen. The method allows you to set the columns to print and which column to use for sorting. Args: output (:obj:`str`, optional): Output signature or file name. If None, the table contents are printed to the screen. If ``'table'``, the table that would have been printed/written to disk is returned. Otherwise, the string is interpreted as the name of an ascii file to which to write the table contents. rows (`numpy.ndarray`_, optional): A boolean vector selecting the rows of the table to write. If None, all rows are written. Shape must match the number of rows in the table. columns (:obj:`str`, :obj:`list`, optional): A list of columns to include in the output file. Can be provided as a list directly or as a comma-separated string. If None or ``'all'``, all columns are written; if ``'pypeit'``, the columns are the same as those included in the pypeit file. Each selected column must be a valid pypeit metadata keyword, specific to :attr:`spectrograph`. Additional valid keywords, depending on the processing level of the metadata table, are directory, filename, frametype, framebit, setup, calib, and calibbit. sort_col (:obj:`str`, optional): Name of the column to use for sorting the output. If None, the table is printed in its current state. overwrite (:obj:`bool`, optional): Overwrite any existing file; otherwise raise an exception. header (:obj:`str`, :obj:`list`, optional): One or more strings to write to the top of the file, one string per file line; ``# `` is added to the beginning of each string. Ignored if ``output`` does not specify an output file. Returns: `astropy.table.Table`: The table object that would have been written/printed if ``output == 'table'``. Otherwise, the method always returns None. Raises: ValueError: Raised if the columns to include are not valid, or if the column to use for sorting is not valid. FileExistsError: Raised if overwrite is False and the file exists. 
""" # Check the file can be written (this is here because the spectrograph # needs to be defined first) ofile = None if output in [None, 'table'] else output if ofile is not None and os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile} already exists; set flag to overwrite.') # Check the rows input if rows is not None and len(rows) != len(self.table): raise ValueError('Boolean vector selecting output rows has incorrect length.') # Get the columns to return if columns in [None, 'all']: tbl_cols = list(self.keys()) elif columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols = columns if isinstance(columns, list) else columns.split(',') badcol = [col not in all_cols for col in tbl_cols] if np.any(badcol): raise ValueError('The following columns are not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure the basic parameters are the first few columns; do them in # reverse order so I can always insert at the beginning of the list for col in ['framebit', 'frametype', 'filename', 'directory']: if col not in tbl_cols: continue indx = np.where([t == col for t in tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the dithers and combination and background IDs are the last # few columns ncol = len(tbl_cols) for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if col not in tbl_cols: continue indx = np.where([t == col for t in tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal table so that it is unaltered output_tbl = self.table.copy() # Select the output rows if a vector was provided if rows is not None: output_tbl = output_tbl[rows] # Select and sort the data by a given column if sort_col is not None: if sort_col not in self.keys(): raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.') # Ignore any NoneTypes indx = output_tbl[sort_col] != None is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if output == 'table': # Instead of writing, just return the modified table return output_tbl # Always write the table in ascii format with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\n')[:-1] if ofile is None: # Output file not defined so just print it print('\n'.join(data_lines)) return None # Write the output to an ascii file with open(ofile, 'w') as f: if header is not None: _header = header if isinstance(header, list) else [header] for h in _header: f.write(f'# {h}\n') f.write('\n') f.write('\n'.join(data_lines)) f.write('\n') # Just to be explicit that the method returns None when writing to a # file... return None def find_calib_group(self, grp): """ Find all the frames associated with the provided calibration group. Args: grp (:obj:`int`): The calibration group integer. Returns: numpy.ndarray: Boolean array selecting those frames in the table included in the selected calibration group. Raises: PypeItError: Raised if the 'calibbit' column is not defined. """ if 'calibbit' not in self.keys(): msgs.error('Calibration groups are not set. First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): """ Find the calibration groups associated with a specific frame. 
""" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a reason why this is not an attribute of # PypeItMetaData? def row_match_config(row, config, spectrograph): """ Queries whether a row from the fitstbl matches the input configuration Args: row (astropy.table.Row): From fitstbl config (dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol value for float meta (e.g. dispangle) Returns: bool: True if the row matches the input configuration """ # Loop on keys in config match = [] for k in config.keys(): # Deal with floating configs (e.g. grating angle) if isinstance(config[k], float): if row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: # The np.all allows for arrays in the Table (e.g. binning) match.append(np.all(config[k] == row[k])) # Check return np.all(match)
42.817836
122
0.575411
76,774
0.975007
0
0
266
0.003378
0
0
47,429
0.602334
53d54a4c34c0a67e36d2d017230ceb288acd1564
2,341
py
Python
aql/aql/main/aql_builtin_tools.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
aql/aql/main/aql_builtin_tools.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
aql/aql/main/aql_builtin_tools.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
 import os.path import shutil import errno from aql.nodes import Builder, FileBuilder from .aql_tools import Tool __all__ = ( "ExecuteCommand", "InstallBuilder", "BuiltinTool", ) """ Unique Value - name + type value node node = ExecuteCommand('gcc --help -v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help -v' ) node = ExecuteMethod( target = my_function ) dir_node = CopyFiles( prog_node, target = dir_name ) dir_node = CopyFilesAs( prog_node, target = dir_name ) dir_node = MoveFiles( prog_node, ) dir_node = MoveFilesAs( prog_node ) dir_node = RemoveFiles( prog_node ) node = FindFiles( dir_node ) dir_node = FileDir( prog_node ) """ def _makeTargetDirs( path_dir ): try: os.makedirs( path_dir ) except OSError as e: if e.errno != errno.EEXIST: raise #//===========================================================================// class ExecuteCommand (Builder): def build( self, node ): cmd = node.getSources() out = self.execCmd( cmd ) node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs( self, node, brief ): cmd = node.getSourceValues() return (cmd,) #//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self, options, target ): self.target = os.path.abspath( target ) #//-------------------------------------------------------// def build( self, node ): sources = node.getSources() target = self.target _makeTargetDirs( target ) for source in sources: if os.path.isfile( source ): shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node, brief ): return self.target #//===========================================================================// class BuiltinTool( Tool ): def ExecuteCommand( self, options ): return ExecuteCommand( options ) def Install(self, options, target ): return InstallBuilder( options, target ) def DirName(self, options): raise NotImplementedError() def BaseName(self, options): raise NotImplementedError() 
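 A side note on the _makeTargetDirs helper above: the try/except on errno.EEXIST is the pre-Python-3.2 idiom for "create the directory tree, and ignore it if it already exists". On Python 3.2+ the same behavior is available directly; a minimal equivalent sketch (not part of the original tool): import os def make_target_dirs(path_dir): # Equivalent to the errno.EEXIST dance above on Python 3.2+ os.makedirs(path_dir, exist_ok=True) 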
22.509615
80
0.529688
1,257
0.53695
0
0
0
0
0
0
943
0.402819
53d57360a984bc0c7e7afecf352b5a5635dc9a06
3,303
py
Python
cms/test_utils/project/placeholderapp/models.py
stefanw/django-cms
048ec9e7a529549d51f4805fdfbcd50ea1e624b0
[ "BSD-3-Clause" ]
null
null
null
cms/test_utils/project/placeholderapp/models.py
stefanw/django-cms
048ec9e7a529549d51f4805fdfbcd50ea1e624b0
[ "BSD-3-Clause" ]
null
null
null
cms/test_utils/project/placeholderapp/models.py
stefanw/django-cms
048ec9e7a529549d51f4805fdfbcd50ea1e624b0
[ "BSD-3-Clause" ]
null
null
null
from django.core.urlresolvers import reverse from django.db import models from django.utils.encoding import python_2_unicode_compatible from cms.models.fields import PlaceholderField from cms.utils import get_language_from_request from cms.utils.urlutils import admin_reverse from hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url = '' def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def callable_item(self, request): return self.char_1 def __str__(self): return self.char_1 def get_absolute_url(self): return reverse("example_detail", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return '/public/view/' def set_static_url(self, request): language = get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def dynamic_url(self, request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return "%s - %s" % (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def get_absolute_url(self): return reverse("detail_multi", args=(self.pk,))
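 One detail worth illustrating from the models above: DynamicPlaceholderSlotExample passes callables (dynamic_placeholder_1/dynamic_placeholder_2) as the PlaceholderField slot name, which django-cms resolves per instance at runtime. A minimal sketch of that resolution, using a hypothetical stand-in object rather than a saved model instance: # Hypothetical stand-in for a saved DynamicPlaceholderSlotExample instance class _FakeInstance: char_1 = 'sidebar' char_2 = 'footer' # The callable receives the model instance and returns the slot name print(dynamic_placeholder_1(_FakeInstance())) # 'sidebar' print(dynamic_placeholder_2(_FakeInstance())) # 'footer' 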
33.363636
113
0.737511
2,731
0.826824
0
0
2,060
0.623675
0
0
374
0.11323
53d609582a8fdb847888342336e2fc62ce309ea0
159
py
Python
150-Challenges/Challenges 80 - 87/Challenge 84.py
DGrifferty/Python
d725301664db2cbcfd5c4f5974745b4d81c8e28a
[ "Apache-2.0" ]
null
null
null
150-Challenges/Challenges 80 - 87/Challenge 84.py
DGrifferty/Python
d725301664db2cbcfd5c4f5974745b4d81c8e28a
[ "Apache-2.0" ]
null
null
null
150-Challenges/Challenges 80 - 87/Challenge 84.py
DGrifferty/Python
d725301664db2cbcfd5c4f5974745b4d81c8e28a
[ "Apache-2.0" ]
null
null
null
 # 084 # Ask the user to type in their postcode. Display the first two # letters in uppercase. # very simple print(input('Enter your postcode: ')[0:2].upper()) 
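 A quick worked example of the slice-then-uppercase expression, assuming a UK-style postcode as input (illustrative only): # 'sw1a 1aa'[0:2] -> 'sw'; .upper() -> 'SW' print('sw1a 1aa'[0:2].upper()) # prints SW 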
22.714286
62
0.716981
0
0
0
0
0
0
0
0
126
0.792453